aboutsummaryrefslogtreecommitdiff
path: root/src/vendorcode/cavium
diff options
context:
space:
mode:
authorDavid Hendricks <dhendricks@fb.com>2018-03-09 14:30:38 -0800
committerPhilipp Deppenwiese <zaolin.daisuki@gmail.com>2018-07-03 15:53:32 +0000
commit7d48ac5c7dfb52fc470bbad1013b4d460bc6a1e0 (patch)
tree42002ba1e86627339ff4a6cf38efb4b3f00033bb /src/vendorcode/cavium
parentd837e660074e0621d63f59515f933c209441b653 (diff)
soc/cavium: Integrate BDK files into coreboot
* Make it compile. * Fix whitespace errors. * Fix printf formats. * Add missing headers includes * Guard headers with ifdefs Compile DRAM init code in romstage. Compile QLM, PCIe, RNG, PHY, GPIO, MDIO init code in ramstage. Change-Id: I0a93219a14bfb6ebe41103a825d5032b11e7f2c6 Signed-off-by: David Hendricks <dhendricks@fb.com> Reviewed-on: https://review.coreboot.org/25089 Reviewed-by: Philipp Deppenwiese <zaolin.daisuki@gmail.com> Tested-by: build bot (Jenkins) <no-reply@coreboot.org>
Diffstat (limited to 'src/vendorcode/cavium')
-rw-r--r--src/vendorcode/cavium/Kconfig70
-rw-r--r--src/vendorcode/cavium/Makefile.inc92
-rw-r--r--src/vendorcode/cavium/bdk/lame_string.c149
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c9
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c789
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c28
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-gpio.c (renamed from src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-platform.h)66
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-pcie.c (renamed from src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-bgx.h)40
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-qlm.c515
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c2
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-usb.c (renamed from src/vendorcode/cavium/bdk/libbdk-arch/bdk-platform.c)33
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot.c (renamed from src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-info.h)94
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-boot/bdk-watchdog.c108
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c6
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c24
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c33
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c1
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c6
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c1
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c48
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c188
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-mdio.c351
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c36
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-access.c (renamed from src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-version.h)37
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c133
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c1420
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-ecam-io.c373
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-ecam.c216
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c1
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c152
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-nic.c1090
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-pcie-cn8xxx.c1263
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-pcie.c221
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-qlm.c423
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-sata.c1117
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-twsi.c318
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/bdk-usb.c683
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/device/bdk-device.c721
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-marvell.c115
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse-8514.c224
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse-xfi.c395
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse.c372
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy.c445
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-cn81xx.c1003
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-common-sata.c625
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-common.c1636
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.c398
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-margin-cn8xxx.c271
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c500
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-os/bdk-thread.c384
-rw-r--r--src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c238
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-env.c7
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-env.h4
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c4398
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h5
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-internal.h4
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-l2c.c4
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-print.h19
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-spd.c44
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c1365
-rw-r--r--src/vendorcode/cavium/bdk/libdram/dram-util.h3
-rw-r--r--src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c39
-rw-r--r--src/vendorcode/cavium/bdk/libdram/libdram-config-load.c10
-rw-r--r--src/vendorcode/cavium/bdk/libdram/libdram.c95
-rw-r--r--src/vendorcode/cavium/include/bdk/bdk-devicetree.h20
-rw-r--r--src/vendorcode/cavium/include/bdk/bdk-minimal.h60
-rw-r--r--src/vendorcode/cavium/include/bdk/bdk.h86
-rw-r--r--src/vendorcode/cavium/include/bdk/lame_string.h19
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h30
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h60
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-bgx.h17565
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-dap.h1010
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-dtx.h12470
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ecam.h1245
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gic.h8178
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gser.h12340
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h20807
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-iobn.h7054
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h6
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h1
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccbr.h1000
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h1
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pcierc.h29162
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h2
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rvu.h4167
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sata.h4896
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sli.h7573
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-smi.h551
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-usbdrd.h14020
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-usbh.h7606
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h4
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h3
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h17
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h11
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h6
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-swap.h130
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h43
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-bist/bist.h43
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-bist/efuse-read.h41
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-ccpi.h54
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-dram.h60
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-mdio.h54
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h3
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-twsi.h54
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h5
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h14
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-image.h105
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h3
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-xmodem.h59
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h9
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h8
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h5
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access-native.h155
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h81
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h74
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h38
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h154
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-crc.h53
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h2
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-error-report.h62
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-fpa.h162
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h3
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-hal.h98
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-key.h86
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h6
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h5
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mmc.h89
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mpi.h105
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nix.h105
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pbus-flash.h111
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie-flash.h109
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h36
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pki.h83
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pko.h126
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-power-burn.h67
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h66
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h5
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h3
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h3
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sso.h69
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-tns.h109
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h14
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h9
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h5
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h3
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h8
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/if/bdk-if.h326
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/qlm/bdk-qlm-common.h326
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.h152
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h110
-rw-r--r--src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h3
-rw-r--r--src/vendorcode/cavium/include/bdk/libdram/libdram-config.h2
152 files changed, 167079 insertions, 10064 deletions
diff --git a/src/vendorcode/cavium/Kconfig b/src/vendorcode/cavium/Kconfig
new file mode 100644
index 0000000000..80377620ae
--- /dev/null
+++ b/src/vendorcode/cavium/Kconfig
@@ -0,0 +1,70 @@
+##
+## This file is part of the coreboot project.
+##
+## Copyright 2017-present Facebook, Inc.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; version 2 of the License.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+
+config CAVIUM_BDK
+ def_bool n
+ select HAVE_DEBUG_RAM_SETUP
+ help
+ Build Cavium's BDK in romstage.
+
+if CAVIUM_BDK
+
+menu "BDK"
+
+config CAVIUM_BDK_VERBOSE_INIT
+ bool "Enable verbose init"
+ depends on CAVIUM_BDK
+ help
+ Build Cavium's BDK with verbose init code.
+
+config CAVIUM_BDK_VERBOSE_DRAM
+ bool "Enable verbose dram init"
+ default y if DEBUG_RAM_SETUP
+ depends on CAVIUM_BDK
+ help
+ Build Cavium's BDK with verbose dram init code.
+
+config CAVIUM_BDK_VERBOSE_DRAM_TEST
+ bool "Enable verbose raminit tests"
+ depends on CAVIUM_BDK
+ help
+ Build Cavium's BDK with verbose DRAM testing code.
+
+config CAVIUM_BDK_VERBOSE_QLM
+ bool "Enable verbose qlm init"
+ depends on CAVIUM_BDK
+ help
+ Build Cavium's BDK with verbose QLM code.
+
+config CAVIUM_BDK_VERBOSE_PCIE_CONFIG
+ bool "Enable verbose pcie config"
+ depends on CAVIUM_BDK
+ help
+ Build Cavium's BDK with verbose PCIe config code.
+
+config CAVIUM_BDK_VERBOSE_PCIE
+ bool "Enable verbose pcie init"
+ depends on CAVIUM_BDK
+ help
+ Build Cavium's BDK with verbose PCIe code.
+
+config CAVIUM_BDK_VERBOSE_PHY
+ bool "Enable verbose phy init"
+ depends on CAVIUM_BDK
+ help
+ Build Cavium's BDK with verbose PHY code.
+endmenu
+
+endif
diff --git a/src/vendorcode/cavium/Makefile.inc b/src/vendorcode/cavium/Makefile.inc
new file mode 100644
index 0000000000..855b3c6218
--- /dev/null
+++ b/src/vendorcode/cavium/Makefile.inc
@@ -0,0 +1,92 @@
+##
+## This file is part of the coreboot project.
+##
+## Copyright 2017-present Facebook, Inc.
+##
+## This program is free software; you can redistribute it and/or modify
+## it under the terms of the GNU General Public License as published by
+## the Free Software Foundation; version 2 of the License.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+
+ifeq ($(CONFIG_CAVIUM_BDK),y)
+
+romstage-y += bdk/libbdk-arch/bdk-csr.c
+romstage-y += bdk/libbdk-arch/bdk-model.c
+romstage-y += bdk/libbdk-arch/bdk-numa.c
+romstage-y += bdk/libbdk-boot/bdk-boot-status.c
+romstage-y += bdk/libbdk-dram/bdk-dram-address.c
+romstage-y += bdk/libbdk-dram/bdk-dram-config.c
+romstage-y += bdk/libbdk-dram/bdk-dram-size.c
+romstage-y += bdk/libbdk-dram/bdk-dram-test.c
+romstage-y += bdk/libbdk-dram/bdk-dram-test-addrbus.c
+romstage-y += bdk/libbdk-dram/bdk-dram-test-databus.c
+romstage-y += bdk/libbdk-dram/bdk-dram-test-fastscan.c
+romstage-y += bdk/libbdk-dram/bdk-dram-test-patfil.c
+romstage-y += bdk/libbdk-driver/bdk-driver-rnm.c
+romstage-y += bdk/libbdk-hal/bdk-clock.c
+romstage-y += bdk/libbdk-hal/bdk-config.c
+romstage-y += bdk/libbdk-hal/bdk-gpio.c
+romstage-y += bdk/libbdk-hal/bdk-l2c.c
+romstage-y += bdk/libbdk-os/bdk-init.c
+romstage-y += bdk/libbdk-trust/bdk-trust.c
+romstage-y += bdk/libdram/dram-env.c
+romstage-y += bdk/libdram/dram-init-ddr3.c
+romstage-y += bdk/libdram/dram-l2c.c
+romstage-y += bdk/libdram/dram-spd.c
+romstage-y += bdk/libdram/dram-tune-ddr3.c
+romstage-y += bdk/libdram/lib_octeon_shared.c
+romstage-y += bdk/libdram/libdram.c
+romstage-y += bdk/libdram/libdram-config-load.c
+romstage-y += bdk/libbdk-hal/bdk-access.c
+
+# FIXME: Get rid of lame_string.c
+romstage-y += bdk/lame_string.c
+
+CPPFLAGS_common += -Isrc/vendorcode/cavium/include/bdk
+
+# For bdk_dram_get_size_mbytes()
+ramstage-y += bdk/libbdk-dram/bdk-dram-size.c
+
+ramstage-y += bdk/libbdk-hal/bdk-config.c
+ramstage-y += bdk/libbdk-hal/bdk-qlm.c
+ramstage-y += bdk/libbdk-hal/bdk-pcie-cn8xxx.c
+ramstage-y += bdk/libbdk-hal/bdk-pcie.c
+ramstage-y += bdk/libbdk-hal/bdk-gpio.c
+ramstage-y += bdk/libbdk-hal/bdk-ecam-io.c
+ramstage-y += bdk/libbdk-hal/bdk-usb.c
+ramstage-y += bdk/libbdk-hal/bdk-access.c
+
+ramstage-y += bdk/libbdk-arch/bdk-csr.c
+ramstage-y += bdk/libbdk-arch/bdk-model.c
+ramstage-y += bdk/libbdk-arch/bdk-numa.c
+ramstage-y += bdk/libbdk-hal/qlm/bdk-qlm-common.c
+ramstage-y += bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.c
+ramstage-y += bdk/libbdk-hal/qlm/bdk-qlm-common-sata.c
+ramstage-y += bdk/libbdk-hal/qlm/bdk-qlm-margin-cn8xxx.c
+
+ramstage-y += bdk/libbdk-boot/bdk-boot-qlm.c
+ramstage-y += bdk/libbdk-boot/bdk-boot-pcie.c
+ramstage-y += bdk/libbdk-boot/bdk-boot-usb.c
+ramstage-y += bdk/libbdk-boot/bdk-boot-gpio.c
+ramstage-y += bdk/libbdk-boot/bdk-boot.c
+
+ramstage-y += bdk/libbdk-hal/if/bdk-if-phy.c
+ramstage-y += bdk/libbdk-hal/if/bdk-if-phy-marvell.c
+ramstage-y += bdk/libbdk-hal/if/bdk-if-phy-vetesse-8514.c
+ramstage-y += bdk/libbdk-hal/if/bdk-if-phy-vetesse.c
+ramstage-y += bdk/libbdk-driver/bdk-driver-mdio.c
+ramstage-y += bdk/libbdk-driver/bdk-driver-rnm.c
+ramstage-y += bdk/libbdk-hal/device/bdk-device.c
+ramstage-y += bdk/libbdk-hal/bdk-ecam.c
+
+# FIXME: Get rid of lame_string.c
+ramstage-y += bdk/lame_string.c
+
+ramstage-$(CONFIG_SOC_CAVIUM_CN81XX) += bdk/libbdk-hal/qlm/bdk-qlm-cn81xx.c
+
+endif
diff --git a/src/vendorcode/cavium/bdk/lame_string.c b/src/vendorcode/cavium/bdk/lame_string.c
new file mode 100644
index 0000000000..11c5add209
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/lame_string.c
@@ -0,0 +1,149 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ * Copyright 2018-present Facebook, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * string.c: hastily cobbled-together string functions
+ */
+
+#include <assert.h>
+#include <string.h>
+#include <lame_string.h>
+
/*
 * Map a single ASCII character to its numeric digit value (bases <= 16).
 * Accepts '0'-'9', 'a'-'f' and 'A'-'F'; any other character yields -1.
 */
static int char_to_val(char c)
{
	if ('0' <= c && c <= '9')
		return c - '0';
	if ('a' <= c && c <= 'f')	/* lowercase hex */
		return 10 + (c - 'a');
	if ('A' <= c && c <= 'F')	/* uppercase hex */
		return 10 + (c - 'A');
	return -1;			/* not a digit in any supported base */
}
+
+unsigned long long int strtoull(const char *nptr, char **endptr, int base)
+{
+ unsigned long long int val;
+ size_t i, error = 0;
+
+ /* TODO: enforce lameness of this API for now... */
+ assert((base == 0) || (base == 16) || base == 10);
+
+ if (!nptr)
+ return 0;
+
+ /* Trim whitespace */
+ for (i = 0; i < strlen(nptr); i++)
+ if (nptr[i] != ' ')
+ break;
+
+ if (base == 0) {
+ /* Autodetect base */
+ if (strlen(&nptr[i]) >= 2 && ((nptr[i] == '0') &&
+ ((nptr[i + 1] == 'x') || (nptr[i + 1] == 'X')))) {
+ base = 16;
+ i += 2; /* start loop after prefix */
+ } else
+ base = 10;
+ }
+
+ val = 0;
+ for (; i < strlen(nptr); i++) {
+ if (base == 16) {
+ if (!isxdigit(nptr[i])) {
+ if (*endptr)
+ *endptr = (char *)&nptr[i];
+ error = 1;
+ break;
+ }
+ } else {
+ if (!isdigit(nptr[i])) {
+ if (*endptr)
+ *endptr = (char *)&nptr[i];
+ error = 1;
+ break;
+ }
+ }
+
+ val *= base;
+ val += char_to_val(nptr[i]);
+ }
+
+ if (error) {
+ printk(BIOS_ERR, "Failed to convert string '%s', base %d to "
+ "int\n", nptr, base);
+ return 0;
+ }
+ return val;
+}
+
/*
 * Lame strtoul(): delegates to strtol() and truncates the result to
 * unsigned long. Sufficient here because long is 64 bits on this target.
 */
unsigned long int strtoul(const char *nptr, char **endptr, int base)
{
	/* FIXME: check for overflow (u > max) */
	const unsigned long long int wide = strtol(nptr, endptr, base);

	return (unsigned long int)wide;
}
+
/*
 * Lame strtol(): handles an optional leading '-', then delegates to
 * strtoull() for the magnitude.
 *
 * @param nptr   String to convert.
 * @param endptr If non-NULL, receives a pointer to the first unconsumed
 *               character (fix: this argument used to be ignored).
 * @param base   0 (autodetect), 10 or 16 -- see strtoull().
 * @return Signed conversion result; 0 on NULL input or error.
 */
long int strtol(const char *nptr, char **endptr, int base)
{
	unsigned long long int u;
	int is_neg = 0;
	const char *p;
	long int ret;

	/* Guard NULL here too; strtoull() does, but nptr[0] below would
	 * fault first. */
	if (!nptr)
		return 0;

	if (nptr[0] == '-') {
		is_neg = 1;
		p = &nptr[1];
	} else {
		p = &nptr[0];
	}
	/* Fix: forward endptr instead of dropping it on the floor */
	u = strtoull(p, endptr, base);
	/* FIXME: check for overflow (u > max) */
	if (is_neg)
		ret = 0 - (long int)u;
	else
		ret = (long int)u;
	return ret;
}
+
/*
 * Lame strtoll(): handles an optional leading '-', then delegates to
 * strtoull() for the magnitude.
 *
 * @param nptr   String to convert.
 * @param endptr If non-NULL, receives a pointer to the first unconsumed
 *               character (fix: this argument used to be ignored).
 * @param base   0 (autodetect), 10 or 16 -- see strtoull().
 * @return Signed conversion result; 0 on NULL input or error.
 */
long long int strtoll(const char *nptr, char **endptr, int base)
{
	unsigned long long int u;
	int is_neg = 0;
	const char *p;
	long long int ret;

	/* Guard NULL here too; strtoull() does, but nptr[0] below would
	 * fault first. */
	if (!nptr)
		return 0;

	if (nptr[0] == '-') {
		is_neg = 1;
		p = &nptr[1];
	} else {
		p = &nptr[0];
	}
	/* Fix: forward endptr instead of dropping it on the floor */
	u = strtoull(p, endptr, base);
	/* FIXME: check for overflow (sign-bit set) */
	if (is_neg)
		ret = 0 - (long long int)u;
	else
		ret = (long long int)u;
	return ret;
}
+
/* FIXME: replace sscanf() usage for bdk_config_get_int. Mirrors sscanf's
 * convention of returning the number of conversions performed, which is
 * always 1 here because strtol() failures cannot be observed. */
int str_to_int(const char *str, int64_t *val)
{
	const long int parsed = strtol(str, NULL, 10);

	*val = (int64_t)parsed;
	return 1;
}
+
/* FIXME: replace sscanf() usage for bdk_config_get_int. Mirrors sscanf's
 * convention of returning the number of conversions performed, which is
 * always 1 here because strtol() failures cannot be observed. Parses the
 * input as base-16. */
int str_to_hex(const char *str, int64_t *val)
{
	const long int parsed = strtol(str, NULL, 16);

	*val = (int64_t)parsed;
	return 1;
}
diff --git a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c
index 981ad231dc..fc9ac35735 100644
--- a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c
+++ b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-csr.c
@@ -37,9 +37,10 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
-#include <stdio.h>
#include "libbdk-arch/bdk-csrs-pccpf.h"
#include "libbdk-arch/bdk-csrs-pem.h"
+#include "libbdk-arch/bdk-csrs-rst.h"
+#include "libbdk-hal/bdk-pcie.h"
#ifndef BDK_BUILD_HOST
@@ -87,9 +88,6 @@ uint64_t __bdk_csr_read_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, i
case BDK_CSR_TYPE_PCICONFIGRC:
{
- /* Don't allow PCIe register access if PCIe wasn't linked in */
- if (!bdk_pcie_config_read32)
- bdk_fatal("PCIe CSR access not supported when PCIe not linked in\n");
union bdk_pcc_dev_con_s dev_con;
switch (busnum)
{
@@ -201,9 +199,6 @@ void __bdk_csr_write_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int
case BDK_CSR_TYPE_PCICONFIGRC:
{
- /* Don't allow PCIe register access if PCIe wasn't linked in */
- if (!bdk_pcie_config_write32)
- bdk_fatal("PCIe CSR access not supported when PCIe not linked in\n");
union bdk_pcc_dev_con_s dev_con;
switch (busnum)
{
diff --git a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c
index f2b4a0c803..fc4053e1a4 100644
--- a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c
+++ b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-model.c
@@ -41,6 +41,8 @@
#include "libbdk-arch/bdk-csrs-mio_fus.h"
#include "libbdk-arch/bdk-csrs-fus.h"
#include "libbdk-arch/bdk-csrs-fusf.h"
+#include <libbdk-hal/bdk-clock.h>
+#include <libbdk-hal/bdk-utils.h>
/*
Format of a SKU
@@ -100,562 +102,6 @@ typedef struct
6, checking for trusted boot */
#define FUSES_CHECK_FUSF 0xffff
-/***************************************************/
-/* SKU table for t88 */
-/* From "Thunder Part Number fuse overview Rev 16.xlsx" */
-/***************************************************/
-static const model_sku_info_t t88_sku_info[] =
-{
- /* Index zero reserved for no fuses programmed */
- { 0x01, "CN", 88, 2601, "AAP", /* 48, 32 cores */
- { /* List of fuses for this SKU */
- 0 /* End of fuse list marker */
- }
- },
- { 0x02, "CN", 88, 2601, "AAS", /* 24 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
- 0 /* End of fuse list marker */
- }
- },
- { 0x03, "CN", 88, 2601, "ST", /* 48, 32 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
- BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(0), /* Disable PEM0-1 */
- BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(2), /* Disable PEM4-5 */
- 0 /* End of fuse list marker */
- }
- },
- { 0x04, "CN", 88, 2601, "STT", /* 48 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(0), /* Disable PEM0-1 */
- BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(2), /* Disable PEM4-5 */
- 0 /* End of fuse list marker */
- }
- },
- { 0x05, "CN", 88, 2601, "STS", /* 24 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable LMC2-3 */
- BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
- BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
- BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(0), /* Disable PEM0-1 */
- BDK_MIO_FUS_FUSE_NUM_E_PEM_DISX(2), /* Disable PEM4-5 */
- BDK_MIO_FUS_FUSE_NUM_E_BGX_DISX(1), /* Disable BGX1 */
- 0 /* End of fuse list marker */
- }
- },
- { 0x06, "CN", 88, 2601, "STP", /* 48, 32 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
- 0 /* End of fuse list marker */
- }
- },
- { 0x07, "CN", 88, 2601, "NT", /* 48, 32 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
- 0 /* End of fuse list marker */
- }
- },
- { 0x08, "CN", 88, 2601, "NTS", /* 24 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable LMC2-3 */
- BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
- BDK_MIO_FUS_FUSE_NUM_E_BGX_DISX(1), /* Disable BGX1 */
- 0 /* End of fuse list marker */
- }
- },
- { 0x09, "CN", 88, 2601, "NTP", /* 48, 32 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(1),/* Disable SATA4-7 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
- 0 /* End of fuse list marker */
- }
- },
- { 0x0a, "CN", 88, 2601, "CP", /* 48,32 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2, /* Disable HFA */
- BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(0), /* Disable HNA */
- BDK_MIO_FUS_FUSE_NUM_E_NOZIP, /* Disable Compression */
- BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
- 0 /* End of fuse list marker */
- }
- },
- { 0x0b, "CN", 88, 2601, "CPS", /* 24 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2, /* Disable HFA */
- BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(0), /* Disable HNA */
- BDK_MIO_FUS_FUSE_NUM_E_NOZIP, /* Disable Compression */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable LMC2-3 */
- BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
- BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(0),/* Disable SATA0-3 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(2),/* Disable SATA8-11 */
- BDK_MIO_FUS_FUSE_NUM_E_SATA_DISX(3),/* Disable SATA12-15 */
- BDK_MIO_FUS_FUSE_NUM_E_BGX_DISX(1), /* Disable BGX1 */
- 0 /* End of fuse list marker */
- }
- },
- { 0x0c, "CN", 88, 2601, "SNT", /* 48,32 cores, Nitrox connects to PEM2x8, QLM4-5 */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_RSVD231X(0), /* Nitrox 3 is present */
- 0 /* End of fuse list marker */
- }
- },
- { 0x0d, "CN", 88, 2601, "SC", /* 48,32 cores, Nitrox connects to PEM2x8, QLM4-5 */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_RSVD231X(0), /* Nitrox 3 is present */
- BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2, /* Disable HFA */
- BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(0), /* Disable HNA */
- BDK_MIO_FUS_FUSE_NUM_E_NOZIP, /* Disable Compression */
- BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
- 0 /* End of fuse list marker */
- }
- },
- /* Index gap for adding more CN88 variants */
- { 0x20, "CN", 86, 1676, "AAP", /* No part, match unfused CN86XX */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(6), /* Alternate package fuse */
- 0 /* End of fuse list marker */
- }
- },
- { 0x21, "CN", 86, 1676, "SCP", /* 8 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(6), /* Alternate package fuse */
- BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1),/* L2C is half size */
- BDK_MIO_FUS_FUSE_NUM_E_NODFA_CP2, /* Disable HFA */
- BDK_MIO_FUS_FUSE_NUM_E_RSVD134X(0), /* Disable HNA */
- BDK_MIO_FUS_FUSE_NUM_E_NOZIP, /* Disable Compression */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable LMC2-3 */
- BDK_MIO_FUS_FUSE_NUM_E_OCX_DIS, /* Disable CCPI */
- BDK_MIO_FUS_FUSE_NUM_E_TNS_CRIPPLE, /* Disable TNS */
- 0 /* End of fuse list marker */
- }
- },
- {} /* End of SKU list marker */
-};
-
-/***************************************************/
-/* SKU table for t83 */
-/* From "Thunder Part Number fuse overview Rev 16.xlsx" */
-/***************************************************/
-static const model_sku_info_t t83_sku_info[] =
-{
- /* Index zero reserved for no fuses programmed */
- { 0x01, "CN", 83, 1676, "SCP", /* 24, 20, 16, 12, 8 cores */
- { /* List of fuses for this SKU */
- 0 /* End of fuse list marker */
- }
- },
- { 0x02, "CN", 83, 1676, "CP", /* 24, 20, 16, 12, 8 cores */
- { /* List of fuses for this SKU */
- /* Disable all Nitrox cores, CPT0 and CPT1 */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(0), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(1), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(2), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(3), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(4), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(5), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(6), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(7), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(8), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(9), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(10), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(11), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(16), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(17), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(18), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(19), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(20), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(21), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(22), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(23), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(24), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(25), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(26), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(27), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(28), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(29), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(30), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(31), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(32), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(33), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(34), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(35), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(36), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(37), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(38), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(39), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(40), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(41), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(42), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(43), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(44), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(45), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(46), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(47), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(0), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(1), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(2), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(3), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(4), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(5), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(6), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(7), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(8), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(9), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(10), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(11), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(16), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(17), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(18), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(19), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(20), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(21), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(22), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(23), /* Nitrox */
- 0 /* End of fuse list marker */
- }
- },
- { 0x03, "CN", 83, 1676, "AUS", /* 24, 20, 16, 12, 8 cores */
- { /* List of fuses for this SKU */
- FUSES_CHECK_FUSF, /* Trusted boot */
- 0 /* End of fuse list marker */
- }
- },
- { 0x04, "CN", 82, 1676, "SCP", /* 12, 8 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1),/* L2C is half size */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable upper LMC */
- /* Disable Nitrox cores CPT0[24-47] and CPT1[12-23] */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(24), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(25), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(26), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(27), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(28), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(29), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(30), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(31), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(32), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(33), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(34), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(35), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(36), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(37), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(38), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(39), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(40), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(41), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(42), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(43), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(44), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(45), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(46), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(47), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(16), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(17), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(18), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(19), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(20), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(21), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(22), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(23), /* Nitrox */
- 0 /* End of fuse list marker */
- }
- },
- { 0x05, "CN", 82, 1676, "CP", /* 12, 8 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1),/* L2C is half size */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_DIS, /* Disable upper LMC */
- /* Disable all Nitrox cores, CPT0 and CPT1 */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(0), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(1), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(2), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(3), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(4), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(5), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(6), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(7), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(8), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(9), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(10), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(11), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(16), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(17), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(18), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(19), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(20), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(21), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(22), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(23), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(24), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(25), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(26), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(27), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(28), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(29), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(30), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(31), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(32), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(33), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(34), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(35), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(36), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(37), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(38), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(39), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(40), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(41), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(42), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(43), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(44), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(45), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(46), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT0_ENG_DISX(47), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(0), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(1), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(2), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(3), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(4), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(5), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(6), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(7), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(8), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(9), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(10), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(11), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(16), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(17), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(18), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(19), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(20), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(21), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(22), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT1_ENG_DISX(23), /* Nitrox */
- 0 /* End of fuse list marker */
- }
- },
- {} /* End of SKU list marker */
-};
-
-/***************************************************/
-/* SKU table for t81 */
-/* From "Thunder Part Number fuse overview Rev 16.xlsx" */
-/***************************************************/
-static const model_sku_info_t t81_sku_info[] =
-{
- /* Index zero reserved for no fuses programmed */
- { 0x01, "CN", 81, 676, "SCP", /* 4, 2 cores */
- { /* List of fuses for this SKU */
- /* No fuses */
- 0 /* End of fuse list marker */
- }
- },
- { 0x02, "CN", 81, 676, "CP", /* 4, 2 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(1), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(2), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(3), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(4), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(5), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(6), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(7), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(8), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(9), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(10), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(11), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(16), /* Nitrox */
- 0 /* End of fuse list marker */
- }
- },
- { 0x07, "CN", 81, 676, "AUS", /* 4, 2 cores */
- { /* List of fuses for this SKU */
- FUSES_CHECK_FUSF, /* Trusted boot */
- 0 /* End of fuse list marker */
- }
- },
- { 0x08, "CN", 81, 676, "AUC", /* 4, 2 cores */
- { /* List of fuses for this SKU */
- FUSES_CHECK_FUSF, /* Trusted boot */
- BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(1), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(2), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(3), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(4), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(5), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(6), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(7), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(8), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(9), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(10), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(11), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(16), /* Nitrox */
- 0 /* End of fuse list marker */
- }
- },
- { 0x03, "CN", 80, 676, "SCP", /* 4, 2 cores */
- { /* List of fuses for this SKU */
- /* Note that CHIP_ID(7) is suppose to be blown, but a few chips
- have incorrect fuses. We allow CN80XX SKUs with or without
- CHIP_ID(7) */
- //BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(7), /* Alternate package fuse 2? */
- BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1), /* L2C is half size */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF, /* LMC is half width */
- 0 /* End of fuse list marker */
- }
- },
- { 0x04, "CN", 80, 676, "CP", /* 4, 2 cores */
- { /* List of fuses for this SKU */
- /* Note that CHIP_ID(7) is suppose to be blown, but a few chips
- have incorrect fuses. We allow CN80XX SKUs with or without
- CHIP_ID(7) */
- //BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(7), /* Alternate package fuse 2? */
- BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1), /* L2C is half size */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF, /* LMC is half width */
- BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(1), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(2), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(3), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(4), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(5), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(6), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(7), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(8), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(9), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(10), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(11), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(16), /* Nitrox */
- 0 /* End of fuse list marker */
- }
- },
- { 0x05, "CN", 80, 555, "SCP", /* 4, 2 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(6), /* Alternate package fuse */
- BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1), /* L2C is half size */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF, /* LMC is half width */
- 0 /* End of fuse list marker */
- }
- },
- { 0x06, "CN", 80, 555, "CP", /* 4, 2 cores */
- { /* List of fuses for this SKU */
- BDK_MIO_FUS_FUSE_NUM_E_CHIP_IDX(6), /* Alternate package fuse */
- BDK_MIO_FUS_FUSE_NUM_E_L2C_CRIPX(1), /* L2C is half size */
- BDK_MIO_FUS_FUSE_NUM_E_LMC_HALF, /* LMC is half width */
- BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(1), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(2), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(3), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(4), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(5), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(6), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(7), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(8), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(9), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(10), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(11), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(12), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(13), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(14), /* Nitrox */
- //BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(15), /* Nitrox */
- BDK_MIO_FUS_FUSE_NUM_E_CPT_ENG_DISX(16), /* Nitrox */
- 0 /* End of fuse list marker */
- }
- },
- {} /* End of SKU list marker */
-};
-
-/***************************************************/
-/* SKU table for t93 */
-/***************************************************/
-static const model_sku_info_t t93_sku_info[] =
-{
- /* Index zero reserved for no fuses programmed */
- { 0x01, "CN", 93, 1676, "SCP", /* 24, 20, 16, 12, 8 cores */
- { /* List of fuses for this SKU */
- /* No fuses */
- 0 /* End of fuse list marker */
- }
- },
- {} /* End of SKU list marker */
-};
-
-/**
- * Given a core count, return the last two digits of a model number
- *
- * @param cores Number of cores
- *
- * @return Two digit model number
- */
-static int model_digits_for_cores(int cores)
-{
- /* If the number of cores is between two model levels, use the lower
- level. This assumes that a model guarantees a minimum number of
- cores. This should never happen, but you never know */
- switch (cores)
- {
- case 1: return 10; /* CNxx10 = 1 core */
- case 2: return 20; /* CNxx20 = 2 cores */
- case 3: return 25; /* CNxx25 = 3 cores */
- case 4: return 30; /* CNxx30 = 4 cores */
- case 5: return 32; /* CNxx32 = 5 cores */
- case 6: return 34; /* CNxx34 = 6 cores */
- case 7: return 38; /* CNxx38 = 7 cores */
- case 8: return 40; /* CNxx40 = 8 cores */
- case 9: return 42; /* CNxx42 = 9 cores */
- case 10: return 45; /* CNxx45 = 10 cores */
- case 11: return 48; /* CNxx48 = 11 cores */
- case 12: return 50; /* CNxx50 = 12 cores */
- case 13: return 52; /* CNxx52 = 13 cores */
- case 14: return 55; /* CNxx55 = 14 cores */
- case 15: return 58; /* CNxx58 = 15 cores */
- case 16 ... 19: return 60; /* CNxx60 = 16 cores */
- case 20 ... 23: return 65; /* CNxx65 = 20 cores */
- case 24 ... 31: return 70; /* CNxx70 = 24 cores */
- case 32 ... 39: return 80; /* CNxx80 = 32 cores */
- case 40 ... 43: return 85; /* CNxx85 = 40 cores */
- case 44 ... 47: return 88; /* CNxx88 = 44 cores */
- default: return 90; /* CNxx90 = 48 cores */
- }
-}
-
/**
* Return non-zero if the die is in an alternate package. The
* normal is_model() checks will treat alternate package parts
@@ -694,234 +140,3 @@ int cavium_is_altpkg(uint32_t arg_model)
else
return 0;
}
-
-/**
- * Return the SKU string for a chip
- *
- * @param node Node to get SKU for
- *
- * @return Chip's SKU
- */
-const char* bdk_model_get_sku(int node)
-{
- /* Storage for SKU is per node. Static variable stores the value
- so we don't decode on every call */
- static char chip_sku[BDK_NUMA_MAX_NODES][32] = { { 0, }, };
-
- /* Return the cached string if we've already filled it in */
- if (chip_sku[node][0])
- return chip_sku[node];
-
- /* Figure out which SKU list to use */
- const model_sku_info_t *sku_info;
- uint64_t result;
- asm ("mrs %[rd],MIDR_EL1" : [rd] "=r" (result));
- result = bdk_extract(result, 4, 12);
- switch (result)
- {
- case 0xa1:
- sku_info = t88_sku_info;
- break;
- case 0xa2:
- sku_info = t81_sku_info;
- break;
- case 0xa3:
- sku_info = t83_sku_info;
- break;
- case 0xb2:
- sku_info = t93_sku_info;
- break;
- default:
- bdk_fatal("SKU detect: Unknown die\n");
- }
-
- /* Read the SKU index from the PNAME fuses */
- int match_index = -1;
- // FIXME: Implement PNAME reads
-
- /* Search the SKU list for the best match, where all the fuses match.
- Only needed if the PNAME fuses don't specify the index */
- if (match_index == -1)
- {
- match_index = 0;
- int match_score = -1;
- int index = 0;
- while (sku_info[index].fuse_index)
- {
- int score = 0;
- int fuse_index = 0;
- /* Count the number of fuses that match. A mismatch forces the worst
- score (-1) */
- while (sku_info[index].fuses[fuse_index])
- {
- int fuse;
- /* FUSES_CHECK_FUSF is special for trusted parts */
- if (sku_info[index].fuses[fuse_index] == FUSES_CHECK_FUSF)
- {
- BDK_CSR_INIT(fusf_ctl, node, BDK_FUSF_CTL);
- fuse = (fusf_ctl.u >> 6) & 1;
- }
- else
- {
- fuse = bdk_fuse_read(node, sku_info[index].fuses[fuse_index]);
- }
- if (fuse)
- {
- /* Match, improve the score */
- score++;
- }
- else
- {
- /* Mismatch, force score bad */
- score = -1;
- break;
- }
- fuse_index++;
- }
- /* If this score is better than the last match, use this index as the
- match */
- if (score > match_score)
- {
- match_score = score;
- match_index = index;
- }
- index++;
- }
- }
-
- /* Use the SKU table to determine the defaults for the SKU parts */
- const char *prefix = sku_info[match_index].prefix;
- int model = 100 * sku_info[match_index].model_base;
- int cores = bdk_get_num_cores(node);
- const char *customer_code = "";
- int rclk_limit = bdk_clock_get_rate(node, BDK_CLOCK_RCLK) / 1000000;
- const char *bg_str = "BG"; /* Default Ball Grid array */
- int balls = sku_info[match_index].num_balls; /* Num package balls */
- const char *segment = sku_info[match_index].segment; /* Market segment */
- char prod_phase[4]; /* Blank = production, PR = Prototype, ES = Engineering sample */
- char prod_rev[5]; /* Product revision */
- const char *rohs_option = "G"; /* RoHS is always G for current parts */
-
- /* Update the model number with the number of cores */
- model = (model / 100) * 100 + model_digits_for_cores(cores);
-
- /* Update the RCLK setting based on MIO_FUS_DAT3[core_pll_mul] */
- uint64_t core_pll_mul;
- if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
- {
- BDK_CSR_INIT(mio_fus_dat3, node, BDK_MIO_FUS_DAT3);
- core_pll_mul = mio_fus_dat3.s.core_pll_mul;
- }
- else
- core_pll_mul = bdk_fuse_read_range(bdk_numa_local(), BDK_FUS_FUSE_NUM_E_CORE_MAX_MULX(0), 7);
-
- if (core_pll_mul)
- {
- /* CORE_PLL_MUL covers bits 5:1, so we need to multiple by 2. The
- documentation doen't mention this clearly: There is a 300Mhz
- addition to the base multiplier */
- rclk_limit = core_pll_mul * 2 * 50 + 300;
- }
-
- /* FIXME: Hardcode production as there is no way to tell */
- prod_phase[0] = 0;
-
- /* Read the Pass information from fuses. Note that pass info in
- MIO_FUS_DAT2[CHIP_ID] is encoded as
- bit[7] = Unused, zero
- bit[6] = Alternate package
- bit[5..3] = Major pass
- bit[2..0] = Minor pass */
- int major_pass;
- int minor_pass;
- if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
- {
- BDK_CSR_INIT(mio_fus_dat2, node, BDK_MIO_FUS_DAT2);
- major_pass = ((mio_fus_dat2.s.chip_id >> 3) & 7) + 1;
- minor_pass = mio_fus_dat2.s.chip_id & 7;
- }
- else
- {
- /* FIXME: We don't support getting the pass for other node on CN9XXX */
- bdk_ap_midr_el1_t midr_el1;
- BDK_MRS(MIDR_EL1, midr_el1.u);
- major_pass = (midr_el1.s.variant & 7) + 1;
- minor_pass = midr_el1.s.revision;
- }
-
- if (major_pass == 1)
- {
- /* Pass 1.x is special in that we don't show the implied 'X' */
- if (minor_pass == 0)
- {
- /* Completely blank for 1.0 */
- prod_rev[0] = 0;
- }
- else
- {
- /* If we are production and not pass 1.0, the product phase
- changes from blank to "-P". The product revision then
- follows the product phase without a '-' */
- if (prod_phase[0] == 0)
- {
- /* Change product phase to "-P" */
- prod_phase[0] = '-';
- prod_phase[1] = 'P';
- prod_phase[2] = 0;
- }
- /* No separator between phase and revision */
- prod_rev[0] = '1';
- prod_rev[1] = '0' + minor_pass;
- prod_rev[2] = 0;
- }
- }
- else
- {
- /* Pass 2.0 and above 12345678 */
- const char pass_letter[8] = "XYWVUTSR";
- prod_rev[0] = '-';
- prod_rev[1] = pass_letter[major_pass-1];
- if (minor_pass == 0)
- {
- /* Nothing after the letter code */
- prod_rev[2] = 0;
- }
- else
- {
- /* Add major and minor after the letter code */
- prod_rev[2] = '0' + major_pass;
- prod_rev[3] = '0' + minor_pass;
- prod_rev[4] = 0;
- }
- }
-
- /* Special check for CN88XX pass 2.0 and 2.1. Documentation mistakenly
- specified 2.0 as -PR and 2.1 as -Y. Rather than fix the docs, OPs has
- decided to special case this SKU */
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (major_pass == 2))
- {
- if (minor_pass == 0)
- {
- prod_phase[0] = '-'; /* SKU ends with -PR-Y-G */
- prod_phase[1] = 'P';
- prod_phase[2] = 'R';
- prod_phase[3] = 0;
- }
- else if (minor_pass == 1)
- {
- prod_rev[0] = '-'; /* SKU ends with -Y-G */
- prod_rev[1] = 'Y';
- prod_rev[2] = 0;
- }
- }
-
- /* Read PNAME fuses, looking for SKU overrides */
- // FIXME: Implement PNAME reads
-
- /* Build the SKU string */
- snprintf(chip_sku[node], sizeof(chip_sku[node]), "%s%d%s-%d%s%d-%s%s%s-%s",
- prefix, model, customer_code, rclk_limit, bg_str, balls, segment,
- prod_phase, prod_rev, rohs_option);
-
- return chip_sku[node];
-}
diff --git a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c
index 33d34ba669..ede3b10170 100644
--- a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c
+++ b/src/vendorcode/cavium/bdk/libbdk-arch/bdk-numa.c
@@ -37,11 +37,18 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
-#include <stdio.h>
+#include <libbdk-hal/bdk-atomic.h>
-int __bdk_numa_master_node = -1; /* Which node is the master */
-static int __bdk_numa_exists_mask = 0; /* Bitmask of nodes that exist */
-static bdk_spinlock_t __bdk_numa_lock;
+/*
+ * FIXME(dhendrix): can't include bdk-spinlock.h, compile complains:
+ * {standard input}:40: Error: selected processor does not support `ldadda x3,x5,[x2]'
+ */
+
+//int __bdk_numa_master_node = -1; /* Which node is the master */
+int __bdk_numa_master_node = 0; /* FIXME(dhendrix): assume 0 */
+//static int __bdk_numa_exists_mask = 0; /* Bitmask of nodes that exist */
+static int __bdk_numa_exists_mask = 1; /* FIXME(dhendrix): assume 0x01 */
+//static bdk_spinlock_t __bdk_numa_lock;
/**
* Get a bitmask of the nodes that exist
@@ -60,11 +67,8 @@ uint64_t bdk_numa_get_exists_mask(void)
*/
void bdk_numa_set_exists(bdk_node_t node)
{
- bdk_spinlock_lock(&__bdk_numa_lock);
- __bdk_numa_exists_mask |= 1 << node;
- if (__bdk_numa_master_node == -1)
- __bdk_numa_master_node = node;
- bdk_spinlock_unlock(&__bdk_numa_lock);
+ /* FIXME(dhendrix): stub. */
+ return;
}
/**
@@ -76,7 +80,8 @@ void bdk_numa_set_exists(bdk_node_t node)
*/
int bdk_numa_exists(bdk_node_t node)
{
- return __bdk_numa_exists_mask & (1 << node);
+ /* FIXME(dhendrix): stub */
+ return node == 0;
}
/**
@@ -86,6 +91,7 @@ int bdk_numa_exists(bdk_node_t node)
*/
extern int bdk_numa_is_only_one()
{
- return __bdk_numa_exists_mask == 1;
+ /* FIXME(dhendrix): stub */
+ return 1;
}
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-platform.h b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-gpio.c
index 6b6e340d39..330a23ba49 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-platform.h
+++ b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-gpio.c
@@ -36,47 +36,35 @@
* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-gpio.h"
+#include "libbdk-hal/bdk-config.h"
+#include "libbdk-hal/bdk-gpio.h"
+#include "libbdk-boot/bdk-boot-gpio.h"
/**
- * @file
- *
- * Functions for information about the run platform.
- *
- * <hr>$Revision: 49448 $<hr>
- * @addtogroup hal
- * @{
+ * Configure GPIO on all nodes as part of booting
*/
-
-/**
- * This typedef defines the possible platforms for the BDK. The
- * numbers represent fuse setting in Fuses[197:195].
- */
-typedef enum
+void bdk_boot_gpio(void)
{
- BDK_PLATFORM_HW = 0,
- BDK_PLATFORM_EMULATOR = 1,
- BDK_PLATFORM_RTL = 2,
- BDK_PLATFORM_ASIM = 3,
-} bdk_platform_t;
-
-/**
- * Check which platform we are currently running on. This allows a BDK binary to
- * run on various platforms without a recompile.
- *
- * @param platform Platform to check for
- *
- * @return Non zero if we are on the platform
- */
-static inline int bdk_is_platform(bdk_platform_t platform) __attribute__ ((pure, always_inline));
-static inline int bdk_is_platform(bdk_platform_t platform)
-{
- extern bdk_platform_t __bdk_platform;
- return (__bdk_platform == platform);
+ const int NUM_GPIO = bdk_gpio_get_num();
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
+ {
+ if (bdk_numa_exists(n))
+ {
+ for (int gpio = 0; gpio < NUM_GPIO; gpio++)
+ {
+ int pin_sel = bdk_config_get_int(BDK_CONFIG_GPIO_PIN_SELECT, gpio, n);
+ if (pin_sel >= 0)
+ {
+ BDK_TRACE(INIT, "Connecting N%d.GPIO%d to pin select 0x%x\n",
+ n, gpio, pin_sel);
+ bdk_gpio_select_pin(n, gpio, pin_sel);
+ }
+ int invert = bdk_config_get_int(BDK_CONFIG_GPIO_POLARITY, gpio, n);
+ if (invert)
+ BDK_CSR_MODIFY(c, n, BDK_GPIO_BIT_CFGX(gpio), c.s.pin_xor = 1);
+ }
+ }
+ }
}
-
-/**
- * Call to initialize the platform state
- */
-extern void __bdk_platform_init();
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-bgx.h b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-pcie.c
index f3ea6a41d5..b03c2e03ee 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-bgx.h
+++ b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-pcie.c
@@ -36,19 +36,33 @@
* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <bdk.h>
+#include <string.h>
+#include "libbdk-hal/if/bdk-if.h"
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-arch/bdk-csrs-pem.h"
+#include "libbdk-boot/bdk-boot-pcie.h"
+#include "libbdk-hal/bdk-pcie.h"
/**
- * @file
- *
- * Boot services for BGX
- *
- * @addtogroup boot
- * @{
+ * Configure PCIe on all nodes as part of booting
*/
-
-/**
- * Configure BGX on all nodes as part of booting
- */
-extern void bdk_boot_bgx(void);
-
-/** @} */
+void bdk_boot_pcie(void)
+{
+ /* Initialize PCIe and bring up the link */
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
+ {
+ if (bdk_numa_exists(n))
+ {
+ for (int p = 0; p < bdk_pcie_get_num_ports(n); p++)
+ {
+ /* Only init PCIe that are attached to QLMs */
+ if (bdk_qlm_get_qlm_num(n, BDK_IF_PCIE, p, 0) != -1)
+ {
+ BDK_TRACE(INIT, "Initializing PCIe%d on Node %d\n", p, n);
+ bdk_pcie_rc_initialize(n, p);
+ }
+ }
+ }
+ }
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-qlm.c b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-qlm.c
new file mode 100644
index 0000000000..73bdca0363
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-qlm.c
@@ -0,0 +1,515 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <string.h>
+#include "libbdk-hal/if/bdk-if.h"
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-hal/bdk-utils.h"
+#include "libbdk-boot/bdk-boot-qlm.h"
+#include "libbdk-hal/bdk-config.h"
+#include "libbdk-hal/bdk-twsi.h"
+
+static void boot_init_qlm_clk(void)
+{
+ /* Setup reference clocks */
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
+ {
+ if (!bdk_numa_exists(n))
+ continue;
+
+ int num_qlms = bdk_qlm_get_num(n);
+
+ BDK_TRACE(INIT, "Initializing QLM clocks on Node %d\n", n);
+ for (int qlm = 0; qlm < num_qlms; qlm++)
+ {
+ bdk_qlm_clock_t clk = bdk_config_get_int(BDK_CONFIG_QLM_CLK, n, qlm);
+ if (BDK_QLM_CLK_LAST == clk) /* no entry */
+ continue;
+
+ if (clk > BDK_QLM_CLK_LAST)
+ {
+ bdk_warn("Invalid clock source %d for QLM%d on node %d. Not configuring.\n",
+ clk, qlm, n);
+ continue;
+ }
+
+ if (0 != bdk_qlm_set_clock(n, qlm, clk))
+ {
+ bdk_error("Error setting clock source %d for QLM%d on node %d. Ignoring.\n",
+ clk, qlm, n);
+ }
+ }
+ }
+}
+
+/**
+ * Given a node and DLM/QLM, return the possible BGX lanes connected to it. This
+ * is needed to determine which PHY address to use for SFP/SFP+ detection.
+ *
+ * @param node Node the DLM/QLM is on
+ * @param qlm DLM/QLM to find the BGX for
+ * @param bgx Output: The BGX instance number, or -1 on failure
+ * @param bgx_lane_mask
+ * Output: Which BGX indexes may be connected to this port
+ */
+static void find_bgx(int node, int qlm, int *bgx, int *bgx_lane_mask)
+{
+ *bgx = -1;
+ *bgx_lane_mask = 0;
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ switch (qlm)
+ {
+ case 0: /* BGX0 -> QLM0 */
+ case 1: /* BGX1 -> QLM1 */
+ *bgx = qlm;
+ *bgx_lane_mask = 0xf;
+ return;
+ default:
+ BDK_TRACE(INIT, "N%d.QLM%d: No BGX for this QLM, illegal config\n", node, qlm);
+ return;
+ }
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ {
+ switch (qlm)
+ {
+ case 2: /* BGX0 -> QLM2 */
+ *bgx = 0;
+ *bgx_lane_mask = 0xf;
+ return;
+ case 3: /* BGX1 -> QLM3 */
+ *bgx = 1;
+ *bgx_lane_mask = 0xf;
+ return;
+ case 4: /* BGX3 -> DLM4 */
+ *bgx = 3;
+ *bgx_lane_mask = 0x3;
+ return;
+ case 5: /* BGX2 -> DLM5 */
+ *bgx = 2;
+ *bgx_lane_mask = 0x3;
+ return;
+ case 6: /* BGX2 -> DLM6 */
+ *bgx = 2;
+ *bgx_lane_mask = 0xc;
+ return;
+ default:
+ BDK_TRACE(INIT, "N%d.QLM%d: No BGX for this QLM, illegal config\n", node, qlm);
+ return;
+ }
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ {
+ switch (qlm)
+ {
+ case 0: /* BGX0 -> DLM0 */
+ *bgx = 0;
+ *bgx_lane_mask = 0x3;
+ return;
+ case 1: /* BGX0 -> DLM1 */
+ *bgx = 0;
+ *bgx_lane_mask = 0xc;
+ return;
+ case 2: /* BGX1 -> DLM2 */
+ *bgx = 1;
+ *bgx_lane_mask = 0x3;
+ return;
+ case 3: /* BGX1 -> DLM3 */
+ *bgx = 1;
+ *bgx_lane_mask = 0xc;
+ return;
+ default:
+ BDK_TRACE(INIT, "N%d.QLM%d: No BGX for this QLM, illegal config\n", node, qlm);
+ return;
+ }
+ }
+ else
+ bdk_error("N%d.QLM%d: Unsupported chip, update %s()\n", node, qlm, __FUNCTION__);
+}
+
+/**
+ * Determine the DLM/QLM mode based on a SFP/SFP+ connected to the port. Note that
+ * the CN8XXX parts can't control mode per lane, so all SFP/SFP+ on a DLM/QLM must
+ * be the same mode. This code is sloppy about finding the BGX PHY for the DLM/QLM
+ * because not all lanes may be used.
+ *
+ * @param node Node to determine mode for
+ * @param qlm DLM/QLM the SFP/SFP+ is connected to
+ *
+ * @return QLM mode or -1 on failure
+ */
+static int init_sfp(int node, int qlm)
+{
+ int mode = BDK_QLM_MODE_XFI_4X1; /* Default to XFI if detection fails */
+ int bgx = -1;
+ int bgx_lane_mask = 0;
+
+ find_bgx(node, qlm, &bgx, &bgx_lane_mask);
+ if (bgx == -1)
+ return mode;
+
+ BDK_TRACE(INIT, "N%d.QLM%d: Checking for SFP/SFP+\n", node, qlm);
+
+ for (int index = 0; index < 4; index++)
+ {
+ /* Skip BGX indexes that aren't applicable */
+ if ((bgx_lane_mask & (1 << index)) == 0)
+ continue;
+ /* Lookup the PHY address for this BGX port */
+ int phy_addr = bdk_config_get_int(BDK_CONFIG_PHY_ADDRESS, node, bgx, index);
+ /* SFP/SFP+ are connected with TWSI, so only check ports with
+ PHYs connected with TWSI */
+ if ((phy_addr & BDK_IF_PHY_TYPE_MASK) != BDK_IF_PHY_TWSI)
+ continue;
+
+ /* For TWSI:
+ Bits[31:24]: Node ID, 0xff for device node
+ Bits[23:16]: TWSI internal address width in bytes (0-2)
+ Bits[15:12]: 2=TWSI
+ Bits[11:8]: TWSI bus number
+ Bits[7:0]: TWSI address */
+ int n = (phy_addr >> 24) & 0xff;
+ int twsi_ia_width = (phy_addr >> 16) & 0xff;
+ int twsi_bus = (phy_addr >> 8) & 0xf;
+ int twsi_addr = 0x50; /* From SFP spec */
+ if (n == 0xff)
+ n = node;
+
+ /* Read bytes 0-3 from eeprom. Note read is big endian, so byte 0 is
+ bits 31:24 in the result */
+ int64_t eeprom_00_03 = bdk_twsix_read_ia(n, twsi_bus, twsi_addr, 0, 4, twsi_ia_width);
+ if (eeprom_00_03 == -1)
+ {
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d.%d SFP/SFP+ eeprom access failed\n", node, qlm, bgx, index);
+ continue;
+ }
+ int64_t eeprom_04_07 = bdk_twsix_read_ia(n, twsi_bus, twsi_addr, 4, 4, twsi_ia_width);
+ if (eeprom_04_07 == -1)
+ {
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d.%d SFP/SFP+ eeprom access failed\n", node, qlm, bgx, index);
+ continue;
+ }
+ int64_t eeprom_08_11 = bdk_twsix_read_ia(n, twsi_bus, twsi_addr, 8, 4, twsi_ia_width);
+ if (eeprom_08_11 == -1)
+ {
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d.%d SFP/SFP+ eeprom access failed\n", node, qlm, bgx, index);
+ continue;
+ }
+ int64_t eeprom_12 = bdk_twsix_read_ia(n, twsi_bus, twsi_addr, 12, 1, twsi_ia_width);
+ if (eeprom_12 == -1)
+ {
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d.%d SFP/SFP+ eeprom access failed\n", node, qlm, bgx, index);
+ continue;
+ }
+
+ /* Byte 0: Identifier, should be 0x03 for SFP/SFP+
+ 0x03 = SFP of SFP+
+ 0x0c = QSFP
+ 0x0d = QSFP+ */
+ if (bdk_extract(eeprom_00_03, 24, 8) != 0x03)
+ {
+ /* Byte 0 of eeprom should be 0x03 for SFP/SFP+ */
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d.%d SFP/SFP+ not detected\n", node, qlm, bgx, index);
+ continue;
+ }
+ /* Byte 1: Extended Identifier, should be 0x04 */
+ if (bdk_extract(eeprom_00_03, 16, 8) != 0x04)
+ {
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d.%d SFP/SFP+ incorrect extended identifier\n", node, qlm, bgx, index);
+ continue;
+ }
+ /* Byte 2: Connector
+ Value Description of connector
+ 00h Unknown or unspecified
+ 01h SC
+ 02h Fibre Channel Style 1 copper connector
+ 03h Fibre Channel Style 2 copper connector
+ 04h BNC/TNC
+ 05h Fibre Channel coaxial headers
+ 06h FiberJack
+ 07h LC
+ 08h MT-RJ
+ 09h MU
+ 0Ah SG
+ 0Bh Optical pigtail
+ 0Ch MPO Parallel Optic
+ 0Dh-1Fh Reserved, Unallocated
+ 20h HSSDC II
+ 21h Copper Pigtail
+ 22h RJ45
+ 23h-7Fh Reserved, Unallocated
+ 80-FFh Vendor specific */
+ bool isOptical = false;
+ switch (bdk_extract(eeprom_00_03, 8, 8))
+ {
+ case 0x01: /* SC - Short channel */
+ case 0x07: /* LC - Long channel */
+ case 0x0B: /* Optical pigtail */
+ isOptical = true;
+ break;
+ }
+ BDK_TRACE(INIT, "N%d.QLM%d: SFP/SFP+ eeprom Bytes[0:3] 0x%0llx, Bytes[4:7] 0x%08llx, [8:11] 0x%08llx [12] 0x%02llx\n",
+ node, qlm, eeprom_00_03, eeprom_04_07, eeprom_08_11, eeprom_12);
+ /* Byte 3: Transceiver info first byte. See comments below */
+ /* Byte 3, bits 4-7 correspond to 10G Ethernet speeds */
+ /* 10G Ethernet Compliance Codes
+ Byte 3[7] 10G BASE-ER (Fiber - Extended Reach)
+ Byte 3[6] 10G BASE-LRM (Fiber - Long reach multi-mode)
+ Byte 3[5] 10G BASE-LR (Fiber - Long reach)
+ Byte 3[4] 10G BASE-SR (Fiber - Short reach) */
+ bool isXFI = bdk_extract(eeprom_00_03, 0, 8) != 0;
+ /* Byte 6, bits 0-7 correspond to Gigabit Ethernet speeds */
+ /* Gigabit Ethernet Compliance Codes
+ Byte 6[7] BASE-PX
+ Byte 6[6] BASE-BX10
+ Byte 6[5] 100BASE-FX
+ Byte 6[4] 100BASE-LX/LX10 (Fiber)
+ Byte 6[3] 1000BASE-T (Twisted pair)
+ Byte 6[2] 1000BASE-CX (Shielded balanced copper)
+ Byte 6[1] 1000BASE-LX (Fiber)
+ Byte 6[0] 1000BASE-SX (Fiber) */
+ bool isSGMII = bdk_extract(eeprom_04_07, 8, 8) != 0;
+ /* Byte 12 is the nominal bit rate, units of 100 MBits/sec. */
+ int bit_rate = eeprom_12 * 100;
+ if (bit_rate)
+ {
+ BDK_TRACE(INIT, "N%d.QLM%d: Nominal bit rate %d MBits/sec\n",
+ node, qlm, bit_rate);
+ isXFI = (bit_rate >= 10000);
+ isSGMII = (bit_rate <= 2500);
+ }
+
+ if (isXFI)
+ {
+ mode = BDK_QLM_MODE_XFI_4X1;
+ if (isOptical)
+ BDK_TRACE(INIT, "N%d.QLM%d: SFP+ selecting XFI Optical\n", node, qlm);
+ else
+ BDK_TRACE(INIT, "N%d.QLM%d: SFP+ selecting XFI Copper\n", node, qlm);
+ }
+ else if (isSGMII)
+ {
+ mode = BDK_QLM_MODE_SGMII_4X1;
+ if (isOptical)
+ {
+ /* This should be 1000BASE-X, gigabit over fiber */
+ BDK_TRACE(INIT, "N%d.QLM%d: SFP selecting SGMII Optical\n", node, qlm);
+ }
+ else /* This should be SGMII, gigabit over copper */
+ BDK_TRACE(INIT, "N%d.QLM%d: SFP selecting SGMII Copper\n", node, qlm);
+ }
+ }
+ return mode;
+}
+
+/**
+ * Determine the DLM/QLM mode based on a QSFP/QSFP+ connected to
+ * the port. This code is sloppy about finding the BGX PHY for
+ * the DLM/QLM because not all lanes may be used.
+ *
+ * @param node Node to determine mode for
+ * @param qlm DLM/QLM the SFP/SFP+ is connected to
+ *
+ * @return QLM mode or -1 on failure
+ */
+static int init_qsfp(int node, int qlm)
+{
+ int mode = BDK_QLM_MODE_XLAUI_1X4; /* Default to XLAUI if detection fails */
+ int bgx = -1;
+ int bgx_lane_mask = 0;
+
+ find_bgx(node, qlm, &bgx, &bgx_lane_mask);
+ if (bgx == -1)
+ return mode;
+
+ BDK_TRACE(INIT, "N%d.QLM%d: Checking for QSFP/QSFP+\n", node, qlm);
+ int index = 0;
+
+ /* Lookup the PHY address for this BGX port */
+ int phy_addr = bdk_config_get_int(BDK_CONFIG_PHY_ADDRESS, node, bgx, index);
+ /* QSFP/QSFP+ are connected with TWSI, so only check ports with
+ PHYs connected with TWSI */
+ if ((phy_addr & BDK_IF_PHY_TYPE_MASK) != BDK_IF_PHY_TWSI)
+ return mode;
+
+ /* For TWSI:
+ Bits[31:24]: Node ID, 0xff for device node
+ Bits[23:16]: TWSI internal address width in bytes (0-2)
+ Bits[15:12]: 2=TWSI
+ Bits[11:8]: TWSI bus number
+ Bits[7:0]: TWSI address */
+ int n = (phy_addr >> 24) & 0xff;
+ int twsi_ia_width = (phy_addr >> 16) & 0xff;
+ int twsi_bus = (phy_addr >> 8) & 0xf;
+ int twsi_addr = 0x50; /* From SFP spec */
+ if (n == 0xff)
+ n = node;
+
+ /* Byte 0: Identifier, should be 0x03 for SFP/SFP+
+ 0x03 = SFP of SFP+
+ 0x0c = QSFP
+ 0x0d = QSFP+ */
+ int64_t eeprom_00 = bdk_twsix_read_ia(n, twsi_bus, twsi_addr, 0, 1, twsi_ia_width);
+ switch (eeprom_00)
+ {
+ case 0x03:
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d QSFP/QSFP+ contains a SFP+\n", node, qlm, bgx);
+ mode = init_sfp(node, qlm);
+ break;
+ case 0x0c:
+ case 0x0d:
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d Found a QSFP/QSFP+, assuming 40G\n", node, qlm, bgx);
+ mode = BDK_QLM_MODE_XLAUI_1X4;
+ break;
+ default:
+ BDK_TRACE(INIT, "N%d.QLM%d: BGX%d QSFP/QSFP+ not detected\n", node, qlm, bgx);
+ break;
+ }
+ return mode;
+}
+
+static void boot_init_qlm_mode(void)
+{
+ /* Check if QLM autoconfig is requested */
+ int qlm_auto = bdk_config_get_int(BDK_CONFIG_QLM_AUTO_CONFIG);
+ if (qlm_auto)
+ {
+ /* Auto configuration of QLMs
+ */
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
+ {
+ if (bdk_numa_exists(n))
+ {
+ BDK_TRACE(INIT, "Initializing QLMs on Node %d\n", n);
+ bdk_qlm_auto_config(n);
+ }
+ }
+ }
+ /*
+ * Check if QLM autoconfig from DIP switch settings is requested
+ */
+ else if (bdk_config_get_int(BDK_CONFIG_QLM_DIP_AUTO_CONFIG))
+ {
+ BDK_TRACE(INIT, "Reading DIP Switch settings for QLM Auto configuration\n");
+
+ /* Auto configuration of QLMs
+ */
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
+ {
+ if (bdk_numa_exists(n))
+ {
+ BDK_TRACE(INIT, "Initializing QLMs on Node %d\n", n);
+ if (bdk_qlm_dip_auto_config(n))
+ bdk_error("QLM Auto configuration failed!\n");
+ }
+ }
+
+ }
+ else
+ {
+ /* Initialize the QLMs based on configuration file settings
+ */
+
+ boot_init_qlm_clk();
+
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
+ {
+ if (!bdk_numa_exists(n))
+ continue;
+
+ int num_qlms = bdk_qlm_get_num(n);
+
+ BDK_TRACE(INIT, "Initializing QLMs on Node %d\n", n);
+ for (int qlm = 0; qlm < num_qlms; qlm++)
+ {
+ const char *cfg_val;
+
+ cfg_val = bdk_config_get_str(BDK_CONFIG_QLM_MODE, n, qlm);
+ if (!cfg_val)
+ continue;
+
+ int mode;
+ int freq;
+ /* Check for special token telling us to configure the QLM
+ based on the SFP/SFP+/QSFP/QSFP+ plugged into the system. */
+ if ((strcmp(cfg_val, "SFP+") == 0) || (strcmp(cfg_val, "QSFP+") == 0))
+ {
+ if (strcmp(cfg_val, "SFP+") == 0)
+ mode = init_sfp(n, qlm);
+ else
+ mode = init_qsfp(n, qlm);
+
+ if (mode == BDK_QLM_MODE_SGMII_4X1)
+ freq = 1250;
+ else
+ freq = 10321;
+ }
+ else
+ {
+ mode = bdk_qlm_cfg_string_to_mode(cfg_val);
+ freq = bdk_config_get_int(BDK_CONFIG_QLM_FREQ, n, qlm);
+ }
+ if (-1 == mode)
+ {
+ bdk_error("Invalid QLM mode string '%s' for QLM%d on node %d. "
+ "Not configuring.\n", cfg_val, qlm, n);
+ continue;
+ }
+ if (-1 == freq)
+ {
+ bdk_error("No frequency setting for QLM%d on node %d. "
+ "Not configuring.\n", qlm, n);
+ continue;
+ }
+
+ bdk_qlm_set_mode(n, qlm, mode, freq, 0);
+ }
+ }
+ }
+}
+
+/**
+ * Configure QLM on all nodes as part of booting
+ */
+void bdk_boot_qlm(void)
+{
+ boot_init_qlm_mode();
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c
index 83ab14cbc7..c91f2dd1bb 100644
--- a/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c
+++ b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-status.c
@@ -38,6 +38,8 @@
***********************license end**************************************/
#include <bdk.h>
#include "libbdk-arch/bdk-csrs-mio_tws.h"
+#include "libbdk-boot/bdk-boot-status.h"
+#include <libbdk-hal/bdk-config.h>
/**
* Report boot status to the BMC or whomever might care. This function
diff --git a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-platform.c b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-usb.c
index 8cac04a214..70ed44af4f 100644
--- a/src/vendorcode/cavium/bdk/libbdk-arch/bdk-platform.c
+++ b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot-usb.c
@@ -37,23 +37,26 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
-#include "libbdk-arch/bdk-csrs-ocla.h"
+#include <libbdk-hal/bdk-usb.h>
+#include <libbdk-hal/bdk-config.h>
+#include <libbdk-boot/bdk-boot-usb.h>
-bdk_platform_t __bdk_platform;
-
-void __bdk_platform_init()
+/**
+ * Configure USB on all nodes as part of booting
+ */
+void bdk_boot_usb(void)
{
- BDK_CSR_INIT(c, bdk_numa_master(), BDK_OCLAX_CONST(0));
- if (c.u == 0)
- {
- __bdk_platform = BDK_PLATFORM_ASIM;
- }
- else
+ /* Initialize USB, ready for standard XHCI driver */
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
{
- int plat2 = bdk_fuse_read(bdk_numa_master(), 197);
- int plat1 = bdk_fuse_read(bdk_numa_master(), 196);
- int plat0 = bdk_fuse_read(bdk_numa_master(), 195);
- __bdk_platform = (plat2 << 2) | (plat1 << 1) | plat0;
+ if (bdk_numa_exists(n))
+ {
+ for (int p = 0; p < 2; p++)
+ {
+ int usb_refclock = bdk_config_get_int(BDK_CONFIG_USB_REFCLK_SRC, n,p);
+ BDK_TRACE(INIT, "Initializing USB%d on Node %d clock type %d\n", p, n, usb_refclock);
+ bdk_usb_initialize(n, p, usb_refclock);
+ }
+ }
}
}
-
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-info.h b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot.c
index 4ba814ce77..15bcf4aa8c 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-info.h
+++ b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-boot.c
@@ -36,51 +36,63 @@
* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <bdk.h>
+#include <string.h>
+#include "libbdk-hal/if/bdk-if.h"
+#include "libbdk-arch/bdk-csrs-pem.h"
+#include "libbdk-boot/bdk-boot-pcie.h"
+#include "libbdk-boot/bdk-boot-qlm.h"
+#include "libbdk-boot/bdk-boot-usb.h"
+#include "libbdk-hal/bdk-pcie.h"
+#include "libbdk-hal/bdk-mdio.h"
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-hal/bdk-ecam.h"
+#include "libbdk-hal/bdk-rng.h"
+#include "libbdk-boot/bdk-boot-gpio.h"
+#include "libbdk-arch/bdk-csrs-iobn.h"
+#include "libbdk-arch/bdk-csrs-dap.h"
/**
- * @file
- *
- * Functions for displaying and retrieving infomration about the
- * boot environment
- *
- * @addtogroup boot
- * @{
+ * Configure hardware
*/
+void bdk_boot(void)
+{
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
+ {
+ if (bdk_numa_exists(n))
+ {
+ /* Allow CAP access from cores so we can read system registers through
+ memory mapped addresses. See bdk_sysreg_read() */
+ BDK_CSR_MODIFY(c, n, BDK_DAP_IMP_DAR, c.s.caben = 1);
+ /* Enable IOBN */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ {
+ BDK_CSR_MODIFY(c, n, BDK_IOBNX_NCB0_HP(0),
+ c.s.hp = 1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ BDK_CSR_MODIFY(c, n, BDK_IOBNX_NCB0_HP(1),
+ c.s.hp = 0);
+ }
-/**
- * Display information about strapping and other hard configuration items for
- * the specified node
- *
- * @param node Node to display
- */
-void bdk_boot_info_strapping(bdk_node_t node);
-
-/**
- * Return a string containing information about the chip's manufacture wafer
- *
- * @param node Node to query
- *
- * @return Static string, reused on each call
- */
-const char* bdk_boot_info_wafer(bdk_node_t node);
+ bdk_ecam_scan_all(n);
+ bdk_mdio_init(n);
+ bdk_qlm_init(n);
+ bdk_rng_init(n);
+ }
+ }
-/**
- * Return a string containing the chip's unique serial number
- *
- * @param node Node to query
- *
- * @return Static string, reused on each call
- */
-const char* bdk_boot_info_serial(bdk_node_t node);
-
-/**
- * Return a string containing the chip's unique ID
- *
- * @param node Node to query
- *
- * @return Static string, reused on each call
- */
-const char* bdk_boot_info_unique_id(bdk_node_t node);
+ bdk_boot_gpio();
+ bdk_boot_usb();
+ bdk_boot_qlm();
+ bdk_boot_pcie();
-/** @} */
+ /* Initialize PHYs */
+ for (bdk_node_t n = BDK_NODE_0; n < BDK_NUMA_MAX_NODES; n++)
+ {
+ if (bdk_numa_exists(n))
+ {
+ bdk_if_phy_setup(n);
+ }
+ }
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-boot/bdk-watchdog.c b/src/vendorcode/cavium/bdk/libbdk-boot/bdk-watchdog.c
deleted file mode 100644
index 48f955a7ef..0000000000
--- a/src/vendorcode/cavium/bdk/libbdk-boot/bdk-watchdog.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-#include <bdk.h>
-#include "libbdk-arch/bdk-csrs-gti.h"
-
-/**
- * Setup the watchdog to expire in timeout_ms milliseconds. When the watchdog
- * expires, the chip three things happen:
- * 1) Expire 1: interrupt that is ignored by the BDK
- * 2) Expire 2: DEL3T interrupt, which is disabled and ignored
- * 3) Expire 3: Soft reset of the chip
- *
- * Since we want a soft reset, we actually program the watchdog to expire at
- * the timeout / 3.
- *
- * @param timeout_ms Timeout in milliseconds. If this is zero, the timeout is taken from the
- * global configuration option BDK_BRD_CFG_WATCHDOG_TIMEOUT
- */
-void bdk_watchdog_set(unsigned int timeout_ms)
-{
- if (timeout_ms == 0)
- timeout_ms = bdk_config_get_int(BDK_CONFIG_WATCHDOG_TIMEOUT);
-
- if (timeout_ms > 0)
- {
- uint64_t sclk = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_SCLK);
- uint64_t timeout_sclk = sclk * timeout_ms / 1000;
- /* Per comment above, we want the watchdog to expire at 3x the rate specified */
- timeout_sclk /= 3;
- /* Watchdog counts in 1024 cycle steps */
- uint64_t timeout_wdog = timeout_sclk >> 10;
- /* We can only specify the upper 16 bits of a 24 bit value. Round up */
- timeout_wdog = (timeout_wdog + 0xff) >> 8;
- /* If the timeout overflows the hardware limit, set max */
- if (timeout_wdog >= 0x10000)
- timeout_wdog = 0xffff;
-
- BDK_TRACE(INIT, "Watchdog: Set to expire %lu SCLK cycles\n", timeout_wdog << 18);
- BDK_CSR_MODIFY(c, bdk_numa_local(), BDK_GTI_CWD_WDOGX(bdk_get_core_num()),
- c.s.len = timeout_wdog;
- c.s.mode = 3);
- }
-}
-
-/**
- * Signal the watchdog that we are still running
- */
-void bdk_watchdog_poke(void)
-{
- BDK_CSR_WRITE(bdk_numa_local(), BDK_GTI_CWD_POKEX(bdk_get_core_num()), 0);
-}
-
-/**
- * Disable the hardware watchdog
- */
-void bdk_watchdog_disable(void)
-{
- BDK_CSR_WRITE(bdk_numa_local(), BDK_GTI_CWD_WDOGX(bdk_get_core_num()), 0);
- BDK_TRACE(INIT, "Watchdog: Disabled\n");
-}
-
-/**
- * Return true if the watchdog is configured and running
- *
- * @return Non-zero if watchdog is running
- */
-int bdk_watchdog_is_running(void)
-{
- BDK_CSR_INIT(wdog, bdk_numa_local(), BDK_GTI_CWD_WDOGX(bdk_get_core_num()));
- return wdog.s.mode != 0;
-}
-
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c
index 94d7d76752..acefe1751c 100644
--- a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-address.c
@@ -72,9 +72,9 @@ bdk_dram_address_extract_info(uint64_t address, int *node, int *lmc, int *dimm,
/* LMC number is probably aliased */
if (l2c_ctl.s.disidxalias)
- *lmc = EXTRACT(address, 7, xbits);
+ *lmc = EXTRACT(address, 7, xbits);
else
- *lmc = EXTRACT(address, 7, xbits) ^ EXTRACT(address, bitno, xbits) ^ EXTRACT(address, 12, xbits);
+ *lmc = EXTRACT(address, 7, xbits) ^ EXTRACT(address, bitno, xbits) ^ EXTRACT(address, 12, xbits);
/* Figure out the bank field width */
BDK_CSR_INIT(lmcx_config, *node, BDK_LMCX_CONFIG(*lmc));
@@ -176,7 +176,7 @@ bdk_dram_address_construct_info(bdk_node_t node, int lmc, int dimm,
BDK_CSR_INIT(l2c_ctl, node, BDK_L2C_CTL);
int new_lmc = lmc;
if (!l2c_ctl.s.disidxalias)
- new_lmc ^= EXTRACT(address, bitno, xbits) ^ EXTRACT(address, 12, xbits);
+ new_lmc ^= EXTRACT(address, bitno, xbits) ^ EXTRACT(address, 12, xbits);
INSERT(address, new_lmc, 7, xbits);
return address;
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c
index 3465c5d98b..5c104231dc 100644
--- a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-config.c
@@ -37,7 +37,9 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
-#include <unistd.h>
+#include <string.h>
+#include <libbdk-hal/bdk-config.h>
+#include <libbdk-hal/bdk-l2c.h>
BDK_REQUIRE_DEFINE(DRAM_CONFIG);
@@ -73,22 +75,6 @@ int bdk_dram_config(int node, int ddr_clock_override)
}
/**
- * Do DRAM configuration tuning
- *
- * @param node Node to tune
- *
- * @return Success or Fail
- */
-int bdk_dram_tune(int node)
-{
- int ret;
- BDK_TRACE(DRAM, "N%d: Starting DRAM tuning\n", node);
- ret = libdram_tune(node);
- BDK_TRACE(DRAM, "N%d: DRAM tuning returned %d\n", node, ret);
- return ret;
-}
-
-/**
* Do all the DRAM Margin tests
*
* @param node Node to test
@@ -144,7 +130,9 @@ uint64_t bdk_dram_get_top_of_bdk(void)
* the address to make it a physical offset. Doing this simplifies the
* address checks and calculations which only work with physical offsets.
*/
- uint64_t top_of_bdk = (bdk_ptr_to_phys(sbrk(0)) & bdk_build_mask(40));
+ /* FIXME(dhendrix): we only care about node 0 */
+// uint64_t top_of_bdk = (bdk_ptr_to_phys(sbrk(0)) & bdk_build_mask(40));
+ uint64_t top_of_bdk = 0;
uint64_t l2_size = bdk_l2c_get_cache_size_bytes(bdk_numa_master());
if (top_of_bdk <= l2_size)
{
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c
index 122afb2a18..8cd4594818 100644
--- a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-size.c
@@ -37,6 +37,8 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
+#include <libbdk-hal/bdk-utils.h>
+
/**
* Return the number of LMC controllers in use
@@ -92,7 +94,7 @@ static int __bdk_dram_is_lmc_in_dreset(bdk_node_t node, int lmc)
*
* @param node Node to probe
*
- */
+ */
uint32_t __bdk_dram_get_row_mask(bdk_node_t node, int lmc)
{
// PROTECT!!!
@@ -108,7 +110,7 @@ uint32_t __bdk_dram_get_row_mask(bdk_node_t node, int lmc)
*
* @param node Node to probe
*
- */
+ */
uint32_t __bdk_dram_get_col_mask(bdk_node_t node, int lmc)
{
// PROTECT!!!
@@ -124,7 +126,7 @@ uint32_t __bdk_dram_get_col_mask(bdk_node_t node, int lmc)
*
* @param node Node to probe
*
- */
+ */
// all DDR3, and DDR4 x16 today, use only 3 bank bits; DDR4 x4 and x8 always have 4 bank bits
// NOTE: this will change in the future, when DDR4 x16 devices can come with 16 banks!! FIXME!!
int __bdk_dram_get_num_bank_bits(bdk_node_t node, int lmc)
@@ -181,9 +183,6 @@ int __bdk_dram_is_rdimm(bdk_node_t node, int lmc)
*/
uint64_t bdk_dram_get_size_mbytes(int node)
{
- if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
- return 2 << 10; /* 2GB is available on t88 and t81
- ** some t83 models have 8gb, but it is too long to init */
/* Return zero if dram isn't enabled */
if (!__bdk_is_dram_enabled(node))
return 0;
@@ -192,21 +191,13 @@ uint64_t bdk_dram_get_size_mbytes(int node)
const int num_dram_controllers = __bdk_dram_get_num_lmc(node);
for (int lmc = 0; lmc < num_dram_controllers; lmc++)
{
- if (bdk_is_platform(BDK_PLATFORM_ASIM))
- {
- /* Asim doesn't simulate the rank detection, fake 4GB per controller */
- memsize += 4ull << 30;
- }
- else
- {
- // PROTECT!!!
- if (__bdk_dram_is_lmc_in_dreset(node, lmc)) // check LMCn
- return 0;
- BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(lmc));
- int num_ranks = bdk_pop(lmcx_config.s.init_status);
- uint64_t rank_size = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena);
- memsize += rank_size * num_ranks;
- }
+ // PROTECT!!!
+ if (__bdk_dram_is_lmc_in_dreset(node, lmc)) // check LMCn
+ return 0;
+ BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(lmc));
+ int num_ranks = bdk_pop(lmcx_config.s.init_status);
+ uint64_t rank_size = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena);
+ memsize += rank_size * num_ranks;
}
return memsize >> 20;
}
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c
index 9fe8570454..834ade4c40 100644
--- a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-addrbus.c
@@ -37,6 +37,7 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include "bdk.h"
+#include <libbdk-hal/bdk-utils.h>
/* Used for all memory reads/writes related to the test */
#define READ64(address) __bdk_dram_read64(address)
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c
index c3fa1ffd8d..b7a6f96880 100644
--- a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-databus.c
@@ -38,6 +38,8 @@
***********************license end**************************************/
#include "bdk.h"
+#include <libbdk-hal/bdk-utils.h>
+
/* Used for all memory reads/writes related to the test */
#define READ64(address) __bdk_dram_read64(address)
#define WRITE64(address, data) __bdk_dram_write64(address, data)
@@ -97,7 +99,7 @@ static int read_data_bus_burst(uint64_t address, int bursts)
*/
static int write_data_bus_burst(uint64_t address, int bursts)
{
- BDK_TRACE(DRAM_TEST, "[0x%016lx:0x%016lx] Writing incrementing digits\n",
+ BDK_TRACE(DRAM_TEST, "[0x%016llx:0x%016llx] Writing incrementing digits\n",
address, address + 127);
/* Loop over the burst so people using a scope have time to capture
traces */
@@ -164,7 +166,7 @@ static int read_data_bus_walk(uint64_t address, int burst, uint64_t pattern)
*/
static void write_data_bus_walk(uint64_t address, int burst, uint64_t pattern)
{
- BDK_TRACE(DRAM_TEST, "[0x%016lx:0x%016lx] Writing walking pattern 0x%016lx\n",
+ BDK_TRACE(DRAM_TEST, "[0x%016llx:0x%016llx] Writing walking pattern 0x%016llx\n",
address, address + 127, pattern);
uint64_t a = address;
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c
index 46e205dd80..c89ef76103 100644
--- a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-fastscan.c
@@ -37,6 +37,7 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include "bdk.h"
+#include <libbdk-hal/bdk-utils.h>
/* Used for all memory reads/writes related to the test */
#define READ64(address) __bdk_dram_read64(address)
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c
index e6c4b57721..6315172101 100644
--- a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test-patfil.c
@@ -37,6 +37,8 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include "bdk.h"
+#include <libbdk-hal/bdk-rng.h>
+#include <libbdk-hal/bdk-utils.h>
// choose prediction-based algorithms for mem_xor and mem_rows tests
#define USE_PREDICTION_CODE_VERSIONS 1 // change to 0 to go back to the original versions
@@ -286,7 +288,7 @@ static int test_mem_march_c(uint64_t area, uint64_t max_address, uint64_t patter
int failures = 0;
/* Pass 1 ascending addresses, fill memory with pattern. */
- BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase1, address incrementing, pattern 0x%016lx\n", area, max_address-1, pattern);
+ BDK_TRACE(DRAM_TEST, " [0x%016llx:0x%016llx] Phase1, address incrementing, pattern 0x%016llx\n", area, max_address-1, pattern);
for (uint64_t address = area; address < max_address; address += 8)
WRITE64(address, pattern);
@@ -294,7 +296,7 @@ static int test_mem_march_c(uint64_t area, uint64_t max_address, uint64_t patter
BDK_DCACHE_INVALIDATE;
/* Pass 2: ascending addresses, read pattern and write ~pattern */
- BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase2, address incrementing, pattern 0x%016lx\n", area, max_address-1, ~pattern);
+ BDK_TRACE(DRAM_TEST, " [0x%016llx:0x%016llx] Phase2, address incrementing, pattern 0x%016llx\n", area, max_address-1, ~pattern);
for (uint64_t address = area; address < max_address; address += 8)
{
uint64_t data = READ64(address);
@@ -307,7 +309,7 @@ static int test_mem_march_c(uint64_t area, uint64_t max_address, uint64_t patter
BDK_DCACHE_INVALIDATE;
/* Pass 3: ascending addresses, read ~pattern and write pattern. */
- BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase3, address incrementing, pattern 0x%016lx\n", area, max_address-1, pattern);
+ BDK_TRACE(DRAM_TEST, " [0x%016llx:0x%016llx] Phase3, address incrementing, pattern 0x%016llx\n", area, max_address-1, pattern);
for (uint64_t address = area; address < max_address; address += 8)
{
uint64_t data = READ64(address);
@@ -320,7 +322,7 @@ static int test_mem_march_c(uint64_t area, uint64_t max_address, uint64_t patter
BDK_DCACHE_INVALIDATE;
/* Pass 4: descending addresses, read pattern and write ~pattern. */
- BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase4, address decrementing, pattern 0x%016lx\n", area, max_address-1, ~pattern);
+ BDK_TRACE(DRAM_TEST, " [0x%016llx:0x%016llx] Phase4, address decrementing, pattern 0x%016llx\n", area, max_address-1, ~pattern);
uint64_t end = max_address - sizeof(uint64_t);
for (uint64_t address = end; address >= area; address -= 8)
{
@@ -334,7 +336,7 @@ static int test_mem_march_c(uint64_t area, uint64_t max_address, uint64_t patter
BDK_DCACHE_INVALIDATE;
/* Pass 5: descending addresses, read ~pattern and write pattern. */
- BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase5, address decrementing, pattern 0x%016lx\n", area, max_address-1, pattern);
+ BDK_TRACE(DRAM_TEST, " [0x%016llx:0x%016llx] Phase5, address decrementing, pattern 0x%016llx\n", area, max_address-1, pattern);
for (uint64_t address = end; address >= area; address -= 8)
{
uint64_t data = READ64(address);
@@ -347,7 +349,7 @@ static int test_mem_march_c(uint64_t area, uint64_t max_address, uint64_t patter
BDK_DCACHE_INVALIDATE;
/* Pass 6: ascending addresses, read pattern. */
- BDK_TRACE(DRAM_TEST, " [0x%016lx:0x%016lx] Phase6, address incrementing\n", area, max_address-1);
+ BDK_TRACE(DRAM_TEST, " [0x%016llx:0x%016llx] Phase6, address incrementing\n", area, max_address-1);
for (uint64_t address = area; address < max_address; address += 8)
{
uint64_t data = READ64(address);
@@ -660,7 +662,7 @@ int __bdk_dram_test_mem_xor(uint64_t area, uint64_t max_address, int bursts)
WRITE64(address1 , p);
WRITE64(address1 + offset, p);
address1 += 8;
- p += pincr;
+ p += pincr;
}
__bdk_dram_flush_to_mem_range(area, max_address);
BDK_DCACHE_INVALIDATE;
@@ -674,7 +676,7 @@ int __bdk_dram_test_mem_xor(uint64_t area, uint64_t max_address, int bursts)
address1 = area;
this_pattern = bdk_rng_get_random64();
- pattern2 ^= this_pattern;
+ pattern2 ^= this_pattern;
while (address1 < area2)
{
@@ -693,13 +695,13 @@ int __bdk_dram_test_mem_xor(uint64_t area, uint64_t max_address, int bursts)
BDK_DCACHE_INVALIDATE;
/* Look for differences from the expected pattern in both areas.
- * If there is a mismatch, reset the appropriate memory location
- * with the correct pattern. Failing to do so
+ * If there is a mismatch, reset the appropriate memory location
+ * with the correct pattern. Failing to do so
* means that on all subsequent passes the erroring locations
- * will be out of sync, giving spurious errors.
+ * will be out of sync, giving spurious errors.
*/
address1 = area;
- ppred = pbase;
+ ppred = pbase;
while (address1 < area2)
{
@@ -712,21 +714,21 @@ int __bdk_dram_test_mem_xor(uint64_t area, uint64_t max_address, int bursts)
d1 = READ64(address1 );
d2 = READ64(address1 + offset);
- p = ppred ^ pattern2;
+ p = ppred ^ pattern2;
if (bdk_unlikely(d1 != p)) {
- failures += __bdk_dram_retry_failure(burst, address1, d1, p);
+ failures += __bdk_dram_retry_failure(burst, address1, d1, p);
// Synchronize the area, adjusting for the error.
//WRITE64(address1, p); // retries should do this
}
if (bdk_unlikely(d2 != p)) {
- failures += __bdk_dram_retry_failure(burst, address1 + offset, d2, p);
+ failures += __bdk_dram_retry_failure(burst, address1 + offset, d2, p);
// Synchronize the area, adjusting for the error.
//WRITE64(address1 + offset, p); // retries should do this
}
address1 += 8;
- ppred += pincr;
+ ppred += pincr;
} /* while (address1 < area2) */
} /* for (int burst = 0; burst < bursts; burst++) */
@@ -761,7 +763,7 @@ int __bdk_dram_test_mem_rows(uint64_t area, uint64_t max_address, int bursts)
WRITE64(address1 , pattern2);
WRITE64(address1 + offset, pattern2);
address1 += 8;
- pattern2 = ~pattern2; // flip for next slots
+ pattern2 = ~pattern2; // flip for next slots
}
__bdk_dram_flush_to_mem_range(area, max_address);
@@ -771,7 +773,7 @@ int __bdk_dram_test_mem_rows(uint64_t area, uint64_t max_address, int bursts)
for (burst = 0; burst < bursts; burst++)
{
/* Invert the data, applying the change to both memory areas. Thus on
- * alternate passes, the data flips from 0 to 1 and vice versa.
+ * alternate passes, the data flips from 0 to 1 and vice versa.
*/
address1 = area;
@@ -796,8 +798,8 @@ int __bdk_dram_test_mem_rows(uint64_t area, uint64_t max_address, int bursts)
* out of sync giving spurious errors.
*/
address1 = area;
- pattern1 = ~pattern1; // flip the starting pattern to match above loop
- pattern2 = pattern1; // slots have been flipped by the above loop
+ pattern1 = ~pattern1; // flip the starting pattern to match above loop
+ pattern2 = pattern1; // slots have been flipped by the above loop
while (address1 < area2)
{
@@ -810,18 +812,18 @@ int __bdk_dram_test_mem_rows(uint64_t area, uint64_t max_address, int bursts)
d2 = READ64(address1 + offset);
if (bdk_unlikely(d1 != pattern2)) {
- failures += __bdk_dram_retry_failure(burst, address1, d1, pattern2);
+ failures += __bdk_dram_retry_failure(burst, address1, d1, pattern2);
// Synchronize the area, adjusting for the error.
//WRITE64(address1, pattern2); // retries should do this
}
if (bdk_unlikely(d2 != pattern2)) {
- failures += __bdk_dram_retry_failure(burst, address1 + offset, d2, pattern2);
+ failures += __bdk_dram_retry_failure(burst, address1 + offset, d2, pattern2);
// Synchronize the two areas, adjusting for the error.
//WRITE64(address1 + offset, pattern2); // retries should do this
}
address1 += 8;
- pattern2 = ~pattern2; // flip for next pair of slots
+ pattern2 = ~pattern2; // flip for next pair of slots
}
}
return failures;
diff --git a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c
index 53137502fc..4f54b69516 100644
--- a/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c
+++ b/src/vendorcode/cavium/bdk/libbdk-dram/bdk-dram-test.c
@@ -40,6 +40,14 @@
#include "libbdk-arch/bdk-csrs-gti.h"
#include "libbdk-arch/bdk-csrs-ocx.h"
+#include <bdk-minimal.h> /* for printf --> printk */
+#include <libbdk-dram/bdk-dram-test.h>
+#include <libbdk-hal/bdk-atomic.h>
+#include <libbdk-hal/bdk-clock.h>
+#include <libbdk-hal/bdk-utils.h>
+#include <libbdk-os/bdk-init.h>
+#include <libbdk-os/bdk-thread.h>
+
/* This code is an optional part of the BDK. It is only linked in
if BDK_REQUIRE() needs it */
BDK_REQUIRE_DEFINE(DRAM_TEST);
@@ -170,7 +178,7 @@ static void dram_test_thread(int arg, void *arg1)
start_address = bdk_numa_get_address(test_node, start_address);
end_address = bdk_numa_get_address(test_node, end_address);
/* Test the region */
- BDK_TRACE(DRAM_TEST, " Node %d, core %d, Testing [0x%011lx:0x%011lx]\n",
+ BDK_TRACE(DRAM_TEST, " Node %d, core %d, Testing [0x%011llx:0x%011llx]\n",
bdk_numa_local(), bdk_get_core_num() & 127, start_address, end_address - 1);
test_info->test_func(start_address, end_address, bursts);
@@ -197,7 +205,7 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
{
/* Figure out the addess of the byte one off the top of memory */
uint64_t max_address = bdk_dram_get_size_mbytes(bdk_numa_local());
- BDK_TRACE(DRAM_TEST, "DRAM available per node: %lu MB\n", max_address);
+ BDK_TRACE(DRAM_TEST, "DRAM available per node: %llu MB\n", max_address);
max_address <<= 20;
/* Make sure we have enough */
@@ -218,13 +226,13 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
if (max_address > (1ull << 43)) /* 43 bits in CN9XXX */
max_address = 1ull << 43;
}
- BDK_TRACE(DRAM_TEST, "DRAM max address: 0x%011lx\n", max_address-1);
+ BDK_TRACE(DRAM_TEST, "DRAM max address: 0x%011llx\n", max_address-1);
/* Make sure the start address is lower than the top of memory */
if (start_address >= max_address)
{
- bdk_error("Start address is larger than the amount of memory: 0x%011lx versus 0x%011lx\n",
- start_address, max_address);
+ bdk_error("Start address is larger than the amount of memory: 0x%011llx versus 0x%011llx\n",
+ start_address, max_address);
return -1;
}
if (length == (uint64_t)-1)
@@ -260,8 +268,8 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
}
}
if (!(flags & BDK_DRAM_TEST_NO_BANNERS))
- printf("Starting Test \"%s\" for [0x%011lx:0x%011lx] using %d core(s)\n",
- test_info->name, start_address, end_address - 1, total_cores_all_nodes);
+ printf("Starting Test \"%s\" for [0x%011llx:0x%011llx] using %d core(s)\n",
+ test_info->name, start_address, end_address - 1, total_cores_all_nodes);
/* Remember the LMC perf counters for stats after the test */
uint64_t start_dram_dclk[BDK_NUMA_MAX_NODES][4];
@@ -332,15 +340,15 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
/* Poke the watchdog */
BDK_CSR_WRITE(bdk_numa_local(), BDK_GTI_CWD_POKEX(0), 0);
- /* disable progress output when batch mode is ON */
+ /* disable progress output when batch mode is ON */
if (!(flags & BDK_DRAM_TEST_NO_PROGRESS)) {
/* Report progress percentage */
int percent_x10 = (work_address - start_address) * 1000 / (end_address - start_address);
- printf(" %3d.%d%% complete, testing [0x%011lx:0x%011lx]\r",
+ printf(" %3d.%d%% complete, testing [0x%011llx:0x%011llx]\r",
percent_x10 / 10, percent_x10 % 10, work_address, work_address + size - 1);
fflush(stdout);
- }
+ }
work_address += size;
@@ -357,17 +365,8 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
{
if (per_node >= max_cores)
break;
- int run_node = (flags & BDK_DRAM_TEST_USE_CCPI) ? node ^ 1 : node;
BDK_TRACE(DRAM_TEST, "Starting thread %d on node %d for memory test\n", per_node, node);
- if (bdk_thread_create(run_node, 0, dram_test_thread, per_node, (void *)test_info, 0))
- {
- bdk_error("Failed to create thread %d for memory test on node %d\n", per_node, node);
- }
- else
- {
- per_node++;
- total_count++;
- }
+ dram_test_thread(per_node, (void *)test_info);
}
}
}
@@ -384,7 +383,6 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
uint64_t period = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) * TIMEOUT_SECS; // FIXME?
uint64_t timeout = bdk_clock_get_count(BDK_CLOCK_TIME) + period;
do {
- bdk_thread_yield();
cur_count = bdk_atomic_get64(&dram_test_thread_done);
cur_time = bdk_clock_get_count(BDK_CLOCK_TIME);
if (cur_time >= timeout) {
@@ -430,7 +428,7 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
if (!(flags & BDK_DRAM_TEST_NO_PROGRESS)) {
/* Report progress percentage as complete */
- printf(" %3d.%d%% complete, testing [0x%011lx:0x%011lx]\n",
+ printf(" %3d.%d%% complete, testing [0x%011llx:0x%011llx]\n",
100, 0, start_address, end_address - 1);
fflush(stdout);
}
@@ -450,7 +448,7 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
if (dclk == 0)
dclk = 1;
uint64_t percent_x10 = ops * 1000 / dclk;
- printf(" Node %d, LMC%d: ops %lu, cycles %lu, used %lu.%lu%%\n",
+ printf(" Node %d, LMC%d: ops %llu, cycles %llu, used %llu.%llu%%\n",
node, i, ops, dclk, percent_x10 / 10, percent_x10 % 10);
}
}
@@ -471,7 +469,7 @@ static int __bdk_dram_run_test(const dram_test_info_t *test_info, uint64_t start
if (total == 0)
continue;
uint64_t percent_x10 = busy * 1000 / total;
- printf(" Node %d, CCPI%d: busy %lu, total %lu, used %lu.%lu%%\n",
+ printf(" Node %d, CCPI%d: busy %llu, total %llu, used %llu.%llu%%\n",
node, link, busy, total, percent_x10 / 10, percent_x10 % 10);
}
}
@@ -543,11 +541,13 @@ int bdk_dram_test(int test, uint64_t start_address, uint64_t length, bdk_dram_te
/* Clear ECC error counters before starting the test */
for (int chan = 0; chan < BDK_MAX_MEM_CHANS; chan++) {
- bdk_atomic_set64(&__bdk_dram_ecc_single_bit_errors[chan], 0);
- bdk_atomic_set64(&__bdk_dram_ecc_double_bit_errors[chan], 0);
+ bdk_atomic_set64(&__bdk_dram_ecc_single_bit_errors[chan], 0);
+ bdk_atomic_set64(&__bdk_dram_ecc_double_bit_errors[chan], 0);
}
/* Make sure at least one core from each node is running */
+ /* FIXME(dhendrix): we only care about core0 on node0 for now */
+#if 0
for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
{
if (flags & (1<<node))
@@ -557,17 +557,11 @@ int bdk_dram_test(int test, uint64_t start_address, uint64_t length, bdk_dram_te
bdk_init_cores(use_node, 1);
}
}
+#endif
/* This returns any data compare errors found */
int errors = __bdk_dram_run_test(&TEST_INFO[test], start_address, length, flags);
- /* Poll for any errors right now to make sure any ECC errors are reported */
- for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
- {
- if (bdk_numa_exists(node) && bdk_error_check)
- bdk_error_check(node);
- }
-
/* Check ECC error counters after the test */
int64_t ecc_single = 0;
int64_t ecc_double = 0;
@@ -582,14 +576,14 @@ int bdk_dram_test(int test, uint64_t start_address, uint64_t length, bdk_dram_te
/* Always print any ECC errors */
if (ecc_single || ecc_double)
{
- printf("Test \"%s\": ECC errors, %ld/%ld/%ld/%ld corrected, %ld/%ld/%ld/%ld uncorrected\n",
- name,
- ecc_single_errs[0], ecc_single_errs[1], ecc_single_errs[2], ecc_single_errs[3],
- ecc_double_errs[0], ecc_double_errs[1], ecc_double_errs[2], ecc_double_errs[3]);
+ printf("Test \"%s\": ECC errors, %lld/%lld/%lld/%lld corrected, %lld/%lld/%lld/%lld uncorrected\n",
+ name,
+ ecc_single_errs[0], ecc_single_errs[1], ecc_single_errs[2], ecc_single_errs[3],
+ ecc_double_errs[0], ecc_double_errs[1], ecc_double_errs[2], ecc_double_errs[3]);
}
if (errors || ecc_double || ecc_single) {
- printf("Test \"%s\": FAIL: %ld single, %ld double, %d compare errors\n",
- name, ecc_single, ecc_double, errors);
+ printf("Test \"%s\": FAIL: %lld single, %lld double, %d compare errors\n",
+ name, ecc_single, ecc_double, errors);
}
else
BDK_TRACE(DRAM_TEST, "Test \"%s\": PASS\n", name);
@@ -610,7 +604,7 @@ static void __bdk_dram_report_address_decode(uint64_t address, char *buffer, int
bdk_dram_address_extract_info(address, &node, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
snprintf(buffer, len, "[0x%011lx] (N%d,LMC%d,DIMM%d,Rank%d/%d,Bank%02d,Row 0x%05x,Col 0x%04x)",
- address, node, lmc, dimm, prank, lrank, bank, row, col);
+ address, node, lmc, dimm, prank, lrank, bank, row, col);
}
/**
@@ -632,22 +626,22 @@ static void __bdk_dram_report_address_decode_new(uint64_t address, uint64_t orig
for (int i = 0; i < 8; i++) {
bits = xor & 0xffULL;
xor >>= 8;
- if (bits) {
- if (byte != 8) {
- byte = 9; // means more than 1 byte-lane was present
+ if (bits) {
+ if (byte != 8) {
+ byte = 9; // means more than 1 byte-lane was present
print_bits = orig_xor; // print the full original
- break; // quit now
- } else {
- byte = i; // keep checking
+ break; // quit now
+ } else {
+ byte = i; // keep checking
print_bits = bits;
- }
- }
+ }
+ }
}
-
+
bdk_dram_address_extract_info(address, &node, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
snprintf(buffer, len, "N%d.LMC%d: CMP byte %d xor 0x%02lx (DIMM%d,Rank%d/%d,Bank%02d,Row 0x%05x,Col 0x%04x)[0x%011lx]",
- node, lmc, byte, print_bits, dimm, prank, lrank, bank, row, col, address);
+ node, lmc, byte, print_bits, dimm, prank, lrank, bank, row, col, address);
}
/**
@@ -671,15 +665,15 @@ void __bdk_dram_report_error(uint64_t address, uint64_t data, uint64_t correct,
if (errors < MAX_ERRORS_TO_REPORT)
{
- if (fails < 0) {
- snprintf(failbuf, sizeof(failbuf), " ");
- } else {
+ if (fails < 0) {
+ snprintf(failbuf, sizeof(failbuf), " ");
+ } else {
int percent_x10 = fails * 1000 / RETRY_LIMIT;
- snprintf(failbuf, sizeof(failbuf), ", retries failed %3d.%d%%",
+ snprintf(failbuf, sizeof(failbuf), ", retries failed %3d.%d%%",
percent_x10 / 10, percent_x10 % 10);
- }
+ }
- __bdk_dram_report_address_decode_new(address, xor, buffer, sizeof(buffer));
+ __bdk_dram_report_address_decode_new(address, xor, buffer, sizeof(buffer));
bdk_error("%s%s\n", buffer, failbuf);
if (errors == MAX_ERRORS_TO_REPORT-1)
@@ -702,26 +696,26 @@ void __bdk_dram_report_error(uint64_t address, uint64_t data, uint64_t correct,
* @return Zero if a message was logged, non-zero if the error limit has been reached
*/
void __bdk_dram_report_error2(uint64_t address1, uint64_t data1, uint64_t address2, uint64_t data2,
- int burst, int fails)
+ int burst, int fails)
{
int64_t errors = bdk_atomic_fetch_and_add64(&dram_test_thread_errors, 1);
if (errors < MAX_ERRORS_TO_REPORT)
{
- char buffer1[80], buffer2[80];
- char failbuf[32];
-
- if (fails < 0) {
- snprintf(failbuf, sizeof(failbuf), " ");
- } else {
- snprintf(failbuf, sizeof(failbuf), ", retried %d failed %d", RETRY_LIMIT, fails);
- }
- __bdk_dram_report_address_decode(address1, buffer1, sizeof(buffer1));
- __bdk_dram_report_address_decode(address2, buffer2, sizeof(buffer2));
-
- bdk_error("compare: data1: 0x%016lx, xor: 0x%016lx%s\n"
- " %s\n %s\n",
- data1, data1 ^ data2, failbuf,
- buffer1, buffer2);
+ char buffer1[80], buffer2[80];
+ char failbuf[32];
+
+ if (fails < 0) {
+ snprintf(failbuf, sizeof(failbuf), " ");
+ } else {
+ snprintf(failbuf, sizeof(failbuf), ", retried %d failed %d", RETRY_LIMIT, fails);
+ }
+ __bdk_dram_report_address_decode(address1, buffer1, sizeof(buffer1));
+ __bdk_dram_report_address_decode(address2, buffer2, sizeof(buffer2));
+
+ bdk_error("compare: data1: 0x%016llx, xor: 0x%016llx%s\n"
+ " %s\n %s\n",
+ data1, data1 ^ data2, failbuf,
+ buffer1, buffer2);
if (errors == MAX_ERRORS_TO_REPORT-1)
bdk_error("No further DRAM errors will be reported\n");
@@ -741,23 +735,23 @@ int __bdk_dram_retry_failure(int burst, uint64_t address, uint64_t data, uint64_
// bypass the retries if we are already over the limit...
if (bdk_atomic_get64(&dram_test_thread_errors) < MAX_ERRORS_TO_REPORT) {
- /* Try re-reading the memory location. A transient error may fail
- * on one read and work on another. Keep on retrying even when a
- * read succeeds.
- */
- for (int i = 0; i < RETRY_LIMIT; i++) {
+ /* Try re-reading the memory location. A transient error may fail
+ * on one read and work on another. Keep on retrying even when a
+ * read succeeds.
+ */
+ for (int i = 0; i < RETRY_LIMIT; i++) {
- __bdk_dram_flush_to_mem(address);
- BDK_DCACHE_INVALIDATE;
+ __bdk_dram_flush_to_mem(address);
+ BDK_DCACHE_INVALIDATE;
- uint64_t new = __bdk_dram_read64(address);
+ uint64_t new = __bdk_dram_read64(address);
- if (new != expected) {
- refail++;
- }
- }
+ if (new != expected) {
+ refail++;
+ }
+ }
} else
- refail = -1;
+ refail = -1;
// this will increment the errors always, but maybe not print...
__bdk_dram_report_error(address, data, expected, burst, refail);
@@ -779,20 +773,20 @@ int __bdk_dram_retry_failure2(int burst, uint64_t address1, uint64_t data1, uint
// bypass the retries if we are already over the limit...
if (bdk_atomic_get64(&dram_test_thread_errors) < MAX_ERRORS_TO_REPORT) {
- for (int i = 0; i < RETRY_LIMIT; i++) {
- __bdk_dram_flush_to_mem(address1);
- __bdk_dram_flush_to_mem(address2);
- BDK_DCACHE_INVALIDATE;
+ for (int i = 0; i < RETRY_LIMIT; i++) {
+ __bdk_dram_flush_to_mem(address1);
+ __bdk_dram_flush_to_mem(address2);
+ BDK_DCACHE_INVALIDATE;
- uint64_t d1 = __bdk_dram_read64(address1);
- uint64_t d2 = __bdk_dram_read64(address2);
+ uint64_t d1 = __bdk_dram_read64(address1);
+ uint64_t d2 = __bdk_dram_read64(address2);
- if (d1 != d2) {
- refail++;
- }
- }
+ if (d1 != d2) {
+ refail++;
+ }
+ }
} else
- refail = -1;
+ refail = -1;
// this will increment the errors always, but maybe not print...
__bdk_dram_report_error2(address1, data1, address2, data2, burst, refail);
@@ -854,7 +848,7 @@ void bdk_dram_test_inject_error(uint64_t address, int bit)
BDK_CSR_WRITE(node, BDK_LMCX_CHAR_MASK2(lmc), 0);
/* Read back the data, which should now cause an error */
- printf("Loading the injected error address 0x%lx, node=%d, lmc=%d, dimm=%d, rank=%d/%d, bank=%d, row=%d, col=%d\n",
+ printf("Loading the injected error address 0x%llx, node=%d, lmc=%d, dimm=%d, rank=%d/%d, bank=%d, row=%d, col=%d\n",
address, node, lmc, dimm, prank, lrank, bank, row, col);
__bdk_dram_read64(aligned_address);
}
diff --git a/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-mdio.c b/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-mdio.c
new file mode 100644
index 0000000000..7f13d7ca32
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-mdio.c
@@ -0,0 +1,351 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <libbdk-arch/bdk-csrs-pccpf.h>
+#include <libbdk-arch/bdk-csrs-smi.h>
+#include <libbdk-hal/device/bdk-device.h>
+#include <libbdk-hal/bdk-mdio.h>
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(MDIO);
+
+/* To maintain backwards compatibility for the old MDIO API we need
+ to lookup the MDIO device on the ECAM bus by ID. This defines
+ the ID */
+#define MDIO_DEVID ((BDK_PCC_PROD_E_GEN << 24) | BDK_PCC_VENDOR_E_CAVIUM | (BDK_PCC_DEV_IDL_E_SMI << 16))
+
+#define BDK_MDIO_TIMEOUT 100000 /* 100 millisec */
+
+/* Operating request encodings. */
+#define MDIO_CLAUSE_22_WRITE 0
+#define MDIO_CLAUSE_22_READ 1
+
+#define MDIO_CLAUSE_45_ADDRESS 0
+#define MDIO_CLAUSE_45_WRITE 1
+#define MDIO_CLAUSE_45_READ_INC 2
+#define MDIO_CLAUSE_45_READ 3
+
+/**
+ * Helper function to put MDIO interface into clause 45 mode
+ *
+ * @param bus_id
+ */
+static void __bdk_mdio_set_clause45_mode(const bdk_device_t *device, int bus_id)
+{
+ bdk_smi_x_clk_t smi_clk;
+ /* Put bus into clause 45 mode */
+ smi_clk.u = BDK_BAR_READ(device, BDK_SMI_X_CLK(bus_id));
+ if (smi_clk.s.mode != 1)
+ {
+ smi_clk.s.mode = 1;
+ smi_clk.s.preamble = 1;
+ BDK_BAR_WRITE(device, BDK_SMI_X_CLK(bus_id), smi_clk.u);
+ }
+}
+
+/**
+ * Helper function to put MDIO interface into clause 22 mode
+ *
+ * @param bus_id
+ */
+static void __bdk_mdio_set_clause22_mode(const bdk_device_t *device, int bus_id)
+{
+ bdk_smi_x_clk_t smi_clk;
+ /* Put bus into clause 22 mode */
+ smi_clk.u = BDK_BAR_READ(device, BDK_SMI_X_CLK(bus_id));
+ if (smi_clk.s.mode != 0)
+ {
+ smi_clk.s.mode = 0;
+ BDK_BAR_WRITE(device, BDK_SMI_X_CLK(bus_id), smi_clk.u);
+ }
+}
+
+/**
+ * @INTERNAL
+ * Function to read SMIX_RD_DAT and check for timeouts. This
+ * code sequence is done fairly often, so put in in one spot.
+ *
+ * @param bus_id SMI/MDIO bus to read
+ *
+ * @return Value of SMIX_RD_DAT. pending will be set on
+ * a timeout.
+ */
+static bdk_smi_x_rd_dat_t __bdk_mdio_read_rd_dat(const bdk_device_t *device, int bus_id)
+{
+ bdk_smi_x_rd_dat_t smi_rd;
+ uint64_t done = bdk_clock_get_count(BDK_CLOCK_TIME) + (uint64_t)BDK_MDIO_TIMEOUT *
+ bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / 1000000;
+ do
+ {
+ smi_rd.u = BDK_BAR_READ(device, BDK_SMI_X_RD_DAT(bus_id));
+ } while (smi_rd.s.pending && (bdk_clock_get_count(BDK_CLOCK_TIME) < done));
+ return smi_rd;
+}
+
+
+/**
+ * Perform an MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ * support multiple busses.
+ * @param phy_id The MII phy id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+int bdk_mdio_read(bdk_node_t node, int bus_id, int phy_id, int location)
+{
+ const bdk_device_t *device = bdk_device_lookup(node, MDIO_DEVID, 0);
+ if (!device)
+ {
+ bdk_error("MDIO: ECAM device not found\n");
+ return -1;
+ }
+ bdk_smi_x_cmd_t smi_cmd;
+ bdk_smi_x_rd_dat_t smi_rd;
+
+ __bdk_mdio_set_clause22_mode(device, bus_id);
+
+ smi_cmd.u = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_22_READ;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = location;
+ BDK_BAR_WRITE(device, BDK_SMI_X_CMD(bus_id), smi_cmd.u);
+
+ smi_rd = __bdk_mdio_read_rd_dat(device, bus_id);
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ return -1;
+}
+
+
+/**
+ * Perform an MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ * support multiple busses.
+ * @param phy_id The MII phy id
+ * @param location Register location to write
+ * @param val Value to write
+ *
+ * @return -1 on error
+ * 0 on success
+ */
+int bdk_mdio_write(bdk_node_t node, int bus_id, int phy_id, int location, int val)
+{
+ const bdk_device_t *device = bdk_device_lookup(node, MDIO_DEVID, 0);
+ if (!device)
+ {
+ bdk_error("MDIO: ECAM device not found\n");
+ return -1;
+ }
+ bdk_smi_x_cmd_t smi_cmd;
+ bdk_smi_x_wr_dat_t smi_wr;
+
+ __bdk_mdio_set_clause22_mode(device, bus_id);
+
+ smi_wr.u = 0;
+ smi_wr.s.dat = val;
+ BDK_BAR_WRITE(device, BDK_SMI_X_WR_DAT(bus_id), smi_wr.u);
+
+ smi_cmd.u = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_22_WRITE;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = location;
+ BDK_BAR_WRITE(device, BDK_SMI_X_CMD(bus_id), smi_cmd.u);
+
+ if (BDK_BAR_WAIT_FOR_FIELD(device, BDK_SMI_X_WR_DAT(bus_id), pending, ==, 0, BDK_MDIO_TIMEOUT))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII read. This function is used to read PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ * support multiple busses.
+ * @param phy_id The MII phy id
+ * @param device MDIO Manageable Device (MMD) id
+ * @param location Register location to read
+ *
+ * @return Result from the read or -1 on failure
+ */
+
+int bdk_mdio_45_read(bdk_node_t node, int bus_id, int phy_id, int device, int location)
+{
+ const bdk_device_t *ecam_device = bdk_device_lookup(node, MDIO_DEVID, 0);
+ if (!ecam_device)
+ {
+ bdk_error("MDIO: ECAM device not found\n");
+ return -1;
+ }
+ bdk_smi_x_cmd_t smi_cmd;
+ bdk_smi_x_rd_dat_t smi_rd;
+ bdk_smi_x_wr_dat_t smi_wr;
+
+ __bdk_mdio_set_clause45_mode(ecam_device, bus_id);
+
+ smi_wr.u = 0;
+ smi_wr.s.dat = location;
+ BDK_BAR_WRITE(ecam_device, BDK_SMI_X_WR_DAT(bus_id), smi_wr.u);
+
+ smi_cmd.u = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ BDK_BAR_WRITE(ecam_device, BDK_SMI_X_CMD(bus_id), smi_cmd.u);
+
+ if (BDK_BAR_WAIT_FOR_FIELD(ecam_device, BDK_SMI_X_WR_DAT(bus_id), pending, ==, 0, BDK_MDIO_TIMEOUT))
+ {
+ bdk_error("bdk_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d TIME OUT(address)\n", bus_id, phy_id, device, location);
+ return -1;
+ }
+
+ smi_cmd.u = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_READ;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ BDK_BAR_WRITE(ecam_device, BDK_SMI_X_CMD(bus_id), smi_cmd.u);
+
+ smi_rd = __bdk_mdio_read_rd_dat(ecam_device, bus_id);
+ if (smi_rd.s.pending)
+ {
+ bdk_error("bdk_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d TIME OUT(data)\n", bus_id, phy_id, device, location);
+ return -1;
+ }
+
+ if (smi_rd.s.val)
+ return smi_rd.s.dat;
+ else
+ {
+ bdk_error("bdk_mdio_45_read: bus_id %d phy_id %2d device %2d register %2d INVALID READ\n", bus_id, phy_id, device, location);
+ return -1;
+ }
+}
+
+/**
+ * Perform an IEEE 802.3 clause 45 MII write. This function is used to write PHY
+ * registers controlling auto negotiation.
+ *
+ * @param bus_id MDIO bus number. Zero on most chips, but some chips (ex CN56XX)
+ * support multiple busses.
+ * @param phy_id The MII phy id
+ * @param device MDIO Manageable Device (MMD) id
+ * @param location Register location to write
+ * @param val Value to write
+ *
+ * @return -1 on error
+ * 0 on success
+ */
+int bdk_mdio_45_write(bdk_node_t node, int bus_id, int phy_id, int device, int location,
+ int val)
+{
+ const bdk_device_t *ecam_device = bdk_device_lookup(node, MDIO_DEVID, 0);
+ if (!ecam_device)
+ {
+ bdk_error("MDIO: ECAM device not found\n");
+ return -1;
+ }
+ bdk_smi_x_cmd_t smi_cmd;
+ bdk_smi_x_wr_dat_t smi_wr;
+
+ __bdk_mdio_set_clause45_mode(ecam_device, bus_id);
+
+ smi_wr.u = 0;
+ smi_wr.s.dat = location;
+ BDK_BAR_WRITE(ecam_device, BDK_SMI_X_WR_DAT(bus_id), smi_wr.u);
+
+ smi_cmd.u = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_ADDRESS;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ BDK_BAR_WRITE(ecam_device, BDK_SMI_X_CMD(bus_id), smi_cmd.u);
+
+ if (BDK_BAR_WAIT_FOR_FIELD(ecam_device, BDK_SMI_X_WR_DAT(bus_id), pending, ==, 0, BDK_MDIO_TIMEOUT))
+ return -1;
+
+ smi_wr.u = 0;
+ smi_wr.s.dat = val;
+ BDK_BAR_WRITE(ecam_device, BDK_SMI_X_WR_DAT(bus_id), smi_wr.u);
+
+ smi_cmd.u = 0;
+ smi_cmd.s.phy_op = MDIO_CLAUSE_45_WRITE;
+ smi_cmd.s.phy_adr = phy_id;
+ smi_cmd.s.reg_adr = device;
+ BDK_BAR_WRITE(ecam_device, BDK_SMI_X_CMD(bus_id), smi_cmd.u);
+
+ if (BDK_BAR_WAIT_FOR_FIELD(ecam_device, BDK_SMI_X_WR_DAT(bus_id), pending, ==, 0, BDK_MDIO_TIMEOUT))
+ return -1;
+
+ return 0;
+}
+
+/**
+ * MDIO init() function
+ *
+ * @param device MDIO/SMI to initialize
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_mdio_init(bdk_node_t node)
+{
+ const bdk_device_t *device = bdk_device_lookup(node, MDIO_DEVID, 0);
+ if (!device)
+ {
+ bdk_error("MDIO: ECAM device not found\n");
+ return -1;
+ }
+ /* Change drive strength bits to fix issues when a QLM cable
+ is connected, creating a long spur path */
+ BDK_CSR_MODIFY(c, device->node, BDK_SMI_DRV_CTL,
+ c.s.pctl = 7; /* 30 ohm */
+ c.s.nctl = 7); /* 30 ohm */
+
+ for (int i = 0; i < 2; i++)
+ BDK_BAR_MODIFY(c, device, BDK_SMI_X_EN(i), c.s.en = 1);
+
+ return 0;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c b/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c
index 8394ad8c5e..c3a71f79dc 100644
--- a/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c
+++ b/src/vendorcode/cavium/bdk/libbdk-driver/bdk-driver-rnm.c
@@ -22,7 +22,7 @@
 *
 * This Software, including technical data, may be subject to U.S. export
 * control laws, including the U.S. Export Administration Act and its
 * associated regulations, and may be subject to export or import
 * regulations in other countries.
*
* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
@@ -40,6 +41,10 @@
#include "libbdk-arch/bdk-csrs-pccpf.h"
#include "libbdk-arch/bdk-csrs-rnm.h"
+#include <libbdk-hal/bdk-rng.h>
+#include <libbdk-hal/device/bdk-device.h>
+#define RNG_DEVID ((BDK_PCC_PROD_E_GEN << 24) | BDK_PCC_VENDOR_E_CAVIUM | (BDK_PCC_DEV_IDL_E_RNM << 16))
+
BDK_REQUIRE_DEFINE(RNM);
/**
@@ -84,32 +89,26 @@ uint64_t bdk_rng_get_random64(void)
}
/**
- * The RNM probe function
- *
- * @param device RNM to probe
- *
- * @return Zero on success, negative on failure
- */
-static int probe(bdk_device_t *device)
-{
- bdk_device_rename(device, "N%d.RNM%d", device->node, device->instance);
- return 0;
-}
-
-/**
* RNM init() function
*
* @param device RNM to initialize
*
* @return Zero on success, negative on failure
*/
-static int init(bdk_device_t *device)
+int bdk_rng_init(bdk_node_t node)
{
+ const bdk_device_t *device = bdk_device_lookup(node, RNG_DEVID, 0);
+ if (!device)
+ {
+ bdk_error("RNM: ECAM device not found\n");
+ return -1;
+ }
BDK_BAR_MODIFY(c, device, BDK_RNM_CTL_STATUS,
c.s.ent_en = 1;
c.s.rng_en = 1);
- /* Read back after enable so we know it is done. Needed on t88 pass 2.0 emulator */
+ /* Read back after enable so we know it is done. Needed on t88 pass 2.0 emulator and t81 real hardware !!!! */
BDK_BAR_READ(device, BDK_RNM_CTL_STATUS);
+
/* Errata (RNM-22528) First consecutive reads to RNM_RANDOM return same
value. Before using the random entropy, read RNM_RANDOM at least once
and discard the data */
@@ -117,8 +116,3 @@ static int init(bdk_device_t *device)
return 0;
}
-bdk_driver_t __bdk_driver_rnm = {
- .id = (BDK_PCC_PROD_E_GEN << 24) | BDK_PCC_VENDOR_E_CAVIUM | (BDK_PCC_DEV_IDL_E_RNM << 16),
- .probe = probe,
- .init = init,
-};
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-version.h b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-access.c
index bb9b919777..ebced00715 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-version.h
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-access.c
@@ -36,24 +36,35 @@
* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-uaa.h"
+#include "libbdk-arch/bdk-csrs-rst.h"
/**
- * @file
+ * Perform a soft reset of the chip
*
- * Functions for identifying BDK build version.
- *
- * <hr>$Revision$<hr>
+ * @return
*/
+void bdk_reset_chip(bdk_node_t node)
+{
+ fflush(NULL);
+ /* Wait for TX fifo to empty */
+ while (1)
+ {
+ BDK_CSR_INIT(fr, node, BDK_UAAX_FR(0));
+ if (fr.s.txfe)
+ break;
+ }
-extern const char bdk_version_str[];
+ /* RST_OCX is not cleared by a chip reset. Clear it now to avoid repeated
+ resets due to CCPI state changes during reset */
+ BDK_CSR_WRITE(node, BDK_RST_OCX, 0);
+ BDK_CSR_READ(node, BDK_RST_OCX);
-/**
- * Return BDK version string
- *
- * @return BDK version string
- */
-static inline const char *bdk_version_string(void)
-{
- return bdk_version_str;
+ bdk_rst_soft_rst_t rst_soft_rst;
+ rst_soft_rst.u = 0;
+ rst_soft_rst.s.soft_rst = 1;
+ BDK_CSR_WRITE(node, BDK_RST_SOFT_RST, rst_soft_rst.u);
}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c
index f81285dffd..b8b0952de4 100644
--- a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-clock.c
@@ -37,123 +37,10 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
-#include "libbdk-arch/bdk-csrs-gti.h"
-#include "libbdk-arch/bdk-csrs-ocx.h"
-
-/**
- * Called in __bdk_init to setup the global timer
- */
-void bdk_clock_setup(bdk_node_t node)
-{
- const bdk_node_t local_node = bdk_numa_local();
-
- /* Check if the counter was already setup */
- BDK_CSR_INIT(cntcr, node, BDK_GTI_CC_CNTCR);
- if (cntcr.s.en)
- return;
-
- /* Configure GTI to tick at BDK_GTI_RATE */
- uint64_t sclk = bdk_clock_get_rate(node, BDK_CLOCK_SCLK);
- uint64_t inc = (BDK_GTI_RATE << 32) / sclk;
- BDK_CSR_WRITE(node, BDK_GTI_CC_CNTRATE, inc);
- BDK_CSR_WRITE(node, BDK_GTI_CTL_CNTFRQ, BDK_GTI_RATE);
- cntcr.s.en = 1;
- if (node != local_node)
- {
- /* Synchronize with local node. Very simple set of counter, will be
- off a little */
- BDK_CSR_WRITE(node, BDK_GTI_CC_CNTCV, bdk_clock_get_count(BDK_CLOCK_TIME));
- }
- /* Enable the counter */
- BDK_CSR_WRITE(node, BDK_GTI_CC_CNTCR, cntcr.u);
- BDK_CSR_READ(node, BDK_GTI_CC_CNTCR);
-
- if (node != local_node)
- {
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
- {
- /* Assume the delay in each direction is the same, sync the counters */
- int64_t local1 = bdk_clock_get_count(BDK_CLOCK_TIME);
- int64_t remote = BDK_CSR_READ(node, BDK_GTI_CC_CNTCV);
- int64_t local2 = bdk_clock_get_count(BDK_CLOCK_TIME);
- int64_t expected = (local1 + local2) / 2;
- BDK_CSR_WRITE(node, BDK_GTI_CC_CNTADD, expected - remote);
- BDK_TRACE(INIT, "N%d.GTI: Clock synchronization with master\n"
- " expected: %ld, remote %ld\n"
- " Counter correction: %ld\n",
- node, expected, remote, expected - remote);
- }
- else
- {
- /* Due to errata TBD, we need to use OCX_PP_CMD to write
- GTI_CC_CNTMB in order for timestamps to update. These constants
- are the addresses we need for both local and remote GTI_CC_CNTMB */
- const uint64_t LOCAL_GTI_CC_CNTMB = bdk_numa_get_address(local_node, BDK_GTI_CC_CNTMB);
- const uint64_t REMOTE_GTI_CC_CNTMB = bdk_numa_get_address(node, BDK_GTI_CC_CNTMB);
- /* Build partial OCX_PP_CMD command used for writes. Address will
- be filled later */
- BDK_CSR_DEFINE(pp_cmd, BDK_OCX_PP_CMD);
- pp_cmd.u = 0;
- pp_cmd.s.wr_mask = 0xff;
-
- const int NUM_AVERAGE = 16; /* Choose a power of two to avoid division */
- int64_t local_to_remote_sum = 0;
- int64_t local_to_remote_min = 1000000;
- int64_t local_to_remote_max = -1000000;
- int64_t remote_to_local_sum = 0;
- int64_t remote_to_local_min = 1000000;
- int64_t remote_to_local_max = -1000000;
- for (int loop = 0; loop < NUM_AVERAGE; loop++)
- {
- /* Perform a write to the remote GTI_CC_CNTMB to cause timestamp
- update. We don't care about the value actually written */
- pp_cmd.s.addr = REMOTE_GTI_CC_CNTMB;
- BDK_CSR_WRITE(local_node, BDK_OCX_PP_CMD, pp_cmd.u);
- BDK_CSR_READ(local_node, BDK_OCX_PP_CMD);
-
- int64_t remote = BDK_CSR_READ(node, BDK_GTI_CC_CNTMBTS);
- int64_t local = BDK_CSR_READ(local_node, BDK_GTI_CC_CNTMBTS);
- int64_t delta = remote - local;
-
- local_to_remote_sum += delta;
- if (delta < local_to_remote_min)
- local_to_remote_min = delta;
- if (delta > local_to_remote_max)
- local_to_remote_max = delta;
-
- /* Perform a write to the local GTI_CC_CNTMB to cause timestamp
- update. We don't care about the value actually written */
- pp_cmd.s.addr = LOCAL_GTI_CC_CNTMB;
- BDK_CSR_WRITE(node, BDK_OCX_PP_CMD, pp_cmd.u);
- BDK_CSR_READ(node, BDK_OCX_PP_CMD);
-
- remote = BDK_CSR_READ(node, BDK_GTI_CC_CNTMBTS);
- local = BDK_CSR_READ(local_node, BDK_GTI_CC_CNTMBTS);
- delta = local - remote;
-
- remote_to_local_sum += delta;
- if (delta < remote_to_local_min)
- remote_to_local_min = delta;
- if (delta > remote_to_local_max)
- remote_to_local_max = delta;
- }
- /* Calculate average, rounding to nearest */
- int64_t local_to_remote = (local_to_remote_sum + NUM_AVERAGE/2) / NUM_AVERAGE;
- int64_t remote_to_local = (remote_to_local_sum + NUM_AVERAGE/2) / NUM_AVERAGE;
- /* Calculate remote node offset */
- int64_t remote_offset = (remote_to_local - local_to_remote) / 2;
- BDK_CSR_WRITE(node, BDK_GTI_CC_CNTADD, remote_offset);
- BDK_TRACE(INIT, "N%d.GTI: Clock synchronization with master\n"
- " local -> remote: min %ld, avg %ld, max %ld\n"
- " remote -> local: min %ld, avg %ld, max %ld\n"
- " Counter correction: %ld\n",
- node,
- local_to_remote_min, local_to_remote, local_to_remote_max,
- remote_to_local_min, remote_to_local, remote_to_local_max,
- remote_offset);
- }
- }
-}
+#include <libbdk-arch/bdk-csrs-gti.h>
+#include <libbdk-arch/bdk-csrs-ocx.h>
+#include <libbdk-hal/bdk-clock.h>
+#include <libbdk-arch/bdk-csrs-rst.h>
/**
* Get cycle count based on the clock type.
@@ -165,12 +52,6 @@ uint64_t __bdk_clock_get_count_slow(bdk_clock_t clock)
{
bdk_node_t node = bdk_numa_local();
BDK_CSR_INIT(rst_boot, node, BDK_RST_BOOT);
- if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
- {
- /* Force RCLK and SCLK to be 1GHz on emulator */
- rst_boot.s.c_mul = 20;
- rst_boot.s.pnr_mul = 20;
- }
uint64_t ref_cntr = BDK_CSR_READ(node, BDK_RST_REF_CNTR);
switch(clock)
{
@@ -199,12 +80,6 @@ uint64_t __bdk_clock_get_rate_slow(bdk_node_t node, bdk_clock_t clock)
const uint64_t REF_CLOCK = 50000000;
BDK_CSR_INIT(mio_rst_boot, node, BDK_RST_BOOT);
- if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
- {
- /* Force RCLK and SCLK to be 1GHz on emulator */
- mio_rst_boot.s.c_mul = 20;
- mio_rst_boot.s.pnr_mul = 20;
- }
switch (clock)
{
case BDK_CLOCK_TIME:
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c
index d4b412d439..91f05d3ae7 100644
--- a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-config.c
@@ -1,81 +1,265 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ * Copyright 2018-present Facebook, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * This file consists of data imported from bdk-config.c
+ */
+
#include <bdk.h>
-#include <stdarg.h>
-#include <libfdt.h>
-#include <unistd.h>
-#include "libbdk-arch/bdk-csrs-mio_fus.h"
-#include "libbdk-arch/bdk-csrs-fus.h"
+#include <libbdk-hal/bdk-config.h>
+#include <string.h>
+#include <assert.h>
+#include <lame_string.h>
+
+static struct bdk_devicetree_key_value *config_fdt;
+
+#if !defined(__PRE_RAM__)
+static struct bdk_devicetree_key_value *bdk_config_duplicate(
+ const struct bdk_devicetree_key_value *old,
+ size_t free_space)
+{
+ struct bdk_devicetree_key_value *new;
+ size_t len = sizeof(struct bdk_devicetree_key_value) + free_space;
+ const struct bdk_devicetree_key_value *iter = old;
+ while (iter->key) {
+ iter++;
+ len += sizeof(struct bdk_devicetree_key_value);
+ }
+ new = malloc(len);
+ if (!new)
+ return NULL;
-/* Set this define to override the trace the BDK uses. This is most
- useful with trusted boot when the setup menus are not able to
- configure the trace level. A possible example: */
-//#define BDK_TRACE_OVERRIDE (1ull << BDK_TRACE_ENABLE_INIT)
-#define BDK_TRACE_OVERRIDE 0
+ memcpy(new, old, len);
+
+ return new;
+}
+#endif
+/**
+ * Set the device tree used for configuration
+ *
+ * @param fdt Device tree to use. Memory is assumed to be from malloc() and bdk_config takes
+ * over ownership on success
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_config_set_fdt(const struct bdk_devicetree_key_value *fdt)
+{
+#if !defined(__PRE_RAM__)
+ config_fdt = bdk_config_duplicate(fdt, 0);
+#else
+ config_fdt = (void *)fdt;
+#endif
+ return 0;
+}
-typedef enum
+/**
+ * Look up a configuration item in the environment and replace it.
+ *
+ * @param name
+ *
+ * @return
+ */
+static void set_value(const char *name, const char *val)
{
- BDK_CONFIG_TYPE_INT,
- BDK_CONFIG_TYPE_STR,
- BDK_CONFIG_TYPE_STR_LIST,
- BDK_CONFIG_TYPE_BINARY,
-} bdk_config_type_t;
+#if !defined(__PRE_RAM__)
+ struct bdk_devicetree_key_value *iter;
+ char n[64];
+
+ strncpy(n, name, sizeof(n));
+ n[sizeof(n)-1] = '\0';
+
+ iter = config_fdt;
+ while (iter->key) {
+ if (strcmp(iter->key, n) == 0) {
+ // we are leaking memory here...
+ iter->value = (const char *)strdup(val);
+ return;
+ }
+ iter++;
+ }
+ /* Not found: Create one */
+ iter = bdk_config_duplicate(config_fdt,
+ sizeof(struct bdk_devicetree_key_value));
+ if (!iter)
+ return;
-typedef struct
+ free(config_fdt);
+ config_fdt = iter;
+ while (iter->key) {
+ iter++;
+ }
+ iter->key = (const char *)strdup(name);
+ iter->value = (const char *)strdup(val);
+ iter++;
+ iter->key = 0;
+ iter->value = 0;
+#endif
+}
+
+/**
+ * Look up a configuration item in the environment.
+ *
+ * @param name
+ *
+ * @return
+ */
+static const char *get_value(const char *name)
{
- const char *format; /* Printf style format string to create the item name */
- const bdk_config_type_t ctype;/* Type of this item */
- int64_t default_value; /* Default value when no present. String defaults are cast to pointers from this */
- const int64_t min_value;/* Minimum valid value for INT parameters. Unused for Strings */
- const int64_t max_value;/* Maximum valid value for INT parameters. Unused for Strings */
-} bdk_config_info_t;
+ const struct bdk_devicetree_key_value *iter;
+ char n[64];
-static void config_set_defaults(void);
+ strncpy(n, name, sizeof(n));
+ n[sizeof(n)-1] = '\0';
-/* Tracing defaults to the level specified here before config files are loaded */
-uint64_t bdk_trace_enables = BDK_TRACE_OVERRIDE;
+ while (*n) {
+ iter = config_fdt;
+ while (iter->key) {
+ if (strcmp(iter->key, n) == 0)
+ return iter->value;
+ iter++;
+ }
-/* Global variables that contain the config inside a FDT */
-static void *config_fdt;
-static int config_node;
+ char *p = strrchr(n, '.');
+ if (p)
+ *p = '\0';
+ else
+ break;
+ }
+ return NULL;
+}
-static bdk_config_info_t config_info[__BDK_CONFIG_END] = {
+/**
+ * Get an integer configuration item
+ *
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ *
+ * @return The value of the configuration item, or def_value if the item is not set
+ */
+int64_t bdk_config_get_int(bdk_config_t cfg_item, ...)
+{
+ char name[64];
+ size_t count;
+ int64_t tmp;
+
+ assert(cfg_item < __BDK_CONFIG_END);
+
+ /* Make sure the correct access function was called */
+ assert(config_info[cfg_item].ctype == BDK_CONFIG_TYPE_INT);
+
+ if (!config_fdt)
+ return config_info[cfg_item].default_value;
+
+ va_list args;
+ va_start(args, cfg_item);
+ vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+ va_end(args);
+
+ const char *val = get_value(name);
+ if (!val)
+ return config_info[cfg_item].default_value;
+
+#if 0
+ if ((val[0] == '0') && (val[1] == 'x'))
+ count = sscanf(val + 2, "%lx", &tmp);
+ else
+ count = sscanf(val, "%li", &tmp);
+#endif
+
+ if ((val[0] == '0') && (val[1] == 'x'))
+ count = str_to_hex(val + 2, &tmp);
+ else
+ count = str_to_int(val, &tmp);
+ if (count == 1) {
+ if ((tmp < config_info[cfg_item].min_value) ||
+ (tmp > config_info[cfg_item].max_value)) {
+ printk(BIOS_WARNING, "Out of range for %s = %s, using "
+ "default\n", name, val);
+ return config_info[cfg_item].default_value;
+ }
+ return tmp;
+ }
+
+ printk(BIOS_WARNING, "Failed to parse %s = %s, using default\n",
+ name, val);
+ return config_info[cfg_item].default_value;
+}
+
+/**
+ * Set an integer configuration item. Note this only sets the item in memory,
+ * persistent storage is not updated.
+ *
+ * @param value Configuration item value
+ * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ */
+void bdk_config_set_int(int64_t value, bdk_config_t cfg_item, ...)
+{
+ char name[64], val[32];
+
+ assert(cfg_item < __BDK_CONFIG_END);
+
+ /* Make sure the correct access function was called */
+ assert(config_info[cfg_item].ctype == BDK_CONFIG_TYPE_INT);
+
+ if (!config_fdt)
+ return;
+
+ va_list args;
+ va_start(args, cfg_item);
+ vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+ va_end(args);
+
+ snprintf(val, sizeof(val), "0x%016llx", value);
+ set_value(name, val);
+}
+
+/**
+ * Get a string configuration item
+ *
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
+ *
+ * @return The value of the configuration item, or def_value if the item is not set
+ */
+const char *bdk_config_get_str(bdk_config_t cfg_item, ...)
+{
+ char name[64];
+
+ /* Make sure the correct access function was called */
+ assert(config_info[cfg_item].ctype == BDK_CONFIG_TYPE_STR);
+
+ if (!config_fdt)
+ return (const char *)config_info[cfg_item].default_value;
+
+ va_list args;
+ va_start(args, cfg_item);
+ vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
+
+ if (BDK_CONFIG_QLM_MODE == cfg_item) {
+ char name2[64];
+ vsnprintf(name2, sizeof(name2)-1,"QLM-MODE.N%d.DLM%d" , args);
+ const char *val = get_value(name2);
+ if (val)
+ printk(BIOS_WARNING, "%s: QLM-MODE.N%%d.DLM%%d format "
+ "depricated. Please use QLM-MODE.N%%d.QLM%%d "
+ "instead\n", name2);
+ }
+ va_end(args);
+
+ const char *val = get_value(name);
+ if (val)
+ return val;
+ else
+ return (const char *)config_info[cfg_item].default_value;
+}
+
+bdk_config_info_t config_info[] = {
/* Board manufacturing data */
[BDK_CONFIG_BOARD_MODEL] = {
.format = "BOARD-MODEL", /* String, No parameters */
@@ -251,11 +435,11 @@ static bdk_config_info_t config_info[__BDK_CONFIG_END] = {
.max_value = 0xffff,
},
[BDK_CONFIG_PCIE_WIDTH] = {
- .format = "PCIE-WIDTH.N%d.PORT%d", /* Parameters: Node, Port */
- .ctype = BDK_CONFIG_TYPE_INT,
- .default_value = -1, /* Width override for PCIe links */
- .min_value = -1,
- .max_value = 16,
+ .format = "PCIE-WIDTH.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = -1, /* Width override for PCIe links */
+ .min_value = -1,
+ .max_value = 16,
},
[BDK_CONFIG_PCIE_PHYSICAL_SLOT] = {
.format = "PCIE-PHYSICAL-SLOT.N%d.PORT%d", /* Parameters: Node, Port */
@@ -264,6 +448,13 @@ static bdk_config_info_t config_info[__BDK_CONFIG_END] = {
.min_value = -1,
.max_value = 8191,
},
+ [BDK_CONFIG_PCIE_SKIP_LINK_TRAIN] = {
+ .format = "PCIE-SKIP-LINK-TRAIN.N%d.PORT%d", /* Parameters: Node, Port */
+ .ctype = BDK_CONFIG_TYPE_INT,
+ .default_value = 0, /* Define which physical slot we connect to on the board */
+ .min_value = 0,
+ .max_value = 1,
+ },
[BDK_CONFIG_PCIE_FLASH] = {
.format = "PCIE-FLASH.N%d.PORT%d", /* Parameters: Node, Port */
.ctype = BDK_CONFIG_TYPE_STR_LIST,
@@ -361,7 +552,6 @@ static bdk_config_info_t config_info[__BDK_CONFIG_END] = {
.min_value = -1,
.max_value = 40,
},
-
/* DRAM configuration options */
[BDK_CONFIG_DDR_SPEED] = {
.format = "DDR-SPEED.N%d", /* Parameters: Node */
@@ -862,1085 +1052,3 @@ static bdk_config_info_t config_info[__BDK_CONFIG_END] = {
},
};
-/**
- * Look up a configuration item in the environment.
- *
- * @param name
- *
- * @return
- */
-static const char *get_value(const char *name, int *blob_size)
-{
- if (!config_fdt)
- {
- bdk_error("bdk-config asked for %s before configuration loaded\n", name);
- return NULL;
- }
-
- char n[64];
- strncpy(n, name, sizeof(n));
- n[sizeof(n)-1] = '\0';
-
- while (*n)
- {
- const char *val = fdt_getprop(config_fdt, config_node, n, blob_size);
- if (val)
- return val;
-
- char *p = strrchr(n, '.');
- if (p)
- *p = '\0';
- else
- break;
- }
- return NULL;
-}
-
-/**
- * Get an integer configuration item
- *
- * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- *
- * @return The value of the configuration item, or def_value if the item is not set
- */
-int64_t bdk_config_get_int(bdk_config_t cfg_item, ...)
-{
- /* Make sure the correct access function was called */
- if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_INT)
- bdk_fatal("bdk_config_get_int() called for %s, not an int\n",
- config_info[cfg_item].format);
-
- char name[64];
- va_list args;
- va_start(args, cfg_item);
- vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
- va_end(args);
-
- const char *val = get_value(name, NULL);
- if (val)
- {
- int count;
- int64_t tmp;
- if ((val[0] == '0') && (val[1] == 'x'))
- count = sscanf(val + 2, "%lx", &tmp);
- else
- count = sscanf(val, "%li", &tmp);
- if (count == 1)
- {
- if ((tmp < config_info[cfg_item].min_value) || (tmp > config_info[cfg_item].max_value))
- {
- bdk_warn("Out of range for %s = \"%s\", using default\n", name, val);
- return config_info[cfg_item].default_value;
- }
- return tmp;
- }
- else
- {
- bdk_warn("Failed to parse %s = \"%s\", using default\n", name, val);
- return config_info[cfg_item].default_value;
- }
- }
- else
- return config_info[cfg_item].default_value;
-}
-
-/**
- * Get a string configuration item
- *
- * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- *
- * @return The value of the configuration item, or def_value if the item is not set
- */
-const char *bdk_config_get_str(bdk_config_t cfg_item, ...)
-{
- /* Make sure the correct access function was called */
- if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_STR)
- bdk_fatal("bdk_config_get_str() called for %s, not a str\n",
- config_info[cfg_item].format);
-
- char name[64];
- va_list args;
- va_start(args, cfg_item);
- vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
-
- if (BDK_CONFIG_QLM_MODE == cfg_item)
- {
- char name2[64];
- vsnprintf(name2, sizeof(name2)-1,"QLM-MODE.N%d.DLM%d" , args);
- const char *val = get_value(name2, NULL);
- if (val)
- bdk_warn("%s: QLM-MODE.N%%d.DLM%%d format depricated. Please use QLM-MODE.N%%d.QLM%%d instead\n", name2);
-
- }
- va_end(args);
-
- const char *val = get_value(name, NULL);
- if (val)
- return val;
- else
- return (const char *)config_info[cfg_item].default_value;
-}
-
-/**
- * Get a binary blob
- *
- * @param blob_size Integer to receive the size of the blob
- * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- *
- * @return The value of the configuration item, or def_value if the item is not set
- */
-const void* bdk_config_get_blob(int *blob_size, bdk_config_t cfg_item, ...)
-{
- char name[64];
- va_list args;
- va_start(args, cfg_item);
- vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
- va_end(args);
-
- const void *val = get_value(name, blob_size);
- if (val)
- return val;
- else
- return (const void *)config_info[cfg_item].default_value;
-}
-
-/**
- * Set an integer configuration item. Note this only sets the item in memory,
- * persistent storage is not updated. The optional parameters for the setting are
- * not supplied, meaning this function only changes the global default.
- *
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-void bdk_config_set_int_no_param(int64_t value, bdk_config_t cfg_item)
-{
- /* Make sure the correct access function was called */
- if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_INT)
- bdk_fatal("bdk_config_set_int_no_param() called for %s, not an int\n",
- config_info[cfg_item].format);
-
- char name[64];
- char valstr[20];
- /* Create a name without the optional parameters */
- strncpy(name, config_info[cfg_item].format, sizeof(name) - 1);
- name[sizeof(name) - 1] = 0;
- char *ptr = strchr(name, '.');
- if (ptr)
- *ptr = 0;
-
- if (!config_fdt)
- {
- bdk_error("bdk-config set %s before configuration loaded\n", name);
- return;
- }
- if ((value < config_info[cfg_item].min_value) || (value > config_info[cfg_item].max_value))
- {
- bdk_error("Set out of range for %s = \"0x%lx\", ignoring\n", name, value);
- return;
- }
-
- if (value < 10)
- snprintf(valstr, sizeof(valstr), "%ld", value);
- else
- snprintf(valstr, sizeof(valstr), "0x%lx", value);
-
- int status = fdt_setprop_string(config_fdt, config_node, name, valstr);
- if (status < 0)
- bdk_fatal("Failed to set %s=%s in FDT\n", name, valstr);
-}
-
-/**
- * Set an integer configuration item. Note this only sets the item in memory,
- * persistent storage is not updated.
- *
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-void bdk_config_set_int(int64_t value, bdk_config_t cfg_item, ...)
-{
- /* Make sure the correct access function was called */
- if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_INT)
- bdk_fatal("bdk_config_set_int() called for %s, not an int\n",
- config_info[cfg_item].format);
-
- char name[64];
- char valstr[20];
- va_list args;
- va_start(args, cfg_item);
- vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
- va_end(args);
-
- if (!config_fdt)
- {
- bdk_error("bdk-config set %s before configuration loaded\n", name);
- return;
- }
- if ((value < config_info[cfg_item].min_value) || (value > config_info[cfg_item].max_value))
- {
- bdk_error("Set out of range for %s = \"0x%lx\", ignoring\n", name, value);
- return;
- }
-
- if (value < 10)
- snprintf(valstr, sizeof(valstr), "%ld", value);
- else
- snprintf(valstr, sizeof(valstr), "0x%lx", value);
-
- int status = fdt_setprop_string(config_fdt, config_node, name, valstr);
- if (status < 0)
- bdk_fatal("Failed to set %s=%s in FDT\n", name, valstr);
-}
-
-/**
- * Set an integer configuration item. Note this only sets the item in memory,
- * persistent storage is not updated.
- *
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-void bdk_config_set_str(const char *value, bdk_config_t cfg_item, ...)
-{
- /* Make sure the correct access function was called */
- if (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_STR)
- bdk_fatal("bdk_config_set_str() called for %s, not a str\n",
- config_info[cfg_item].format);
-
- char name[64];
- va_list args;
-
- va_start(args, cfg_item);
- vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
- va_end(args);
-
- if (!config_fdt)
- {
- bdk_error("bdk-config set %s before configuration loaded\n", name);
- return;
- }
-
- int status;
- if (value)
- status = fdt_setprop_string(config_fdt, config_node, name, value);
- else
- status = fdt_delprop(config_fdt, config_node, name);
-
- if ((status < 0) && (status != -FDT_ERR_NOTFOUND))
- bdk_fatal("Failed to set %s=%s in FDT\n", name, value);
-}
-
-/**
- * Set a blob configuration item. Note this only sets the
- * item in memory, persistent storage is not updated. The optional
- * parameters for the setting are not supplied, meaning this function
- * only changes the global default.
- *
- * @param size Size of the item in bytes. A size of zero removes the device tree field
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-void bdk_config_set_blob_no_param(int size, const void *value, bdk_config_t cfg_item)
-{
- /* Make sure the correct access function was called */
- if ((config_info[cfg_item].ctype != BDK_CONFIG_TYPE_BINARY) &&
- (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_STR_LIST))
- bdk_fatal("bdk_config_set_blob() called for %s, not binary\n",
- config_info[cfg_item].format);
-
- char name[64];
- /* Create a name without the optional parameters */
- strncpy(name, config_info[cfg_item].format, sizeof(name) - 1);
- name[sizeof(name) - 1] = 0;
- char *ptr = strchr(name, '.');
- if (ptr)
- *ptr = 0;
-
- if (!config_fdt)
- {
- bdk_error("bdk-config set %s before configuration loaded\n", name);
- return;
- }
-
- int status;
- if (size)
- status = fdt_setprop(config_fdt, config_node, name, value, size);
- else
- status = fdt_delprop(config_fdt, config_node, name);
-
- if ((status < 0) && (status != -FDT_ERR_NOTFOUND))
- bdk_fatal("Failed to set %s in FDT\n", name);
-}
-
-/**
- * Set a blob configuration item. Note this only sets the
- * item in memory, persistent storage is not updated.
- *
- * @param size Size of the item in bytes. A size of zero removes the device tree field
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-void bdk_config_set_blob(int size, const void *value, bdk_config_t cfg_item, ...)
-{
- /* Make sure the correct access function was called */
- if ((config_info[cfg_item].ctype != BDK_CONFIG_TYPE_BINARY) &&
- (config_info[cfg_item].ctype != BDK_CONFIG_TYPE_STR_LIST))
- bdk_fatal("bdk_config_set_blob() called for %s, not binary\n",
- config_info[cfg_item].format);
-
- char name[64];
- va_list args;
-
- va_start(args, cfg_item);
- vsnprintf(name, sizeof(name)-1, config_info[cfg_item].format, args);
- va_end(args);
-
- if (!config_fdt)
- {
- bdk_error("bdk-config set %s before configuration loaded\n", name);
- return;
- }
-
- int status;
- if (size)
- status = fdt_setprop(config_fdt, config_node, name, value, size);
- else
- status = fdt_delprop(config_fdt, config_node, name);
-
- if ((status < 0) && (status != -FDT_ERR_NOTFOUND))
- bdk_fatal("Failed to set %s in FDT\n", name);
-}
-
-/**
- * Multiple functions need to display the config item help string in a format
- * suitable for inclusion in a device tree. This function displays the help
- * message properly indented and such.
- *
- * @param cfg Config item to display help for
- */
-static void display_help(bdk_config_t cfg)
-{
- /* Print the help text as a comment before the entry */
- /* Indent with tabs like Linux requires */
- printf("\n");
- printf("\t/* ");
- const char *ptr = bdk_config_get_help(cfg);
- while (*ptr)
- {
- putchar(*ptr);
- if (*ptr == '\n')
- putchar('\t');
- ptr++;
- }
- printf(" */\n");
- /* Print the parameter and its default value a comment. This will be
- a reference that is easy for the user to change */
- printf("\t//%s = ", config_info[cfg].format);
- switch (config_info[cfg].ctype)
- {
- case BDK_CONFIG_TYPE_INT:
- if (config_info[cfg].default_value < 10)
- printf("\"%ld\"", config_info[cfg].default_value);
- else
- printf("\"0x%lx\"", config_info[cfg].default_value);
- break;
- case BDK_CONFIG_TYPE_STR:
- case BDK_CONFIG_TYPE_STR_LIST:
- if (config_info[cfg].default_value)
- printf("\"%s\"", (const char *)config_info[cfg].default_value);
- else
- printf("\"\"");
- break;
- case BDK_CONFIG_TYPE_BINARY:
- printf("[]");
- break;
- }
- printf(";\n");
-}
-
-/**
- * Display the active configuration as a valid device tree
- */
-void bdk_config_show(void)
-{
- /* Output the standard DTS headers */
- printf("/dts-v1/;\n");
- printf("\n");
- printf("/ {\n");
- printf("cavium,bdk {\n");
- for (bdk_config_t cfg = 0; cfg < __BDK_CONFIG_END; cfg++)
- {
- /* Show the help message */
- display_help(cfg);
-
- /* Figure out how much of the config item is fixed versus
- the optional parameters */
- const char *format = config_info[cfg].format;
- const char *format_param = strchr(format, '.');
- int format_length = 0;
- if (format_param)
- format_length = format_param - format;
-
- /* Loop through all device tree entries displaying the ones that
- match this format */
- int offset = fdt_first_property_offset(config_fdt, config_node);
- while (offset >= 0)
- {
- /* Get the device tree item */
- const char *name = NULL;
- int data_size = 0;
- const char *data = fdt_getprop_by_offset(config_fdt, offset, &name, &data_size);
- const char *data_end = data + data_size;
- /* Find the first param */
- const char *name_param = strchr(name, '.');
- int name_length = 0;
- if (name_param)
- {
- /* We want to compare up to the first param */
- name_length = name_param - name;
- /* If the lengths are different not including the parameters,
- then we force a full matchn which will always fail */
- if (name_length != format_length)
- name_length = 0;
- }
- else /* No params, match base of format */
- name_length = format_length;
-
- /* Check if it matches the current config format */
- int match;
- if (name_length)
- {
- /* Check the prefix */
- match = strncmp(name, format, name_length);
- if (match == 0)
- {
- /* Prefix matched. We only really match if the next
- character is the end of the string or a '.' */
- if ((name[name_length] != 0) && (name[name_length] != '.'))
- match = 1;
- }
- }
- else
- match = strcmp(name, format);
- /* Print matching entries */
- if (match == 0)
- {
- if (config_info[cfg].ctype == BDK_CONFIG_TYPE_BINARY)
- {
- printf("\t%s = [", name);
- const char *ptr = data;
- while (ptr < data_end)
- {
- printf(" %02x", (int)*ptr);
- ptr++;
- }
- printf(" ]");
- }
- else
- {
- printf("\t%s = \"%s\"", name, data);
- data += strlen(data) + 1;
- while (data < data_end)
- {
- printf(",\n\t\t\"%s\"", data);
- data += strlen(data) + 1;
- }
- }
- printf(";\n");
- }
- offset = fdt_next_property_offset(config_fdt, offset);
- }
- }
- /* Output the standard DTS footers */
- printf("}; /* cavium,bdk */\n");
- printf("}; /* / */\n");
-}
-
-/**
- * Display a list of all possible config items with help text
- */
-void bdk_config_help(void)
-{
- /* Write out formatted as part of a device tree source (dts) file */
- printf("/dts-v1/;\n");
- printf("\n");
- printf("/ {\n");
- printf("cavium,bdk {\n");
- for (bdk_config_t cfg = 0; cfg < __BDK_CONFIG_END; cfg++)
- display_help(cfg);
- printf("}; /* cavium,bdk */\n");
- printf("}; /* / */\n");
-}
-
-
-/**
- * Save the current configuration to flash
- *
- * @return Zero on success, negative on failure
- */
-int bdk_config_save(void)
-{
- /* Pack the FDT so it uses less space */
- int status = fdt_pack(config_fdt);
- if (status < 0)
- {
- bdk_error("FDT error %d: %s\n", status, fdt_strerror(status));
- return -1;
- }
-
- /* Calculate a CRC32 of the FDT */
- int fdt_size = fdt_totalsize(config_fdt);
- uint32_t crc32 = bdk_crc32(config_fdt, fdt_size, 0);
-
- /* Open the output file */
- FILE *outf = fopen("/fatfs/default.dtb", "wb");
- if (!outf)
- {
- bdk_error("Failed to open flash");
- return -1;
- }
-
- /* Write the FDT */
- if (fwrite(config_fdt, fdt_size, 1, outf) != 1)
- {
- bdk_error("Failed to write FDT");
- fclose(outf);
- return -1;
- }
-
- /* Save the CRC32 in the same endianness as the FDT */
- crc32 = cpu_to_fdt32(crc32);
- if (fwrite(&crc32, sizeof(crc32), 1, outf) != 1)
- {
- bdk_error("Failed to write FDT CRC32");
- fclose(outf);
- return -1;
- }
-
- fclose(outf);
- return 0;
-}
-
-/**
- * Takes the current live device tree and exports it to a memory address suitable
- * for passing to the next binary in register X1.
- *
- * @return Physical address of the device tree, or 0 on failure
- */
-uint64_t __bdk_config_export_to_mem(void)
-{
- void *end_ptr = sbrk(0);
- bdk_node_t node = bdk_numa_master();
- int fdt_size = fdt_totalsize(config_fdt);
-
- /* Round size up to 4KB boundary, be sure to add 4 bytes for CRC32 */
- int fdt_space = (fdt_size + 4 + 0xfff) & -4096;
- /* First try 4MB - FDT size as this keeps the FDT in the 4MB secure space
- setup by ATF */
- void *fdt_ptr = bdk_phys_to_ptr(0x400000 - fdt_space);
- if (!__bdk_is_dram_enabled(node))
- {
- /* Address must be in L2 */
- int l2_size = bdk_l2c_get_cache_size_bytes(node);
- void *l2_ptr = bdk_phys_to_ptr(l2_size - fdt_space);
- if (l2_ptr < fdt_ptr)
- fdt_ptr = l2_ptr;
- if (fdt_ptr < end_ptr)
- {
- bdk_error("No room for FDT to pass to next binary\n");
- return 0;
- }
- }
- else
- {
- /* We have DRAM, make sure we're past the end of this image */
- if (fdt_ptr < end_ptr)
- fdt_ptr = end_ptr;
- }
- uint32_t crc32 = bdk_crc32(config_fdt, fdt_size, 0);
- fdt_move(config_fdt, fdt_ptr, fdt_size);
- /* CRC32 is stored in same endianness as FDT at the end */
- *(uint32_t *)((const char *)fdt_ptr + fdt_size) = cpu_to_fdt32(crc32);
- BDK_TRACE(FDT_OS, "Exported device tree to memory %p, size 0x%x, CRC32 %08x\n",
- fdt_ptr, fdt_size, crc32);
- return bdk_ptr_to_phys(fdt_ptr);
-}
-
-/**
- * Return a pointer to the device tree used for configuration
- *
- * @return FDT or NULL on failure
- */
-void* bdk_config_get_fdt(void)
-{
- return config_fdt;
-}
-
-/**
- * Set the device tree used for configuration
- *
- * @param fdt Device tree to use. Memory is assumed to be from malloc() and bdk_config takes
- * over ownership on success
- *
- * @return Zero on success, negative on failure
- */
-int bdk_config_set_fdt(void *fdt)
-{
- int offset = fdt_path_offset(fdt, "/cavium,bdk"); /* Find our node */
- if (offset < 0)
- return -1;
- free(config_fdt);
- config_fdt = fdt;
- config_node = offset;
- return 0;
-}
-
-/**
- * Write all default values to a FDT. Missing config items get defaults in the
- * BDK config, this function adds those defaults to the FDT. This way other code
- * gets the default value without needing special code.
- *
- * @param fdt FDT structure to fill defaults into
- *
- * @return Zero on success, negative on failure
- */
-int bdk_config_expand_defaults(void *fdt)
-{
- const struct fdt_property *prop;
-
- /* The best defaults may have changed while this image was running if DRAM
- is setup. Update the defaults before expanding them */
- config_set_defaults();
-
- int fdt_node = fdt_path_offset(fdt, "/cavium,bdk"); /* Find our node */
- if (fdt_node < 0)
- {
- bdk_error("Failed to find top node, FDT error %d: %s\n",
- fdt_node, fdt_strerror(fdt_node));
- return -1;
- }
-
- /* Loop through all configuration items */
- for (bdk_config_t cfg = 0; cfg < __BDK_CONFIG_END; cfg++)
- {
- /* Figure out the base name without and dot parameters */
- const char *name = config_info[cfg].format;
- const char *name_end = strchr(name, '.');
- int name_len;
- if (name_end)
- name_len = name_end - name;
- else
- name_len = strlen(name);
- /* Try and find the base name in the FDT */
- prop = fdt_get_property_namelen(fdt, fdt_node, name, name_len, NULL);
- /* If it wasn't found, then we need to add the default */
- if (prop == NULL)
- {
- /* Create a copy of the name for use in FDT calls */
- char temp_name[name_len + 1];
- memcpy(temp_name, name, name_len);
- temp_name[name_len] = 0;
- /* Call the correct FDT call based on the type */
- int status = 0;
- switch (config_info[cfg].ctype)
- {
- case BDK_CONFIG_TYPE_INT:
- {
- char temp_value[20];
- if (config_info[cfg].default_value < 10)
- snprintf(temp_value, sizeof(temp_value), "%ld", config_info[cfg].default_value);
- else
- snprintf(temp_value, sizeof(temp_value), "0x%lx", config_info[cfg].default_value);
- /* Store the default int value */
- status = fdt_setprop_string(fdt, fdt_node, temp_name, temp_value);
- break;
- }
- case BDK_CONFIG_TYPE_STR:
- /* Store the default string value, if present */
- if (config_info[cfg].default_value)
- {
- status = fdt_setprop_string(fdt, fdt_node, temp_name,
- (const char *)config_info[cfg].default_value);
- }
- break;
- case BDK_CONFIG_TYPE_STR_LIST:
- /* Do nothing, string list default to empty */
- break;
- case BDK_CONFIG_TYPE_BINARY:
- /* Do nothing, binary defaults to empty */
- break;
- }
- if (status < 0)
- {
- bdk_error("Failed to set default for %s, FDT error %d: %s\n",
- temp_name, status, fdt_strerror(status));
- return -1;
- }
- }
- }
- return 0;
-}
-
-/**
- * Some of the default config values can vary based on runtime parameters. This
- * function sets those default parameters. It must be run before anyone calls
- * bdk_config_get_*().
- */
-static void config_set_defaults(void)
-{
- bool isEmulation = bdk_is_platform(BDK_PLATFORM_EMULATOR);
- /* This is Cavium's OUI with the local admin bit. We will use this as a
- default as it won't collide with official addresses, but is sort of
- part of the Cavium range. The lower three bytes will be updated with
- the wafer info */
- uint64_t mac_address = 0x020fb7000000ull;
- /* Set the lower MAC address bits based on the chip manufacturing
- information. This should give reasonable MAC address defaults
- for production parts */
- if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
- {
- BDK_CSR_INIT(fus_dat0, bdk_numa_local(), BDK_MIO_FUS_DAT0);
- mac_address |= fus_dat0.u & 0xffffff;
- }
- else
- {
- mac_address |= bdk_fuse_read_range(bdk_numa_local(), BDK_FUS_FUSE_NUM_E_MFG_INFOX(0), 24);
- }
- config_info[BDK_CONFIG_MAC_ADDRESS].default_value = mac_address;
-
- /* Set the number of packet buffers */
- int num_packet_buffers = 4096;
- /* If DRAM is setup, allocate 8K buffers for 8 ports plus some slop */
- if (__bdk_is_dram_enabled(bdk_numa_master()))
- num_packet_buffers = 8192 * 16 + 1024;
- else if (isEmulation) {
- if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
- num_packet_buffers = 4096 * 4;
- }
- config_info[BDK_CONFIG_NUM_PACKET_BUFFERS].default_value = num_packet_buffers;
- config_info[BDK_CONFIG_PACKET_BUFFER_SIZE].default_value = 1024;
-
- /* Asim doesn't scale to 48 cores well. Limit to 4 */
- if (bdk_is_platform(BDK_PLATFORM_ASIM))
- config_info[BDK_CONFIG_COREMASK].default_value = 0xf;
- /* CN88XX pass 1.x doesn't support EA */
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
- config_info[BDK_CONFIG_PCIE_EA].default_value = 0;
- /* Emulator only supports 4 cores */
- if (isEmulation)
- config_info[BDK_CONFIG_COREMASK].default_value = 0xf;
-}
-
-/**
- * BDK configuration items are stored in a device tree so thay can be passed to
- * other software later. This function creates the initial empty device tree
- * used for BDK configuration items. The values will be populated as configuration
- * files are read from flash.
- */
-static void config_setup_fdt(void)
-{
- const int FDT_SIZE = 0x10000;
- config_fdt = calloc(1, FDT_SIZE);
- if (!config_fdt)
- bdk_fatal("Unable to allocate memory for config FDT\n");
- if (fdt_create_empty_tree(config_fdt, FDT_SIZE) < 0)
- bdk_fatal("Unable to create FDT for config\n");
- config_node = fdt_add_subnode(config_fdt, 0, "cavium,bdk");
- if (config_node < 0)
- bdk_fatal("Unable to create cavium,bdk node in FDT\n");
-}
-
-/**
- * Parse a FDT and copy its properties to our configuration FDT
- *
- * @param fdt FDT to parse
- */
-static int config_parse_fdt(const void *fdt, const char *base_path)
-{
- /* Check the FDT header */
- int result = fdt_check_header(fdt);
- if (result)
- goto fail;
-
- /* Find our node */
- result = fdt_path_offset(fdt, base_path);
- if (result < 0)
- goto fail;
-
- /* Copy all parameters to our in memory FDT */
- int offset = fdt_first_property_offset(fdt, result);
- while (offset >= 0)
- {
- const char *name = NULL;
- int blob_size = 0;
- const char *data = fdt_getprop_by_offset(fdt, offset, &name, &blob_size);
- result = fdt_setprop(config_fdt, config_node, name, data, blob_size);
- offset = fdt_next_property_offset(fdt, offset);
- }
- return 0;
-fail:
- bdk_error("FDT error %d: %s\n", result, fdt_strerror(result));
- return -1;
-}
-
-/**
- * Load a FDT from a file and pull in its configuration properties
- *
- * @param filename File to read from
- * @param offset Offset into the file to read from
- *
- * @return Zero on success, negative on failure
- */
-static int config_load_file(const char *filename, uint64_t offset)
-{
- uint64_t ftd_size = 0;
- bdk_signed_flags_t sign_flags = BDK_SIGNED_FLAG_NONE;
- if (offset)
- sign_flags = BDK_SIGNED_FLAG_ALLOW_UNSIGNED | BDK_SIGNED_FLAG_NOT_ENCRYPTED;
- void *fdt = bdk_signed_load(filename, offset, BDK_SIGNED_DTS, sign_flags, &ftd_size);
- if (!fdt)
- return -1;
-
- /* Make sure the read succeeded */
- if (ftd_size < (int)sizeof(struct fdt_header))
- {
- bdk_error("Invalid device tee %s\n", filename);
- free(fdt);
- return -1;
- }
-
- if (fdt_check_header(fdt))
- {
- bdk_error("Invalid FDT header read from %s\n", filename);
- free(fdt);
- return -1;
- }
-
- /* Make sure we read enough data to contain the FDT */
- int correct_size = fdt_totalsize(fdt);
- if ((int)ftd_size < correct_size)
- {
- bdk_error("Unable to read FDT from %s\n", filename);
- free(fdt);
- return -1;
- }
-
- /* Check if a CRC32 was added on the end of the FDT */
- if ((int)ftd_size >= correct_size + 4)
- {
- uint32_t crc32 = bdk_crc32(fdt, correct_size, 0);
- uint32_t correct_crc32 = *(uint32_t *)((const char *)fdt + correct_size);
- /* CRC32 is stored in same endianness as FDT */
- correct_crc32 = fdt32_to_cpu(correct_crc32);
- if (crc32 != correct_crc32)
- {
- bdk_error("FDT failed CRC32 verification (%s)\n", filename);
- free(fdt);
- return -1;
- }
- //printf("PASS: FDT CRC32 verification (%s)\n", filename);
- }
-
- /* Parse the device tree, adding its configuration to ours */
- if (config_parse_fdt(fdt, "/cavium,bdk"))
- {
- free(fdt);
- return -1;
- }
-
- free(fdt);
- return 0;
-}
-
-/**
- * Internal BDK function to initialize the config system. Must be called before
- * any configuration functions are called
- */
-void __bdk_config_init(void)
-{
- bool done_trust_init = false;
- /* Set default that can vary dynamically at runtime */
- config_set_defaults();
-
- /* Regsiter X1 is expected to be a device tree when we boot. Check that
- the physical address seems correct, then load the device tree */
- if ((__bdk_init_reg_x1 > 0) && /* Not zero */
- (__bdk_init_reg_x1 < 0x1000000) && /* In the lower 16MB */
- ((__bdk_init_reg_x1 & 0xfff) == 0)) /* Aligned on a 4KB boundary */
- {
- const void *fdt = (const void *)__bdk_init_reg_x1;
- /* Check the FDT header */
- int result = fdt_check_header(fdt);
- if (result)
- result = -1; /* Invalid tree */
- else
- {
- int fdt_size = fdt_totalsize(fdt);
- uint32_t crc32 = bdk_crc32(fdt, fdt_size, 0);
- uint32_t correct_crc32 = *(uint32_t *)((const char *)fdt + fdt_size);
- /* CRC32 is stored in same endianness as FDT */
- correct_crc32 = fdt32_to_cpu(correct_crc32);
- if (crc32 == correct_crc32)
- {
- //printf("Previous image FDT passed CRC32 verification(%p, size 0x%x, CRC32 %08x)\n", fdt, fdt_size, crc32);
- result = fdt_path_offset(fdt, "/cavium,bdk"); /* Find our node */
- }
- else
- {
- bdk_error("Previous image FDT failed CRC32 verification(%p, size 0x%x)\n", fdt, fdt_size);
- result = -1; /* Invalid tree */
- }
- }
- /* If tree is valid so far, attempt to move it into our memory space */
- if (result > 0)
- {
- /* 4KB extra room for growth */
- const int fdt_size = fdt_totalsize(fdt) + 4096;
- config_fdt = calloc(1, fdt_size);
- if (config_fdt)
- {
- int result = fdt_move(fdt, config_fdt, fdt_size);
- if (result == 0)
- {
- /* Find our node */
- config_node = fdt_path_offset(config_fdt, "/cavium,bdk");
- if (config_node > 0)
- {
- printf("Using configuration from previous image\n");
- goto done;
- }
- else
- {
- bdk_error("Unable to find BDK node after move\n");
- free(config_fdt);
- config_node = 0;
- config_fdt = NULL;
- }
- }
- else
- {
- bdk_error("Unable to move passed device tree\n");
- free(config_fdt);
- config_fdt = NULL;
- }
- }
- else
- bdk_error("Failed to allocate memory for passed device tree (%d bytes)\n", fdt_size);
- }
- }
-
- /* Create the global device tree used to store config items */
- config_setup_fdt();
- /* Setup trust level so reading device trees works */
- __bdk_trust_init();
- done_trust_init = true;
-
- if (bdk_is_platform(BDK_PLATFORM_ASIM))
- {
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
- bdk_config_set_str("ASIM-CN88XX", BDK_CONFIG_BOARD_MODEL);
- else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
- bdk_config_set_str("ASIM-CN83XX", BDK_CONFIG_BOARD_MODEL);
- else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
- bdk_config_set_str("ASIM-CN81XX", BDK_CONFIG_BOARD_MODEL);
- else if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
- bdk_config_set_str("ASIM-CN93XX", BDK_CONFIG_BOARD_MODEL);
- }
- else if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
- {
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
- bdk_config_set_str("EMUL-CN88XX", BDK_CONFIG_BOARD_MODEL);
- else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
- bdk_config_set_str("EMUL-CN83XX", BDK_CONFIG_BOARD_MODEL);
- else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
- bdk_config_set_str("EMUL-CN81XX", BDK_CONFIG_BOARD_MODEL);
- else if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
- bdk_config_set_str("EMUL-CN93XX", BDK_CONFIG_BOARD_MODEL);
- }
- else if (config_load_file("/rom/boardcfg.dtb", 0) == 0)
- {
- printf("Board manufacturing information loaded from ROM-FS\n");
- }
- /* Load manufacturing data from the top 64KB of flash */
- else if (config_load_file("/boot", BDK_CONFIG_MANUFACTURING_ADDRESS) != 0)
- {
- printf("\33[1m"); /* Bold */
- bdk_warn("\n");
- bdk_warn("********************************************************\n");
- bdk_warn("* Board manufacturing information not found. Program\n");
- bdk_warn("* the board manufacturing information in the Setup menu.\n");
- bdk_warn("********************************************************\n");
- bdk_warn("\n");
- printf("\33[0m"); /* Normal */
- goto done;
- }
-
- const char *model = bdk_config_get_str(BDK_CONFIG_BOARD_MODEL);
- const char *revision = bdk_config_get_str(BDK_CONFIG_BOARD_REVISION);
-
- /* Load BOARD-REVISION.cfg if it is on ROM-FS */
- if (model && revision)
- {
- char filename[64];
- snprintf(filename, sizeof(filename), "/rom/%s-%s.dtb", model, revision);
- if (config_load_file(filename, 0) == 0)
- goto done;
- }
-
- /* Load BOARD.cfg if it is on ROM-FS */
- if (model)
- {
- char filename[64];
- snprintf(filename, sizeof(filename), "/rom/%s.dtb", model);
- if (config_load_file(filename, 0) == 0)
- goto done;
- }
-
- /* Load default.dtb if it is there */
- if (config_load_file("/fatfs/default.dtb", 0) == 0)
- goto done;
-
- /* Load BOARD-REVISION.cfg if it is there */
- if (model && revision)
- {
- char filename[64];
- snprintf(filename, sizeof(filename), "/fatfs/%s-%s.dtb", model, revision);
- if (config_load_file(filename, 0) == 0)
- goto done;
- }
-
- /* Load BOARD.cfg if it is there */
- if (model)
- {
- char filename[64];
- snprintf(filename, sizeof(filename), "/fatfs/%s.dtb", model);
- if (config_load_file(filename, 0) == 0)
- goto done;
- }
-
- /* No board specific configuration was found. Warn the user */
- printf("\33[1m"); /* Bold */
- bdk_warn("\n");
- bdk_warn("********************************************************\n");
- bdk_warn("* Board configuration file not found. Either the board\n");
- bdk_warn("* model is incorrect, or factory settings are not\n");
- bdk_warn("* available. DTB file not found for board \"%s\".\n", model);
- bdk_warn("********************************************************\n");
- bdk_warn("\n");
- printf("\33[0m"); /* Normal */
-
-done:
- bdk_config_set_str(bdk_version_string(), BDK_CONFIG_VERSION);
- /* Load the tracing level */
- bdk_trace_enables = bdk_config_get_int(BDK_CONFIG_TRACE);
- if (BDK_TRACE_OVERRIDE)
- bdk_trace_enables = BDK_TRACE_OVERRIDE;
- if (!done_trust_init)
- __bdk_trust_init();
-}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-ecam-io.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-ecam-io.c
new file mode 100644
index 0000000000..8120820626
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-ecam-io.c
@@ -0,0 +1,373 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-ecam.h"
+#include "libbdk-arch/bdk-csrs-gser.h"
+#include "libbdk-arch/bdk-csrs-pccpf.h"
+#include "libbdk-arch/bdk-csrs-pem.h"
+#include "libbdk-hal/device/bdk-device.h"
+#include "libbdk-hal/bdk-ecam.h"
+
+#if 1 /* Support CN88XX pass 1.0 */
+/*******************************************************************
+ *******************************************************************
+ These functions are related to CN88XX pass 1.0 errata and do not
+ apply to any other chip
+ *******************************************************************
+ *******************************************************************/
+
+/**
+ * Errata (ECAM-22630) ECAM function accesses can fault
+ * For some errata workaround we need a check to tell if a ECAM access is to a
+ * valid intenral device. This function decodes a pcc_dev_con_e enumeration and
+ * checks if the supplied arguments match it. This should only
+ * ever be called on CN88XX pass 1.0.
+ *
+ * @param ecam ECAM to check
+ * @param bus ECAM bus number
+ * @param dev Device to check
+ * @param fn sub function of device
+ * @param dev_con Enumeration to match against
+ *
+ * @return Non zero if the device matches
+ */
+static int is_internal_cn88xxp1_0(const bdk_device_t *device, int dev_con)
+{
+ union bdk_pcc_dev_con_s d = { .u = dev_con };
+ return (d.cn8.ecam == device->ecam) && (d.s.bus == device->bus) && (d.s.func == ((device->dev<<3)|device->func));
+}
+
+/**
+ * Errata (ECAM-22630) ECAM function accesses can fault
+ * This is a companion to the function above to determine if the ECAM device is
+ * any of the valid internal devices. This should only ever be
+ * called on CN88XX pass 1.0.
+ *
+ * @param ecam ECAM to check
+ * @param bus ECAM bus number
+ * @param dev Device to check
+ * @param fn sub function of device
+ *
+ * @return Non zero if the device matches
+ */
+static int is_any_internal_cn88xxp1_0(const bdk_device_t *device)
+{
+ /* Errata (ECAM-22630) ECAM function accesses can fault
+ CN88XXP1.0: The ECAM has a bug where accessing a non-existent
+ device causes an exception. This is a list of all valid devices
+ for CN88XX pass 1.0 */
+ static const uint32_t INTERNAL_DEVICES_CN88XXP1_0[] = {
+ BDK_PCC_DEV_CON_E_BGXX(0),
+ BDK_PCC_DEV_CON_E_BGXX(1),
+ BDK_PCC_DEV_CON_E_DAP,
+ BDK_PCC_DEV_CON_E_DFA,
+ BDK_PCC_DEV_CON_E_FUSF,
+ BDK_PCC_DEV_CON_E_GIC_CN8,
+ BDK_PCC_DEV_CON_E_GPIO_CN8,
+ BDK_PCC_DEV_CON_E_GSERX(0),
+ BDK_PCC_DEV_CON_E_GSERX(1),
+ BDK_PCC_DEV_CON_E_GSERX(10),
+ BDK_PCC_DEV_CON_E_GSERX(11),
+ BDK_PCC_DEV_CON_E_GSERX(12),
+ BDK_PCC_DEV_CON_E_GSERX(13),
+ BDK_PCC_DEV_CON_E_GSERX(2),
+ BDK_PCC_DEV_CON_E_GSERX(3),
+ BDK_PCC_DEV_CON_E_GSERX(4),
+ BDK_PCC_DEV_CON_E_GSERX(5),
+ BDK_PCC_DEV_CON_E_GSERX(6),
+ BDK_PCC_DEV_CON_E_GSERX(7),
+ BDK_PCC_DEV_CON_E_GSERX(8),
+ BDK_PCC_DEV_CON_E_GSERX(9),
+ BDK_PCC_DEV_CON_E_GTI_CN8,
+ BDK_PCC_DEV_CON_E_IOBNX(0),
+ BDK_PCC_DEV_CON_E_IOBNX(1),
+ BDK_PCC_DEV_CON_E_KEY,
+ BDK_PCC_DEV_CON_E_L2C,
+ BDK_PCC_DEV_CON_E_L2C_CBCX(0),
+ BDK_PCC_DEV_CON_E_L2C_CBCX(1),
+ BDK_PCC_DEV_CON_E_L2C_CBCX(2),
+ BDK_PCC_DEV_CON_E_L2C_CBCX(3),
+ BDK_PCC_DEV_CON_E_L2C_MCIX(0),
+ BDK_PCC_DEV_CON_E_L2C_MCIX(1),
+ BDK_PCC_DEV_CON_E_L2C_MCIX(2),
+ BDK_PCC_DEV_CON_E_L2C_MCIX(3),
+ BDK_PCC_DEV_CON_E_L2C_TADX(0),
+ BDK_PCC_DEV_CON_E_L2C_TADX(1),
+ BDK_PCC_DEV_CON_E_L2C_TADX(2),
+ BDK_PCC_DEV_CON_E_L2C_TADX(3),
+ BDK_PCC_DEV_CON_E_L2C_TADX(4),
+ BDK_PCC_DEV_CON_E_L2C_TADX(5),
+ BDK_PCC_DEV_CON_E_L2C_TADX(6),
+ BDK_PCC_DEV_CON_E_L2C_TADX(7),
+ BDK_PCC_DEV_CON_E_LMCX(0),
+ BDK_PCC_DEV_CON_E_LMCX(1),
+ BDK_PCC_DEV_CON_E_LMCX(2),
+ BDK_PCC_DEV_CON_E_LMCX(3),
+ BDK_PCC_DEV_CON_E_MIO_BOOT,
+ BDK_PCC_DEV_CON_E_MIO_EMM,
+ BDK_PCC_DEV_CON_E_MIO_FUS,
+ BDK_PCC_DEV_CON_E_MIO_PTP,
+ BDK_PCC_DEV_CON_E_MIO_TWSX(0),
+ BDK_PCC_DEV_CON_E_MIO_TWSX(1),
+ BDK_PCC_DEV_CON_E_MIO_TWSX(2),
+ BDK_PCC_DEV_CON_E_MIO_TWSX(3),
+ BDK_PCC_DEV_CON_E_MIO_TWSX(4),
+ BDK_PCC_DEV_CON_E_MIO_TWSX(5),
+ BDK_PCC_DEV_CON_E_MPI,
+ BDK_PCC_DEV_CON_E_MRML,
+ BDK_PCC_DEV_CON_E_NCSI,
+ BDK_PCC_DEV_CON_E_NIC_CN88XX,
+ BDK_PCC_DEV_CON_E_OCLAX_CN8(0),
+ BDK_PCC_DEV_CON_E_OCLAX_CN8(1),
+ BDK_PCC_DEV_CON_E_OCLAX_CN8(2),
+ BDK_PCC_DEV_CON_E_OCLAX_CN8(3),
+ BDK_PCC_DEV_CON_E_OCLAX_CN8(4),
+ BDK_PCC_DEV_CON_E_OCX,
+ BDK_PCC_DEV_CON_E_PCCBR_DFA,
+ BDK_PCC_DEV_CON_E_PCCBR_MRML,
+ BDK_PCC_DEV_CON_E_PCCBR_NIC_CN88XX,
+ BDK_PCC_DEV_CON_E_PCCBR_RAD_CN88XX,
+ BDK_PCC_DEV_CON_E_PCCBR_ZIP_CN88XX,
+ BDK_PCC_DEV_CON_E_PCIERC0_CN88XX,
+ BDK_PCC_DEV_CON_E_PCIERC1_CN88XX,
+ BDK_PCC_DEV_CON_E_PCIERC2_CN88XX,
+ BDK_PCC_DEV_CON_E_PCIERC3_CN88XX,
+ BDK_PCC_DEV_CON_E_PCIERC4,
+ BDK_PCC_DEV_CON_E_PCIERC5,
+ BDK_PCC_DEV_CON_E_PEMX(0),
+ BDK_PCC_DEV_CON_E_PEMX(1),
+ BDK_PCC_DEV_CON_E_PEMX(2),
+ BDK_PCC_DEV_CON_E_PEMX(3),
+ BDK_PCC_DEV_CON_E_PEMX(4),
+ BDK_PCC_DEV_CON_E_PEMX(5),
+ BDK_PCC_DEV_CON_E_RAD_CN88XX,
+ BDK_PCC_DEV_CON_E_RNM_CN88XX,
+ BDK_PCC_DEV_CON_E_RST,
+ BDK_PCC_DEV_CON_E_SATA0_CN88XX,
+ BDK_PCC_DEV_CON_E_SATA1_CN88XX,
+ BDK_PCC_DEV_CON_E_SATA10,
+ BDK_PCC_DEV_CON_E_SATA11,
+ BDK_PCC_DEV_CON_E_SATA12,
+ BDK_PCC_DEV_CON_E_SATA13,
+ BDK_PCC_DEV_CON_E_SATA14,
+ BDK_PCC_DEV_CON_E_SATA15,
+ BDK_PCC_DEV_CON_E_SATA2,
+ BDK_PCC_DEV_CON_E_SATA3,
+ BDK_PCC_DEV_CON_E_SATA4,
+ BDK_PCC_DEV_CON_E_SATA5,
+ BDK_PCC_DEV_CON_E_SATA6,
+ BDK_PCC_DEV_CON_E_SATA7,
+ BDK_PCC_DEV_CON_E_SATA8,
+ BDK_PCC_DEV_CON_E_SATA9,
+ BDK_PCC_DEV_CON_E_SGP,
+ BDK_PCC_DEV_CON_E_SLI0_CN88XX,
+ BDK_PCC_DEV_CON_E_SLI1,
+ BDK_PCC_DEV_CON_E_SMI,
+ BDK_PCC_DEV_CON_E_SMMU0_CN8,
+ BDK_PCC_DEV_CON_E_SMMU1,
+ BDK_PCC_DEV_CON_E_SMMU2,
+ BDK_PCC_DEV_CON_E_SMMU3,
+ BDK_PCC_DEV_CON_E_TNS,
+ BDK_PCC_DEV_CON_E_UAAX_CN8(0),
+ BDK_PCC_DEV_CON_E_UAAX_CN8(1),
+ BDK_PCC_DEV_CON_E_USBHX(0),
+ BDK_PCC_DEV_CON_E_USBHX(1),
+ BDK_PCC_DEV_CON_E_VRMX(0),
+ BDK_PCC_DEV_CON_E_VRMX(1),
+ BDK_PCC_DEV_CON_E_ZIP_CN88XX,
+ 0,
+ };
+
+ int loc = 0;
+ while (INTERNAL_DEVICES_CN88XXP1_0[loc])
+ {
+ if (is_internal_cn88xxp1_0(device, INTERNAL_DEVICES_CN88XXP1_0[loc]))
+ return 1;
+ loc++;
+ }
+ return 0;
+}
+
+static int is_accessable_cn88xxp1_0(const bdk_device_t *device)
+{
+ /* Errata (ECAM-22630) ECAM function accesses can fault */
+ /* Skip internal devices that don't exists */
+ if (!is_any_internal_cn88xxp1_0(device))
+ return 0;
+
+ /* Errata (ECAM-23020) PCIERC transactions fault unless PEM is
+ out of reset. The PCIe ports don't work until the PEM is
+ turned on. Check for one of the PCIe ports */
+ int pem = -1;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_PCIERC0_CN88XX))
+ pem = 0;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_PCIERC1_CN88XX))
+ pem = 1;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_PCIERC2_CN88XX))
+ pem = 2;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_PCIERC3_CN88XX))
+ pem = 3;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_PCIERC4))
+ pem = 4;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_PCIERC5))
+ pem = 5;
+ if (pem != -1)
+ {
+ BDK_CSR_INIT(pem_on, device->node, BDK_PEMX_ON(pem));
+ if (!pem_on.s.pemon || !pem_on.s.pemoor)
+ return 0;
+ }
+
+ {
+ /* SATA ports should be hidden if they aren't configured at the QLM */
+ int qlm = -1;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA0_CN88XX) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA1_CN88XX) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA2) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA3))
+ qlm = 2;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA4) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA5) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA6) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA7))
+ qlm = 3;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA8) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA9) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA10) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA11))
+ qlm = 6;
+ if (is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA12) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA13) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA14) ||
+ is_internal_cn88xxp1_0(device, BDK_PCC_DEV_CON_E_SATA15))
+ qlm = 7;
+ if (qlm != -1)
+ {
+ BDK_CSR_INIT(cfg, device->node, BDK_GSERX_CFG(qlm));
+ if (!cfg.s.sata)
+ return 0;
+ }
+ }
+ return 1;
+}
+
+#endif /* Support CN88XX pass 1.0 */
+
+/**
+ * Build an ECAM config space request address for a device
+ *
+ * @param device Device being accessed
+ * @param reg Register to access
+ *
+ * @return 64bit IO address
+ */
+uint64_t __bdk_ecam_build_address(const bdk_device_t *device, int reg)
+{
+ /* CN88XX pass 1.0 had a plethora of errata related to ECAM access. This
+ checks to make sure we're allowed to access this location based on
+ the various errata */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_0) && !is_accessable_cn88xxp1_0(device))
+ return 0;
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ /* Build the address */
+ union bdk_ecam_cfg_addr_s address;
+ address.u = BDK_ECAM_BAR_E_ECAMX_PF_BAR2(device->ecam);
+ address.s.node = device->node;
+ address.s.bus = device->bus;
+ address.s.func = device->dev << 3 | device->func;
+ address.s.addr = reg;
+ return address.u;
+ }
+ else
+ {
+ /* Build the address. The architects decided to make it different
+ from CN8XXX for no obvious reason */
+ union bdk_ecam_cfg_addr_s address;
+ address.u = BDK_ECAM_BAR_E_ECAMX_PF_BAR2(0);
+ address.s.node = device->node;
+ address.s.dmn = device->ecam;
+ address.s.bus = device->bus;
+ address.s.func = device->dev << 3 | device->func;
+ address.s.addr = reg;
+ return address.u;
+ }
+}
+
+/**
+ * Read from an ECAM
+ *
+ * @param device Device to read from
+ * @param reg Register to read
+ *
+ * @return Result of the read of -1 on failure
+ */
+uint32_t bdk_ecam_read32(const bdk_device_t *device, int reg)
+{
+ uint64_t address = __bdk_ecam_build_address(device, reg);
+ uint32_t result;
+ if (address)
+ result = bdk_le32_to_cpu(bdk_read64_uint32(address));
+ else
+ result = 0xffffffff;
+
+ /* Errata ECAM-22630: CN88XX pass 1.x, except pass 1.0, will return zero
+ for non-existent devices instead of ones. We look for this special case
+ for 32bit reads for reg=0 so we can scan device properly */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (reg == 0) && (result == 0))
+ result = 0xffffffff;
+
+ return result;
+}
+
+/**
+ * Write to an ECAM register
+ *
+ * @param device Device to write to
+ * @param reg Register to write
+ * @param value Value to write
+ */
+void bdk_ecam_write32(const bdk_device_t *device, int reg, uint32_t value)
+{
+ uint64_t address = __bdk_ecam_build_address(device, reg);
+ if (address)
+ bdk_write64_uint32(address, bdk_cpu_to_le32(value));
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-ecam.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-ecam.c
new file mode 100644
index 0000000000..f79eb587f4
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-ecam.c
@@ -0,0 +1,216 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <string.h>
+#include "libbdk-arch/bdk-csrs-ecam.h"
+#include "libbdk-arch/bdk-csrs-pccbr.h"
+#include "libbdk-arch/bdk-csrs-pccpf.h"
+#include "libbdk-arch/bdk-csrs-rvu.h"
+#include "libbdk-hal/device/bdk-device.h"
+#include "libbdk-hal/bdk-ecam.h"
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(ECAM);
+
+/**
+ * Walk an ECAM finding all internal devices. Each internal
+ * device is then added to the list of device maintained by
+ * bdk-device.
+ *
+ * @param node Node to walk
+ * @param ecam Ecam to walk
+ * @param bus  Zero on first call. Will be non-zero when sub busses are walked
+ */
+static void ecam_walk_internal_bus(bdk_node_t node, int ecam, int bus)
+{
+    /* Create a fake bdk-device to pass around until we create the
+       real device */
+    bdk_device_t device;
+    memset(&device, 0, sizeof(device));
+    device.node = node;
+    device.ecam = ecam;
+    device.bus = bus;
+
+    /* Scan all possible device IDs on the bus */
+    for (int dev = 0; dev < 32; dev++)
+    {
+        /* Update the current scan location */
+        device.dev = dev;
+        device.func = 0;
+
+        uint32_t device_id = bdk_ecam_read32(&device, BDK_PCCPF_XXX_ID);
+
+        /* Only add devices that exist. Our internal devices can have function
+           zero missing. The all ones we get back matches the multi-function
+           check, but not a bridge. This means the later code works fine */
+        if (device_id != (uint32_t)-1)
+            bdk_device_add(device.node, device.ecam, device.bus, device.dev, device.func);
+
+        /* Check for Multi function and Bridge devices */
+        BDK_CSR_DEFINE(clsize, BDK_PCCPF_XXX_CLSIZE);
+        clsize.u = bdk_ecam_read32(&device, BDK_PCCPF_XXX_CLSIZE);
+        int ismultifunction = (clsize.s.hdrtype & 0x80);
+        int isbridge = (clsize.s.hdrtype & 0x7f) == 1;
+
+        if (ismultifunction)
+        {
+            /* Scan for other functions on multifunction devices */
+            for (int func = 1; func < 8; func++)
+            {
+                /* Check if we're past all functions */
+                device.func = func;
+                device_id = bdk_ecam_read32(&device, BDK_PCCPF_XXX_ID);
+                if (device_id != (uint32_t)-1)
+                    bdk_device_add(device.node, device.ecam, device.bus, device.dev, device.func);
+            }
+            device.func = 0;
+        }
+        if (isbridge)
+        {
+            /* Internal bus numbers are hard coded. Read the bus ID */
+            bdk_pccbr_xxx_bus_t ibus;
+            ibus.u = bdk_ecam_read32(&device, BDK_PCCBR_XXX_BUS);
+            /* Asim used to have a bug where bus number were zero, report errors
+               for those */
+            if (ibus.s.sbnum == 0)
+            {
+                bdk_error("N%d:E%d:%d:%d.%d: Secondary bus number is zero\n",
+                    device.node, device.ecam, device.bus, device.dev, device.func);
+            }
+            /* Real PCIe external device use high bus numbers, so skip them */
+            else if (ibus.s.sbnum < 16)
+            {
+                /* Recurse to walk the secondary bus behind this bridge */
+                ecam_walk_internal_bus(node, ecam, ibus.s.sbnum);
+            }
+        }
+    }
+}
+
+/**
+ * Return the number of internal ECAMS on a node.
+ *
+ * @param node Node to query
+ *
+ * @return Number of ECAMs available
+ */
+int bdk_ecam_get_num(bdk_node_t node)
+{
+    /* CN88XX lacks the ECAM_CONST for finding the number of ECAMs */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return 4;
+    else if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
+        return 3; /* Map ECAMs to the first 3 domains */
+    else
+    {
+        /* Other chips report their ECAM count in ECAMX_CONST */
+        BDK_CSR_INIT(ecam_const, node, BDK_ECAMX_CONST(0));
+        if (ecam_const.s.ecams == 0)
+        {
+            /* Fall back to a single ECAM if the constant register reads zero */
+            bdk_error("N%d.ECAM: Number of ecams incorrect in ECAMX_CONST\n", node);
+            return 1;
+        }
+        return ecam_const.s.ecams;
+    }
+}
+
+/**
+ * Initialize RVU functions for use by the BDK. This doesn't setup the hardware
+ * behind RVU, just allows register access to it. The BDK uses a static RVU
+ * configuration where everything is accessible from RVU PF0.
+ *
+ * @param node Node to initialize
+ *
+ * @return Zero on success, negative on failure
+ */
+static int __bdk_ecam_rvu_init(bdk_node_t node)
+{
+    const int rvu_pf = 0;
+    /* Enable PF access to all blocks */
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_CPTX_CFG(rvu_pf, 0),
+        c.s.num_lfs = 1); // FIXME: How many LFs?
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_INT_CFG(rvu_pf),
+        c.s.msix_offset = 0);
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_MSIX_CFG(rvu_pf),
+        c.s.pf_msixt_offset = 0;
+        c.s.pf_msixt_sizem1 = 0;
+        c.s.vf_msixt_offset = 0;
+        c.s.vf_msixt_sizem1 = 0);
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_NIXX_CFG(rvu_pf, 0),
+        c.s.has_lf = 1);
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_NPA_CFG(rvu_pf),
+        c.s.has_lf = 1);
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_SSO_CFG(rvu_pf),
+        c.s.num_lfs = 1); // FIXME: How many LFs?
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_SSOW_CFG(rvu_pf),
+        c.s.num_lfs = 1); // FIXME: How many LFs?
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_TIM_CFG(rvu_pf),
+        c.s.num_lfs = 1); // FIXME: How many LFs?
+    /* Enable RVU with full access */
+    BDK_CSR_MODIFY(c, node, BDK_RVU_PRIV_PFX_CFG(rvu_pf),
+        c.s.me_flr_ena = 1;
+        c.s.af_ena = 1;
+        c.s.ena = 1;
+        c.s.nvf = 0;
+        c.s.first_hwvf = 0);
+    return 0;
+}
+
+/**
+ * Scan all ECAMs for devices and add them to bdk-device
+ *
+ * @param node Node to scan
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_ecam_scan_all(bdk_node_t node)
+{
+    /* RVU must be setup before we scan the bus otherwise it doesn't
+       show up */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        __bdk_ecam_rvu_init(node);
+
+    /* Walk every internal ECAM, starting each walk at bus 0 */
+    int num_ecams = bdk_ecam_get_num(node);
+    for (int ecam = 0; ecam < num_ecams; ecam++)
+        ecam_walk_internal_bus(node, ecam, 0);
+
+    /* Let bdk-device finish setup of everything found during the walks */
+    bdk_device_init();
+
+    return 0;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c
index 55f0dbf3f2..986f68fac3 100644
--- a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-gpio.c
@@ -38,6 +38,7 @@
***********************license end**************************************/
#include <bdk.h>
#include "libbdk-arch/bdk-csrs-gpio.h"
+#include "libbdk-hal/bdk-gpio.h"
/* This code is an optional part of the BDK. It is only linked in
if BDK_REQUIRE() needs it */
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c
index b1e2a88ce1..6c163da2d4 100644
--- a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-l2c.c
@@ -41,6 +41,8 @@
#include "libbdk-arch/bdk-csrs-l2c.h"
#include "libbdk-arch/bdk-csrs-l2c_cbc.h"
#include "libbdk-arch/bdk-csrs-mio_fus.h"
+#include "libbdk-hal/bdk-l2c.h"
+#include "libbdk-hal/bdk-utils.h"
typedef struct
{
@@ -51,56 +53,6 @@ typedef struct
static l2_node_state_t l2_node_state[BDK_NUMA_MAX_NODES];
-/**
- * Perform one time initialization of L2 for improved
- * performance. This can be called after L2 is in use.
- *
- * @return Zero on success, negative on failure.
- */
-int bdk_l2c_initialize(bdk_node_t node)
-{
- if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
- {
- /* Tell L2 to give the IOB statically higher priority compared to the
- cores. This avoids conditions where IO blocks might be starved under
- very high L2 loads */
- BDK_CSR_MODIFY(c, node, BDK_L2C_CTL,
- c.s.rsp_arb_mode = 1;
- c.s.xmc_arb_mode = 0);
- }
-
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && !bdk_is_platform(BDK_PLATFORM_ASIM))
- {
- /* Errata: (L2C-22279) RCAS/RSTC which hits S/S can use wrong compare data */
- BDK_CSR_MODIFY(c, node, BDK_L2C_CTL,
- c.s.dissblkdty = 1);
- /* Errata: (L2C-22249) Broadcast invals can cause starvation on the INV bus */
- for (int i = 0; i < 4; i++)
- BDK_CSR_MODIFY(c, node, BDK_L2C_CBCX_SCRATCH(i),
- c.s.invdly = 1);
- }
-
- // FIXME: Disable partial writes on pass 2 until it is debugged
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && !bdk_is_platform(BDK_PLATFORM_ASIM))
- {
- BDK_CSR_MODIFY(c, node, BDK_L2C_CTL,
- c.s.dissblkdty = 1);
- }
-
- if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && bdk_is_platform(BDK_PLATFORM_EMULATOR))
- {
- /* The emulator requires L2C_CTL[DISSBLKDTY] to be set */
- BDK_CSR_MODIFY(c, node, BDK_L2C_CTL,
- c.s.dissblkdty = 1);
- }
- return 0;
-}
-
-int bdk_l2c_get_core_way_partition(bdk_node_t node, int core)
-{
- return (BDK_CSR_READ(node, BDK_L2C_WPAR_PPX(core)) & 0xffff);
-}
-
int bdk_l2c_set_core_way_partition(bdk_node_t node, int core, uint32_t mask)
{
uint32_t valid_mask = (1 << bdk_l2c_get_num_assoc(node)) - 1;
@@ -120,82 +72,11 @@ int bdk_l2c_set_hw_way_partition(bdk_node_t node, uint32_t mask)
return 0;
}
-
int bdk_l2c_get_hw_way_partition(bdk_node_t node)
{
return (BDK_CSR_READ(node, BDK_L2C_WPAR_IOBX(0)) & 0xffff);
}
-
-int bdk_l2c_lock_mem_region(bdk_node_t node, uint64_t start, uint64_t len)
-{
- /* Round start/end to cache line boundaries */
- len += start & BDK_CACHE_LINE_MASK;
- start &= ~BDK_CACHE_LINE_MASK;
- len = (len + BDK_CACHE_LINE_MASK) & ~BDK_CACHE_LINE_MASK;
- void *ptr = (start) ? bdk_phys_to_ptr(start) : NULL;
-
- while (len)
- {
- BDK_CACHE_LCK_L2(ptr);
- ptr += BDK_CACHE_LINE_SIZE;
- len -= BDK_CACHE_LINE_SIZE;
- }
- l2_node_state[node].is_locked = true;
- return 0;
-}
-
-void bdk_l2c_flush(bdk_node_t node)
-{
- /* The number of ways can be reduced with fuses, but the equations below
- assume the max number of ways */
- const int MAX_WAYS = 16;
- int num_sets = bdk_l2c_get_num_sets(node);
- int num_ways = bdk_l2c_get_num_assoc(node);
-
- int is_rtg = 1; /* Clear remote tags */
- for (int l2_way = 0; l2_way < num_ways; l2_way++)
- {
- for (int l2_set = 0; l2_set < num_sets; l2_set++)
- {
- uint64_t encoded = 128 * (l2_set + num_sets * (l2_way + (is_rtg * MAX_WAYS)));
- BDK_CACHE_WBI_L2_INDEXED(encoded);
- }
- }
-
- is_rtg = 0; /* Clear local tags */
- for (int l2_way = 0; l2_way < num_ways; l2_way++)
- {
- for (int l2_set = 0; l2_set < num_sets; l2_set++)
- {
- uint64_t encoded = 128 * (l2_set + num_sets * (l2_way + (is_rtg * MAX_WAYS)));
- BDK_CACHE_WBI_L2_INDEXED(encoded);
- }
- }
- l2_node_state[node].is_locked = false;
-}
-
-int bdk_l2c_unlock_mem_region(bdk_node_t node, uint64_t start, uint64_t len)
-{
- /* Round start/end to cache line boundaries */
- len += start & BDK_CACHE_LINE_MASK;
- start &= ~BDK_CACHE_LINE_MASK;
- len = (len + BDK_CACHE_LINE_MASK) & ~BDK_CACHE_LINE_MASK;
- void *ptr = (start) ? bdk_phys_to_ptr(start) : NULL;
-
- while (len > 0)
- {
- /* Must use invalidate version to release lock */
- BDK_CACHE_WBI_L2(ptr);
- ptr += BDK_CACHE_LINE_SIZE;
- len -= BDK_CACHE_LINE_SIZE;
- }
-
- l2_node_state[node].is_locked = false;
- return 0;
-}
-
-
int bdk_l2c_get_cache_size_bytes(bdk_node_t node)
{
return bdk_l2c_get_num_sets(node) * bdk_l2c_get_num_assoc(node) * BDK_CACHE_LINE_SIZE;
@@ -254,17 +135,22 @@ int bdk_l2c_get_num_assoc(bdk_node_t node)
return l2_node_state[node].ways;
}
-/**
- * Return true if the BDK has locked itself in L2
- *
- * @return
- */
-int bdk_l2c_is_locked(bdk_node_t node)
+int bdk_l2c_unlock_mem_region(bdk_node_t node, uint64_t start, uint64_t len)
 {
-    /* Determining the lock state of L2 requires reading exact tags from L2
-       which varies per chip. Rather than deal with that complexity, we just
-       keep a flag around saying if the L2 lock functions have been called.
-       This works for the BDK as its use of locking is very simple */
-    return l2_node_state[node].is_locked;
-}
+    /* Round start/end to cache line boundaries */
+    len += start & BDK_CACHE_LINE_MASK;
+    start &= ~BDK_CACHE_LINE_MASK;
+    len = (len + BDK_CACHE_LINE_MASK) & ~BDK_CACHE_LINE_MASK;
+    void *ptr = (start) ? bdk_phys_to_ptr(start) : NULL;
+    while (len > 0)
+    {
+        /* Must use invalidate version to release lock */
+        BDK_CACHE_WBI_L2(ptr);
+        ptr += BDK_CACHE_LINE_SIZE;
+        len -= BDK_CACHE_LINE_SIZE;
+    }
+
+    /* Record that the BDK no longer holds a lock in L2 on this node */
+    l2_node_state[node].is_locked = false;
+    return 0;
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-nic.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-nic.c
new file mode 100644
index 0000000000..b6a9384e10
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-nic.c
@@ -0,0 +1,1090 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <malloc.h>
+#include "libbdk-arch/bdk-csrs-nic.h"
+
+#define MAX_MTU 9212
+#define CQ_ENTRIES_QSIZE 0
+#define CQ_ENTRIES (1024 << CQ_ENTRIES_QSIZE)
+#define SQ_ENTRIES_QSIZE 0
+#define SQ_ENTRIES (1024 << SQ_ENTRIES_QSIZE)
+#define RBDR_ENTRIES_QSIZE 0
+#define RBDR_ENTRIES (8192 << RBDR_ENTRIES_QSIZE)
+
+typedef struct
+{
+ /* VNIC related config */
+ bdk_node_t node : 8; /* Node the NIC is on */
+ bdk_nic_type_t ntype : 8; /* They type of device this NIC is connected to */
+ uint8_t nic_vf; /* NIC VF index number (0 - MAX_VNIC-1) */
+ uint8_t sq; /* Send Queue (SQ) inside NIC VF (0-7) */
+ uint8_t cq; /* Complete Queue (CQ) inside NIC VF (0-7) */
+ uint8_t rq; /* Receive Queue (RQ) inside NIC VF (0-7) */
+ uint8_t rbdr; /* Receive Buffer Descriptor Ring (RBDR) inside NIC VF (0-1) */
+ uint8_t bpid; /* Backpressure ID (0-127) */
+ bdk_if_handle_t handle; /* bdk-if handle associated with this NIC */
+
+ /* Transmit */
+ void * sq_base; /* Pointer to the beginning of the SQ in memory */
+ int sq_loc; /* Location where the next send should go */
+ int sq_available; /* Amount of space left in the queue (fuzzy) */
+} nic_t;
+
+typedef struct
+{
+ void *base;
+ int loc;
+} nic_rbdr_state_t;
+
+typedef struct
+{
+ int num_nic_vf;
+ int next_free_nic_vf;
+ int next_free_cpi;
+ int next_free_rssi;
+ int next_free_bpid;
+ nic_t *nic_map[0]; /* Indexed by handle->nic_id */
+} nic_node_state_t;
+
+static nic_node_state_t *global_node_state[BDK_NUMA_MAX_NODES];
+static int global_buffer_size = 0;
+
+/**
+ * Setup a receive Completion Queue (CQ). CQ can be shared across multiple NICs
+ * to save space. This happens if the NIC has "shares_cq" set.
+ *
+ * @param nic NIC to setup
+ *
+ * @return Zero on success, negative on failure
+ */
+static int vnic_setup_cq(nic_t *nic)
+{
+    /* CN88XX pass 1.x had the drop level reset value too low */
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_CQM_CFG,
+        c.s.drop_level = 128);
+
+    /* All devices using the same NIC VF use the same CQ */
+    if (nic->handle->index == 0)
+    {
+        BDK_TRACE(NIC, "%s: Setting up CQ(%d, %d)\n", nic->handle->name, nic->nic_vf, nic->cq);
+        /* Note that the completion queue requires 512 byte alignment */
+        void *cq_memory = memalign(512, 512 * CQ_ENTRIES);
+        if (!cq_memory)
+        {
+            bdk_error("%s: Failed to allocate memory for completion queue\n", nic->handle->name);
+            return -1;
+        }
+        /* NOTE(review): cq_memory is never freed here; presumably the CQ
+           lives for the remainder of the BDK's run — confirm ownership */
+        /* Configure the completion queue (CQ) */
+        BDK_CSR_WRITE(nic->node, BDK_NIC_QSX_CQX_BASE(nic->nic_vf, nic->cq),
+            bdk_ptr_to_phys(cq_memory));
+        BDK_CSR_MODIFY(c, nic->node, BDK_NIC_QSX_CQX_CFG(nic->nic_vf, nic->cq),
+            c.s.ena = 1;
+            c.s.caching = 1;
+            c.s.qsize = CQ_ENTRIES_QSIZE);
+    }
+
+    /* Configure our vnic to send to the CQ */
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_QSX_SQX_CFG(nic->nic_vf, nic->sq),
+        c.s.cq_qs = nic->nic_vf;
+        c.s.cq_idx = nic->cq);
+    return 0;
+}
+
+/**
+ * Add buffers to a receive buffer descriptor ring (RBDR). Note that RBDRs are
+ * shared between NICs using the same CQ.
+ *
+ * @param nic       NIC using the RBDR
+ * @param rbdr_free Number of buffers to add
+ */
+static void vnic_fill_receive_buffer(const nic_t *nic, int rbdr_free)
+{
+    int nic_vf = nic->nic_vf;
+    int rbdr = nic->rbdr;
+
+    BDK_CSR_INIT(rbdr_base, nic->node, BDK_NIC_QSX_RBDRX_BASE(nic_vf, rbdr));
+    BDK_CSR_INIT(rbdr_tail, nic->node, BDK_NIC_QSX_RBDRX_TAIL(nic_vf, rbdr));
+    BDK_TRACE(NIC, "%s: In Filling RBDR(%d, %d) base 0x%lx\n", nic->handle->name, nic->nic_vf, nic->rbdr, rbdr_base.u);
+
+    /* Start inserting at the hardware's current tail pointer */
+    uint64_t *rbdr_ptr = bdk_phys_to_ptr(rbdr_base.u);
+    int loc = rbdr_tail.s.tail_ptr;
+    BDK_TRACE(NIC, "%s: In Filling RBDR(%d, %d) loc %d\n", nic->handle->name, nic->nic_vf, nic->rbdr, loc);
+
+    int added = 0;
+    for (int i = 0; i < rbdr_free; i++)
+    {
+        bdk_if_packet_t packet;
+        if (bdk_if_alloc(&packet, global_buffer_size))
+        {
+            /* Allocation failure: stop early and ring the doorbell for what
+               we did manage to add */
+            bdk_error("%s: Failed to allocate buffer for RX ring (added %d)\n", nic->handle->name, added);
+            break;
+        }
+        /* Ring entries are little-endian physical addresses */
+        rbdr_ptr[loc] = bdk_cpu_to_le64(packet.packet[0].s.address);
+        BDK_TRACE(NIC, "%s: In Filling RBDR(%d, %d) loc %d = 0x%lx\n", nic->handle->name, nic->nic_vf, nic->rbdr, loc, rbdr_ptr[loc]);
+        loc++;
+        loc &= RBDR_ENTRIES - 1;
+        added++;
+    }
+    /* Make sure the ring writes are visible before telling hardware */
+    BDK_WMB;
+    BDK_CSR_WRITE(nic->node, BDK_NIC_QSX_RBDRX_DOOR(nic_vf, rbdr), added);
+    BDK_TRACE(NIC, "%s: In Filling RBDR(%d, %d) added %d\n", nic->handle->name, nic->nic_vf, nic->rbdr, added);
+}
+
+/**
+ * Setup a receive buffer descriptor ring (RBDR). Note that NIC share the RBDR if
+ * "share_cq" is set.
+ *
+ * @param nic NIC to setup RBDR for
+ *
+ * @return Zero on success, negative on failure
+ */
+static int vnic_setup_rbdr(nic_t *nic)
+{
+    bool do_fill;
+
+    /* All devices using the same NIC VF use the same RBDRs. Don't fill them
+       for any ports except the first */
+    if (nic->handle->index)
+    {
+        do_fill = false;
+    }
+    else
+    {
+        BDK_TRACE(NIC, "%s: Setting up RBDR(%d, %d)\n", nic->handle->name, nic->nic_vf, nic->rbdr);
+        void *rbdr_base = memalign(BDK_CACHE_LINE_SIZE, 8 * RBDR_ENTRIES);
+        if (!rbdr_base)
+        {
+            bdk_error("%s: Failed to allocate memory for RBDR\n", nic->handle->name);
+            return -1;
+        }
+        /* Configure the receive buffer ring (RBDR) */
+        BDK_CSR_WRITE(nic->node, BDK_NIC_QSX_RBDRX_BASE(nic->nic_vf, nic->rbdr),
+            bdk_ptr_to_phys(rbdr_base));
+        BDK_CSR_MODIFY(c, nic->node, BDK_NIC_QSX_RBDRX_CFG(nic->nic_vf, nic->rbdr),
+            c.s.ena = 1;
+            c.s.ldwb = BDK_USE_DWB;
+            c.s.qsize = RBDR_ENTRIES_QSIZE;
+            c.s.lines = global_buffer_size / BDK_CACHE_LINE_SIZE);
+        do_fill = true;
+    }
+
+    BDK_TRACE(NIC, "%s: Setting up RQ(%d, %d)\n", nic->handle->name, nic->nic_vf, nic->rq);
+    /* Configure our vnic to use the RBDR */
+    /* Connect this RQ to the RBDR. Both the first and next buffers come from
+       the same RBDR */
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_QSX_RQX_CFG(nic->nic_vf, nic->rq),
+        c.s.caching = 1; /* Allocate to L2 */
+        c.s.cq_qs = nic->nic_vf;
+        c.s.cq_idx = nic->cq;
+        c.s.rbdr_cont_qs = nic->nic_vf;
+        c.s.rbdr_cont_idx = nic->rbdr;
+        c.s.rbdr_strt_qs = nic->nic_vf;
+        c.s.rbdr_strt_idx = nic->rbdr);
+    /* NIC_PF_CQM_CFG is configure to drop everything if the CQ has 128 or
+       less entries available. Start backpressure when we have 256 or less */
+    int cq_bp = 256;
+    int rbdr_bp = 256;
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_QSX_RQX_BP_CFG(nic->nic_vf, nic->rq),
+        c.s.rbdr_bp_ena = 1;
+        c.s.cq_bp_ena = 1;
+        c.s.rbdr_bp = rbdr_bp * 256 / RBDR_ENTRIES; /* Zero means no buffers, 256 means lots available */
+        c.s.cq_bp = cq_bp * 256 / CQ_ENTRIES; /* Zero means full, 256 means idle */
+        c.s.bpid = nic->bpid);
+    /* Errata (NIC-21269) Limited NIC receive scenario verification */
+    /* RED drop set with pass=drop, so no statistical dropping */
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_QSX_RQX_DROP_CFG(nic->nic_vf, nic->rq),
+        c.s.rbdr_red = 0;
+        c.s.cq_red = 0;
+        c.s.rbdr_pass = 0; /* Zero means no buffers, 256 means lots available */
+        c.s.rbdr_drop = 0;
+        c.s.cq_pass = 0; /* Zero means full, 256 means idle */
+        c.s.cq_drop = 0);
+
+    if (do_fill)
+    {
+        BDK_TRACE(NIC, "%s: Filling RBDR(%d, %d)\n", nic->handle->name, nic->nic_vf, nic->rbdr);
+        /* We probably don't have enough space to completely fill the RBDR. Use
+           1/8 of the buffers available */
+        int fill_num = bdk_config_get_int(BDK_CONFIG_NUM_PACKET_BUFFERS) / 8;
+        if (CAVIUM_IS_MODEL(CAVIUM_CN83XX)) fill_num = fill_num/3; /* CN83XX has more nics */
+        /* Note that RBDR must leave one spot empty */
+        if (fill_num > RBDR_ENTRIES - 1)
+            fill_num = RBDR_ENTRIES - 1;
+        vnic_fill_receive_buffer(nic, fill_num);
+    }
+
+    return 0;
+}
+
+/**
+ * Setup traffic shaping for a NIC. This puts the shapers in passthrough mode
+ * where no shaping is applied.
+ *
+ * @param nic NIC to configure shaping for
+ *
+ * @return Zero on success, negative on failure
+ */
+static int vnic_setup_tx_shaping(nic_t *nic)
+{
+    /* Traffic-limiter (TL) hierarchy indices, resolved per chip below */
+    int tl1_index = -1;
+    int tl2_index = -1;
+    int tl3_index = -1;
+    int tl4_index = -1;
+    int nic_chan_e = -1;
+
+    BDK_TRACE(NIC, "%s: Setting up shaping(%d, %d)\n", nic->handle->name, nic->nic_vf, nic->sq);
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+    {
+        /* TL1 feeds the DMA engines. One for each BGX */
+        tl1_index = nic->handle->interface;
+        /* TL2 feeds TL1 based on the top/bottom half. Use an independent TL1
+           entry for each BGX port */
+        tl2_index = tl1_index * 32 + nic->handle->index;
+        /* Each block of 4 TL3 feed TL2 */
+        tl3_index = tl2_index * 4;
+        /* Each block of 4 TL4 feed TL3 */
+        tl4_index = tl3_index * 4;
+        nic_chan_e = BDK_NIC_CHAN_E_BGXX_PORTX_CHX(nic->handle->interface, nic->handle->index, 0/*channel*/);
+    }
+    else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+    {
+        switch (nic->ntype)
+        {
+            case BDK_NIC_TYPE_BGX:
+                tl1_index = BDK_NIC_LMAC_E_BGXX_LMACX(nic->handle->interface, nic->handle->index);
+                nic_chan_e = 0 ; /* Channel is lmac-relative */
+                break;
+            case BDK_NIC_TYPE_LBK:
+                tl1_index = BDK_NIC_LMAC_E_LBKX_CN83XX((nic->handle->interface == 3) ? 1 : 0);
+                nic_chan_e = nic->handle->index; /* Channel is lmac-relative */
+                break;
+            default:
+                bdk_error("%s: Unsupported NIC TYPE %d\n", nic->handle->name, nic->ntype);
+                return -1;
+        }
+        /* TL1 index by NIC_LMAC_E */
+        /* Set in above switch statement */
+        /* TL2 index is software defined, make it the same as TL1 for straight through */
+        tl2_index = tl1_index;
+        /* Each block of 4 TL3 feed TL2. This assumes there are never more than 4 ports per interface */
+        tl3_index = tl2_index * 4 + nic->handle->index;
+        /* TL4 index is the same as TL3, 1:1 hookup */
+        tl4_index = tl3_index;
+    }
+    else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+    {
+        switch (nic->ntype)
+        {
+            case BDK_NIC_TYPE_BGX:
+                tl1_index = BDK_NIC_LMAC_E_BGXX_LMACX(nic->handle->interface, nic->handle->index);
+                nic_chan_e = BDK_NIC_CHAN_E_BGXX_LMACX_CHX(nic->handle->interface, nic->handle->index, 0/*channel*/);
+                break;
+            case BDK_NIC_TYPE_RGMII:
+                tl1_index = BDK_NIC_LMAC_E_RGXX_LMACX(nic->handle->interface, nic->handle->index);
+                nic_chan_e = 0; /* Channel is lmac-relative */
+                break;
+            case BDK_NIC_TYPE_LBK:
+                tl1_index = BDK_NIC_LMAC_E_LBKX_CN81XX(nic->handle->interface);
+                nic_chan_e = nic->handle->index; /* Channel is lmac-relative */
+                break;
+            default:
+                bdk_error("%s: Unsupported NIC TYPE %d\n", nic->handle->name, nic->ntype);
+                return -1;
+        }
+        /* TL1 index by NIC_LMAC_E */
+        /* Set in above switch statement */
+        /* TL2 index is software defined, make it the same as TL1 for straight through */
+        tl2_index = tl1_index;
+        /* Each block of 4 TL3 feed TL2. This assumes there are never more than 4 ports per interface */
+        tl3_index = tl2_index * 4 + nic->handle->index;
+        /* TL4 index is the same as TL3, 1:1 hookup */
+        tl4_index = tl3_index;
+    }
+    else
+    {
+        bdk_error("%s: Unsupported chip (NIC shaping)\n", nic->handle->name);
+        return -1;
+    }
+
+    /* Setup TL2 to TL1 mappings */
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_TL2X_CFG(tl2_index),
+        c.s.rr_quantum = (MAX_MTU+4) / 4);
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_TL2X_PRI(tl2_index),
+        c.s.rr_pri = 0);
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+    {
+        BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_TL2X_LMAC(tl2_index),
+            c.s.lmac = tl1_index);
+    }
+
+    /* TL3 feeds Tl2 */
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_TL3AX_CFG(tl3_index / 4),
+        c.s.tl3a = tl2_index);
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_TL3X_CFG(tl3_index),
+        c.s.rr_quantum = (MAX_MTU+4) / 4);
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_TL3X_CHAN(tl3_index),
+        c.s.chan = nic_chan_e);
+
+    /* TL4 feeds TL3 */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+    {
+        BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_TL4AX_CFG(tl4_index / 4),
+            c.s.tl4a = tl3_index);
+    }
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_TL4X_CFG(tl4_index),
+        c.s.sq_qs = nic->nic_vf;
+        c.s.sq_idx = nic->sq;
+        c.s.rr_quantum = (MAX_MTU+4) / 4);
+
+    /* SQ feeds TL4 */
+    BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_QSX_SQX_CFG2(nic->nic_vf, nic->sq),
+        c.s.tl4 = tl4_index);
+
+    return 0;
+}
+
+/**
+ * Free the buffers in a packet to the RBDR used by the port
+ *
+ * @param packet          Packet whose buffers are returned to the RBDR
+ * @param vnic_rbdr_state Local RBDR state (ring base and insert location),
+ *                        which determines which RBDR is used
+ */
+static void if_free_to_rbdr(bdk_if_packet_t *packet, nic_rbdr_state_t *vnic_rbdr_state)
+{
+    uint64_t *rbdr_ptr = vnic_rbdr_state->base;
+    int loc = vnic_rbdr_state->loc;
+
+    /* Return every segment buffer of the packet to the ring */
+    for (int s = 0; s < packet->segments; s++)
+    {
+        /* Make sure we strip off any padding added by the hardware in the address */
+        uint64_t address = packet->packet[s].s.address & -BDK_CACHE_LINE_SIZE;
+        rbdr_ptr[loc] = bdk_cpu_to_le64(address);
+        loc++;
+        loc &= RBDR_ENTRIES - 1;
+    }
+    /* Remember where the next free should go */
+    vnic_rbdr_state->loc = loc;
+}
+
+/**
+ * Process a CQ receive entry
+ *
+ * @param node Node containing the CQ
+ * @param vnic_rbdr_state
+ *                  Current RBDR state for the RBDR connected to the CQ
+ * @param cq_header CQ header to process (byte-swapped to CPU order)
+ * @param cq_header_le
+ *                  Same CQ header in raw little-endian memory layout
+ * @param use_cqe_rx2
+ *                  True of the CQ will contain an extended CQE_RX2 header
+ *
+ * @return Returns the amount the RBDR doorbell needs to increment
+ */
+static int if_process_complete_rx(int node, nic_rbdr_state_t *vnic_rbdr_state, const union bdk_nic_cqe_rx_s *cq_header, const union bdk_nic_cqe_rx_s *cq_header_le, bool use_cqe_rx2)
+{
+    nic_node_state_t *node_state = global_node_state[node];
+    /* nic_map is indexed by QS * 8 + RQ index */
+    int nic_id = cq_header->s.rq_qs * 8 + cq_header->s.rq_idx;
+
+    bdk_if_packet_t packet;
+    packet.length = cq_header->s.len;
+    packet.segments = cq_header->s.rb_cnt;
+    packet.if_handle = node_state->nic_map[nic_id]->handle;
+    /* Combine the errlev and errop into a single 11 bit number. Errop
+       is 8 bits, so errlev will be in the top byte */
+    packet.rx_error = cq_header->s.errlev;
+    packet.rx_error <<= 8;
+    packet.rx_error |= cq_header->s.errop;
+
+    const uint16_t *rb_sizes = (void*)cq_header_le + 24; /* Offset of RBSZ0 */
+    const uint64_t *rb_addresses = (uint64_t*)(cq_header_le+1);
+    /* Update offset if nic_cqe_rx2_s is used */
+    if (use_cqe_rx2)
+        rb_addresses += sizeof(union bdk_nic_cqe_rx2_s) / 8;
+    int segment_length = 0;
+
+    /* Collect the receive buffer segments into the packet structure */
+    for (int s = 0; s < packet.segments; s++)
+    {
+        uint64_t addr = bdk_le64_to_cpu(rb_addresses[s]);
+        BDK_PREFETCH(bdk_phys_to_ptr(addr), 0);
+        packet.packet[s].u = addr;
+        packet.packet[s].s.size = bdk_le16_to_cpu(rb_sizes[s]);
+        BDK_TRACE(NIC, "    Receive segment size %d address 0x%lx\n", packet.packet[s].s.size, addr);
+        segment_length += packet.packet[s].s.size;
+    }
+
+    /* If we ran out of buffer the packet could be truncated */
+    if (segment_length < packet.length)
+        packet.length = segment_length;
+
+    if (bdk_likely(packet.if_handle))
+    {
+        /* Do RX stats in software as it is fast and I don't really trust
+           the hardware. The hardware tends to count packets that are received
+           and dropped in some weird way. Hopefully the hardware counters
+           looking for drops can find these. It is important that they
+           aren't counted as good */
+        packet.if_handle->stats.rx.packets++;
+        packet.if_handle->stats.rx.octets += packet.length;
+        if (packet.if_handle->flags & BDK_IF_FLAGS_HAS_FCS)
+            packet.if_handle->stats.rx.octets += 4;
+        if (packet.rx_error)
+            packet.if_handle->stats.rx.errors++;
+        bdk_if_dispatch_packet(&packet);
+    }
+    else
+    {
+        bdk_error("Unable to determine interface for NIC %d.%d\n", cq_header->s.rq_qs, cq_header->s.rq_idx);
+    }
+
+    /* Return the buffers to the RBDR; caller rings the doorbell */
+    if_free_to_rbdr(&packet, vnic_rbdr_state);
+    return packet.segments;
+}
+
+/**
+ * Process all entries in a completion queue (CQ). Note that a CQ is shared
+ * among many ports, so packets will be dispatch for other port handles.
+ *
+ * @param handle Interface handle connected to the CQ
+ *
+ * @return Number of packets received
+ */
+static void if_receive(int unused, void *hand)
+{
+ const nic_t *nic = hand;
+
+ /* Sadly the hardware team decided to change the meaning of NIC_PF_RX_CFG
+ for chips after CN88XX. This stupid spec change was really hard to
+ find */
+ bool use_cqe_rx2 = !CAVIUM_IS_MODEL(CAVIUM_CN88XX);
+
+ /* Figure out which completion queue we're using */
+ int nic_vf = nic->nic_vf;
+ int rbdr = nic->rbdr;
+ int cq = nic->cq;
+
+ BDK_CSR_INIT(cq_base, nic->node, BDK_NIC_QSX_CQX_BASE(nic_vf, cq));
+ const void *cq_ptr = bdk_phys_to_ptr(cq_base.u);
+
+ /* Find the current CQ location */
+ BDK_CSR_INIT(cq_head, nic->node, BDK_NIC_QSX_CQX_HEAD(nic_vf, cq));
+ int loc = cq_head.s.head_ptr;
+
+ /* Store the RBDR data locally to avoid contention */
+ BDK_CSR_INIT(rbdr_base, nic->node, BDK_NIC_QSX_RBDRX_BASE(nic_vf, rbdr));
+ BDK_CSR_INIT(rbdr_tail, nic->node, BDK_NIC_QSX_RBDRX_TAIL(nic_vf, rbdr));
+ nic_rbdr_state_t vnic_rbdr_state;
+ vnic_rbdr_state.base = bdk_phys_to_ptr(rbdr_base.u);
+ vnic_rbdr_state.loc = rbdr_tail.s.tail_ptr;
+
+ BDK_TRACE(NIC, "%s: Receive thread for CQ(%d, %d) started\n", nic->handle->name, nic->nic_vf, nic->cq);
+
+ while (1)
+ {
+ /* Exit immediately if the CQ is empty */
+ BDK_CSR_INIT(cq_status, nic->node, BDK_NIC_QSX_CQX_STATUS(nic_vf, cq));
+ int pending_count = cq_status.s.qcount;
+ if (bdk_likely(!pending_count))
+ {
+ bdk_wait_usec(1);
+ continue;
+ }
+
+ /* Loop through all pending CQs */
+ int rbdr_doorbell = 0;
+ int count = 0;
+ const union bdk_nic_cqe_rx_s *cq_next = cq_ptr + loc * 512;
+ BDK_TRACE(NIC, "%s: Receive thread CQ(%d, %d): %d pending\n", nic->handle->name, nic->nic_vf, nic->cq, pending_count);
+ while (count < pending_count)
+ {
+ const union bdk_nic_cqe_rx_s *cq_header = cq_next;
+ const union bdk_nic_cqe_rx_s *cq_header_le = cq_header;
+#if __BYTE_ORDER == __BIG_ENDIAN
+ union bdk_nic_cqe_rx_s cq_be;
+ for (int i = 0; i < 6; i++)
+ cq_be.u[i] = bdk_le64_to_cpu(cq_header_le->u[i]);
+ cq_header = &cq_be;
+#endif
+ BDK_TRACE(NIC, "%s: Receive HDR[%p] = 0x%lx 0x%lx 0x%lx 0x%lx\n",
+ nic->handle->name, cq_header_le, cq_header->u[0], cq_header->u[1], cq_header->u[2], cq_header->u[3]);
+ loc++;
+ loc &= CQ_ENTRIES - 1;
+ cq_next = cq_ptr + loc * 512;
+ BDK_PREFETCH(cq_next, 0);
+ if (bdk_likely(cq_header->s.cqe_type == BDK_NIC_CQE_TYPE_E_RX))
+ rbdr_doorbell += if_process_complete_rx(nic->node, &vnic_rbdr_state, cq_header, cq_header_le, use_cqe_rx2);
+ else
+ bdk_error("Unsupported CQ header type %d\n", cq_header->s.cqe_type);
+ count++;
+ }
+ /* Ring the RBDR doorbell for all packets */
+ BDK_WMB;
+ BDK_CSR_WRITE(nic->node, BDK_NIC_QSX_RBDRX_DOOR(nic_vf, rbdr), rbdr_doorbell);
+ /* Free all the CQs that we've processed */
+ BDK_CSR_WRITE(nic->node, BDK_NIC_QSX_CQX_DOOR(nic_vf, cq), count);
+ /* Yield before going through more packets. The low core count chips
+ don't have enough cores to dedicate for TX and RX. This forces
+ sharing under load. If there are enough cores, the yield does
+ nothing */
+ bdk_thread_yield();
+ }
+}
+
+/**
+ * Configure NIC for a specific port. This is called for each
+ * port on every interface that connects to NIC.
+ *
+ * @param handle Handle for port to config
+ * @param ntype Type of LMAC this NIC connects to
+ * @param lmac_credits
+ * Size of the LMAC buffer in bytes. Used to configure the number of credits to
+ * setup between the NIC and LMAC
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_nic_port_init(bdk_if_handle_t handle, bdk_nic_type_t ntype, int lmac_credits)
+{
+ int nic_chan_idx_e; /* Flow channel for the CPI */
+ bool has_rx_nic = (-1 == handle->pki_channel); /* true when nic rx channel exists - may be BGX or LBK-NIC*/
+ bool has_tx_nic = (-1 == handle->pko_queue); /* true when nic tx channel exists - may be BGX or LBK-NIC*/
+ int nic_intf_e = -1; /* Interface enumeration */
+ int nic_intf_block_e; /* Interface Block ID Enumeration */
+ int nic_lmac_e=-1; /* LMAC enumeration */
+
+ if (global_buffer_size == 0)
+ global_buffer_size = bdk_config_get_int(BDK_CONFIG_PACKET_BUFFER_SIZE);
+
+ if (!has_rx_nic && !has_tx_nic) return 0;
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ /* Flow here is a compressed NIC_CHAN_E enum value. Flow is bit[8] and
+ bit[6:0] from NIC_CHAN_E. This works out as:
+ bit 7: BGX interface number(0-1)
+ bit 6:4: BGX port number(0-3)
+ bit 3:0: BGX channel on a port (0-15) */
+ nic_chan_idx_e = (handle->interface) ? 0x80 : 0x00;
+ nic_chan_idx_e += handle->index * 16;
+ nic_chan_idx_e += 0; /* channel */
+ nic_intf_e = BDK_NIC_INTF_E_BGXX(handle->interface);
+ nic_intf_block_e = BDK_NIC_INTF_BLOCK_E_BGXX_BLOCK(handle->interface);
+ nic_lmac_e = BDK_NIC_LMAC_E_BGXX_LMACX(handle->interface, handle->index);
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ {
+ switch (ntype)
+ {
+ case BDK_NIC_TYPE_BGX:
+ nic_chan_idx_e = BDK_NIC_CHAN_IDX_E_BGXX_LMACX_CHX(handle->interface, handle->index, 0/*channel*/);
+ nic_intf_e = BDK_NIC_INTF_E_BGXX(handle->interface);
+ nic_intf_block_e = BDK_NIC_INTF_BLOCK_E_BGXX(handle->interface);
+ nic_lmac_e = BDK_NIC_LMAC_E_BGXX_LMACX(handle->interface, handle->index);
+ break;
+ case BDK_NIC_TYPE_LBK:
+ nic_chan_idx_e = BDK_NIC_CHAN_IDX_E_LBKX_CHX_CN83XX((handle->interface == 3) ? 1 : 0, handle->index);
+ // rx interface
+ if (3 == handle->interface) {
+ nic_intf_e = BDK_NIC_INTF_E_LBKX_CN83XX(1);
+ } else if (2 == handle->interface) {
+ nic_intf_e = BDK_NIC_INTF_E_LBKX_CN83XX(0);
+ }
+ nic_intf_block_e = BDK_NIC_INTF_BLOCK_E_LBKX(handle->interface);
+ // tx interface
+ if (3 == handle->interface) {
+ nic_lmac_e = BDK_NIC_LMAC_E_LBKX_CN83XX(1);
+ } else if (1 == handle->interface) {
+ nic_lmac_e = BDK_NIC_LMAC_E_LBKX_CN83XX(0);
+ }
+ break;
+ default:
+ bdk_error("%s: Unsupported NIC TYPE %d\n", handle->name, ntype);
+ return -1;
+ }
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ {
+ switch (ntype)
+ {
+ case BDK_NIC_TYPE_BGX:
+ nic_chan_idx_e = BDK_NIC_CHAN_IDX_E_BGXX_LMACX_CHX(handle->interface, handle->index, 0/*channel*/);
+ nic_intf_e = BDK_NIC_INTF_E_BGXX(handle->interface);
+ nic_intf_block_e = BDK_NIC_INTF_BLOCK_E_BGXX(handle->interface);
+ nic_lmac_e = BDK_NIC_LMAC_E_BGXX_LMACX(handle->interface, handle->index);
+ break;
+ case BDK_NIC_TYPE_RGMII:
+ nic_chan_idx_e = BDK_NIC_CHAN_IDX_E_RGXX_LMACX_CHX(handle->interface, handle->index, 0/*channel*/);
+ nic_intf_e = BDK_NIC_INTF_E_RGXX(handle->index);
+ nic_intf_block_e = BDK_NIC_INTF_BLOCK_E_BGXX(handle->interface + 2);
+ nic_lmac_e = BDK_NIC_LMAC_E_RGXX_LMACX(handle->interface, handle->index);
+ break;
+ case BDK_NIC_TYPE_LBK:
+ nic_chan_idx_e = BDK_NIC_CHAN_IDX_E_LBKX_CHX_CN81XX(handle->interface, handle->index);
+ nic_intf_e = BDK_NIC_INTF_E_LBKX_CN81XX(handle->interface);
+ nic_intf_block_e = BDK_NIC_INTF_BLOCK_E_LBKX(handle->interface);
+ nic_lmac_e = BDK_NIC_LMAC_E_LBKX_CN81XX(handle->interface);
+ break;
+ default:
+ bdk_error("%s: Unsupported NIC TYPE %d\n", handle->name, ntype);
+ return -1;
+ }
+ }
+ else
+ {
+ bdk_error("%s: Unsupported chip (NIC init)\n", handle->name);
+ return -1;
+ }
+
+ /* Make sure the node global state has been allocated */
+ if (global_node_state[handle->node] == NULL)
+ {
+ int num_nic_vf;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ /* NIC_PF_CONST1 didn't exist on this chip */
+ num_nic_vf = 128;
+ }
+ else
+ {
+ BDK_CSR_INIT(nic_pf_const1, handle->node, BDK_NIC_PF_CONST1);
+ num_nic_vf = nic_pf_const1.s.vnics;
+ }
+ global_node_state[handle->node] = calloc(1, sizeof(nic_node_state_t) + sizeof(handle) * num_nic_vf * 8);
+ if (global_node_state[handle->node] == NULL)
+ {
+ bdk_error("N%d.NIC: Failed to allocate node state\n", handle->node);
+ return -1;
+ }
+ global_node_state[handle->node]->num_nic_vf = num_nic_vf;
+ }
+ nic_node_state_t *node_state = global_node_state[handle->node];
+
+ /* See if we have a free VF */
+ if (!handle->index && (node_state->next_free_nic_vf >= node_state->num_nic_vf))
+ {
+ bdk_error("N%d.NIC: Ran out of NIC VFs\n", handle->node);
+ return -1;
+ }
+
+ /* VNIC setup requirements
+ The code in this file makes the following assumptions:
+ 1) One RBDR for each CQ. No locking is done on RBDR
+ 2) A CQ can be shared across multiple ports, saving space as the
+ cost of performance.
+ 3) One SQ per physical port, no locking on TX
+ 4) One RQ per physical port, many RQ may share RBDR/CQ
+
+ Current setup without DRAM:
+ 1) One NIC VF is used for an entire interface (BGX, LBK). The variable
+ nic_vf represents the NIC virtual function.
+ 2) SQs are allocated one per port. SQ index equals handle->index
+ 3) RQs are allocated one per port. RQ index equals handle->index
+ 4) One CQ is allcoated per entire interface, using index 0
+ 5) One RBDR is used for the CQ, index 0
+
+ Current setup with DRAM:
+ FIXME: Same as without DRAM. There are not enough RBDR to have
+ independent CQs without locking.
+ */
+ void *sq_memory = NULL;
+ if (has_tx_nic) {
+ sq_memory = memalign(BDK_CACHE_LINE_SIZE, 16 * SQ_ENTRIES);
+ if (!sq_memory)
+ {
+ bdk_error("%s: Unable to allocate queues\n", handle->name);
+ return -1;
+ }
+ }
+ nic_t *nic = calloc(1, sizeof(nic_t));
+ if (!nic)
+ {
+ if (sq_memory) free(sq_memory);
+ bdk_error("%s: Unable to NIC state\n", handle->name);
+ return -1;
+ }
+
+ /* Fill in the various NIC indexes */
+ nic->node = handle->node;
+ nic->ntype = ntype;
+ if (handle->index)
+ nic->nic_vf = node_state->next_free_nic_vf - 1; /* reuse last one */
+ else
+ nic->nic_vf = node_state->next_free_nic_vf++; /* New nic */
+ nic->sq = handle->index;
+ nic->cq = 0;
+ nic->rq = handle->index;
+ nic->rbdr = 0;
+ nic->bpid = node_state->next_free_bpid++;
+ nic->handle = handle;
+ BDK_TRACE(NIC, "%s: Creating NIC(%d, sq=%d, cq=%d, rq=%d, rbdr=%d, bpid=%d)\n",
+ nic->handle->name, nic->nic_vf, nic->sq, nic->cq, nic->rq, nic->rbdr, nic->bpid);
+
+ /* Connect this NIC to the handle */
+ handle->nic_id = nic->nic_vf * 8 + nic->rq;
+ node_state->nic_map[handle->nic_id] = nic;
+
+ /* Enable global BP state updates */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_BP_CFG,
+ c.s.bp_poll_ena = 1;
+ c.s.bp_poll_dly = 3);
+
+ /* Enable interface level backpresure */
+ if (-1 != nic_intf_e) {
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_INTFX_BP_CFG(nic_intf_e),
+ c.s.bp_ena = 1;
+ c.s.bp_type = ((nic->ntype == BDK_NIC_TYPE_BGX) ||
+ (nic->ntype == BDK_NIC_TYPE_RGMII)) ? 0 : 1; /* 0=BGX, 1=LBK/TNS */
+ c.s.bp_id = nic_intf_block_e);
+ }
+ if (has_tx_nic) {
+ /* Configure the submit queue (SQ) */
+ nic->sq_base = sq_memory;
+ nic->sq_loc = 0;
+ nic->sq_available = SQ_ENTRIES;
+ BDK_CSR_WRITE(nic->node, BDK_NIC_QSX_SQX_BASE(nic->nic_vf, nic->sq),
+ bdk_ptr_to_phys(sq_memory));
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_QSX_SQX_CFG(nic->nic_vf, nic->sq),
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ c.s.cq_limit = 1;
+ c.s.ena = 1;
+ c.s.ldwb = BDK_USE_DWB;
+ c.s.qsize = SQ_ENTRIES_QSIZE);
+ }
+ int cpi=0;
+ int rssi=0;
+ if (has_rx_nic) {
+ /* Configure the receive queue (RQ) */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_QSX_RQ_GEN_CFG(nic->nic_vf),
+ c.s.vlan_strip = 0;
+ c.s.len_l4 = 0;
+ c.s.len_l3 = 0;
+ c.s.csum_l4 = 0;
+ c.s.ip6_udp_opt = 0;
+ c.s.splt_hdr_ena = 0;
+ c.s.cq_hdr_copy = 0;
+ c.s.max_tcp_reass = 0;
+ c.s.cq_pkt_size = 0;
+ c.s.later_skip = 0;
+ c.s.first_skip = 0);
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_QSX_RQX_CFG(nic->nic_vf, nic->rq),
+ c.s.ena = 1;
+ c.s.tcp_ena = 0);
+
+ cpi = node_state->next_free_cpi++; /* Allocate a new Channel Parse Index (CPI) */
+ rssi = node_state->next_free_rssi++;/* Allocate a new Receive-Side Scaling Index (RSSI) */
+ /* NIC_CHAN_E hard mapped to "flow". Flow chooses the CPI */
+
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_CHANX_RX_CFG(nic_chan_idx_e),
+ c.s.cpi_alg = BDK_NIC_CPI_ALG_E_NONE;
+ c.s.cpi_base = cpi);
+ /* Setup backpressure */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_CHANX_RX_BP_CFG(nic_chan_idx_e),
+ c.s.ena = 1;
+ c.s.bpid = nic->bpid);
+ }
+ if ( has_tx_nic) {
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_CHANX_TX_CFG(nic_chan_idx_e),
+ c.s.bp_ena = 1);
+ }
+
+ if (has_rx_nic) {
+ /* CPI is the output of the above alogrithm, this is used to lookup the
+ VNIC for receive and RSSI */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_CPIX_CFG(cpi),
+ c.cn88xxp1.vnic = nic->nic_vf; /* TX and RX use the same VNIC */
+ c.cn88xxp1.rss_size = 0; /* RSS hash is disabled */
+ c.s.padd = 0; /* Used if we have multiple channels per port */
+ c.cn88xxp1.rssi_base = rssi); /* Base RSSI */
+
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ {
+ /* CN88XX pass 2 moved some fields to a different CSR */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_MPIX_CFG(cpi),
+ c.s.vnic = nic->nic_vf; /* TX and RX use the same VNIC */
+ c.s.rss_size = 0; /* RSS hash is disabled */
+ c.s.rssi_base = rssi); /* Base RSSI */
+ }
+
+ /* The RSSI is used to determine which Receive Queue (RQ) we use */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_RSSIX_RQ(rssi),
+ c.s.rq_qs = nic->nic_vf;
+ c.s.rq_idx = nic->rq);
+ /* Set the min and max packet size. PKND comes from BGX. It is always zero
+ for now */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_PKINDX_CFG(handle->pknd),
+ c.s.lenerr_en = 0;
+ c.s.minlen = 0;
+ c.s.maxlen = 65535);
+ }
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ /* Bypass the TNS */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_INTFX_SEND_CFG(handle->interface),
+ c.s.tns_nonbypass = 0;
+ c.s.block = 0x8 + handle->interface);
+ }
+
+ /* Errata (NIC-21858) If NIC_PF_QS()_CFG ENA is set after RRM enabled...RRM breaks */
+ /* Do global vnic init */
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_QSX_CFG(nic->nic_vf),
+ c.s.ena = 1;
+ c.s.vnic = nic->nic_vf);
+
+ if (has_tx_nic && vnic_setup_tx_shaping(nic))
+ return -1;
+
+ /* Completion queue may be used by both tx and rx.
+ ** Define it even if only one of rx/tx is in use
+ */
+ if (vnic_setup_cq(nic))
+ return -1;
+ /* RBDR is defined regardless of rx_nic to avoid possible backpressure */
+ if ( vnic_setup_rbdr(nic))
+ return -1;
+
+ /* Program LMAC credits */
+ if ((has_tx_nic) && (-1 != nic_lmac_e)) {
+ int credit;
+ if ((BDK_NIC_TYPE_LBK == nic->ntype) && CAVIUM_IS_MODEL(CAVIUM_CN83XX) )
+ credit = 512; /* HRM guidance */
+ else
+ credit = (lmac_credits - MAX_MTU) / 16;
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_LMACX_CREDIT(nic_lmac_e),
+ c.s.cc_unit_cnt = credit;
+ c.s.cc_packet_cnt = 0x1ff;
+ c.s.cc_enable = 1);
+
+ /* Pad packets to 60 bytes, 15 32bit words (before FCS) */
+ if (nic->ntype != BDK_NIC_TYPE_LBK)
+ BDK_CSR_MODIFY(c, nic->node, BDK_NIC_PF_LMACX_CFG(nic_lmac_e),
+ c.s.min_pkt_size = 15);
+ }
+ /* Create a receive thread if this handle has its own CQ/RBDR */
+ if (handle->index == 0)
+ {
+ /* FIXME
+ * At this time thread monitors both CQ and RBDR and uses it only for receive
+ * Setting up RBDR for tx only nics is wasteful.
+ * When nic_tx in bdk starts using CQ, thread needs to change
+ */
+ if (has_rx_nic && bdk_thread_create(nic->node, 0, if_receive, 0, nic, 0))
+ {
+ bdk_error("%s: Failed to allocate receive thread\n", handle->name);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * Send a packet
+ *
+ * @param handle Handle of port to send on
+ * @param packet Packet to send
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_nic_transmit(bdk_if_handle_t handle, const bdk_if_packet_t *packet)
+{
+ /* The SQ can't be filled completely as it reguires at least one free
+ entry so the head and pointer don't look like empty. SQ_SLOP is the
+ amount of SQ space we reserve to make sure of this */
+ const int SQ_SLOP = 1;
+ const nic_node_state_t *node_state = global_node_state[handle->node];
+ nic_t *nic = node_state->nic_map[handle->nic_id];
+ BDK_TRACE(NIC, "%s: Transmit packet of %d bytes, %d segments\n",
+ nic->handle->name, packet->length, packet->segments);
+
+ /* Update the SQ available if we're out of space. The NIC should have sent
+ packets, making more available. This allows us to only read the STATUS
+ CSR when really necessary, normally using the L1 cached value */
+ if (nic->sq_available < packet->segments + 1 + SQ_SLOP)
+ {
+ BDK_CSR_INIT(sq_status, nic->node, BDK_NIC_QSX_SQX_STATUS(nic->nic_vf, nic->sq));
+ nic->sq_available = SQ_ENTRIES - sq_status.s.qcount;
+ /* Re-Check for space. A packets is a header plus its segments */
+ if (nic->sq_available < packet->segments + 1 + SQ_SLOP)
+ {
+ BDK_TRACE(NIC, "%s: Transmit fail, queue full\n", nic->handle->name);
+ return -1;
+ }
+ }
+
+ /* Build the command */
+ void *sq_ptr = nic->sq_base;
+ int loc = nic->sq_loc;
+ union bdk_nic_send_hdr_s send_hdr;
+ send_hdr.u[0] = 0;
+ send_hdr.u[1] = 0;
+ send_hdr.s.subdc = BDK_NIC_SEND_SUBDC_E_HDR;
+ send_hdr.s.subdcnt = packet->segments;
+ send_hdr.s.total = packet->length;
+ switch (packet->packet_type)
+ {
+ case BDK_IF_TYPE_UNKNOWN:
+ break;
+ case BDK_IF_TYPE_UDP4:
+ send_hdr.s.ckl3 = 1; /* L3 - IPv4 checksum enable */
+ send_hdr.s.l3ptr = 14; /* L2 header is 14 bytes */
+ send_hdr.s.ckl4 = BDK_NIC_SEND_CKL4_E_UDP; /* L4 - UDP checksum enable */
+ send_hdr.s.l4ptr = 14 + 20; /* 14 bytes L2 + 20 bytes IPv4 */
+ break;
+ case BDK_IF_TYPE_TCP4:
+ send_hdr.s.ckl3 = 1; /* L3 - IPv4 checksum enable */
+ send_hdr.s.l3ptr = 14; /* L2 header is 14 bytes */
+ send_hdr.s.ckl4 = BDK_NIC_SEND_CKL4_E_TCP; /* L4 - TCP checksum enable */
+ send_hdr.s.l4ptr = 14 + 20; /* 14 bytes L2 + 20 bytes IPv4 */
+ if (packet->mtu)
+ {
+ int headers = 14 + 20 + 20;
+ send_hdr.s.tso = 1; /* Use TCP offload */
+ send_hdr.s.tso_sb = headers; /* 14 bytes L2 + 20 bytes IPv4, 20 bytes TCP */
+ send_hdr.s.tso_mps = packet->mtu - headers; /* Max TCP data payload size */
+ }
+ break;
+ }
+ volatile uint64_t *wptr = (uint64_t *)(sq_ptr + loc * 16);
+ wptr[0] = bdk_cpu_to_le64(send_hdr.u[0]);
+ wptr[1] = bdk_cpu_to_le64(send_hdr.u[1]);
+ BDK_TRACE(NIC, "%s: Transmit HDR[%p] = 0x%lx 0x%lx\n",
+ nic->handle->name, sq_ptr + loc * 16, send_hdr.u[0], send_hdr.u[1]);
+ loc++;
+ loc &= SQ_ENTRIES - 1;
+ for (int s = 0; s < packet->segments; s++)
+ {
+ union bdk_nic_send_gather_s gather;
+ gather.u[0] = 0;
+ gather.u[1] = 0;
+ gather.s.addr = packet->packet[s].s.address;
+ gather.s.subdc = BDK_NIC_SEND_SUBDC_E_GATHER;
+ gather.s.ld_type = (BDK_USE_DWB) ? BDK_NIC_SEND_LD_TYPE_E_LDWB : BDK_NIC_SEND_LD_TYPE_E_LDD;
+ gather.s.size = packet->packet[s].s.size;
+ wptr = (uint64_t *)(sq_ptr + loc * 16);
+ wptr[0] = bdk_cpu_to_le64(gather.u[0]);
+ wptr[1] = bdk_cpu_to_le64(gather.u[1]);
+ BDK_TRACE(NIC, "%s: Transmit Gather[%p] = 0x%lx 0x%lx\n",
+ nic->handle->name, sq_ptr + loc * 16, gather.u[0], gather.u[1]);
+ loc++;
+ loc &= SQ_ENTRIES - 1;
+ }
+
+ BDK_WMB;
+
+ /* Ring the doorbell */
+ BDK_CSR_WRITE(nic->node, BDK_NIC_QSX_SQX_DOOR(nic->nic_vf, nic->sq),
+ packet->segments + 1);
+ BDK_TRACE(NIC, "%s: Transmit Doorbell %d\n", nic->handle->name, packet->segments + 1);
+
+ /* Update our cached state */
+ nic->sq_available -= packet->segments + 1;
+ nic->sq_loc = loc;
+ if (handle->iftype != BDK_IF_BGX) {
+ /* Update stats as we do them in software for non-BGX */
+ handle->stats.tx.packets++;
+ handle->stats.tx.octets += packet->length;
+ if (handle->flags & BDK_IF_FLAGS_HAS_FCS)
+ handle->stats.tx.octets += 4;
+ }
+ return 0;
+}
+
+/**
+ * Get the current TX queue depth. Note that this operation may be slow
+ * and adversly affect packet IO performance.
+ *
+ * @param handle Port to check
+ *
+ * @return Depth of the queue in packets
+ */
+int bdk_nic_get_queue_depth(bdk_if_handle_t handle)
+{
+ const nic_node_state_t *node_state = global_node_state[handle->node];
+ const nic_t *nic = node_state->nic_map[handle->nic_id];
+ BDK_CSR_INIT(sq_status, nic->node, BDK_NIC_QSX_SQX_STATUS(nic->nic_vf, nic->sq));
+ return sq_status.s.qcount;
+}
+
+/**
+ * Query NIC and fill in the transmit stats for the supplied
+ * interface handle.
+ *
+ * @param handle Port handle
+ */
+void bdk_nic_fill_tx_stats(bdk_if_handle_t handle)
+{
+ const int vnic = handle->nic_id >> 3;
+
+ /* Transmit stats are done in software due to CN81XX not having enough NICs */
+
+ /* Note drops are shared across a BGX. People will be confused */
+ BDK_CSR_INIT(drps, handle->node, BDK_NIC_VNICX_TX_STATX(vnic, BDK_NIC_STAT_VNIC_TX_E_TX_DROP));
+ handle->stats.tx.dropped_packets = bdk_update_stat_with_overflow(drps.u, handle->stats.tx.dropped_packets, 48);
+ /* Dropped Octets are not available */
+}
+
+/**
+ * Query NIC and fill in the receive stats for the supplied
+ * interface handle.
+ *
+ * @param handle Port handle
+ */
+void bdk_nic_fill_rx_stats(bdk_if_handle_t handle)
+{
+ /* Account for RX FCS */
+ const int bytes_off_rx = (handle->flags & BDK_IF_FLAGS_HAS_FCS) ? 4 : 0;
+ const int vnic = handle->nic_id >> 3;
+
+ /* Note stats are shared across a BGX. People will be confused */
+
+ /* Read the RX statistics. These do not include the ethernet FCS */
+ BDK_CSR_INIT(rx_red, handle->node, BDK_NIC_VNICX_RX_STATX(vnic, BDK_NIC_STAT_VNIC_RX_E_RX_RED));
+ BDK_CSR_INIT(rx_red_octets, handle->node, BDK_NIC_VNICX_RX_STATX(vnic, BDK_NIC_STAT_VNIC_RX_E_RX_RED_OCTS));
+ BDK_CSR_INIT(rx_ovr, handle->node, BDK_NIC_VNICX_RX_STATX(vnic, BDK_NIC_STAT_VNIC_RX_E_RX_ORUN));
+ BDK_CSR_INIT(rx_ovr_octets, handle->node, BDK_NIC_VNICX_RX_STATX(vnic, BDK_NIC_STAT_VNIC_RX_E_RX_ORUN_OCTS));
+ uint64_t drops = rx_red.u + rx_ovr.u;
+ uint64_t drop_octets = rx_red_octets.u + rx_ovr_octets.u;
+
+ /* Drop and error counters */
+ handle->stats.rx.dropped_octets -= handle->stats.rx.dropped_packets * bytes_off_rx;
+ handle->stats.rx.dropped_octets = bdk_update_stat_with_overflow(drop_octets, handle->stats.rx.dropped_octets, 48);
+ handle->stats.rx.dropped_packets = bdk_update_stat_with_overflow(drops, handle->stats.rx.dropped_packets, 48);
+ handle->stats.rx.dropped_octets += handle->stats.rx.dropped_packets * bytes_off_rx;
+
+ /* Normal RX stats are done by software on receive */
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-pcie-cn8xxx.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-pcie-cn8xxx.c
new file mode 100644
index 0000000000..16034d27c3
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-pcie-cn8xxx.c
@@ -0,0 +1,1263 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <string.h>
+#include "libbdk-arch/bdk-csrs-dtx.h"
+#include "libbdk-arch/bdk-csrs-gser.h"
+#include "libbdk-arch/bdk-csrs-gic.h"
+#include "libbdk-arch/bdk-csrs-pem.h"
+#include "libbdk-arch/bdk-csrs-pcierc.h"
+#include "libbdk-arch/bdk-csrs-sli.h"
+#include "libbdk-arch/bdk-csrs-rst.h"
+#include "libbdk-hal/bdk-pcie.h"
+#include "libbdk-hal/bdk-config.h"
+#include "libbdk-hal/bdk-utils.h"
+#include "libbdk-hal/if/bdk-if.h"
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-hal/device/bdk-device.h"
+#include "libbdk-hal/bdk-ecam.h"
+
+/**
+ * Return the number of possible PCIe ports on a node. The actual number
+ * of configured ports may be less and may also be disjoint.
+ *
+ * @param node Node to query
+ *
+ * @return Number of PCIe ports that are possible
+ */
+int bdk_pcie_get_num_ports(bdk_node_t node)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 6;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 4;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 3;
+ else
+ return 0;
+}
+
+
+/**
+ * Given a PCIe port, determine which SLI controls its memory regions
+ *
+ * @param node Node for the PCIe port
+ * @param pcie_port The PCIe port
+ * @param sli The SLI index is written to this integer pointer
+ * @param sli_group The index of the PCIe port on the SLI is returned here. This is a sequencial
+ * number for each PCIe on an SLI. Use this to index SLI regions.
+ */
+static void __bdk_pcie_get_sli(bdk_node_t node, int pcie_port, int *sli, int *sli_group)
+{
+ /* This mapping should be determined by find the SLI number on the
+ same ECAM bus as the PCIERC bridge. That is fairly complex, so it is
+ hardcoded for now */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ /* Ports 0-2 goto SLI0, ports 3-5 goto SLI1 */
+ *sli = (pcie_port >= 3) ? 1 : 0;
+ *sli_group = pcie_port - *sli * 3;
+ return;
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ {
+ /* Only one SLI */
+ *sli = 0;
+ *sli_group = pcie_port;
+ return;
+ }
+ else
+ bdk_fatal("Unable to determine SLI for PCIe port. Update __bdk_pcie_get_sli()\n");
+}
+
+/**
+ * Return the Core physical base address for PCIe MEM access. Memory is
+ * read/written as an offset from this address.
+ *
+ * @param node Node to use in a Numa setup
+ * @param pcie_port PCIe port the memory is on
+ * @param mem_type Type of memory
+ *
+ * @return 64bit physical address for read/write
+ */
+uint64_t bdk_pcie_get_base_address(bdk_node_t node, int pcie_port, bdk_pcie_mem_t mem_type)
+{
+ /* See __bdk_pcie_sli_initialize() for a description about how SLI regions work */
+ int sli;
+ int sli_group;
+ __bdk_pcie_get_sli(node, pcie_port, &sli, &sli_group);
+ int region = (sli_group << 6) | (mem_type << 4);
+ union bdk_sli_s2m_op_s s2m_op;
+ s2m_op.u = 0;
+ s2m_op.s.io = 1;
+ s2m_op.s.node = node;
+ s2m_op.s.did_hi = 0x8 + sli;
+ s2m_op.s.region = region;
+ return s2m_op.u;
+}
+
+/**
+ * Size of the Mem address region returned at address
+ * bdk_pcie_get_base_address()
+ *
+ * @param node Node to use in a Numa setup
+ * @param pcie_port PCIe port the IO is for
+ * @param mem_type Type of memory
+ *
+ * @return Size of the Mem window
+ */
+uint64_t bdk_pcie_get_base_size(bdk_node_t node, int pcie_port, bdk_pcie_mem_t mem_type)
+{
+ return 1ull << 36;
+}
+
+/**
+ * @INTERNAL
+ * Initialize the RC config space CSRs
+ *
+ * @param pcie_port PCIe port to initialize
+ */
+static void __bdk_pcie_rc_initialize_config_space(bdk_node_t node, int pcie_port)
+{
+ int sli;
+ int sli_group;
+ __bdk_pcie_get_sli(node, pcie_port, &sli, &sli_group);
+
+ /* The reset default for config retries is too short. Set it to 48ms, which
+ is what the Octeon SDK team is using. There is no documentation about
+ where they got the 48ms number */
+ int cfg_retry = 48 * 1000000 / (bdk_clock_get_rate(node, BDK_CLOCK_SCLK) >> 16);
+ if (cfg_retry >= 0x10000)
+ cfg_retry = 0xfffff;
+ BDK_CSR_MODIFY(c, node, BDK_PEMX_CTL_STATUS(pcie_port),
+ c.cn83xx.cfg_rtry = cfg_retry);
+
+
+ /* Max Payload Size (PCIE*_CFG030[MPS]) */
+ /* Max Read Request Size (PCIE*_CFG030[MRRS]) */
+ /* Relaxed-order, no-snoop enables (PCIE*_CFG030[RO_EN,NS_EN] */
+ /* Error Message Enables (PCIE*_CFG030[CE_EN,NFE_EN,FE_EN,UR_EN]) */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG030(pcie_port),
+ c.s.mps = 1; /* Support 256 byte MPS */
+ c.s.mrrs = 0x5; /* Support 4KB MRRS */
+ c.s.ro_en = 1; /* Enable relaxed order processing. This will allow devices to affect read response ordering */
+ c.s.ns_en = 1; /* Enable no snoop processing. Not used */
+ c.s.ce_en = 1; /* Correctable error reporting enable. */
+ c.s.nfe_en = 1; /* Non-fatal error reporting enable. */
+ c.s.fe_en = 1; /* Fatal error reporting enable. */
+ c.s.ur_en = 1); /* Unsupported request reporting enable. */
+
+ /* Configure the PCIe slot number if specified */
+ int slot_num = bdk_config_get_int(BDK_CONFIG_PCIE_PHYSICAL_SLOT, node, pcie_port);
+ if (slot_num != -1)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG028(pcie_port),
+ c.s.si = 1); /* Slot Implemented*/
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG033(pcie_port),
+ c.s.ps_num = slot_num);
+ }
+
+ /* Disable ECRC Generation as not all card support it. The OS can enable it
+ later if desired (PCIE*_CFG070[GE,CE]) */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG070(pcie_port),
+ c.s.ge = 0; /* ECRC generation disable. */
+ c.s.ce = 0); /* ECRC check disable. */
+
+ /* Access Enables (PCIE*_CFG001[MSAE,ME]) */
+ /* ME and MSAE should always be set. */
+ /* Interrupt Disable (PCIE*_CFG001[I_DIS]) */
+ /* System Error Message Enable (PCIE*_CFG001[SEE]) */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG001(pcie_port),
+ c.s.msae = 1; /* Memory space enable. */
+ c.s.me = 1; /* Bus master enable. */
+ c.s.i_dis = 1; /* INTx assertion disable. */
+ c.s.see = 1); /* SERR# enable */
+
+ /* Advanced Error Recovery Message Enables */
+ /* (PCIE*_CFG066,PCIE*_CFG067,PCIE*_CFG069) */
+ BDK_CSR_WRITE(node, BDK_PCIERCX_CFG066(pcie_port), 0);
+ /* Use BDK_PCIERCX_CFG067 hardware default */
+ BDK_CSR_WRITE(node, BDK_PCIERCX_CFG069(pcie_port), 0);
+
+
+ /* Active State Power Management (PCIE*_CFG032[ASLPC]) */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG032(pcie_port),
+ c.s.aslpc = 0); /* Active state Link PM control. */
+
+ /* Link Width Mode (PCIERCn_CFG452[LME]) - Set during bdk_pcie_rc_initialize_link() */
+ /* Primary Bus Number (PCIERCn_CFG006[PBNUM]) */
+ /* Use bus numbers as follows:
+ 0 - 31: Reserved for internal ECAM
+ 32 - 87: First PCIe on SLI
+ 88 - 143: Second PCIe on SLI
+ 144 - 199: Third PCIe on SLI
+ 200 - 255: Fourth PCIe on SLI
+ Start bus = 32 + pcie * 56 */
+ const int BUSSES_PER_PCIE = 56;
+ int bus = 32 + sli_group * BUSSES_PER_PCIE;
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG006(pcie_port),
+ c.s.pbnum = 0;
+ c.s.sbnum = bus;
+ c.s.subbnum = bus + BUSSES_PER_PCIE - 1);
+
+ /* Memory-mapped I/O BAR (PCIERCn_CFG008) */
+ uint64_t mem_base = bdk_pcie_get_base_address(node, pcie_port, BDK_PCIE_MEM_NORMAL);
+ uint64_t mem_limit = mem_base + bdk_pcie_get_base_size(node, pcie_port, BDK_PCIE_MEM_NORMAL) - 1;
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG008(pcie_port),
+ c.s.mb_addr = mem_base >> 16;
+ c.s.ml_addr = mem_limit >> 16);
+
+ /* Prefetchable BAR (PCIERCn_CFG009,PCIERCn_CFG010,PCIERCn_CFG011) */
+ uint64_t prefetch_base = bdk_pcie_get_base_address(node, pcie_port, BDK_PCIE_MEM_PREFETCH);
+ uint64_t prefetch_limit = prefetch_base + bdk_pcie_get_base_size(node, pcie_port, BDK_PCIE_MEM_PREFETCH) - 1;
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG009(pcie_port),
+ c.s.lmem_base = prefetch_base >> 16;
+ c.s.lmem_limit = prefetch_limit >> 16);
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG010(pcie_port),
+ c.s.umem_base = prefetch_base >> 32);
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG011(pcie_port),
+ c.s.umem_limit = prefetch_limit >> 32);
+
+ /* System Error Interrupt Enables (PCIERCn_CFG035[SECEE,SEFEE,SENFEE]) */
+ /* PME Interrupt Enables (PCIERCn_CFG035[PMEIE]) */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG035(pcie_port),
+ c.s.secee = 1; /* System error on correctable error enable. */
+ c.s.sefee = 1; /* System error on fatal error enable. */
+ c.s.senfee = 1; /* System error on non-fatal error enable. */
+ c.s.pmeie = 1); /* PME interrupt enable. */
+
+ /* Advanced Error Recovery Interrupt Enables */
+ /* (PCIERCn_CFG075[CERE,NFERE,FERE]) */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG075(pcie_port),
+ c.s.cere = 1; /* Correctable error reporting enable. */
+ c.s.nfere = 1; /* Non-fatal error reporting enable. */
+ c.s.fere = 1); /* Fatal error reporting enable. */
+
+ /* Make sure the PEM agrees with GSERX about the speed its going to try */
+ BDK_CSR_INIT(pem_cfg, node, BDK_PEMX_CFG(pcie_port));
+ switch (pem_cfg.cn83xx.md)
+ {
+ case 0: /* Gen 1 */
+ /* Set the target link speed */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG040(pcie_port),
+ c.s.tls = 1);
+ break;
+ case 1: /* Gen 2 */
+ /* Set the target link speed */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG040(pcie_port),
+ c.s.tls = 2);
+ break;
+ case 2: /* Gen 3 */
+ /* Set the target link speed */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG040(pcie_port),
+ c.s.tls = 3);
+ break;
+ default:
+ bdk_error("N%d.PCIe%d: Unexpected rate of %d\n", node, pcie_port, pem_cfg.cn83xx.md);
+ break;
+ }
+
+ BDK_CSR_INIT(pemx_cfg, node, BDK_PEMX_CFG(pcie_port));
+ BDK_CSR_INIT(cfg452, node, BDK_PCIERCX_CFG452(pcie_port));
+ BDK_CSR_INIT(cfg031, node, BDK_PCIERCX_CFG031(pcie_port));
+ int lme = cfg452.s.lme;
+ int mlw = cfg031.s.mlw;
+
+ /* Link Width Mode (PCIERCn_CFG452[LME]) */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ lme = (pemx_cfg.cn88xx.lanes8) ? 0xf : 0x7;
+ mlw = (pemx_cfg.cn88xx.lanes8) ? 8 : 4;
+ }
+ /* CN83XX can support 8 lanes on QLM0+1 or QLM2+3. 4 lanes on DLM5+6 */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ {
+ switch (pcie_port)
+ {
+ case 0: /* PEM0 on QLM0-1 */
+ lme = (pemx_cfg.cn83xx.lanes8) ? 0xf : 0x7;
+ mlw = (pemx_cfg.cn83xx.lanes8) ? 8 : 4;
+ break;
+ case 1: /* PEM1 on QLM1 */
+ lme = 0x7;
+ mlw = 4;
+ break;
+ case 2: /* PEM2 on QLM2-3 or DLM4 */
+ {
+ BDK_CSR_INIT(pemx_qlm, node, BDK_PEMX_QLM(pcie_port));
+ if (pemx_qlm.s.pem_bdlm) /* PEM2 is on DLM4 */
+ {
+ lme = 0x3;
+ mlw = 2;
+ }
+ else /* PEM2 is on QLM2 */
+ {
+ lme = (pemx_cfg.cn83xx.lanes8) ? 0xf : 0x7;
+ mlw = (pemx_cfg.cn83xx.lanes8) ? 8 : 4;
+ }
+ break;
+ }
+ case 3: /* PEM3 on QLM3 or DLM5-6 */
+ {
+ BDK_CSR_INIT(pemx_qlm, node, BDK_PEMX_QLM(pcie_port));
+ if (pemx_qlm.s.pem_bdlm) /* PEM3 is on DLM5-6 */
+ {
+ lme = (pemx_cfg.cn83xx.lanes8) ? 0x7 : 0x3;
+ mlw = (pemx_cfg.cn83xx.lanes8) ? 4 : 2;
+ }
+ else /* PEM3 is on QLM3 */
+ {
+ lme = 0x7;
+ mlw = 4;
+ }
+ break;
+ }
+ }
+ }
+ /* CN80XX only supports 1 lane on PEM0 */
+ if (cavium_is_altpkg(CAVIUM_CN81XX) && (pcie_port == 0))
+ {
+ lme = 1;
+ mlw = 1;
+ }
+
+ /* Allow override of hardware max link width */
+ int max_width = bdk_config_get_int(BDK_CONFIG_PCIE_WIDTH, node, pcie_port);
+ switch (max_width)
+ {
+ case 1:
+ lme = 1;
+ mlw = 1;
+ break;
+ case 2:
+ lme = 3;
+ mlw = 2;
+ break;
+ case 4:
+ lme = 7;
+ mlw = 4;
+ break;
+ case 8:
+ lme = 0xf;
+ mlw = 8;
+ break;
+ case 16:
+ lme = 0x1f;
+ mlw = 16;
+ break;
+ default:
+ /* No change */
+ break;
+ }
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG452(pcie_port),
+ c.s.lme = lme);
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG031(pcie_port),
+ c.s.mlw = mlw);
+
+ /* Errata PEM-25990 - Disable ASLPMS */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG031(pcie_port),
+ c.s.aslpms = 0);
+
+ /* Errata PEM-26189 - PEM EQ Preset Removal */
+ /* CFG554.PRV default changed from 16'h7ff to 16'h593. Should be
+ safe to apply to CN88XX, CN81XX, and CN83XX */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG554(pcie_port),
+ c.s.prv = bdk_config_get_int(BDK_CONFIG_PCIE_PRESET_REQUEST_VECTOR, node, pcie_port));
+
+ /* Errata PEM-26189 - Disable the 2ms timer on all chips */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG554(pcie_port),
+ c.s.p23td = 1);
+
+ /* Errata PEM-21178 - Change the CFG[089-092] LxUTP and LxDTP defaults.
+ Should be safe to apply to CN88XX, CN81XX, and CN83XX */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG089(pcie_port),
+ c.s.l0dtp = 0x7;
+ c.s.l0utp = 0x7;
+ c.cn83xx.l1dtp = 0x7;
+ c.s.l1utp = 0x7);
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG090(pcie_port),
+ c.s.l2dtp = 0x7;
+ c.s.l2utp = 0x7;
+ c.s.l3dtp = 0x7;
+ c.s.l3utp = 0x7);
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG091(pcie_port),
+ c.s.l4dtp = 0x7;
+ c.s.l4utp = 0x7;
+ c.s.l5dtp = 0x7;
+ c.s.l5utp = 0x7);
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG092(pcie_port),
+ c.s.l6dtp = 0x7;
+ c.s.l6utp = 0x7;
+ c.s.l7dtp = 0x7;
+ c.s.l7utp = 0x7);
+
+ /* (ECAM-27114) PCIERC has incorrect device code */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG002(pcie_port),
+ c.s.sc = 0x4;
+ c.s.bcc = 0x6);
+
+ /* Errata PCIE-29440 - Atomic Egress ATOM_OP/ATOM_OP_EP not implemented
+ correctly */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG038(pcie_port),
+ c.s.atom_op =0x1;
+ c.s.atom_op_eb=0);
+
+ /* Errata PCIE-29566 PEM Link Hangs after going into L1 */
+ BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG548(pcie_port),
+ c.s.grizdnc = 0x0);
+}
+
+/**
+ * Get the PCIe LTSSM state for the given port
+ *
+ * @param node Node to query
+ * @param pcie_port PEM to query
+ *
+ * @return LTSSM state
+ */
+static int __bdk_pcie_rc_get_ltssm_state(bdk_node_t node, int pcie_port)
+{
+ /* LTSSM state is in debug select 0 */
+ BDK_CSR_WRITE(node, BDK_DTX_PEMX_SELX(pcie_port, 0), 0);
+ BDK_CSR_WRITE(node, BDK_DTX_PEMX_ENAX(pcie_port, 0), 0xfffffffffull);
+ /* Read the value */
+ uint64_t debug = BDK_CSR_READ(node, BDK_DTX_PEMX_DATX(pcie_port, 0));
+ /* Disable the PEM from driving OCLA signals */
+ BDK_CSR_WRITE(node, BDK_DTX_PEMX_ENAX(pcie_port, 0), 0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return bdk_extract(debug, 0, 6); /* DBGSEL = 0x0, bits[5:0] */
+ else
+ return bdk_extract(debug, 3, 6); /* DBGSEL = 0x0, bits[8:3] */
+}
+
/**
 * Convert a PCIe LTSSM state number into a human readable name for
 * debug output.
 *
 * @param ltssm LTSSM state as returned by __bdk_pcie_rc_get_ltssm_state()
 *
 * @return Constant string naming the state, or "Unknown" for values
 *         outside 0x00-0x23
 */
static const char *ltssm_string(int ltssm)
{
    /* Names indexed directly by LTSSM state number (0x00-0x23) */
    static const char * const ltssm_names[] = {
        "DETECT_QUIET",      /* 0x00 */
        "DETECT_ACT",        /* 0x01 */
        "POLL_ACTIVE",       /* 0x02 */
        "POLL_COMPLIANCE",   /* 0x03 */
        "POLL_CONFIG",       /* 0x04 */
        "PRE_DETECT_QUIET",  /* 0x05 */
        "DETECT_WAIT",       /* 0x06 */
        "CFG_LINKWD_START",  /* 0x07 */
        "CFG_LINKWD_ACEPT",  /* 0x08 */
        "CFG_LANENUM_WAIT",  /* 0x09 */
        "CFG_LANENUM_ACEPT", /* 0x0A */
        "CFG_COMPLETE",      /* 0x0B */
        "CFG_IDLE",          /* 0x0C */
        "RCVRY_LOCK",        /* 0x0D */
        "RCVRY_SPEED",       /* 0x0E */
        "RCVRY_RCVRCFG",     /* 0x0F */
        "RCVRY_IDLE",        /* 0x10 */
        "L0",                /* 0x11 */
        "L0S",               /* 0x12 */
        "L123_SEND_EIDLE",   /* 0x13 */
        "L1_IDLE",           /* 0x14 */
        "L2_IDLE",           /* 0x15 */
        "L2_WAKE",           /* 0x16 */
        "DISABLED_ENTRY",    /* 0x17 */
        "DISABLED_IDLE",     /* 0x18 */
        "DISABLED",          /* 0x19 */
        "LPBK_ENTRY",        /* 0x1A */
        "LPBK_ACTIVE",       /* 0x1B */
        "LPBK_EXIT",         /* 0x1C */
        "LPBK_EXIT_TIMEOUT", /* 0x1D */
        "HOT_RESET_ENTRY",   /* 0x1E */
        "HOT_RESET",         /* 0x1F */
        "RCVRY_EQ0",         /* 0x20 */
        "RCVRY_EQ1",         /* 0x21 */
        "RCVRY_EQ2",         /* 0x22 */
        "RCVRY_EQ3",         /* 0x23 */
    };
    if ((ltssm < 0) || (ltssm >= (int)(sizeof(ltssm_names) / sizeof(ltssm_names[0]))))
        return "Unknown";
    return ltssm_names[ltssm];
}
+
+/**
+ * During PCIe link initialization we need to make config request to the attached
+ * device to verify its speed and width. These config access happen very early
+ * after the device is taken out of reset, so may fail for some amount of time.
+ * This function automatically retries these config accesses. The normal builtin
+ * hardware retry isn't enough for this very early access.
+ *
+ * @param node Note to read from
+ * @param pcie_port PCIe port to read from
+ * @param bus PCIe bus number
+ * @param dev PCIe device
+ * @param func PCIe function on the device
+ * @param reg Register to read
+ *
+ * @return Config register value, or all ones on failure
+ */
+static uint32_t cfg_read32_retry(bdk_node_t node, int pcie_port, int bus, int dev, int func, int reg)
+{
+ /* Read the PCI config register until we get a valid value. Some cards
+ require time after link up to return data. Wait at most 3 seconds */
+ uint64_t timeout = bdk_clock_get_count(BDK_CLOCK_TIME) + bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) * 3;
+ uint32_t val;
+ do
+ {
+ /* Read PCI capability pointer */
+ val = bdk_pcie_config_read32(node, pcie_port, bus, dev, func, reg);
+ /* Check the read succeeded */
+ if (val != 0xffffffff)
+ return val;
+ /* Failed, wait a little and try again */
+ bdk_wait_usec(10000);
+ } while (bdk_clock_get_count(BDK_CLOCK_TIME) < timeout);
+
+ BDK_TRACE(PCIE, "N%d.PCIe%d: Config read failed, can't communicate with device\n",
+ node, pcie_port);
+ return 0xffffffff;
+}
+
/**
 * Initialize a host mode PCIe link. This function assumes the PEM has already
 * been taken out of reset and configured. It brings up the link and checks that
 * the negotiated speed and width is correct for the configured PEM and the
 * device plugged into it. Note that the return code will signal a retry needed
 * for some link failures. The caller is responsible for PEM reset and retry.
 *
 * @param node      Node the PEM is on
 * @param pcie_port PCIe port to initialize link on
 *
 * @return Zero on success
 *         Negative on failures where retries are not needed
 *         Positive if a retry is needed to fix a failure
 */
static int __bdk_pcie_rc_initialize_link(bdk_node_t node, int pcie_port)
{
    #define LTSSM_HISTORY_SIZE 64 /* Number of LTSSM transitions to record, must be a power of 2 */
    /* Circular buffer of observed LTSSM states, kept only for debug tracing */
    uint8_t ltssm_history[LTSSM_HISTORY_SIZE];
    int ltssm_history_loc;
    bool do_retry_speed = false;

    BDK_TRACE(PCIE, "N%d.PCIe%d: Checking the PEM is out of reset\n", node, pcie_port);
    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_PEMX_ON(pcie_port), pemoor, ==, 1, 100000))
    {
        /* A retry cannot help if the PEM never leaves reset */
        printf("N%d.PCIe%d: PEM in reset, skipping.\n", node, pcie_port);
        return -1;
    }

    /* Determine the maximum link speed and width */
    BDK_CSR_INIT(pciercx_cfg031, node, BDK_PCIERCX_CFG031(pcie_port));
    int max_gen = pciercx_cfg031.s.mls;   /* Max speed of PEM from config (1-3) */
    int max_width = pciercx_cfg031.s.mlw; /* Max lane width of PEM (1-8) */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Link supports up to %d lanes, speed gen%d\n",
        node, pcie_port, max_width, max_gen);

    /* Record starting LTSSM state for debug. 0xff (from memset -1) marks
       unused history slots */
    memset(ltssm_history, -1, sizeof(ltssm_history));
    ltssm_history[0] = __bdk_pcie_rc_get_ltssm_state(node, pcie_port);
    ltssm_history_loc = 0;

    /* Bring up the link */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Enabling the link\n", node, pcie_port);
    BDK_CSR_MODIFY(c, node, BDK_PEMX_CTL_STATUS(pcie_port), c.cn83xx.lnk_enb = 1);

    if (bdk_config_get_int(BDK_CONFIG_PCIE_SKIP_LINK_TRAIN, node, pcie_port)) {
        BDK_TRACE(PCIE, "N%d.PCIe%d: Skipping link configuration\n", node, pcie_port);
        return 0;
    }

retry_speed: /* Jumped back to (once) after requesting a link speed change below */
    /* Clear RC Correctable Error Status Register */
    BDK_CSR_WRITE(node, BDK_PCIERCX_CFG068(pcie_port), -1);

    /* Wait for the link to come up and link training to be complete */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Waiting for link\n", node, pcie_port);

    uint64_t clock_rate = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME);
    uint64_t hold_time = clock_rate / 5;         /* 200ms */
    uint64_t bounce_allow_time = clock_rate / 100; /* 10ms */
    uint64_t timeout = bdk_clock_get_count(BDK_CLOCK_TIME) + clock_rate; /* Timeout = 1s */
    uint64_t good_time = 0; /* Records when the link first went good */
    BDK_CSR_DEFINE(pciercx_cfg032, BDK_PCIERCX_CFG032(pcie_port));
    bool link_up;
    bool is_loop_done;
    do
    {
        /* Read link state */
        pciercx_cfg032.u = BDK_CSR_READ(node, BDK_PCIERCX_CFG032(pcie_port));

        /* Record LTSSM state for debug, only when it changes */
        int ltssm_state = __bdk_pcie_rc_get_ltssm_state(node, pcie_port);
        if (ltssm_history[ltssm_history_loc] != ltssm_state)
        {
            ltssm_history_loc = (ltssm_history_loc + 1) & (LTSSM_HISTORY_SIZE - 1);
            ltssm_history[ltssm_history_loc] = ltssm_state;
        }

        /* Check if the link is up: data link layer active and training done */
        uint64_t current_time = bdk_clock_get_count(BDK_CLOCK_TIME);
        link_up = (pciercx_cfg032.s.dlla && !pciercx_cfg032.s.lt);
        if (link_up)
        {
            /* Is this the first link up? */
            if (!good_time)
            {
                /* Mark the time when the link transitioned to good */
                good_time = current_time;
            }
            else
            {
                /* Check for a link error */
                BDK_CSR_INIT(cfg068, node, BDK_PCIERCX_CFG068(pcie_port));
                if (cfg068.s.res)
                {
                    /* Ignore errors before we've been stable for bounce_allow_time */
                    if (good_time + bounce_allow_time <= current_time)
                    {
                        BDK_TRACE(PCIE, "N%d.PCIe%d: Link errors after link up\n", node, pcie_port);
                        return 1; /* Link error, signal a retry */
                    }
                    else
                    {
                        /* Clear RC Correctable Error Status Register */
                        BDK_CSR_WRITE(node, BDK_PCIERCX_CFG068(pcie_port), -1);
                        BDK_TRACE(PCIE, "N%d.PCIe%d: Ignored error during settling time\n", node, pcie_port);
                    }
                }
            }
        }
        else if (good_time)
        {
            if (good_time + bounce_allow_time <= current_time)
            {
                /* We allow bounces for bounce_allow_time after the link is good.
                   Once this time passes any bounce requires a retry */
                BDK_TRACE(PCIE, "N%d.PCIe%d: Link bounce detected\n", node, pcie_port);
                return 1; /* Link bounce, signal a retry */
            }
            else
            {
                BDK_TRACE(PCIE, "N%d.PCIe%d: Ignored bounce during settling time\n", node, pcie_port);
            }
        }

        /* Determine if we've hit the timeout */
        is_loop_done = (current_time >= timeout);
        /* Determine if we've had a good link for the required hold time */
        is_loop_done |= link_up && (good_time + hold_time <= current_time);
    } while (!is_loop_done);

    /* Trace the LTSSM state history recorded above (oldest first) */
    BDK_TRACE(PCIE, "N%d.PCIe%d: LTSSM History\n", node, pcie_port);
    for (int i = 0; i < LTSSM_HISTORY_SIZE; i++)
    {
        ltssm_history_loc = (ltssm_history_loc + 1) & (LTSSM_HISTORY_SIZE - 1);
        if (ltssm_history[ltssm_history_loc] != 0xff)
            BDK_TRACE(PCIE, "N%d.PCIe%d: %s\n",
                node, pcie_port, ltssm_string(ltssm_history[ltssm_history_loc]));
    }

    if (!link_up)
    {
        BDK_TRACE(PCIE, "N%d.PCIe%d: Link down, Data link layer %s(DLLA=%d), Link training %s(LT=%d), LTSSM %s\n",
            node, pcie_port,
            pciercx_cfg032.s.dlla ? "active" : "down", pciercx_cfg032.s.dlla,
            pciercx_cfg032.s.lt ? "active" : "complete", pciercx_cfg032.s.lt,
            ltssm_string(__bdk_pcie_rc_get_ltssm_state(node, pcie_port)));
        return 1; /* Link down, signal a retry */
    }

    /* Report the negotiated link speed and width */
    int neg_gen = pciercx_cfg032.s.ls;    /* Current speed of PEM (1-3) */
    int neg_width = pciercx_cfg032.s.nlw; /* Current lane width of PEM (1-8) */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Link negotiated %d lanes, speed gen%d\n",
        node, pcie_port, neg_width, neg_gen);

    /* Determine PCIe bus number the directly attached device uses */
    BDK_CSR_INIT(pciercx_cfg006, node, BDK_PCIERCX_CFG006(pcie_port));
    int bus = pciercx_cfg006.s.sbnum;

    int dev_gen = 1;   /* Device max speed (1-3), default if no PCIe capability found */
    int dev_width = 1; /* Device max lane width (1-16), default if no PCIe capability found */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Reading device max speed and width\n",
        node, pcie_port);

    /* Read PCI capability pointer (standard config offset 0x34) */
    uint32_t cap = cfg_read32_retry(node, pcie_port, bus, 0, 0, 0x34);

    /* Check if we were able to read capabilities pointer */
    if (cap == 0xffffffff)
        return 1; /* Signal retry needed */

    /* Walk the capability list looking for the PCIe capability to read the
       device's max speed and width */
    int cap_next = cap & 0xff;
    while (cap_next)
    {
        cap = cfg_read32_retry(node, pcie_port, bus, 0, 0, cap_next);
        if (cap == 0xffffffff)
            return 1; /* Signal retry needed */

        /* Is this a PCIe capability (0x10)? */
        if ((cap & 0xff) == 0x10)
        {
            BDK_TRACE(PCIE, "N%d.PCIe%d: Found PCIe capability at offset 0x%x\n",
                node, pcie_port, cap_next);
            /* Offset 0xc contains the max link info (Link Capabilities) */
            cap = cfg_read32_retry(node, pcie_port, bus, 0, 0, cap_next + 0xc);
            if (cap == 0xffffffff)
                return 1; /* Signal retry needed */
            dev_gen = cap & 0xf;           /* Max speed of PEM from config (1-3) */
            dev_width = (cap >> 4) & 0x3f; /* Max lane width of PEM (1-16) */
            BDK_TRACE(PCIE, "N%d.PCIe%d: Device supports %d lanes, speed gen%d\n",
                node, pcie_port, dev_width, dev_gen);
            break;
        }
        /* Move to next capability */
        cap_next = (cap >> 8) & 0xff;
    }

    /* Desired link speed and width is either limited by the device or our PEM
       configuration. Choose the most restrictive limit */
    int desired_gen = (dev_gen < max_gen) ? dev_gen : max_gen;
    int desired_width = (dev_width < max_width) ? dev_width : max_width;

    /* We need a change if we don't match the desired speed or width. Note that
       we allow better than expected in case the device lied about its
       capabilities */
    bool need_speed_change = (neg_gen < desired_gen);
    bool need_lane_change = (neg_width < desired_width);

    if (need_lane_change)
    {
        /* We didn't get the maximum number of lanes */
        BDK_TRACE(PCIE, "N%d.PCIe%d: Link width (%d) less that supported (%d)\n",
            node, pcie_port, neg_width, desired_width);
        return 2; /* Link wrong width, signal a retry */
    }
    else if (need_speed_change)
    {
        if (do_retry_speed)
        {
            /* Already tried one speed change (via the goto below); give up
               and let the caller do a full retry */
            BDK_TRACE(PCIE, "N%d.PCIe%d: Link speed (gen%d) less that supported (gen%d)\n",
                node, pcie_port, neg_gen, desired_gen);
            return 1; /* Link at width, but speed low. Request a retry */
        }
        else
        {
            /* We didn't get the maximum speed. Request a speed change */
            BDK_TRACE(PCIE, "N%d.PCIe%d: Link speed (gen%d) less that supported (gen%d), requesting a speed change\n",
                node, pcie_port, neg_gen, desired_gen);
            BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG515(pcie_port),
                c.s.dsc = 1); /* Directed speed change */
            bdk_wait_usec(100000);
            do_retry_speed = true;
            goto retry_speed;
        }
    }
    else
    {
        BDK_TRACE(PCIE, "N%d.PCIe%d: Link at best speed and width\n", node, pcie_port);
        /* For gen3 links check if we are getting errors over the link */
        if (neg_gen == 3)
        {
            /* Read RC Correctable Error Status Register */
            BDK_CSR_INIT(cfg068, node, BDK_PCIERCX_CFG068(pcie_port));
            if (cfg068.s.res)
            {
                BDK_TRACE(PCIE, "N%d.PCIe%d: Link reporting error status\n", node, pcie_port);
                return 1; /* Getting receiver errors, request a retry */
            }
        }
        return 0; /* Link at correct speed and width */
    }
}
+
/**
 * Setup the SLI memory mapped regions to allow access to PCIe by the cores
 * using addresses returned by bdk_pcie_get_base_address().
 *
 * @param node      Node to configure
 * @param pcie_port PCIe port to configure
 */
static void __bdk_pcie_sli_initialize(bdk_node_t node, int pcie_port)
{
    int sli;
    int sli_group;
    /* Map this PEM to its SLI instance and the MAC group within it */
    __bdk_pcie_get_sli(node, pcie_port, &sli, &sli_group);

    /* Setup store merge timer */
    BDK_CSR_MODIFY(c, node, BDK_SLIX_S2M_CTL(sli),
        c.s.max_word = 0;   /* Allow 16 words to combine */
        c.s.timer = 50);    /* Wait up to 50 cycles for more data */

    /* There are 256 regions per SLI. We need four regions per PCIe port to
       support config, IO, normal, and prefetchable regions. The 256 regions
       are shared across PCIe, so we need three groups of these (one group
       for each PCIe). The setup is:
       SLI bit[7:6]: PCIe port, relative to SLI (max of 4)
       SLI bit[5:4]: Region. See bdk_pcie_mem_t enumeration
       SLI bit[3:0]: Address extension from 32 bits to 36 bits
    */
    for (bdk_pcie_mem_t mem_region = BDK_PCIE_MEM_CONFIG; mem_region <= BDK_PCIE_MEM_IO; mem_region++)
    {
        /* Use top two bits for PCIe port, next two bits for memory region */
        int sli_region = sli_group << 6;
        /* Use next two bits for mem region type */
        sli_region |= mem_region << 4;
        /* Figure out the hardware setting for each region */
        int ctype = 3;
        int nmerge = 1;
        int ordering = 0;
        switch (mem_region)
        {
            case BDK_PCIE_MEM_CONFIG: /* Config space */
                ctype = 1;    /* Config space */
                nmerge = 1;   /* No merging allowed */
                ordering = 0; /* NO "relaxed ordering" or "no snoop" */
                break;
            case BDK_PCIE_MEM_NORMAL: /* Memory, not prefetchable */
                ctype = 0;    /* Memory space */
                nmerge = 1;   /* No merging allowed */
                ordering = 0; /* NO "relaxed ordering" or "no snoop" */
                break;
            case BDK_PCIE_MEM_PREFETCH: /* Memory, prefetchable */
                ctype = 0;    /* Memory space */
                nmerge = 0;   /* Merging allowed */
                ordering = 1; /* Yes "relaxed ordering" and "no snoop" */
                break;
            case BDK_PCIE_MEM_IO: /* IO */
                ctype = 2;    /* I/O space */
                nmerge = 1;   /* No merging allowed */
                ordering = 0; /* NO "relaxed ordering" or "no snoop" */
                break;
        }
        /* Use the lower order bits to work as an address extension, allowing
           each PCIe port to map a total of 36 bits (32bit each region, 16
           regions) */
        int epf = sli_group;
        if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN81XX)) {
            /* On these models the EPF number comes from hardware constants
               rather than the SLI group */
            BDK_CSR_INIT(lmac_const0,node,BDK_SLIX_LMAC_CONST0X(sli,pcie_port));
            epf = lmac_const0.s.epf;
        }
        for (int r = sli_region; r < sli_region + 16; r++)
        {
            uint64_t address = 0;
            /* Address only applies to memory space */
            if (mem_region == BDK_PCIE_MEM_NORMAL)
            {
                /* Normal starts at bus address 0 */
                address = r - sli_region;
            } else if (mem_region == BDK_PCIE_MEM_PREFETCH)
            {
                /* Prefetch starts at bus address 0x10.0000.0000 */
                address = r - sli_region + 16;
            }
            BDK_CSR_MODIFY(c, node, BDK_SLIX_S2M_REGX_ACC(sli, r),
                c.s.ctype = ctype;
                c.s.zero = 0;
                c.cn83xx.epf = epf; /* Superimposed onto c.cn81xx.mac. EPF value works for both */
                c.s.nmerge = nmerge;
                c.s.wtype = ordering;
                c.s.rtype = ordering;
                c.s.ba = address);
        }
    }

    /* Setup MAC control */
    BDK_CSR_MODIFY(c, node, BDK_SLIX_M2S_MACX_CTL(sli, sli_group),
        c.s.waitl_com = 1; /* Improves ordering in Ali flash testing */
        c.s.ctlp_ro = 1;
        c.s.ptlp_ro = 1;
        c.s.wind_d = 1;
        c.s.bar0_d = 1;
        c.s.wait_com = (bdk_config_get_int(BDK_CONFIG_PCIE_ORDERING) == 1));
}
+
+
/**
 * Perform a complete PCIe RC reset. This is documented in the HRM as issuing a
 * fundamental reset
 *
 * @param node      Node to reset
 * @param pcie_port PCIe port to reset
 *
 * @return Zero on success, negative on failure
 */
static int __bdk_pcie_rc_reset(bdk_node_t node, int pcie_port)
{
    /* Find which QLM/DLM is associated with this PCIe port */
    int qlm = bdk_qlm_get_qlm_num(node, BDK_IF_PCIE, pcie_port, 0);
    if (qlm < 0)
        return -1; /* No SERDES lanes assigned to this port */

    /* Check if this PCIe port combines two QLM/DLM, in which case both
       PHYs must be reset below */
    BDK_CSR_INIT(pemx_cfg, node, BDK_PEMX_CFG(pcie_port));
    int is_dual = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ? pemx_cfg.cn81xx.lanes4 : pemx_cfg.cn83xx.lanes8;

    BDK_TRACE(PCIE, "N%d.PCIe%d: Performing PCIe fundamental reset\n", node, pcie_port);

    /* Host software may want to issue a fundamental reset to the PCIe bus.
       Software should perform the following steps:
       1. Write PEM(0..1)_ON[PEMON] = 0. */
    BDK_CSR_MODIFY(c, node, BDK_PEMX_ON(pcie_port),
        c.s.pemon = 0);
    /* 2. Write RST_SOFT_PRST(0..3)[SOFT_PRST] = 1.
       - This reassertion of [SOFT_PRST] causes the chip to drive PERSTn_L
       low (if RST_CTL(0..3)[RST_DRV] = 1). */
    BDK_CSR_MODIFY(c, node, BDK_RST_SOFT_PRSTX(pcie_port),
        c.s.soft_prst = 1);
    /* 3. Read RST_SOFT_PRST(0..3). This ensures the PCIe bus is now in reset.
       - Note that PCIERCn_CFGn registers cannot be accessed when
       RST_SOFT_PRST(0..3)[SOFT_PRST] = 1. */
    BDK_CSR_READ(node, BDK_RST_SOFT_PRSTX(pcie_port));
    /* 4. Write GSER(0..8)_PHY_CTL[PHY_RESET] = 1.
       - This puts the PHY in reset. */
    BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
        c.s.phy_reset = 1);
    if (is_dual)
        BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm + 1),
            c.s.phy_reset = 1);
    /* Wait 10 us before proceeding to step 5. */
    bdk_wait_usec(10);
    /* 5. Write GSERx_PHY_CTL[PHY_RESET] = 0 */
    BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
        c.s.phy_reset = 0);
    if (is_dual)
        BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm + 1),
            c.s.phy_reset = 0);

    /* Turn on PEM clocks (CN83XX has clock gates that other models lack) */
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
        BDK_CSR_MODIFY(c, node, BDK_PEMX_CLK_EN(pcie_port),
            c.cn83xx.pceclk_gate = 0;
            c.cn83xx.csclk_gate = 0);

    /* 6. Wait 2 ms or more before taking the PCIe port out of reset. */
    bdk_wait_usec(2000);

    /* To take PCIe port out of reset, perform the following steps: */
    /* 1. Write PEM(0..1)_ON[PEMON] = 1. */
    BDK_CSR_MODIFY(c, node, BDK_PEMX_ON(pcie_port),
        c.s.pemon = 1);
    /* 2. Write RST_SOFT_PRST(0..3)[SOFT_PRST] = 0. */
    /* 3. After RST_CTL(0..3)[RST_DONE], perform any configuration as the
       PCIe MAC has been reset. Set the PEM(0..1)_CTL_STATUS[LNK_ENB] = 1. */
    /* These steps are executed when we bring the link up. See
       bdk_pcie_rc_initialize() */
    return 0;
}
+
/**
 * Before PCIe link can be brought up a number of steps must be performed to
 * reset the PEM, take the PEM out of reset, initialize the PEM, initialize
 * RC config space, and initialize SLI. These steps must be performed every
 * time the PEM is reset, which may be repeated if the PCIe link doesn't come
 * up at the desired speed and width.
 *
 * @param node      Node to initialize
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success, negative on failure
 */
static int __bdk_pcie_rc_pre_link_init(bdk_node_t node, int pcie_port)
{
    /* Make sure the PEM and GSER do a full reset before starting PCIe */
    if (__bdk_pcie_rc_reset(node, pcie_port))
    {
        bdk_error("N%d.PCIe%d: Reset failed.\n", node, pcie_port);
        return -1;
    }

    /* Bring the PCIe out of reset */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Taking port out of reset\n", node, pcie_port);
    BDK_CSR_WRITE(node, BDK_RST_SOFT_PRSTX(pcie_port), 0);

    /* Check and make sure PCIe came out of reset. If it doesn't the board
       probably hasn't wired the clocks up and the interface should be
       skipped */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Waiting for reset to complete\n", node, pcie_port);
    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_RST_CTLX(pcie_port), rst_done, ==, 1, 10000))
    {
        printf("N%d.PCIe%d: Stuck in reset, skipping.\n", node, pcie_port);
        return -1;
    }

    /* Check BIST status; warn but continue on failure */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Checking BIST\n", node, pcie_port);
    BDK_CSR_INIT(pemx_bist_status, node, BDK_PEMX_BIST_STATUS(pcie_port));
    if (pemx_bist_status.u)
        bdk_warn("N%d.PCIe%d: BIST FAILED (0x%016llx)\n", node, pcie_port, pemx_bist_status.u);

    /* Initialize the config space CSRs */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Setting up internal config space\n", node, pcie_port);
    __bdk_pcie_rc_initialize_config_space(node, pcie_port);

    /* Enable gen2 speed selection */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Enabling dynamic speed changes\n", node, pcie_port);
    BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG515(pcie_port),
        c.s.dsc = 1);

    /* Setup the SLI windows to allow access to this PCIe from the core */
    BDK_TRACE(PCIE, "N%d.PCIe%d: Initializing SLI\n", node, pcie_port);
    __bdk_pcie_sli_initialize(node, pcie_port);
    return 0;
}
+
/**
 * Initialize a PCIe port for use in host(RC) mode. It doesn't
 * enumerate the bus.
 *
 * @param node      Node the PCIe port is on
 * @param pcie_port PCIe port to initialize
 *
 * @return Zero on success
 */
int bdk_pcie_rc_initialize(bdk_node_t node, int pcie_port)
{
    const int MAX_RETRIES = 2; /* Total of 3 attempts: First + 2 retries */
    int retry_count = 0;
    int result = -1, i;
    bdk_pemx_bar1_indexx_t bar1_idx;

    /* Make sure we aren't trying to setup a target mode interface in host
       mode. Sadly this bit is RAZ for CN88XX and CN81XX because the hardware
       team removed it. So much for backward compatibility */
    BDK_CSR_INIT(pemx_cfg, node, BDK_PEMX_CFG(pcie_port));
    int host_mode = CAVIUM_IS_MODEL(CAVIUM_CN83XX) ? pemx_cfg.cn83xx.hostmd : 1;
    if (!host_mode)
    {
        printf("N%d.PCIe%d: Port in endpoint mode.\n", node, pcie_port);
        return -1;
    }

    /* Retry loop: each pass fully resets the PEM and tries to bring the
       link up; __bdk_pcie_rc_initialize_link()'s return code drives the
       decision to retry */
    while (retry_count <= MAX_RETRIES)
    {
        if (retry_count)
            BDK_TRACE(PCIE, "N%d.PCIe%d: Starting link retry %d\n", node, pcie_port, retry_count);
        /* Perform init that must be done after PEM reset, but before link */
        if (__bdk_pcie_rc_pre_link_init(node, pcie_port))
            return -1;

        if (retry_count == MAX_RETRIES)
        {
            BDK_CSR_INIT(pciercx_cfg031, node, BDK_PCIERCX_CFG031(pcie_port));
            /* Drop speed to gen2 if link bouncing */
            /* Result =-1 PEM in reset */
            /* Result = 0: link speed and width ok no retry needed */
            /* Result = 1: Link errors or speed change needed */
            /* Result = 2: lane width error */
            if ((pciercx_cfg031.s.mls == 3) && (result != 2))
            {
                BDK_TRACE(PCIE, "N%d.PCIe%d: Dropping speed to gen2\n", node, pcie_port);
                pciercx_cfg031.s.mls = 2;
                BDK_CSR_WRITE(node, BDK_PCIERCX_CFG031(pcie_port), pciercx_cfg031.u);
                /* Set the target link speed */
                BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG040(pcie_port),
                    c.s.tls = 2);
            }
        }
        /* Bring the link up */
        result = __bdk_pcie_rc_initialize_link(node, pcie_port);
        if (result == 0)
        {
            BDK_TRACE(PCIE, "N%d.PCIe%d: Link does not need a retry\n", node, pcie_port);
            break;
        }
        else if (result > 0)
        {
            if (retry_count >= MAX_RETRIES)
            {
                BDK_TRACE(PCIE, "N%d.PCIe%d: Link requested a retry, but hit the max retries\n", node, pcie_port);
                /* If the link is down, report failure */
                BDK_CSR_INIT(pciercx_cfg032, node, BDK_PCIERCX_CFG032(pcie_port));
                bool link_up = (pciercx_cfg032.s.dlla && !pciercx_cfg032.s.lt);
                if (!link_up)
                    result = -1;
            }
            else
                BDK_TRACE(PCIE, "N%d.PCIe%d: Link requested a retry\n", node, pcie_port);
        }
        if (result < 0)
        {
            int ltssm_state = __bdk_pcie_rc_get_ltssm_state(node, pcie_port);
            printf("N%d.PCIe%d: Link timeout, probably the slot is empty (LTSSM %s)\n",
                node, pcie_port, ltssm_string(ltssm_state));
            return -1;
        }
        retry_count++;
    }

    /* Errata PCIE-28816: Link retrain initiated at GEN1 can cause PCIE
       link to hang. For Gen1 links we must disable equalization */
    BDK_CSR_INIT(pciercx_cfg032, node, BDK_PCIERCX_CFG032(pcie_port));
    if (pciercx_cfg032.s.ls == 1)
    {
        BDK_TRACE(PCIE, "N%d.PCIe%d: Disabling equalization for Gen1 link\n", node, pcie_port);
        BDK_CSR_MODIFY(c, node, BDK_PCIERCX_CFG548(pcie_port),
            c.s.ed = 1);
    }

    BDK_TRACE(PCIE, "N%d.PCIe%d: Setting up internal BARs\n", node, pcie_port);
    /* Disable BAR0 */
    BDK_CSR_WRITE(node, BDK_PEMX_P2N_BAR0_START(pcie_port), -1);
    /* BAR1 Starting at address 0 */
    BDK_CSR_WRITE(node, BDK_PEMX_P2N_BAR1_START(pcie_port), 0);
    /* Set BAR2 to cover all memory starting at address 0 */
    BDK_CSR_WRITE(node, BDK_PEMX_P2N_BAR2_START(pcie_port), 0);
    /* Setup BAR attributes */
    BDK_CSR_MODIFY(c, node, BDK_PEMX_BAR_CTL(pcie_port),
        c.cn83xx.bar1_siz = 1;  /* 64MB BAR1 */
        c.s.bar2_enb = 1;       /* BAR2 is enabled */
        c.s.bar2_cax = 0);      /* Cache in L2 */

    /* Allow devices that truncate the bus address to 32-bits to reach the GITS_TRANSLATER */
    bar1_idx.u = 0;
    bar1_idx.s.addr_idx = bdk_numa_get_address(node, BDK_GITS_TRANSLATER) >> 22;
    bar1_idx.s.addr_v = 1;

    BDK_CSR_WRITE(node, BDK_PEMX_BAR1_INDEXX(pcie_port, 0), bar1_idx.u);

    /* The rest of the windows map linearly to match the BAR2 translation. */
    for (i = 1; i < 16; i++)
    {
        bar1_idx.s.addr_idx = i;
        BDK_CSR_WRITE(node, BDK_PEMX_BAR1_INDEXX(pcie_port, i), bar1_idx.u);
    }

    /* Display the link status */
    printf("N%d.PCIe%d: Link active, %d lanes, speed gen%d\n",
        node, pcie_port, pciercx_cfg032.s.nlw, pciercx_cfg032.s.ls);

    return 0;

}
+
+/**
+ * Return PCIe state
+ *
+ * @param pcie_port PCIe port to query
+ *
+ * @return True if port is up and running
+ */
+int bdk_pcie_is_running(bdk_node_t node, int pcie_port)
+{
+ BDK_CSR_INIT(pemx_on, node, BDK_PEMX_ON(pcie_port));
+ BDK_CSR_INIT(rst_soft_prstx, node, BDK_RST_SOFT_PRSTX(pcie_port));
+ BDK_CSR_INIT(pciercx_cfg032, node, BDK_PCIERCX_CFG032(pcie_port));
+
+ if (!pemx_on.s.pemon || rst_soft_prstx.s.soft_prst)
+ return 0;
+
+ return bdk_config_get_int(BDK_CONFIG_PCIE_SKIP_LINK_TRAIN, node, pcie_port) ||
+ (pciercx_cfg032.s.dlla && !pciercx_cfg032.s.lt);
+}
+
/**
 * Shutdown a PCIe port and put it in reset
 *
 * @param node      Node the port is on
 * @param pcie_port PCIe port to shutdown
 *
 * @return Zero on success
 */
int bdk_pcie_rc_shutdown(bdk_node_t node, int pcie_port)
{
    /* Check that the controller is out of reset; if not there is nothing
       to wait for before forcing the reset below */
    BDK_CSR_INIT(rst_ctlx, node, BDK_RST_CTLX(pcie_port));
    if (!rst_ctlx.s.rst_done)
        goto skip_idle_wait;

    /* Check if link is up; a down link has no outstanding traffic to drain */
    BDK_CSR_INIT(pciercx_cfg032, node, BDK_PCIERCX_CFG032(pcie_port));
    if ((pciercx_cfg032.s.dlla == 0) || (pciercx_cfg032.s.lt == 1))
        goto skip_idle_wait;
#if 0 // FIXME: idle wait disabled; shutdown proceeds without draining pending operations
    /* Wait for all pending operations to complete */
    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_PEMX_CPL_LUT_VALID(pcie_port), tag, ==, 0, 2000))
        printf("N%d.PCIe%d: Shutdown timeout\n", node, pcie_port);
#endif
skip_idle_wait:
    /* Bring down the link */
    BDK_CSR_MODIFY(c, node, BDK_PEMX_CTL_STATUS(pcie_port), c.cn83xx.lnk_enb = 0);
    /* Force reset */
    __bdk_pcie_rc_reset(node, pcie_port);
    return 0;
}
+
/**
 * @INTERNAL
 * Build a PCIe config space request address for a device
 *
 * @param node      Node to access
 * @param pcie_port PCIe port to access
 * @param bus       Sub bus
 * @param dev       Device ID
 * @param fn        Device sub function
 * @param reg       Register to access
 *
 * @return 64bit IO address
 */
uint64_t pcie_build_config_addr(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg)
{
    int num_pems = bdk_pcie_get_num_ports(node);
    if (pcie_port < num_pems)
    {
        /* Errata (SLI-22555) ECAM to off-chip PCI misroutes address. Use
           the SLI regions instead of ECAMs for config space access */
        uint64_t address = bdk_pcie_get_base_address(node, pcie_port, BDK_PCIE_MEM_CONFIG);
        /* Compose the config request offset on top of the SLI window base */
        address += (uint64_t)bus << 24; /* Bus is bits 31:24 */
        address += dev << 19;           /* Device is bits 23:19 */
        address += fn << 16;            /* Function is bits 18:16 */
        address += reg;                 /* Register offset is bits 11:0 */
        return address;
    }
    else if (pcie_port >= 100)
    {
        /* Ports numbered 100+ encode an ECAM instance (pcie_port - 100).
           Build a temporary device descriptor so the generic ECAM address
           helper can be reused */
        bdk_device_t device;
        memset(&device, 0, sizeof(device));
        device.node = node;
        device.ecam = pcie_port - 100;
        device.bus = bus;
        device.dev = dev;
        device.func = fn;
        return __bdk_ecam_build_address(&device, reg);
    }
    /* Port number is out of range for both PEMs and ECAMs */
    return 0;
}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-pcie.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-pcie.c
new file mode 100644
index 0000000000..769550d6b2
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-pcie.c
@@ -0,0 +1,221 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-hal/bdk-pcie.h"
+#include "libbdk-hal/bdk-utils.h"
+#include "libbdk-hal/if/bdk-if.h"
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(PCIE);
+
+/**
+ * Read 8bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint8_t bdk_pcie_config_read8(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg)
+{
+ uint64_t address = pcie_build_config_addr(node, pcie_port, bus, dev, fn, reg);
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Config Read8(bus=%d, dev=%d, fn=%d, reg=0x%x, internal=0x%llx)\n",
+ node, pcie_port, bus, dev, fn, reg, address);
+ uint8_t result;
+ if (address)
+ result = bdk_read64_uint8(address);
+ else
+ result = 0xff;
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Result=0x%02x\n", node, pcie_port, result);
+ return result;
+}
+
+
+/**
+ * Read 16bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint16_t bdk_pcie_config_read16(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg)
+{
+ uint64_t address = pcie_build_config_addr(node, pcie_port, bus, dev, fn, reg);
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Config Read16(bus=%d, dev=%d, fn=%d, reg=0x%x, internal=0x%llx)\n",
+ node, pcie_port, bus, dev, fn, reg, address);
+ uint16_t result;
+ if (address)
+ result = bdk_le16_to_cpu(bdk_read64_uint16(address));
+ else
+ result = 0xffff;
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Result=0x%04x\n", node, pcie_port, result);
+ return result;
+}
+
+
+/**
+ * Read 32bits from a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return Result of the read
+ */
+uint32_t bdk_pcie_config_read32(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg)
+{
+ uint64_t address = pcie_build_config_addr(node, pcie_port, bus, dev, fn, reg);
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Config Read32(bus=%d, dev=%d, fn=%d, reg=0x%x, internal=0x%llx)\n",
+ node, pcie_port, bus, dev, fn, reg, address);
+
+ uint32_t result;
+ if (address)
+ result = bdk_le32_to_cpu(bdk_read64_uint32(address));
+ else
+ result = 0xffffffff;
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Result=0x%08x\n", node, pcie_port, result);
+
+ /* Errata ECAM-22630: CN88XX pass 1.x, except pass 1.0, will return zero
+ for non-existent devices instead of ones. We look for this special case
+ for 32bit reads for reg=0 so we can scan device properly */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (reg == 0) && (result == 0))
+ result = 0xffffffff;
+
+ return result;
+}
+
+
+/**
+ * Write 8bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void bdk_pcie_config_write8(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg, uint8_t val)
+{
+ uint64_t address = pcie_build_config_addr(node, pcie_port, bus, dev, fn, reg);
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Config Write8(bus=%d, dev=%d, fn=%d, reg=0x%x, val=0x%02x, internal=0x%llx)\n",
+ node, pcie_port, bus, dev, fn, reg, val, address);
+ if (address)
+ bdk_write64_uint8(address, val);
+}
+
+
+/**
+ * Write 16bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void bdk_pcie_config_write16(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg, uint16_t val)
+{
+ uint64_t address = pcie_build_config_addr(node, pcie_port, bus, dev, fn, reg);
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Config Write16(bus=%d, dev=%d, fn=%d, reg=0x%x, val=0x%04x, internal=0x%llx)\n",
+ node, pcie_port, bus, dev, fn, reg, val, address);
+ if (address)
+ bdk_write64_uint16(address, bdk_cpu_to_le16(val));
+}
+
+
+/**
+ * Write 32bits to a Device's config space
+ *
+ * @param pcie_port PCIe port the device is on
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ * @param val Value to write
+ */
+void bdk_pcie_config_write32(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg, uint32_t val)
+{
+ uint64_t address = pcie_build_config_addr(node, pcie_port, bus, dev, fn, reg);
+ BDK_TRACE(PCIE_CONFIG, "N%d.PCIe%d: Config Write32(bus=%d, dev=%d, fn=%d, reg=0x%x, val=0x%08x, internal=0x%llx)\n",
+ node, pcie_port, bus, dev, fn, reg, val, address);
+ if (address)
+ bdk_write64_uint32(address, bdk_cpu_to_le32(val));
+}
+
+/**
+ * Read 64bits from PCIe using a memory transaction
+ *
+ * @param node Node to read from
+ * @param pcie_port PCIe port to read
+ * @param address PCIe address to read
+ *
+ * @return Result of the read
+ */
+uint64_t bdk_pcie_mem_read64(bdk_node_t node, int pcie_port, uint64_t address)
+{
+ uint64_t base_address = bdk_pcie_get_base_address(node, pcie_port, BDK_PCIE_MEM_NORMAL);
+ return bdk_read64_uint64(base_address + address);
+}
+
+/**
+ * Write 64bits to PCIe memory
+ *
+ * @param node Node to write to
+ * @param pcie_port PCIe port to use
+ * @param address Address to write
+ * @param data Data to write
+ */
+void bdk_pcie_mem_write64(bdk_node_t node, int pcie_port, uint64_t address, uint64_t data)
+{
+ uint64_t base_address = bdk_pcie_get_base_address(node, pcie_port, BDK_PCIE_MEM_NORMAL);
+ bdk_write64_uint64(base_address + address, data);
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-qlm.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-qlm.c
new file mode 100644
index 0000000000..f7d631fb5b
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-qlm.c
@@ -0,0 +1,423 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <string.h>
+#include "libbdk-arch/bdk-csrs-gser.h"
+#include "libbdk-arch/bdk-csrs-gsern.h"
+#include "libbdk-hal/if/bdk-if.h"
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-hal/qlm/bdk-qlm-common.h"
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(QLM);
+
/**
 * Convert a mode into a configuration variable string value
 *
 * @param mode Mode to convert
 *
 * @return configuration value string (the enum name without its
 *         "BDK_QLM_MODE_" prefix, e.g. "PCIE_1X1")
 */
const char *bdk_qlm_mode_to_cfg_str(bdk_qlm_modes_t mode)
{
/* Stringify the enum constant, then skip the leading "BDK_QLM_MODE_"
   prefix — 13 characters — so the returned string is just the suffix.
   If the prefix ever changes length, this offset must change with it. */
#define MODE_CASE(m) case m: return #m+13
    switch (mode)
    {
        MODE_CASE(BDK_QLM_MODE_DISABLED);
        MODE_CASE(BDK_QLM_MODE_PCIE_1X1);
        MODE_CASE(BDK_QLM_MODE_PCIE_2X1);
        MODE_CASE(BDK_QLM_MODE_PCIE_1X2);
        MODE_CASE(BDK_QLM_MODE_PCIE_1X4);
        MODE_CASE(BDK_QLM_MODE_PCIE_1X8);
        MODE_CASE(BDK_QLM_MODE_PCIE_1X16);

        MODE_CASE(BDK_QLM_MODE_SATA_4X1);
        MODE_CASE(BDK_QLM_MODE_SATA_2X1);

        MODE_CASE(BDK_QLM_MODE_ILK);
        MODE_CASE(BDK_QLM_MODE_SGMII_4X1);
        MODE_CASE(BDK_QLM_MODE_SGMII_2X1);
        MODE_CASE(BDK_QLM_MODE_SGMII_1X1);
        MODE_CASE(BDK_QLM_MODE_XAUI_1X4);
        MODE_CASE(BDK_QLM_MODE_RXAUI_2X2);
        MODE_CASE(BDK_QLM_MODE_RXAUI_1X2);
        MODE_CASE(BDK_QLM_MODE_OCI);
        MODE_CASE(BDK_QLM_MODE_XFI_4X1);
        MODE_CASE(BDK_QLM_MODE_XFI_2X1);
        MODE_CASE(BDK_QLM_MODE_XFI_1X1);
        MODE_CASE(BDK_QLM_MODE_XLAUI_1X4);
        MODE_CASE(BDK_QLM_MODE_10G_KR_4X1);
        MODE_CASE(BDK_QLM_MODE_10G_KR_2X1);
        MODE_CASE(BDK_QLM_MODE_10G_KR_1X1);
        MODE_CASE(BDK_QLM_MODE_40G_KR4_1X4);
        MODE_CASE(BDK_QLM_MODE_QSGMII_4X1);
        MODE_CASE(BDK_QLM_MODE_25G_4X1);
        MODE_CASE(BDK_QLM_MODE_25G_2X1);
        MODE_CASE(BDK_QLM_MODE_50G_2X2);
        MODE_CASE(BDK_QLM_MODE_50G_1X2);
        MODE_CASE(BDK_QLM_MODE_100G_1X4);
        MODE_CASE(BDK_QLM_MODE_25G_KR_4X1);
        MODE_CASE(BDK_QLM_MODE_25G_KR_2X1);
        MODE_CASE(BDK_QLM_MODE_50G_KR_2X2);
        MODE_CASE(BDK_QLM_MODE_50G_KR_1X2);
        MODE_CASE(BDK_QLM_MODE_100G_KR4_1X4);
        MODE_CASE(BDK_QLM_MODE_USXGMII_4X1);
        MODE_CASE(BDK_QLM_MODE_USXGMII_2X1);

        case BDK_QLM_MODE_LAST: break; /* fall through error */
    }
    return "INVALID_QLM_MODE_VALUE";
}
+
+/**
+ * Convert a configuration variable value string into a mode
+ *
+ * @param val Configuration variable value
+ *
+ * @return mode
+ */
+bdk_qlm_modes_t bdk_qlm_cfg_string_to_mode(const char *val)
+{
+ bdk_qlm_modes_t mode;
+
+ for (mode = 0; mode < BDK_QLM_MODE_LAST; mode++)
+ {
+ if (0 == strcmp(val, bdk_qlm_mode_to_cfg_str(mode)))
+ {
+ return mode;
+ }
+ }
+ return -1;
+}
+
/**
 * Convert a mode into a human understandable string
 *
 * @param mode Mode to convert
 *
 * @return Easy to read string. These strings are user-facing and are
 *         displayed verbatim; keep them in sync with bdk_qlm_modes_t.
 */
const char *bdk_qlm_mode_tostring(bdk_qlm_modes_t mode)
{
    const char *result = "Unknown, update bdk_qlm_mode_tostring()";
    switch (mode)
    {
    case BDK_QLM_MODE_DISABLED:
        result = "Disabled";
        break;
    case BDK_QLM_MODE_PCIE_1X1:
        result = "1 PCIe, 1 lane";
        break;
    case BDK_QLM_MODE_PCIE_2X1:
        result = "2 PCIe, 1 lane each";
        break;
    case BDK_QLM_MODE_PCIE_1X2:
        result = "1 PCIe, 2 lanes";
        break;
    case BDK_QLM_MODE_PCIE_1X4:
        result = "1 PCIe, 4 lanes";
        break;
    case BDK_QLM_MODE_PCIE_1X8:
        result = "1 PCIe, 8 lanes";
        break;
    case BDK_QLM_MODE_PCIE_1X16:
        result = "1 PCIe, 16 lanes";
        break;

    case BDK_QLM_MODE_SATA_4X1:
        result = "4 SATA, one lane each";
        break;
    case BDK_QLM_MODE_SATA_2X1:
        result = "2 SATA, one lane each";
        break;

    case BDK_QLM_MODE_ILK:
        result = "Interlaken";
        break;
    case BDK_QLM_MODE_SGMII_4X1:
        result = "4 SGMII, 1 lane each";
        break;
    case BDK_QLM_MODE_SGMII_2X1:
        result = "2 SGMII, 1 lane each";
        break;
    case BDK_QLM_MODE_SGMII_1X1:
        result = "1 SGMII, 1 lane";
        break;
    case BDK_QLM_MODE_XAUI_1X4:
        result = "1 XAUI, 4 lanes";
        break;
    case BDK_QLM_MODE_RXAUI_2X2:
        result = "2 RXAUI, 2 lanes each";
        break;
    case BDK_QLM_MODE_RXAUI_1X2:
        result = "1 RXAUI, 2 lanes each";
        break;
    case BDK_QLM_MODE_OCI:
        result = "Cavium Coherent Processor Interconnect";
        break;
    case BDK_QLM_MODE_XFI_4X1:
        result = "4 XFI, 1 lane each";
        break;
    case BDK_QLM_MODE_XFI_2X1:
        result = "2 XFI, 1 lane each";
        break;
    case BDK_QLM_MODE_XFI_1X1:
        result = "1 XFI, 1 lane";
        break;
    case BDK_QLM_MODE_XLAUI_1X4:
        result = "1 XLAUI, 4 lanes";
        break;
    case BDK_QLM_MODE_10G_KR_4X1:
        result = "4 10GBASE-KR, 1 lane each";
        break;
    case BDK_QLM_MODE_10G_KR_2X1:
        result = "2 10GBASE-KR, 1 lane each";
        break;
    case BDK_QLM_MODE_10G_KR_1X1:
        result = "1 10GBASE-KR, 1 lane";
        break;
    case BDK_QLM_MODE_40G_KR4_1X4:
        result = "1 40GBASE-KR4, 4 lanes";
        break;
    case BDK_QLM_MODE_QSGMII_4X1:
        result = "4 QSGMII, 1 lane";
        break;
    case BDK_QLM_MODE_25G_4X1:
        result = "4 25G, 1 lane each";
        break;
    case BDK_QLM_MODE_25G_2X1:
        result = "2 25G, 1 lane each";
        break;
    case BDK_QLM_MODE_50G_2X2:
        result = "2 50G, 2 lanes each";
        break;
    case BDK_QLM_MODE_50G_1X2:
        result = "1 50G, 2 lanes";
        break;
    case BDK_QLM_MODE_100G_1X4:
        result = "1 100G, 4 lanes";
        break;
    /* NOTE(review): the KR (backplane) variants below intentionally reuse
       the same display strings as their non-KR counterparts */
    case BDK_QLM_MODE_25G_KR_4X1:
        result = "4 25G, 1 lane each";
        break;
    case BDK_QLM_MODE_25G_KR_2X1:
        result = "2 25G, 1 lane each";
        break;
    case BDK_QLM_MODE_50G_KR_2X2:
        result = "2 50G, 2 lanes each";
        break;
    case BDK_QLM_MODE_50G_KR_1X2:
        result = "1 50G, 2 lanes";
        break;
    case BDK_QLM_MODE_100G_KR4_1X4:
        result = "1 100G, 4 lanes";
        break;
    case BDK_QLM_MODE_USXGMII_4X1:
        result = "4 USXGMII, 1 lane each";
        break;
    case BDK_QLM_MODE_USXGMII_2X1:
        result = "2 USXGMII, 1 lane each";
        break;

    case BDK_QLM_MODE_LAST:
        break; /* fallthrough error */
    }
    return result;
}
+
+int bdk_qlm_measure_clock(bdk_node_t node, int qlm)
+{
+ int ref_clock = __bdk_qlm_measure_refclock(node, qlm);
+ BDK_TRACE(QLM, "N%d.QLM%d: Ref clock %d Hz\n", node, qlm, ref_clock);
+
+ return ref_clock;
+}
+
+/**
+ * Set the QLM's clock source.
+ *
+ * @param node Node to use in a Numa setup
+ * @param qlm QLM to configure
+ * @param clk Clock source for QLM
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_qlm_set_clock(bdk_node_t node, int qlm, bdk_qlm_clock_t clk)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ int sel;
+ int com1;
+ switch (clk)
+ {
+ case BDK_QLM_CLK_COMMON_0:
+ sel = 1;
+ com1 = 0;
+ break;
+ case BDK_QLM_CLK_COMMON_1:
+ sel = 1;
+ com1 = 1;
+ break;
+ case BDK_QLM_CLK_EXTERNAL:
+ sel = 0;
+ com1 = 0;
+ break;
+ default:
+ bdk_warn("Unrecognized clock mode %d for QLM%d on node %d.\n",
+ clk, qlm, node);
+ return -1;
+ }
+
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_REFCLK_SEL(qlm),
+ c.s.com_clk_sel = sel;
+ c.s.use_com1 = com1);
+ }
+ else
+ {
+ int cclksel;
+ switch (clk)
+ {
+ case BDK_QLM_CLK_COMMON_0:
+ cclksel = 0;
+ break;
+ case BDK_QLM_CLK_COMMON_1:
+ cclksel = 1;
+ break;
+ case BDK_QLM_CLK_COMMON_2:
+ cclksel = 2;
+ break;
+ case BDK_QLM_CLK_EXTERNAL:
+ cclksel = 3;
+ break;
+ default:
+ bdk_warn("Unrecognized clock mode %d for QLM%d on node %d.\n",
+ clk, qlm, node);
+ return -1;
+ }
+ BDK_CSR_MODIFY(c, node, BDK_GSERNX_COMMON_REFCLK_BCFG(qlm),
+ c.s.pwdn = (clk == BDK_QLM_CLK_EXTERNAL) ? 0 : 1;
+ c.s.cclksel = cclksel);
+ }
+ return 0;
+}
+
+/**
+ * Display an eye diagram for the given QLM lane. The eye data can be in "eye", or
+ * captured during the call if "eye" is NULL.
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param qlm_lane Which lane
+ * @param format Display format. 0 = raw, 1 = Color ASCII
+ * @param eye Eye data to display, or NULL if the data should be captured.
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_qlm_eye_display(bdk_node_t node, int qlm, int qlm_lane, int format, const bdk_qlm_eye_t *eye)
+{
+ int result;
+ int need_free = 0;
+ if (eye == NULL)
+ {
+ bdk_qlm_eye_t *eye_data = malloc(sizeof(bdk_qlm_eye_t));
+ if (eye_data == NULL)
+ {
+ bdk_error("Failed to allocate space for eye\n");
+ return -1;
+ }
+ if (bdk_qlm_eye_capture(node, qlm, qlm_lane, eye_data))
+ return -1;
+ eye = eye_data;
+ }
+
+ /* Calculate the max eye width */
+ int eye_area = 0;
+ int eye_width = 0;
+ for (int y = 0; y < eye->height; y++)
+ {
+ int width = 0;
+ for (int x = 0; x < eye->width; x++)
+ {
+ if (eye->data[y][x] == 0)
+ {
+ width++;
+ eye_area++;
+ }
+ }
+ if (width > eye_width)
+ eye_width = width;
+ }
+
+ /* Calculate the max eye height */
+ int eye_height = 0;
+ for (int x = 0; x < eye->width; x++)
+ {
+ int height = 0;
+ for (int y = 0; y < eye->height; y++)
+ {
+ if (eye->data[y][x] == 0)
+ {
+ height++;
+ eye_area++;
+ }
+ }
+ if (height > eye_height)
+ eye_height = height;
+ }
+
+ printf("\nEye Diagram for Node %d, QLM %d, Lane %d\n", node, qlm, qlm_lane);
+
+ if (format == 0) /* Raw */
+ {
+ for (int y = 0; y < eye->height; y++)
+ {
+ for (int x = 0; x < eye->width; x++)
+ printf("%u\t", eye->data[y][x]);
+ printf("\n");
+ }
+ result = 0;
+ }
+ else
+ result = -1;
+
+ if (need_free)
+ free((void*)eye);
+ return result;
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-sata.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-sata.c
new file mode 100644
index 0000000000..82e2d3da36
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-sata.c
@@ -0,0 +1,1117 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <malloc.h>
+#include "libbdk-arch/bdk-csrs-sata.h"
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(SATA);
+
+/* Most all information used to create this code was gotten from this wiki
+ page: http://wiki.osdev.org/AHCI */
+
/**
 * Following code defines different kinds of FIS specified in Serial
 * ATA Revision 3.0. The numeric values are the FIS type codes carried
 * in the first byte of every FIS on the wire.
 */
typedef enum
{
    FIS_TYPE_REG_H2D    = 0x27, /**< Register FIS - host to device */
    FIS_TYPE_REG_D2H    = 0x34, /**< Register FIS - device to host */
    FIS_TYPE_DMA_ACT    = 0x39, /**< DMA activate FIS - device to host */
    FIS_TYPE_DMA_SETUP  = 0x41, /**< DMA setup FIS - bidirectional */
    FIS_TYPE_DATA       = 0x46, /**< Data FIS - bidirectional */
    FIS_TYPE_BIST       = 0x58, /**< BIST activate FIS - bidirectional */
    FIS_TYPE_PIO_SETUP  = 0x5F, /**< PIO setup FIS - device to host */
    FIS_TYPE_DEV_BITS   = 0xA1, /**< Set device bits FIS - device to host */
} fis_type_t;
+
/**
 * A host to device register FIS is used by the host to send
 * command or control to a device. As illustrated in the
 * following data structure, it contains the IDE registers such
 * as command, LBA, device, feature, count and control. An ATA
 * command is constructed in this structure and issued to the
 * device. All reserved fields in an FIS should be cleared to
 * zero.
 *
 * NOTE(review): the two byte-order variants mirror the bitfield order
 * within each byte; this relies on the compiler allocating bitfields
 * in endian-dependent order (GCC behavior) — confirm before porting.
 */
typedef struct
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_REG_H2D */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  rsv0:3;      /**< Reserved */
    uint8_t  c:1;         /**< 1: Command, 0: Control */
    uint8_t  command;     /**< Command register */
    uint8_t  featurel;    /**< Feature register, 7:0 */
    // DWORD 1
    uint8_t  lba0;        /**< LBA low register, 7:0 */
    uint8_t  lba1;        /**< LBA mid register, 15:8 */
    uint8_t  lba2;        /**< LBA high register, 23:16 */
    uint8_t  device;      /**< Device register */
    // DWORD 2
    uint8_t  lba3;        /**< LBA register, 31:24 */
    uint8_t  lba4;        /**< LBA register, 39:32 */
    uint8_t  lba5;        /**< LBA register, 47:40 */
    uint8_t  featureh;    /**< Feature register, 15:8 */
    // DWORD 3
    uint16_t count;       /**< Count register */
    uint8_t  icc;         /**< Isochronous command completion */
    uint8_t  control;     /**< Control register */
    // DWORD 4
    uint8_t  rsv1[4];     /**< Reserved */
#else
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_REG_H2D */
    uint8_t  c:1;         /**< 1: Command, 0: Control */
    uint8_t  rsv0:3;      /**< Reserved */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  command;     /**< Command register */
    uint8_t  featurel;    /**< Feature register, 7:0 */
    // DWORD 1
    uint8_t  lba0;        /**< LBA low register, 7:0 */
    uint8_t  lba1;        /**< LBA mid register, 15:8 */
    uint8_t  lba2;        /**< LBA high register, 23:16 */
    uint8_t  device;      /**< Device register */
    // DWORD 2
    uint8_t  lba3;        /**< LBA register, 31:24 */
    uint8_t  lba4;        /**< LBA register, 39:32 */
    uint8_t  lba5;        /**< LBA register, 47:40 */
    uint8_t  featureh;    /**< Feature register, 15:8 */
    // DWORD 3
    uint16_t count;       /**< Count register */
    uint8_t  icc;         /**< Isochronous command completion */
    uint8_t  control;     /**< Control register */
    // DWORD 4
    uint8_t  rsv1[4];     /**< Reserved */
#endif
} fis_reg_h2d_t;
+
/**
 * A device to host register FIS is used by the device to notify
 * the host that some ATA register has changed. It contains the
 * updated task files such as status, error and other registers.
 *
 * NOTE(review): bitfield order within a byte is endian-mirrored between
 * the two variants, matching GCC's bitfield allocation — confirm before
 * porting to another compiler.
 */
typedef struct
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_REG_D2H */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  rsv0:2;      /**< Reserved */
    uint8_t  i:1;         /**< Interrupt bit */
    uint8_t  rsv1:1;      /**< Reserved */
    uint8_t  status;      /**< Status register */
    uint8_t  error;       /**< Error register */
    // DWORD 1
    uint8_t  lba0;        /**< LBA low register, 7:0 */
    uint8_t  lba1;        /**< LBA mid register, 15:8 */
    uint8_t  lba2;        /**< LBA high register, 23:16 */
    uint8_t  device;      /**< Device register */
    // DWORD 2
    uint8_t  lba3;        /**< LBA register, 31:24 */
    uint8_t  lba4;        /**< LBA register, 39:32 */
    uint8_t  lba5;        /**< LBA register, 47:40 */
    uint8_t  rsv2;        /**< Reserved */
    // DWORD 3
    uint8_t  countl;      /**< Count register, 7:0 */
    uint8_t  counth;      /**< Count register, 15:8 */
    uint8_t  rsv3[2];     /**< Reserved */
    // DWORD 4
    uint8_t  rsv4[4];     /**< Reserved */
#else
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_REG_D2H */
    uint8_t  rsv1:1;      /**< Reserved */
    uint8_t  i:1;         /**< Interrupt bit */
    uint8_t  rsv0:2;      /**< Reserved */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  status;      /**< Status register */
    uint8_t  error;       /**< Error register */
    // DWORD 1
    uint8_t  lba0;        /**< LBA low register, 7:0 */
    uint8_t  lba1;        /**< LBA mid register, 15:8 */
    uint8_t  lba2;        /**< LBA high register, 23:16 */
    uint8_t  device;      /**< Device register */
    // DWORD 2
    uint8_t  lba3;        /**< LBA register, 31:24 */
    uint8_t  lba4;        /**< LBA register, 39:32 */
    uint8_t  lba5;        /**< LBA register, 47:40 */
    uint8_t  rsv2;        /**< Reserved */
    // DWORD 3
    uint8_t  countl;      /**< Count register, 7:0 */
    uint8_t  counth;      /**< Count register, 15:8 */
    uint8_t  rsv3[2];     /**< Reserved */
    // DWORD 4
    uint8_t  rsv4[4];     /**< Reserved */
#endif
} fis_reg_d2h_t;
+
/**
 * This FIS is used by the host or device to send data payload.
 * The data size can be varied.
 *
 * NOTE(review): data[1] is the pre-C99 trailing-array idiom — the
 * payload is accessed beyond the declared size; sizeof this struct
 * does not reflect the real FIS length.
 */
typedef struct
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_DATA */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  rsv0:4;      /**< Reserved */
    uint8_t  rsv1[2];     /**< Reserved */
    // DWORD 1 ~ N
    uint32_t data[1];     /**< Payload */
#else
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_DATA */
    uint8_t  rsv0:4;      /**< Reserved */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  rsv1[2];     /**< Reserved */
    // DWORD 1 ~ N
    uint32_t data[1];     /**< Payload */
#endif
} fis_data_t;
+
/**
 * This FIS is used by the device to tell the host that it's
 * about to send or ready to receive a PIO data payload.
 */
typedef struct
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_PIO_SETUP */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  rsv0:1;      /**< Reserved */
    uint8_t  d:1;         /**< Data transfer direction, 1 - device to host */
    uint8_t  i:1;         /**< Interrupt bit */
    uint8_t  rsv1:1;      /**< Reserved */
    uint8_t  status;      /**< Status register */
    uint8_t  error;       /**< Error register */
    // DWORD 1
    uint8_t  lba0;        /**< LBA low register, 7:0 */
    uint8_t  lba1;        /**< LBA mid register, 15:8 */
    uint8_t  lba2;        /**< LBA high register, 23:16 */
    uint8_t  device;      /**< Device register */
    // DWORD 2
    uint8_t  lba3;        /**< LBA register, 31:24 */
    uint8_t  lba4;        /**< LBA register, 39:32 */
    uint8_t  lba5;        /**< LBA register, 47:40 */
    uint8_t  rsv2;        /**< Reserved */
    // DWORD 3
    uint8_t  countl;      /**< Count register, 7:0 */
    uint8_t  counth;      /**< Count register, 15:8 */
    uint8_t  rsv3;        /**< Reserved */
    uint8_t  e_status;    /**< New value of status register */
    // DWORD 4
    uint16_t tc;          /**< Transfer count */
    uint8_t  rsv4[2];     /**< Reserved */
#else
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_PIO_SETUP */
    uint8_t  rsv1:1;      /**< Reserved */
    uint8_t  i:1;         /**< Interrupt bit */
    uint8_t  d:1;         /**< Data transfer direction, 1 - device to host */
    uint8_t  rsv0:1;      /**< Reserved */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  status;      /**< Status register */
    uint8_t  error;       /**< Error register */
    // DWORD 1
    uint8_t  lba0;        /**< LBA low register, 7:0 */
    uint8_t  lba1;        /**< LBA mid register, 15:8 */
    uint8_t  lba2;        /**< LBA high register, 23:16 */
    uint8_t  device;      /**< Device register */
    // DWORD 2
    uint8_t  lba3;        /**< LBA register, 31:24 */
    uint8_t  lba4;        /**< LBA register, 39:32 */
    uint8_t  lba5;        /**< LBA register, 47:40 */
    uint8_t  rsv2;        /**< Reserved */
    // DWORD 3
    uint8_t  countl;      /**< Count register, 7:0 */
    uint8_t  counth;      /**< Count register, 15:8 */
    uint8_t  rsv3;        /**< Reserved */
    uint8_t  e_status;    /**< New value of status register */
    // DWORD 4
    uint16_t tc;          /**< Transfer count */
    uint8_t  rsv4[2];     /**< Reserved */
#endif
} fis_pio_setup_t;
+
/**
 * DMA Setup - Device to Host
 */
typedef struct __attribute__ ((__packed__))
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_DMA_SETUP */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  rsv0:1;      /**< Reserved */
    uint8_t  d:1;         /**< Data transfer direction, 1 - device to host */
    uint8_t  i:1;         /**< Interrupt bit */
    uint8_t  a:1;         /**< Auto-activate. Specifies if DMA Activate FIS is needed */
    uint8_t  rsved[2];    /**< Reserved */
    //DWORD 1&2
    uint64_t DMAbufferID; /**< DMA Buffer Identifier. Used to Identify DMA buffer in host memory. SATA Spec says host specific and not in Spec. Trying AHCI spec might work. */
    //DWORD 3
    uint32_t rsvd;        /**< More reserved */
    //DWORD 4
    uint32_t DMAbufOffset; /**< Byte offset into buffer. First 2 bits must be 0 */
    //DWORD 5
    uint32_t TransferCount; /**< Number of bytes to transfer. Bit 0 must be 0 */
    //DWORD 6
    uint32_t resvd;       /**< Reserved */
#else
    // DWORD 0
    uint8_t  fis_type;    /**< FIS_TYPE_DMA_SETUP */
    uint8_t  a:1;         /**< Auto-activate. Specifies if DMA Activate FIS is needed */
    uint8_t  i:1;         /**< Interrupt bit */
    uint8_t  d:1;         /**< Data transfer direction, 1 - device to host */
    uint8_t  rsv0:1;      /**< Reserved */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  rsved[2];    /**< Reserved */
    //DWORD 1&2
    uint64_t DMAbufferID; /**< DMA Buffer Identifier. Used to Identify DMA buffer in host memory. SATA Spec says host specific and not in Spec. Trying AHCI spec might work. */
    //DWORD 3
    uint32_t rsvd;        /**< More reserved */
    //DWORD 4
    uint32_t DMAbufOffset; /**< Byte offset into buffer. First 2 bits must be 0 */
    //DWORD 5
    uint32_t TransferCount; /**< Number of bytes to transfer. Bit 0 must be 0 */
    //DWORD 6
    uint32_t resvd;       /**< Reserved */
#endif
} fis_dma_setup_t;
+
/* BIST Activate FIS - bidirectional. Selects loopback / test modes.
   NOTE(review): unlike the other FIS structs, this one has no
   endian-mirrored variant — verify on big-endian targets. */
typedef struct __attribute__ ((__packed__))
{
    uint8_t  fis_type;    /**< FIS_TYPE_BIST */
    uint8_t  pmport:4;    /**< Port multiplier */
    uint8_t  rsv0:4;      /**< Reserved */
    uint8_t  v:1;         /**< Vendor Specific */
    uint8_t  r:1;         /**< Reserved */
    uint8_t  p:1;         /**< Primitive bit */
    uint8_t  f:1;         /**< Far end analog loopback */
    uint8_t  l:1;         /**< Far end retimed loopback */
    uint8_t  s:1;         /**< Scrambling bypass */
    uint8_t  a:1;         /**< Align bypass */
    uint8_t  t:1;         /**< Far end transmit only */
    uint8_t  rsv1;        /**< Reserved */
    uint32_t data1;       /**< Only valid when "t" is set */
    uint32_t data2;       /**< Only valid when "t" is set */
} fis_bist_t;
+
/**
 * Received FIS Structure - AHCI rev 1.3 page 35.
 * The controller DMA-writes incoming FISes into this 256-byte area at
 * fixed offsets; the pad fields keep each member at its spec offset.
 */
typedef struct
{
    // 0x00
    fis_dma_setup_t dsfis;    /**< DMA Setup FIS */
    uint8_t         pad0[4];  /* Filler 0x1c - 0x1f */
    // 0x20
    fis_pio_setup_t psfis;    /**< PIO Setup FIS */
    uint8_t         pad1[12]; /* Filler 0x34 - 0x3f */
    // 0x40
    fis_reg_d2h_t   rfis;     /**< Device to Host (D2H) Register FIS */
    uint8_t         pad2[4];  /* Filler 0x54 - 0x57 */
    // 0x58
    uint8_t         sdbfis[8]; /**< Set Device Bit FIS */
    // 0x60
    uint8_t         ufis[64]; /**< Unknown FIS (up to 64 bytes) */
    // 0xA0
    uint8_t         rsv[0x100-0xA0]; /* Reserved */
} hba_fis_t;
+
+/**
+ * Command header - AHCI rev 1.3 page 36
+ *
+ * One 32 byte entry of the command list pointed to by PxCLB.
+ * bdk_sata_initialize() allocates 32 of these per port, 1KB aligned.
+ */
+typedef struct
+{
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+    // DW0
+    uint8_t cfl:5; /**< Command FIS length in DWORDS, 2 ~ 16 */
+    uint8_t a:1; /**< ATAPI */
+    uint8_t w:1; /**< Write, 1: H2D, 0: D2H */
+    uint8_t p:1; /**< Prefetchable */
+    uint8_t r:1; /**< Reset */
+    uint8_t b:1; /**< BIST */
+    uint8_t c:1; /**< Clear busy upon R_OK */
+    uint8_t rsv0:1; /**< Reserved */
+    uint8_t pmp:4; /**< Port multiplier port */
+    uint16_t prdtl; /**< Physical region descriptor table length in entries */
+    // DW1
+    uint32_t prdbc; /**< Physical region descriptor byte count transferred (written back by hardware) */
+    // DW2, 3
+    uint64_t ctba; /**< Command table descriptor base address. Must be 128 byte aligned */
+    // DW4 - 7
+    uint32_t rsv1[4]; /**< Reserved */
+#else
+    // DW0
+    uint8_t p:1; /**< Prefetchable */
+    uint8_t w:1; /**< Write, 1: H2D, 0: D2H */
+    uint8_t a:1; /**< ATAPI */
+    uint8_t cfl:5; /**< Command FIS length in DWORDS, 2 ~ 16 */
+    uint8_t pmp:4; /**< Port multiplier port */
+    uint8_t c:1; /**< Clear busy upon R_OK */
+    uint8_t b:1; /**< BIST */
+    uint8_t r:1; /**< Reset */
+    uint8_t rsv0:1; /**< Reserved */
+    uint16_t prdtl; /**< Physical region descriptor table length in entries */
+    // DW1
+    uint32_t prdbc; /**< Physical region descriptor byte count transferred */
+    // DW2, 3
+    uint64_t ctba; /**< Command table descriptor base address */
+    // DW4 - 7
+    uint32_t rsv1[4]; /**< Reserved */
+#endif
+} hba_cmd_header_t;
+
+/**
+ * Physical Region Descriptor Table Entry - AHCI rev 1.3 page 39
+ *
+ * NOTE(review): the big-endian variant collapses the dbc/rsv1/i bitfields
+ * into a single uint32_t, so the interrupt-on-completion bit cannot be
+ * set individually there - confirm big-endian builds are never used.
+ */
+typedef struct
+{
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+    uint64_t dba; /**< Data base address. Must be 2 byte aligned */
+    uint32_t rsv0; /**< Reserved */
+    uint32_t dbc:22; /**< Byte count - 1, 4M max. Must be even number of bytes to transfer */
+    uint32_t rsv1:9; /**< Reserved */
+    uint32_t i:1; /**< Interrupt on completion */
+#else
+    uint64_t dba; /**< Data base address */
+    uint32_t rsv0; /**< Reserved */
+    uint32_t dbc; /**< Byte count - 1 plus reserved and interrupt bits, unseparated */
+#endif
+} hba_prdt_entry_t;
+
+/**
+ * Command Table - AHCI rev 1.3 page 39
+ *
+ * Pointed to by hba_cmd_header_t.ctba. The PRDT is declared with one
+ * entry; the spec allows 0 - 65535 entries, but the callers in this file
+ * only ever use one (prdtl is always set to 1).
+ */
+typedef struct
+{
+    uint8_t cfis[64]; /**< Command FIS */
+    uint8_t acmd[16]; /**< ATAPI command, 12 or 16 bytes */
+    uint8_t rsv[48]; /**< Reserved */
+    hba_prdt_entry_t prdt_entry[1]; /**< Physical region descriptor table entries, 0 ~ 65535 */
+} hba_cmd_tbl_t;
+
+/**
+ * Return the number of SATA controllers on the chip
+ *
+ * @param node Node to query
+ *
+ * @return Number of controllers, could be zero.
+ */
+int bdk_sata_get_controllers(bdk_node_t node)
+{
+    /* The controller count is fixed per chip model; the node argument is
+       kept for API symmetry with the other SATA calls */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return 16; /* 16 controllers on QLMs 2,3, 6-7 */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return 6; /* 6 controllers on DLMs 4-6 */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return 2; /* 2 controllers on DLM 2 */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN93XX))
+        return 4; /* 4 controllers on DLM 4-5 */
+    return 0;
+}
+
+/* Report whether a controller has already been brought up: its UCTL
+   clocks must be running and the port DMA engine (PxCMD[ST]) started */
+static int __bdk_sata_is_initialized(bdk_node_t node, int controller)
+{
+    /* Never touch UAHC CSRs on an unclocked block */
+    BDK_CSR_INIT(uctl, node, BDK_SATAX_UCTL_CTL(controller));
+    if (!uctl.s.a_clk_en)
+        return 0;
+    if (uctl.s.a_clkdiv_rst)
+        return 0;
+
+    /* Initialized means the port controller has been started */
+    BDK_CSR_INIT(port_cmd, node, BDK_SATAX_UAHC_P0_CMD(controller));
+    return port_cmd.s.st ? 1 : 0;
+}
+
+/**
+ * Initialize a SATA controller and begin device detection
+ *
+ * @param node Node to initialize
+ * @param controller Which controller to initialize
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_initialize(bdk_node_t node, int controller)
+{
+    /* Compile time checks that the structures above match the layouts
+       required by AHCI rev 1.3 */
+    _Static_assert(sizeof(fis_reg_h2d_t) == 5 * 4, "Size of fis_reg_h2d_t wrong");
+    _Static_assert(sizeof(fis_reg_d2h_t)== 5 * 4, "Size of fis_reg_d2h_t wrong");
+    _Static_assert(sizeof(fis_data_t) == 2 * 4, "Size of fis_data_t wrong");
+    _Static_assert(sizeof(fis_pio_setup_t) == 5 * 4, "Size of fis_pio_setup_t wrong");
+    _Static_assert(sizeof(fis_dma_setup_t) == 7 * 4, "Size of fis_dma_setup_t wrong");
+    _Static_assert(sizeof(fis_bist_t) == 3 * 4, "Size of fis_bist_t wrong");
+    _Static_assert(sizeof(hba_fis_t) == 256, "Size of hba_fis_t wrong");
+    _Static_assert(sizeof(hba_cmd_header_t) == 8 * 4, "Size of hba_cmd_header_t wrong");
+    _Static_assert(sizeof(hba_prdt_entry_t) == 4 * 4, "Size of hba_prdt_entry_t wrong");
+    _Static_assert(sizeof(hba_cmd_tbl_t)== 128 + sizeof(hba_prdt_entry_t), "Size of hba_cmd_tbl_t wrong");
+
+    /* Make sure port is clocked before proceeding */
+    BDK_CSR_INIT(uctl_ctl, node, BDK_SATAX_UCTL_CTL(controller));
+    if (!uctl_ctl.s.a_clk_en || uctl_ctl.s.a_clkdiv_rst)
+    {
+        bdk_error("N%d.SATA%d: Not in SATA mode\n", node, controller);
+        return -1;
+    }
+
+    /* The following SATA setup is from the AHCI 1.3 spec, section
+       10.1.1, Firmware Specific Initialization. */
+    /* Early firmware setup was done in __bdk_qlm_set_sata(), we're not
+       starting the staggered spin-up process */
+
+    /* 1. Indicate that system software is AHCI aware by setting GHC.AE to '1'. */
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_GHC(controller),
+        c.s.ae = 1); /* AHCI enable */
+
+    /* 2. Ensure that PxCMD.ST = '0', PxCMD.CR = '0', PxCMD.FRE = '0',
+       PxCMD.FR = '0', and PxSCTL.DET = '0'. */
+    BDK_CSR_INIT(p0_cmd, node, BDK_SATAX_UAHC_P0_CMD(controller));
+    if (p0_cmd.s.st)
+        bdk_error("N%d.SATA%d: PxCMD[ST] is illegally set during init\n", node, controller);
+    if (p0_cmd.s.cr)
+        bdk_error("N%d.SATA%d: PxCMD[CR] is illegally set during init\n", node, controller);
+    if (p0_cmd.s.fre)
+        bdk_error("N%d.SATA%d: PxCMD[FRE] is illegally set during init\n", node, controller);
+    if (p0_cmd.s.fr)
+        bdk_error("N%d.SATA%d: PxCMD[FR] is illegally set during init\n", node, controller);
+    BDK_CSR_INIT(p0_sctl, node, BDK_SATAX_UAHC_P0_SCTL(controller));
+    if (p0_sctl.s.det)
+        bdk_error("N%d.SATA%d: PxSCTL[DET] is illegally set during init\n", node, controller);
+
+    /* 3. Allocate memory for the command list and the FIS receive area. Set
+       PxCLB and PxCLBU to the physical address of the allocated command list.
+       Set PxFB and PxFBU to the physical address of the allocated FIS receive
+       area. Then set PxCMD.FRE to '1'. */
+    /* Allocate area for commands. If PxCLB is already programmed (from a
+       previous init) the existing allocation is reused; the memory is
+       intentionally never freed. 32 headers * 32 bytes = 1024 bytes,
+       1KB aligned as AHCI requires for the command list */
+    uint64_t clb_pa = BDK_CSR_READ(node, BDK_SATAX_UAHC_P0_CLB(controller));
+    if (clb_pa == 0)
+    {
+        void *clb = memalign(1024, sizeof(hba_cmd_header_t) * 32);
+        if (clb == NULL)
+        {
+            bdk_error("N%d.SATA%d: Failed to allocate command list\n", node, controller);
+            return -1;
+        }
+        memset(clb, 0, sizeof(hba_cmd_header_t) * 32);
+        BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_CLB(controller),
+            bdk_ptr_to_phys(clb));
+    }
+    /* Allocate area for FIS DMAs. Same reuse rule as PxCLB above; the
+       256 byte FIS receive area must be 256 byte aligned */
+    uint64_t fb_pa = BDK_CSR_READ(node, BDK_SATAX_UAHC_P0_FB(controller));
+    if (fb_pa == 0)
+    {
+        hba_fis_t *fb = memalign(256, sizeof(hba_fis_t));
+        if (fb == NULL)
+        {
+            bdk_error("N%d.SATA%d: Failed to allocate FIS\n", node, controller);
+            return -1;
+        }
+        memset(fb, 0, sizeof(hba_fis_t));
+        BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_FB(controller),
+            bdk_ptr_to_phys(fb));
+    }
+
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_CMD(controller),
+        c.s.fre = 1); /* FIS-receive enable */
+
+    /* 4. Initiate a spin up of the SATA drive attached to the port; i.e. set
+       PxCMD.SUD to '1'.*/
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_CMD(controller),
+        c.s.pod = 1; /* Power on the device, only has effect if SATAX_UAHC_P0_CMD[CPD]=1 */
+        c.s.sud = 1); /* Spin-up device */
+
+    /* 5. Wait for a positive indication that a device is attached to the port
+       (the maximum amount of time to wait for presence indication is specified
+       in the Serial ATA Revision 2.6 specification). This is done by polling
+       PxSSTS.DET. If PxSSTS.DET returns a value of 1h or 3h when read, then
+       system software shall continue to the next step, otherwise if the
+       polling process times out system software moves to the next implemented
+       port and returns to step 1. */
+    /* Waiting for device detection, up to 500ms. PxCMD[DET] must be 1 or 3 */
+    uint64_t timeout = bdk_clock_get_count(BDK_CLOCK_TIME) + bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / 2;
+    BDK_CSR_INIT(p0_ssts, node, BDK_SATAX_UAHC_P0_SSTS(controller));
+    while ((p0_ssts.s.det != 1) && (p0_ssts.s.det != 3) &&
+           (bdk_clock_get_count(BDK_CLOCK_TIME) <= timeout))
+    {
+        p0_ssts.u = BDK_CSR_READ(node, BDK_SATAX_UAHC_P0_SSTS(controller));
+        bdk_thread_yield();
+    }
+    if ((p0_ssts.s.det != 1) && (p0_ssts.s.det != 3))
+    {
+        bdk_error("N%d.SATA%d: PxSCTL[DET]=%d failed to detect a device\n", node, controller, p0_ssts.s.det);
+        goto fail;
+    }
+
+    /* 6. Clear the PxSERR register, by writing '1s' to each implemented bit
+       location. */
+    BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_SERR(controller), -1);
+
+    /* 7. Wait for indication that SATA drive is ready. This is determined via
+       an examination of PxTFD.STS. If PxTFD.STS.BSY, PxTFD.STS.DRQ, and
+       PxTFD.STS.ERR are all '0', prior to the maximum allowed time as
+       specified in the ATA/ATAPI-7 specification, the device is ready. */
+    /* Wait for the device to be ready. BSY(7), DRQ(3), and ERR(0) must be clear.
+       Timeout is 5 seconds */
+    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_SATAX_UAHC_P0_TFD(controller), sts & 0x89, ==, 0, 5000000))
+    {
+        BDK_CSR_INIT(p0_tfd, node, BDK_SATAX_UAHC_P0_TFD(controller));
+        bdk_error("N%d.SATA%d: PxTFD[STS]=0x%x, Drive not ready\n", node, controller, p0_tfd.s.sts);
+        goto fail;
+    }
+
+    /* Enable AHCI command queuing */
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_CCC_CTL(controller),
+        c.s.tv = 0;
+        c.s.en = 1);
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_CCC_PORTS(controller),
+        c.s.prt = 1);
+
+    /* Enable the FIS and clear any pending errors */
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_FBS(controller),
+        c.s.dec = 1;
+        c.s.en = 1);
+
+    /* Disable all interrupts */
+    BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_IE(controller), 0);
+
+    /* Clear all status bits */
+    BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_IS(controller), -1);
+
+    /* Start the port controller */
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_CMD(controller),
+        c.s.st = 1); /* Start the controller */
+    return 0;
+
+fail:
+    /* Reset the HBA so a later retry starts from a clean state */
+    bdk_sata_shutdown(node, controller);
+    return -1;
+}
+
+/**
+ * Shutdown a SATA controller
+ *
+ * Performs a full HBA reset (GHC[HR]), which clears the port state, then
+ * restores the previously configured link speed limit and power
+ * management settings so a later bdk_sata_initialize() behaves the same.
+ *
+ * @param node Node to access
+ * @param controller Controller to shutdown
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_shutdown(bdk_node_t node, int controller)
+{
+    /* Remember the current speed limit and power management */
+    BDK_CSR_INIT(p0_sctl, node, BDK_SATAX_UAHC_P0_SCTL(controller));
+    /* Perform a HBA reset */
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_GHC(controller),
+        c.s.hr = 1);
+    /* Wait for the reset to complete. Hardware clears GHC[HR] when done;
+       allow up to 100ms */
+    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_SATAX_UAHC_GBL_GHC(controller), hr, ==, 0, 100000))
+    {
+        bdk_error("N%d.SATA%d: Timeout waiting for HBA reset to complete\n", node, controller);
+        return -1;
+    }
+    /* Restore the speed limit and power management */
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_SCTL(controller),
+        c.s.ipm = p0_sctl.s.ipm;
+        c.s.spd = p0_sctl.s.spd);
+    return 0;
+}
+
+/**
+ * Return the number of SATA ports connected to this AHCI controller
+ *
+ * @param node Node to query
+ * @param controller SATA controller
+ *
+ * @return Number of ports. Zero if the controller doesn't connect to a QLM.
+ */
+int bdk_sata_get_ports(bdk_node_t node, int controller)
+{
+    /* CCC_CTL[EN] is set during bdk_sata_initialize(); a controller that
+       was never brought up reports zero ports */
+    BDK_CSR_INIT(ccc_ctl, node, BDK_SATAX_UAHC_GBL_CCC_CTL(controller));
+    if (ccc_ctl.s.en)
+        return 1;
+    return 0;
+}
+
+/**
+ * Convert an IDE string into a C string with a NULL terminator
+ *
+ * @param buffer Buffer for new string. Must be one longer than length
+ * @param original IDE string of identify command
+ * @param length Length of the string in bytes
+ */
+static void get_ide_string(char *buffer, void *original, int length)
+{
+    /* Copy the IDE string 2 bytes at a time, swapping as we go. ATA
+       IDENTIFY strings store each pair of characters byte swapped */
+    uint16_t *newp = (uint16_t *)buffer;
+    uint16_t *oldp = (uint16_t *)original;
+    for (int i = 0; i < length / 2; i++)
+        newp[i] = bdk_swap16(oldp[i]);
+
+    /* Force a NULL terminator */
+    buffer[length] = 0;
+
+    /* Remove all trailing spaces. Stop when length reaches zero so an
+       all-space string can't walk past the start of the buffer (the
+       unbounded loop previously read buffer[-1] in that case) */
+    while (length > 0 && buffer[length - 1] == ' ')
+    {
+        buffer[length - 1] = 0;
+        length--;
+    }
+}
+
+/**
+ * Build and issue a single ATA command through AHCI command slot 0 and
+ * wait for it to complete.
+ *
+ * @param node Node the controller is on
+ * @param controller Which SATA controller to use
+ * @param command ATA command byte (e.g. 0xec IDENTIFY, 0xc8 READ DMA)
+ * @param is_write Direction for the command header (1: H2D, 0: D2H)
+ * @param lba 48 bit starting block address
+ * @param buffer Data buffer for the transfer
+ * @param size Transfer size in bytes, multiple of 512
+ *
+ * @return Zero on success, negative on failure
+ */
+static int issue_command(bdk_node_t node, int controller, int command, int is_write, uint64_t lba, void *buffer, int size)
+{
+    /* Pick a command slot to use */
+    int slot = 0;
+    hba_cmd_header_t *cmd_header = bdk_phys_to_ptr(BDK_CSR_READ(node, BDK_SATAX_UAHC_P0_CLB(controller)));
+    cmd_header += slot;
+
+    /* Build a command table with the command to execute. The table is a
+       stack local; that is safe because we wait for completion before
+       returning */
+    hba_cmd_tbl_t cmd_table BDK_CACHE_LINE_ALIGNED;
+    memset(&cmd_table, 0, sizeof(hba_cmd_tbl_t));
+    /* Where the data is */
+    cmd_table.prdt_entry[0].dba = bdk_cpu_to_le64(bdk_ptr_to_phys(buffer));
+    cmd_table.prdt_entry[0].dbc = bdk_cpu_to_le32(size - 1);
+
+    /* The actual command */
+    fis_reg_h2d_t *cmd_fis = (fis_reg_h2d_t *)cmd_table.cfis;
+    cmd_fis->fis_type = FIS_TYPE_REG_H2D;
+    cmd_fis->command = command;
+    cmd_fis->device = 1 << 6; /* LBA mode */
+    cmd_fis->c = 1; /* Write command register */
+    cmd_fis->lba0 = (lba >> 0) & 0xff;
+    cmd_fis->lba1 = (lba >> 8) & 0xff;
+    cmd_fis->lba2 = (lba >> 16) & 0xff;
+    cmd_fis->lba3 = (lba >> 24) & 0xff;
+    cmd_fis->lba4 = (lba >> 32) & 0xff;
+    cmd_fis->lba5 = (lba >> 40) & 0xff;
+    cmd_fis->count = bdk_cpu_to_le16(size / 512);
+
+    /* Setup the command header. Clear it first so flags left over from a
+       previous command in this slot (e.g. the BIST bit set by
+       bdk_sata_bist_fis()) can't leak into this command; the BIST path
+       already clears the header the same way */
+    memset(cmd_header, 0, sizeof(hba_cmd_header_t));
+    cmd_header->cfl = sizeof(fis_reg_h2d_t) / 4;
+    cmd_header->w = is_write;
+    cmd_header->prdtl = bdk_cpu_to_le16(1);
+    cmd_header->ctba = bdk_cpu_to_le64(bdk_ptr_to_phys(&cmd_table));
+
+    BDK_WMB;
+
+    /* Check that the slot is idle */
+    BDK_CSR_INIT(ci, node, BDK_SATAX_UAHC_P0_CI(controller));
+    if (ci.u & (1<<slot))
+    {
+        bdk_error("N%d.SATA%d: Command slot busy before submit\n", node, controller);
+        return -1;
+    }
+
+    /* Clear all status bits */
+    BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_IS(controller), -1);
+    BDK_CSR_READ(node, BDK_SATAX_UAHC_P0_IS(controller));
+
+    /* Issue command */
+    BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_CI(controller), 1 << slot);
+
+    /* Wait for the hardware to clear our bit in PxCI */
+    const int TIMEOUT = 5000000; /* 5 seconds */
+    if (BDK_CSR_WAIT_FOR_FIELD(node,BDK_SATAX_UAHC_P0_CI(controller), ci & (1<<slot), ==, 0, TIMEOUT))
+    {
+        bdk_error("N%d.SATA%d: Command timeout\n", node, controller);
+        bdk_sata_shutdown(node, controller);
+        return -1;
+    }
+
+    /* Wait for completion: D2H register, PIO setup, or DMA setup FIS */
+    if (BDK_CSR_WAIT_FOR_FIELD(node,BDK_SATAX_UAHC_P0_IS(controller), dhrs | c.s.pss | c.s.dss, !=, 0, TIMEOUT))
+    {
+        bdk_error("N%d.SATA%d: Command Response timeout\n", node, controller);
+        bdk_sata_shutdown(node, controller);
+        return -1;
+    }
+
+    /* Read status */
+    BDK_CSR_INIT(p_is, node, BDK_SATAX_UAHC_P0_IS(controller));
+    if (p_is.s.tfes)
+    {
+        bdk_error("N%d.SATA%d: Task-file error\n", node, controller);
+        bdk_sata_shutdown(node, controller);
+        return -1;
+    }
+    return 0;
+}
+
+/**
+ * Identify the SATA device connected to a controller
+ *
+ * Sends an ATA IDENTIFY (0xec) and prints the model/firmware/serial
+ * strings and capacity.
+ *
+ * @param node Node to query
+ * @param controller Controller to query
+ * @param port Which SATA port on the controller, zero based. Currently
+ *             unused; these controllers expose a single port (see
+ *             bdk_sata_get_ports())
+ *
+ * @return Size of the disk in bytes, or zero on any failure
+ */
+uint64_t bdk_sata_identify(bdk_node_t node, int controller, int port)
+{
+    /* Lazily bring the controller up if nobody has started it yet */
+    if (!__bdk_sata_is_initialized(node, controller))
+    {
+        if (bdk_sata_initialize(node, controller))
+            return 0;
+    }
+
+    const int TIMEOUT = 1000000; /* 1 seconds */
+    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_SATAX_UAHC_P0_SSTS(controller), ipm, !=, 0, TIMEOUT))
+    {
+        bdk_error("N%d.SATA%d: Device not present or communication not established\n", node, controller);
+        return 0;
+    }
+
+    /* Read the Serial ATA Status */
+    BDK_CSR_INIT(ssts, node, BDK_SATAX_UAHC_P0_SSTS(controller));
+
+    /* Check the link power state. Only the active state (1) proceeds */
+    switch (ssts.s.ipm)
+    {
+        case 0: /* Device not present or communication not established */
+            BDK_TRACE(SATA, "N%d.SATA%d: Device not present or communication not established\n", node, controller);
+            return 0;
+        case 1: /* Interface in active state */
+            BDK_TRACE(SATA, "N%d.SATA%d: Interface in active state\n", node, controller);
+            break;
+        case 2: /* Interface in Partial power management state */
+            BDK_TRACE(SATA, "N%d.SATA%d: Interface in Partial power management state\n", node, controller);
+            return 0;
+        case 6: /* Interface in Slumber power management state */
+            BDK_TRACE(SATA, "N%d.SATA%d: Interface in Slumber power management state\n", node, controller);
+            return 0;
+        case 8: /* Interface in DevSleep power management state */
+            BDK_TRACE(SATA, "N%d.SATA%d: Interface in DevSleep power management state\n", node, controller);
+            return 0;
+        default:
+            BDK_TRACE(SATA, "N%d.SATA%d: Interface in unknown power state %d\n", node, controller, ssts.s.ipm);
+            return 0;
+    }
+
+    /* Check the link speed. Gen1-3 (1-3) proceed */
+    switch (ssts.s.spd)
+    {
+        case 0: /* Device not present or communication not established */
+            BDK_TRACE(SATA, "N%d.SATA%d: Device not present or communication not established\n", node, controller);
+            return 0;
+        case 1:
+        case 2:
+        case 3:
+            BDK_TRACE(SATA, "N%d.SATA%d: Speed Gen%d\n", node, controller, ssts.s.spd);
+            break;
+        default:
+            BDK_TRACE(SATA, "N%d.SATA%d: Interface in unknown speed %d\n", node, controller, ssts.s.spd);
+            return 0;
+    }
+
+    /* Check the device detection. Only state 3 (device present and Phy
+       communication established) proceeds */
+    switch (ssts.s.det)
+    {
+        case 0: /* No device detected and Phy communication not established */
+            BDK_TRACE(SATA, "N%d.SATA%d: No device detected and Phy communication not established\n", node, controller);
+            return 0;
+        case 1: /* Device presence detected but Phy communication not established */
+            BDK_TRACE(SATA, "N%d.SATA%d: Device presence detected but Phy communication not established\n", node, controller);
+            return 0;
+        case 3: /* Device presence detected and Phy communication established */
+            BDK_TRACE(SATA, "N%d.SATA%d: Device presence detected and Phy communication established\n", node, controller);
+            break;
+        case 4: /* Phy in offline mode as a result of the interface being disabled or running in a BIST loopback mode */
+            BDK_TRACE(SATA, "N%d.SATA%d: Phy in offline mode\n", node, controller);
+            return 0;
+        default:
+            BDK_TRACE(SATA, "N%d.SATA%d: Device presence in unknown state %d\n", node, controller, ssts.s.det);
+            return 0;
+    }
+
+    /* Read the port signature to identify the device type */
+    BDK_CSR_INIT(sig, node, BDK_SATAX_UAHC_P0_SIG(controller));
+    switch (sig.s.sig)
+    {
+        case 0x00000101: /* SATA_SIG_ATA 0x00000101, SATA drive */
+            BDK_TRACE(SATA, "N%d.SATA%d: SATA drive\n", node, controller);
+            break;
+        case 0xEB140101: /* SATA_SIG_ATAPI 0xEB140101, SATAPI drive */
+            BDK_TRACE(SATA, "N%d.SATA%d: ATAPI drive, not supported by the BDK\n", node, controller);
+            return 0;
+        case 0xC33C0101: /* SATA_SIG_SEMB 0xC33C0101, Enclosure management bridge */
+            BDK_TRACE(SATA, "N%d.SATA%d: Enclosure management bridge, not supported by the BDK\n", node, controller);
+            return 0;
+        case 0x96690101: /* SATA_SIG_PM 0x96690101, Port multiplier */
+            BDK_TRACE(SATA, "N%d.SATA%d: Port multiplier, not supported by the BDK\n", node, controller);
+            return 0;
+        default: /* Just assume it is a drive */
+            BDK_TRACE(SATA, "N%d.SATA%d: Unknown signature 0x%08x, assuming a SATA drive\n", node, controller, sig.u);
+            break;
+    }
+
+    /* Send identify to the device. IDENTIFY returns one 512 byte sector */
+    const int ATA_CMD_IDENTIFY = 0xec;
+    char buffer[512];
+    if (issue_command(node, controller, ATA_CMD_IDENTIFY, 0, 0, buffer, sizeof(buffer)))
+        return 0;
+
+    /* Extract the data out of the IDENTIFY response. Words 57-58 are the
+       capacity in sectors. NOTE(review): this is the legacy 28-bit current
+       capacity field - large drives report their full size in words 60-61
+       (LBA28) or 100-103 (LBA48); confirm this is sufficient for the
+       intended boot-device use */
+    uint16_t *ptr = (uint16_t *)buffer;
+    uint64_t sectors = bdk_le16_to_cpu(ptr[57]);
+    sectors += (uint32_t)bdk_le16_to_cpu(ptr[58]) << 16;
+    char serial[20 + 1]; /* IDENTIFY words 10-19 */
+    get_ide_string(serial, ptr + 10, 20);
+    char firmware[8 + 1]; /* IDENTIFY words 23-26 */
+    get_ide_string(firmware, ptr + 23, 8);
+    char model[40 + 1]; /* IDENTIFY words 27-46 */
+    get_ide_string(model, ptr + 27, 40);
+
+    /* NOTE(review): %lu for a uint64_t assumes an LP64 build - confirm,
+       or switch to PRIu64 */
+    printf("N%d.SATA%d: Model=\"%s\", Firmware=\"%s\", Serial=\"%s\", Sectors=%lu, Link=Gen%d\n",
+        node, controller, model, firmware, serial, sectors, ssts.s.spd);
+
+    /* Return size in bytes */
+    return sectors * 512;
+}
+
+/**
+ * Read data from a SATA device
+ *
+ * @param node Node the controller is on
+ * @param controller Which controller
+ * @param port Which port on the controller, zero based
+ * @param lba 48 bit Block address to read
+ * @param sectors Number of 512 bytes sectors to read
+ * @param buffer Buffer to receive the data. Must be at least 512 * sectors in size
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_read(bdk_node_t node, int controller, int port, uint64_t lba, int sectors, void *buffer)
+{
+    const int ATA_READ_DMA = 0xc8;
+
+    /* Lazily bring the controller up if nobody has started it yet */
+    if (!__bdk_sata_is_initialized(node, controller) &&
+        bdk_sata_initialize(node, controller))
+        return -1;
+
+    return issue_command(node, controller, ATA_READ_DMA, 0, lba, buffer, sectors * 512) ? -1 : 0;
+}
+
+/**
+ * Write data to a SATA device
+ *
+ * @param node Node the controller is on
+ * @param controller Which controller
+ * @param port Which port on the controller, zero based
+ * @param lba 48 bit Block address to write
+ * @param sectors Number of 512 bytes sectors to write
+ * @param buffer Data buffer to write. Must be at least 512 * sectors in size
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_write(bdk_node_t node, int controller, int port, uint64_t lba, int sectors, const void *buffer)
+{
+    const int ATA_WRITE_DMA = 0xca;
+
+    /* Lazily bring the controller up if nobody has started it yet */
+    if (!__bdk_sata_is_initialized(node, controller) &&
+        bdk_sata_initialize(node, controller))
+        return -1;
+
+    /* Cast away const; issue_command() only reads the buffer for writes */
+    return issue_command(node, controller, ATA_WRITE_DMA, 1, lba, (void*)buffer, sectors * 512) ? -1 : 0;
+}
+
+/**
+ * Enter one of the SATA pattern generation / loop testing modes
+ *
+ * Software modes (retimed loopback, TX-only patterns) are configured
+ * purely through BISTCR; the FIS modes send a BIST Activate FIS to the
+ * far end device.
+ *
+ * @param node Node to access
+ * @param controller SATA controller to access
+ * @param port Which port on the controller
+ * @param mode Test mode to enter
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_sata_bist_fis(bdk_node_t node, int controller, int port, bdk_sata_bist_fis_t mode)
+{
+    /* Lazily bring the controller up if nobody has started it yet */
+    if (!__bdk_sata_is_initialized(node, controller))
+    {
+        if (bdk_sata_initialize(node, controller))
+            return -1;
+    }
+
+    /* Select the port we're doing BIST loopback on */
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_TESTR(controller),
+        c.s.psel = port);
+
+    /* Select the pattern. The software TX-only enum values map linearly
+       onto the BISTCR pattern encoding; all other modes default to HTDP */
+    int pattern;
+    switch (mode)
+    {
+        case BDK_SATA_BIST_SW_TX_ONLY_SSOP:
+        case BDK_SATA_BIST_SW_TX_ONLY_HTDP:
+        case BDK_SATA_BIST_SW_TX_ONLY_LTDP:
+        case BDK_SATA_BIST_SW_TX_ONLY_LFSCP:
+        case BDK_SATA_BIST_SW_TX_ONLY_COMP:
+        case BDK_SATA_BIST_SW_TX_ONLY_LBP:
+        case BDK_SATA_BIST_SW_TX_ONLY_MFTP:
+        case BDK_SATA_BIST_SW_TX_ONLY_HFTP:
+        case BDK_SATA_BIST_SW_TX_ONLY_LFTP:
+            pattern = mode - BDK_SATA_BIST_SW_TX_ONLY_SSOP;
+            break;
+        default:
+            pattern = 1; /* HTDP */
+            break;
+    }
+    BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_BISTCR(controller),
+        c.s.pattern = pattern);
+
+    /*
+      Note from the Synopsys SATA bist training video on pattern generation
+      without using BIST FIS.
+
+      1) Far-end Re-timed Loopback Responder Mode (Software Initiated)
+
+        In this mode the host controller receives the pattern and transmits
+        it back out. The setup of the mode is done by software, so no BIST FIS
+        frames are needed. After software sets it up, any pattern generator
+        should be able to send a pattern and get it back.
+
+        Setup:
+        1) Write SATAX_UAHC_GBL_BISTCR.ferlib = 1
+        2) Connect pattern generator
+        3) Pattern generator must send ALIGNs for PHY sync up
+        4) Pattern should be looped back out
+
+      2) Far-end Transmit Only Responder Mode (Software Initiated)
+
+        In this mode the host controller sends a transmit pattern and ignores
+        all input. This is useful for checking the TX eye diagram without an
+        external pattern generator.
+
+        Setup:
+        1) Write SATAX_UAHC_GBL_BISTCR.pattern to select the pattern.
+        2) Write SATAX_UAHC_GBL_BISTCR.txo = 1.
+        3) Host starts sending the requested BIST pattern.
+
+      BIST FIS Modes:
+      1) Far-end Analog Loopback (F=1)
+        Far end loops the received pattern back to transmit without retiming
+        the symbols. This is optional in the SATA 3.0 spec.
+      2) Far-end Retimed Loopback (L=1)
+        Far end loops the received pattern back to transmit after retiming
+        the symbols. This is mandatory in the SATA 3.0 spec.
+      3) Far-end Transmit Only (T=1, with other bits)
+        Far end transits a pattern and ignores its input. This is optional
+        in the SATA 3.0 spec.
+    */
+    if (mode == BDK_SATA_BIST_SW_RETIMED)
+    {
+        /* No FIS, just enter local retimed loopback */
+        BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_BISTCR(controller),
+            c.s.ferlib = 1);
+        BDK_TRACE(SATA, "N%d.SATA%d: Started Retimed loopback\n", node, controller);
+        return 0;
+    }
+    else if ((mode >= BDK_SATA_BIST_SW_TX_ONLY_SSOP) && (mode <= BDK_SATA_BIST_SW_TX_ONLY_LFTP))
+    {
+        /* No FIS, just enter local transit only */
+        BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_BISTCR(controller),
+            c.s.txo = 1);
+        BDK_TRACE(SATA, "N%d.SATA%d: Started tranmsit only\n", node, controller);
+        return 0;
+    }
+
+    /* Issue a BIST FIS command */
+
+    /* Pick a command slot to use */
+    int slot = 0;
+    hba_cmd_header_t *cmd_header = bdk_phys_to_ptr(BDK_CSR_READ(node, BDK_SATAX_UAHC_P0_CLB(controller)));
+    cmd_header += slot;
+
+    /* Build a command table with the command to execute.
+       NOTE(review): cmd_table is a stack local but this function returns
+       immediately after issuing the command without waiting for the slot
+       to go idle, so the HBA may still DMA-read the table after it goes
+       out of scope - confirm this is benign for BIST FIS usage */
+    hba_cmd_tbl_t cmd_table BDK_CACHE_LINE_ALIGNED;
+    memset(&cmd_table, 0, sizeof(hba_cmd_tbl_t));
+
+    /* The actual BIST FIS command */
+    fis_bist_t *bist_fis = (fis_bist_t *)cmd_table.cfis;
+    bist_fis->fis_type = FIS_TYPE_BIST;
+    switch (mode)
+    {
+        case BDK_SATA_BIST_FIS_RETIMED: /* Send FIS to tell device to enter Retimed loopback */
+            bist_fis->l = 1;
+            break;
+        case BDK_SATA_BIST_FIS_ANALOG: /* Send FIS to tell device to enter Analog loopback */
+            bist_fis->f = 1;
+            break;
+        case BDK_SATA_BIST_FIS_TX_ONLY: /* Send FIS to tell device to transit only */
+            bist_fis->t = 1;
+            break;
+        default:
+            bdk_error("Invalid SATA BIST FIS mode %d\n", mode);
+            return -1;
+    }
+
+    /* Setup the command header. Cleared first so nothing stale leaks in */
+    memset(cmd_header, 0, sizeof(hba_cmd_header_t));
+    cmd_header->cfl = sizeof(fis_bist_t) / 4;
+    cmd_header->b = 1; /* This slot carries a BIST FIS */
+    cmd_header->ctba = bdk_ptr_to_phys(&cmd_table);
+
+    BDK_WMB;
+
+    /* Check that the slot is idle */
+    BDK_CSR_INIT(ci, node, BDK_SATAX_UAHC_P0_CI(controller));
+    if (ci.u & (1<<slot))
+    {
+        bdk_error("N%d.SATA%d: Command slot busy before submit\n", node, controller);
+        return -1;
+    }
+
+    /* Clear all status bits */
+    BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_IS(controller), -1);
+    BDK_CSR_READ(node, BDK_SATAX_UAHC_P0_IS(controller));
+
+    /* Issue command */
+    BDK_CSR_WRITE(node, BDK_SATAX_UAHC_P0_CI(controller), 1 << slot);
+    BDK_TRACE(SATA, "N%d.SATA%d: Sent BIST FIS\n", node, controller);
+
+    return 0;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-twsi.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-twsi.c
deleted file mode 100644
index 4fbb78a876..0000000000
--- a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-twsi.c
+++ /dev/null
@@ -1,318 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-#include <bdk.h>
-#include "libbdk-arch/bdk-csrs-mio_tws.h"
-
-#define RECOVERY_UDELAY 5
-#define RECOVERY_CLK_CNT 9
-#define ARBLOST_UDELAY 5000 /* 5ms */
-
-/* This code is an optional part of the BDK. It is only linked in
- if BDK_REQUIRE() needs it */
-BDK_REQUIRE_DEFINE(TWSI);
-
-/**
- * Initialize the TWSI blocks. This just sets the clock rate.
- * Many times stuff will work without calling this, but some
- * TWSI devices will fail. This is normally called automatically
- * in bdk-init-main.c.
- *
- * @return Zero on success, negative on failure
- */
-int bdk_twsix_initialize(bdk_node_t node)
-{
- const int TWSI_BUS_FREQ = 100000; /* 100 KHz */
- const int TWSI_THP = 24; /* TCLK half period (default 24) */
- const int io_clock_hz = bdk_clock_get_rate(node, BDK_CLOCK_SCLK);
- int N_divider;
- int M_divider;
-
- /* Set the TWSI clock to a conservative TWSI_BUS_FREQ. Compute the
- clocks M divider based on the SCLK.
- TWSI freq = (core freq) / (20 x (M+1) x (thp+1) x 2^N)
- M = ((core freq) / (20 x (TWSI freq) x (thp+1) x 2^N)) - 1 */
- for (N_divider = 0; N_divider < 8; N_divider++)
- {
- M_divider = (io_clock_hz / (20 * TWSI_BUS_FREQ * (TWSI_THP + 1) * (1 << N_divider))) - 1;
- if (M_divider < 16)
- break;
- }
-
- BDK_CSR_DEFINE(sw_twsi, BDK_MIO_TWSX_SW_TWSI(bus));
- sw_twsi.u = 0;
- sw_twsi.s.v = 1; /* Clear valid bit */
- sw_twsi.s.op = 0x6; /* See EOP field */
- sw_twsi.s.r = 0; /* Select CLKCTL when R = 0 */
- sw_twsi.s.eop_ia = 3; /* R=0 selects CLKCTL, R=1 selects STAT */
- sw_twsi.s.data = ((M_divider & 0xf) << 3) | ((N_divider & 0x7) << 0);
-
- int num_busses = 2;
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
- num_busses = 6;
-
- for (int bus = 0; bus < num_busses; bus++)
- {
- /* Only init non-slave ports */
- BDK_CSR_INIT(state, node, BDK_MIO_TWSX_SW_TWSI(bus));
- if (!state.s.slonly)
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI(bus), sw_twsi.u);
- }
- return 0;
-}
-
-/**
- * Do a twsi bus recovery in the case when the last transaction
- * on the bus has been left unfinished.
- *
- * @param twsi_id which TWSI bus to use
- */
-static void bdk_twsix_recover_bus(bdk_node_t node, int twsi_id)
-{
- /* read TWSX_INT */
- BDK_CSR_INIT(twsx_int, node, BDK_MIO_TWSX_INT(twsi_id));
-
- for (int i = 0; i < RECOVERY_CLK_CNT * 2; i++)
- {
- if (!twsx_int.s.scl_ovr)
- {
- /* SCL shouldn't be low here */
- if (!twsx_int.s.scl)
- {
- bdk_error("N%d.TWSI%d: SCL is stuck low\n", node, twsi_id);
- return;
- }
-
- /* Break if SDA is high */
- if (twsx_int.s.sda)
- break;
- }
-
- twsx_int.s.scl_ovr = !twsx_int.s.scl_ovr;
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(twsi_id), twsx_int.u);
- bdk_wait_usec(RECOVERY_UDELAY);
- }
-
- /*
- * Generate STOP condition using the register overrides
- * in order to move the higher level controller out of
- * the bad state. This is a workaround for the TWSI hardware.
- */
- twsx_int.s.scl_ovr = 1;
- twsx_int.s.sda_ovr = 1;
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(twsi_id), twsx_int.u);
- bdk_wait_usec(RECOVERY_UDELAY);
- twsx_int.s.scl_ovr = 0;
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(twsi_id), twsx_int.u);
- bdk_wait_usec(RECOVERY_UDELAY);
- twsx_int.s.sda_ovr = 0;
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_INT(twsi_id), twsx_int.u);
-}
-
-/**
- * Do a twsi read from a 7 bit device address using an (optional)
- * internal address. Up to 4 bytes can be read at a time.
- *
- * @param twsi_id which TWSI bus to use
- * @param dev_addr Device address (7 bit)
- * @param internal_addr
- * Internal address. Can be 0, 1 or 2 bytes in width
- * @param num_bytes Number of data bytes to read (1-4)
- * @param ia_width_bytes
- * Internal address size in bytes (0, 1, or 2)
- *
- * @return Read data, or -1 on failure
- */
-int64_t bdk_twsix_read_ia(bdk_node_t node, int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes)
-{
- bdk_mio_twsx_sw_twsi_t sw_twsi_val;
- bdk_mio_twsx_sw_twsi_ext_t twsi_ext;
- int retry_limit = 5;
-
- if (num_bytes < 1 || num_bytes > 4 || ia_width_bytes < 0 || ia_width_bytes > 2)
- return -1;
-retry:
- twsi_ext.u = 0;
- sw_twsi_val.u = 0;
- sw_twsi_val.s.v = 1;
- sw_twsi_val.s.r = 1;
- sw_twsi_val.s.sovr = 1;
- sw_twsi_val.s.size = num_bytes - 1;
- sw_twsi_val.s.addr = dev_addr;
-
- if (ia_width_bytes > 0)
- {
- sw_twsi_val.s.op = 1;
- sw_twsi_val.s.ia = (internal_addr >> 3) & 0x1f;
- sw_twsi_val.s.eop_ia = internal_addr & 0x7;
- if (ia_width_bytes == 2)
- {
- sw_twsi_val.s.eia = 1;
- twsi_ext.s.ia = internal_addr >> 8;
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u);
- }
- }
-
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u);
- if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_MIO_TWSX_SW_TWSI(twsi_id), v, ==, 0, 10000))
- {
- bdk_warn("N%d.TWSI%d: Timeout waiting for read to complete...start recovering process\n",
- node, twsi_id);
- /* perform bus recovery */
- bdk_twsix_recover_bus(node, twsi_id);
- if (retry_limit-- > 0)
- goto retry;
-
- bdk_error("N%d.TWSI%d: Timeout waiting for operation to complete\n", node, twsi_id);
- return -1;
- }
- sw_twsi_val.u = BDK_CSR_READ(node, BDK_MIO_TWSX_SW_TWSI(twsi_id));
- if (!sw_twsi_val.s.r)
- {
- /* Check the reason for the failure. We may need to retry to handle multi-master
- ** configurations.
- ** Lost arbitration : 0x38, 0x68, 0xB0, 0x78
- ** Core busy as slave: 0x80, 0x88, 0xA0, 0xA8, 0xB8, 0xC0, 0xC8
- */
- if (sw_twsi_val.s.data == 0x38
- || sw_twsi_val.s.data == 0x68
- || sw_twsi_val.s.data == 0xB0
- || sw_twsi_val.s.data == 0x78
- || sw_twsi_val.s.data == 0x80
- || sw_twsi_val.s.data == 0x88
- || sw_twsi_val.s.data == 0xA0
- || sw_twsi_val.s.data == 0xA8
- || sw_twsi_val.s.data == 0xB8
- || sw_twsi_val.s.data == 0xC8)
- {
- /*
- * One of the arbitration lost conditions is recognized.
- * The TWSI hardware has switched to the slave mode and
- * expects the STOP condition on the bus.
- * Make a delay before next retry.
- */
- bdk_wait_usec(ARBLOST_UDELAY);
- if (retry_limit-- > 0)
- goto retry;
- }
- /* For all other errors, return an error code */
- return -1;
- }
-
- return (sw_twsi_val.s.data & (0xFFFFFFFF >> (32 - num_bytes*8)));
-}
-
-
-/**
- * Write 1-8 bytes to a TWSI device using an internal address.
- *
- * @param twsi_id which TWSI interface to use
- * @param dev_addr TWSI device address (7 bit only)
- * @param internal_addr
- * TWSI internal address (0, 8, or 16 bits)
- * @param num_bytes Number of bytes to write (1-8)
- * @param ia_width_bytes
- * internal address width, in bytes (0, 1, 2)
- * @param data Data to write. Data is written MSB first on the twsi bus, and
- * only the lower num_bytes bytes of the argument are valid. (If
- * a 2 byte write is done, only the low 2 bytes of the argument is
- * used.
- *
- * @return Zero on success, -1 on error
- */
-int bdk_twsix_write_ia(bdk_node_t node, int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t data)
-{
- bdk_mio_twsx_sw_twsi_t sw_twsi_val;
- bdk_mio_twsx_sw_twsi_ext_t twsi_ext;
- int retry_limit = 5;
- int to;
-
- if (num_bytes < 1 || num_bytes > 8 || ia_width_bytes < 0 || ia_width_bytes > 2)
- return -1;
-
-retry:
- twsi_ext.u = 0;
- sw_twsi_val.u = 0;
- sw_twsi_val.s.v = 1;
- sw_twsi_val.s.sovr = 1;
- sw_twsi_val.s.size = num_bytes - 1;
- sw_twsi_val.s.addr = dev_addr;
- sw_twsi_val.s.data = 0xFFFFFFFF & data;
-
- if (ia_width_bytes > 0)
- {
- sw_twsi_val.s.op = 1;
- sw_twsi_val.s.ia = (internal_addr >> 3) & 0x1f;
- sw_twsi_val.s.eop_ia = internal_addr & 0x7;
- }
- if (ia_width_bytes == 2)
- {
- sw_twsi_val.s.eia = 1;
- twsi_ext.s.ia = internal_addr >> 8;
- }
- if (num_bytes > 4)
- twsi_ext.s.data = data >> 32;
-
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI_EXT(twsi_id), twsi_ext.u);
- BDK_CSR_WRITE(node, BDK_MIO_TWSX_SW_TWSI(twsi_id), sw_twsi_val.u);
- if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_MIO_TWSX_SW_TWSI(twsi_id), v, ==, 0, 10000))
- {
- bdk_warn("N%d.TWSI%d: Timeout waiting for write to complete...start recovering process\n",
- node, twsi_id);
- /* perform bus recovery */
- bdk_twsix_recover_bus(node, twsi_id);
- if (retry_limit-- > 0)
- goto retry;
-
- // After retry but still not success, report error and return
- bdk_error("N%d.TWSI%d: Timeout waiting for operation to complete\n", node, twsi_id);
- return -1;
- }
-
- /* Poll until reads succeed, or polling times out */
- to = 100;
- while (to-- > 0)
- {
- if (bdk_twsix_read_ia(node, twsi_id, dev_addr, 0, 1, 0) >= 0)
- break;
- }
- if (to <= 0)
- return -1;
-
- return 0;
-}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/bdk-usb.c b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-usb.c
new file mode 100644
index 0000000000..eb2a85fa0d
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/bdk-usb.c
@@ -0,0 +1,683 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <libbdk-arch/bdk-csrs-gpio.h>
+#include <libbdk-arch/bdk-csrs-usbdrd.h>
+#include <libbdk-arch/bdk-csrs-usbh.h>
+#include <libbdk-hal/bdk-usb.h>
+#include <libbdk-hal/bdk-config.h>
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(USB);
+
+/**
+ * Write to DWC3 indirect debug control register
+ *
+ * @param node Node to write to
+ * @param usb_port USB port to write to
+ * @param val 32bit value to write
+ */
+static void write_cr_dbg_cfg(bdk_node_t node, int usb_port, uint64_t val)
+{
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ BDK_CSR_WRITE(node, BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(usb_port, 0), val);
+ else
+ BDK_CSR_WRITE(node, BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(usb_port, 0), val);
+}
+
+/**
+ * Poll the DWC3 internal status until the ACK bit matches a desired value. Return
+ * the final status.
+ *
+ * @param node Node to query
+ * @param usb_port USB port to query
+ * @param desired_ack
+ * Desired ACK bit state
+ *
+ * @return Final status with ACK at correct state
+ */
+static bdk_usbdrdx_uctl_portx_cr_dbg_status_t get_cr_dbg_status(bdk_node_t node, int usb_port, int desired_ack)
+{
+ const int TIMEOUT = 1000000; /* 1 sec */
+ bdk_usbdrdx_uctl_portx_cr_dbg_status_t status;
+ if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ {
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(usb_port, 0), ack, ==, desired_ack, TIMEOUT))
+ {
+ BDK_TRACE(USB_XHCI, "N%d.USB%d: Timeout waiting for indirect ACK\n", node, usb_port);
+ status.u = -1;
+ }
+ else
+ status.u = BDK_CSR_READ(node, BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(usb_port, 0));
+ }
+ else
+ {
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(usb_port, 0), ack, ==, desired_ack, TIMEOUT))
+ {
+ BDK_TRACE(USB_XHCI, "N%d.USB%d: Timeout waiting for indirect ACK\n", node, usb_port);
+ status.u = -1;
+ }
+ else
+ status.u = BDK_CSR_READ(node, BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(usb_port, 0));
+ }
+ return status;
+}
+
+/**
+ * Perform an indirect read of an internal register inside the DWC3 usb block
+ *
+ * @param node Node to read
+ * @param usb_port USB port to read
+ * @param addr Indirect register address
+ *
+ * @return Value of the indirect register
+ */
+static uint32_t dwc3_uphy_indirect_read(bdk_node_t node, int usb_port, uint32_t addr)
+{
+ bdk_usbdrdx_uctl_portx_cr_dbg_cfg_t dbg_cfg;
+ bdk_usbdrdx_uctl_portx_cr_dbg_status_t status;
+
+ /* See the CSR description for USBHX_UCTL_PORTX_CR_DBG_CFG, which describes
+ the steps implemented by this function */
+
+ dbg_cfg.u = 0;
+ dbg_cfg.s.data_in = addr;
+ write_cr_dbg_cfg(node, usb_port, dbg_cfg.u);
+
+ dbg_cfg.s.cap_addr = 1;
+ write_cr_dbg_cfg(node, usb_port, dbg_cfg.u);
+
+ status = get_cr_dbg_status(node, usb_port, 1);
+ if (status.u == (uint64_t)-1)
+ return 0xffffffff;
+
+ write_cr_dbg_cfg(node, usb_port, 0);
+ get_cr_dbg_status(node, usb_port, 0);
+
+ dbg_cfg.u = 0;
+ dbg_cfg.s.read = 1;
+ write_cr_dbg_cfg(node, usb_port, dbg_cfg.u);
+
+ status = get_cr_dbg_status(node, usb_port, 1);
+
+ write_cr_dbg_cfg(node, usb_port, 0);
+ get_cr_dbg_status(node, usb_port, 0);
+
+ return status.s.data_out;
+}
+
+/**
+ * Perform an indirect write of an internal register inside the DWC3 usb block
+ *
+ * @param node Node to write
+ * @param usb_port USB port to write
+ * @param addr Indirect register address
+ * @param value Value for write
+ */
+static void dwc3_uphy_indirect_write(bdk_node_t node, int usb_port, uint32_t addr, uint16_t value)
+{
+ bdk_usbdrdx_uctl_portx_cr_dbg_cfg_t dbg_cfg;
+
+ /* See the CSR description for USBHX_UCTL_PORTX_CR_DBG_CFG, which describes
+ the steps implemented by this function */
+
+ dbg_cfg.u = 0;
+ dbg_cfg.s.data_in = addr;
+ write_cr_dbg_cfg(node, usb_port, dbg_cfg.u);
+
+ dbg_cfg.s.cap_addr = 1;
+ write_cr_dbg_cfg(node, usb_port, dbg_cfg.u);
+
+ get_cr_dbg_status(node, usb_port, 1);
+
+ write_cr_dbg_cfg(node, usb_port, 0);
+ get_cr_dbg_status(node, usb_port, 0);
+
+ dbg_cfg.u = 0;
+ dbg_cfg.s.data_in = value;
+ write_cr_dbg_cfg(node, usb_port, dbg_cfg.u);
+
+ dbg_cfg.s.cap_data = 1;
+ write_cr_dbg_cfg(node, usb_port, dbg_cfg.u);
+
+ get_cr_dbg_status(node, usb_port, 1);
+
+ write_cr_dbg_cfg(node, usb_port, 0);
+ get_cr_dbg_status(node, usb_port, 0);
+
+ dbg_cfg.u = 0;
+ dbg_cfg.s.write = 1;
+ write_cr_dbg_cfg(node, usb_port, dbg_cfg.u);
+
+ get_cr_dbg_status(node, usb_port, 1);
+
+ write_cr_dbg_cfg(node, usb_port, 0);
+ get_cr_dbg_status(node, usb_port, 0);
+}
+
+/**
+ * Errata USB-29206 - The USB HS PLL in all 28nm devices has a
+ * design issue that may cause the VCO to lock up on
+ * initialization. The Synopsys VCO is designed with an even
+ * number of stages and no kick-start circuit, which makes us
+ * believe that there is no question a latched up
+ * (non-oscillating) state is possible. The workaround is to
+ * check the PLL lock bit, which is just based on a counter and
+ * will not set if the VCO is not oscillating, and if it's not
+ * set do a power down/power up cycle on the PLL, which tests
+ * have proven is much more likely to guarantee the VCO will
+ * start oscillating. Part of the problem appears to be that
+ * the normal init sequence holds the VCO in reset during the
+ * power up sequence, whereas the plain power up/down sequence
+ * does not, so the voltage changing may be helping the circuit
+ * to oscillate.
+ *
+ * @param node Node to check
+ * @param usb_port USB port to check
+ *
+ * @return Zero on success, negative on failure
+ */
+static int dwc3_uphy_check_pll(bdk_node_t node, int usb_port)
+{
+ /* Internal indirect register that reports if the phy PLL has lock. This will
+ be 1 if lock, 0 if no lock */
+ const int DWC3_INT_IND_PLL_LOCK_REG = 0x200b;
+ /* Internal indirect UPHY register that controls the power to the UPHY PLL */
+ const int DWC3_INT_IND_UPHY_PLL_PU = 0x2012;
+ /* Write enable bit for DWC3_INT_IND_PLL_POWER_CTL */
+ const int DWC3_INT_IND_UPHY_PLL_PU_WE = 0x20;
+ /* Power enable bit for DWC3_INT_IND_PLL_POWER_CTL */
+ const int DWC3_INT_IND_UPHY_PLL_PU_POWER_EN = 0x02;
+
+ uint32_t pll_locked = dwc3_uphy_indirect_read(node, usb_port, DWC3_INT_IND_PLL_LOCK_REG);
+ int retry_count = 0;
+ while (!pll_locked)
+ {
+ if (retry_count >= 3)
+ {
+ bdk_error("N%d.USB%d: USB2 PLL failed to lock\n", node, usb_port);
+ return -1;
+ }
+
+ retry_count++;
+ BDK_TRACE(USB_XHCI, "N%d.USB%d: USB2 PLL didn't lock, retry %d\n", node, usb_port, retry_count);
+
+ /* Turn on write enable for PLL power control */
+ uint32_t pwr_val = dwc3_uphy_indirect_read(node, usb_port, DWC3_INT_IND_UPHY_PLL_PU);
+ pwr_val |= DWC3_INT_IND_UPHY_PLL_PU_WE;
+ dwc3_uphy_indirect_write(node, usb_port, DWC3_INT_IND_UPHY_PLL_PU, pwr_val);
+
+ /* Power down the PLL */
+ pwr_val &= ~DWC3_INT_IND_UPHY_PLL_PU_POWER_EN;
+ dwc3_uphy_indirect_write(node, usb_port, DWC3_INT_IND_UPHY_PLL_PU, pwr_val);
+ bdk_wait_usec(1000);
+
+ /* Power on the PLL */
+ pwr_val |= DWC3_INT_IND_UPHY_PLL_PU_POWER_EN;
+ dwc3_uphy_indirect_write(node, usb_port, DWC3_INT_IND_UPHY_PLL_PU, pwr_val);
+ bdk_wait_usec(1000);
+
+ /* Check for PLL Lock again */
+ pll_locked = dwc3_uphy_indirect_read(node, usb_port, DWC3_INT_IND_PLL_LOCK_REG);
+ }
+ return 0;
+}
+
+/**
+ * Initialize the clocks for USB such that it is ready for a generic XHCI driver
+ *
+ * @param node Node to init
+ * @param usb_port Port to initialize
+ * @param clock_type Type of clock connected to the usb port
+ *
+ * @return Zero on success, negative on failure
+ */
+
+int bdk_usb_initialize(bdk_node_t node, int usb_port, bdk_usb_clock_t clock_type)
+{
+ int is_usbdrd = !CAVIUM_IS_MODEL(CAVIUM_CN88XX);
+
+ /* Perform the following steps to initiate a cold reset. */
+
+ /* 1. Wait for all voltages to reach a stable state. Ensure the
+ reference clock is up and stable.
+ a. If 3.3V is up first, 0.85V must be soon after (within tens of ms). */
+
+ /* 2. Wait for IOI reset to deassert. */
+
+ /* 3. If Over Current indication and/or Port Power Control features
+ are desired, program the GPIO CSRs appropriately.
+ a. For Over Current Indication, select a GPIO for the input and
+ program GPIO_USBH_CTL[SEL].
+ b. For Port Power Control, set one of
+ GPIO_BIT_CFG(0..19)[OUTPUT_SEL] = USBH_VBUS_CTRL. */
+
+ /* 4. Assert all resets:
+ a. UPHY reset: USBDRD(0..1)_UCTL_CTL[UPHY_RST] = 1
+ b. UAHC reset: USBDRD(0..1)_UCTL_CTL[UAHC_RST] = 1
+ c. UCTL reset: USBDRD(0..1)_UCTL_CTL[UCTL_RST] = 1 */
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.uphy_rst = 1;
+ c.s.uahc_rst = 1;
+ c.s.uctl_rst = 1);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.uphy_rst = 1;
+ c.s.uahc_rst = 1;
+ c.s.uctl_rst = 1);
+ }
+
+ /* 5. Configure the controller clock:
+ a. Reset the clock dividers: USBDRD(0..1)_UCTL_CTL[H_CLKDIV_RST] = 1.
+ b. Select the controller clock frequency
+ USBDRD(0..1)_UCTL_CTL[H_CLKDIV] = desired value.
+ USBDRD(0..1)_UCTL_CTL[H_CLKDIV_EN] = 1 to enable the controller
+ clock.
+ Read USBDRD(0..1)_UCTL_CTL to ensure the values take effect.
+ c. Deassert the controller clock divider reset: USB-
+ DRD(0..1)_UCTL_CTL[H_CLKDIV_RST] = 0. */
+ uint64_t sclk_rate = bdk_clock_get_rate(node, BDK_CLOCK_SCLK);
+ uint64_t divider = (sclk_rate + 300000000-1) / 300000000;
+ /*
+ ** According to HRM Rules are:
+ ** - clock must be below 300MHz
+ ** USB3 full-rate requires 150 MHz or better
+ ** USB3 requires 125 MHz
+ ** USB2 full rate requires 90 MHz
+ ** USB2 requires 62.5 MHz
+ */
+ if (divider <= 1)
+ divider = 0;
+ else if (divider <= 2)
+ divider = 1;
+ else if (divider <= 4)
+ divider = 2;
+ else if (divider <= 6)
+ divider = 3;
+ else if (divider <= 8)
+ divider = 4;
+ else if (divider <= 16)
+ divider = 5;
+ else if (divider <= 24)
+ divider = 6;
+ else
+ divider = 7;
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.h_clkdiv_rst = 1);
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.h_clkdiv_sel = divider;
+ c.s.h_clk_en = 1);
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.h_clkdiv_rst = 0);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.h_clkdiv_rst = 1);
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.h_clkdiv_sel = divider;
+ c.s.h_clk_en = 1);
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.h_clkdiv_rst = 0);
+ }
+ {
+ static bool printit[2] = {true,true};
+ if (printit[usb_port]) {
+ uint64_t fr_div;
+ if (divider < 5) fr_div = divider * 2;
+ else fr_div = 8 * (divider - 3);
+ uint64_t freq = (typeof(freq)) (sclk_rate / fr_div);
+ const char *token;
+ if (freq < 62500000ULL) token = "???Low";
+ else if (freq < 90000000ULL) token = "USB2";
+ else if (freq < 125000000ULL) token = "USB2 Full";
+ else if (freq < 150000000ULL) token = "USB3";
+ else token = "USB3 Full";
+ BDK_TRACE(USB_XHCI, "Freq %lld - %s\n",
+ (unsigned long long)freq, token);
+ printit[usb_port] = false;
+ }
+ }
+
+ /* 6. Configure the strap signals in USBDRD(0..1)_UCTL_CTL.
+ a. Reference clock configuration (see Table 31.2): USB-
+ DRD(0..1)_UCTL_CTL[REF_CLK_FSEL, MPLL_MULTIPLIER,
+ REF_CLK_SEL, REF_CLK_DIV2].
+ b. Configure and enable spread-spectrum for SuperSpeed:
+ USBDRD(0..1)_UCTL_CTL[SSC_RANGE, SSC_EN, SSC_REF_CLK_SEL].
+ c. Enable USBDRD(0..1)_UCTL_CTL[REF_SSP_EN].
+ d. Configure PHY ports:
+ USBDRD(0..1)_UCTL_CTL[USB*_PORT_PERM_ATTACH, USB*_PORT_DISABLE]. */
+ if (is_usbdrd)
+ {
+ int ref_clk_src = 0;
+ int ref_clk_fsel = 0x27;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX)) {
+ if (BDK_USB_CLOCK_SS_PAD_HS_PAD != clock_type) {
+ bdk_error("Node %d usb_port %d: usb clock type %d is invalid\n", node, usb_port, clock_type);
+ return -1;
+ }
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX)) {
+ switch (clock_type)
+ {
+ default:
+ bdk_error("Node %d usb_port %d: usb clock type %d is invalid\n", node, usb_port, clock_type);
+ return -1;
+ case BDK_USB_CLOCK_SS_PAD_HS_PAD : ref_clk_src = 2; break;
+ case BDK_USB_CLOCK_SS_REF0_HS_REF0 : ref_clk_src = 0; break; /* Superspeed and high speed use DLM/QLM ref clock 0 */
+ case BDK_USB_CLOCK_SS_REF1_HS_REF1 : ref_clk_src = 1; break; /* Superspeed and high speed use DLM/QLM ref clock 1 */
+ case BDK_USB_CLOCK_SS_PAD_HS_PLL : ref_clk_src = 6; ref_clk_fsel = 0x7; break; /* Superspeed uses PAD clock, high speed uses PLL ref clock */
+ case BDK_USB_CLOCK_SS_REF0_HS_PLL : ref_clk_src = 4; ref_clk_fsel = 0x7; break; /* Superspeed uses DLM/QLM ref clock 0, high speed uses PLL ref clock */
+ case BDK_USB_CLOCK_SS_REF1_HS_PLL: ref_clk_src = 5; ref_clk_fsel =0x7; break; /* Superspeed uses DLM/QLM ref clock 1, high speed uses PLL ref clock */
+ }
+ }
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.ref_clk_fsel = ref_clk_fsel;
+ c.s.mpll_multiplier = 0x19;
+ c.s.ref_clk_sel = ref_clk_src;
+ c.s.ref_clk_div2 = 0);
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.ssc_en = 1;
+ c.s.ssc_ref_clk_sel = 0);
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.ref_ssp_en = 1);
+ }
+ else
+ {
+ if (BDK_USB_CLOCK_SS_PAD_HS_PAD != clock_type) {
+ bdk_error("Node %d usb_port %d: usb clock type %d is invalid\n", node, usb_port, clock_type);
+ return -1;
+ }
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.ref_clk_fsel = 0x27;
+ c.s.mpll_multiplier = 0;
+ c.s.ref_clk_sel = 0;
+ c.s.ref_clk_div2 = 0);
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.ssc_en = 1;
+ c.s.ssc_ref_clk_sel = 0);
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.ref_ssp_en = 1);
+ }
+ /* Hardware default is for ports to be enabled and not perm attach. Don't
+ change it */
+
+ /* 7. The PHY resets in lowest-power mode. Power up the per-port PHY
+ logic by enabling the following:
+ a. USBDRD(0..1)_UCTL_CTL [HS_POWER_EN] if high-speed/full-speed/low-
+ speed functionality needed.
+ b. USBDRD(0..1)_UCTL_CTL [SS_POWER_EN] if SuperSpeed functionality
+ needed. */
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.hs_power_en = 1;
+ c.s.ss_power_en = 1);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.hs_power_en = 1;
+ c.s.ss_power_en = 1);
+ }
+
+ /* 8. Wait 10 controller-clock cycles from step 5. for controller clock
+ to start and async FIFO to properly reset. */
+ bdk_wait_usec(1);
+
+ /* 9. Deassert UCTL and UAHC resets:
+ a. USBDRD(0..1)_UCTL_CTL[UCTL_RST] = 0
+ b. USBDRD(0..1)_UCTL_CTL[UAHC_RST] = 0
+ c. [optional] For port-power control:
+ - Set one of GPIO_BIT_CFG(0..47)[PIN_SEL] = USB0_VBUS_CTRLor USB1_VBUS_CTRL.
+ - Set USBDRD(0..1)_UCTL_HOST_CFG[PPC_EN] = 1 and USBDRD(0..1)_UCTL_HOST_CFG[PPC_ACTIVE_HIGH_EN] = 1.
+         - Wait for the external power management chip to power the VBUS.
+ ]
+ d. You will have to wait 10 controller-clock cycles before accessing
+ any controller-clock-only registers. */
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.uctl_rst = 0);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.uctl_rst = 0);
+ }
+ bdk_wait_usec(1);
+
+ int usb_gpio = bdk_config_get_int(BDK_CONFIG_USB_PWR_GPIO, node, usb_port);
+ int usb_polarity = bdk_config_get_int(BDK_CONFIG_USB_PWR_GPIO_POLARITY, node, usb_port);
+ if (-1 != usb_gpio) {
+ int gsrc = BDK_GPIO_PIN_SEL_E_USBX_VBUS_CTRL_CN88XX(usb_port);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX)) {
+ gsrc = BDK_GPIO_PIN_SEL_E_USBX_VBUS_CTRL_CN88XX(usb_port);
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX)) {
+ gsrc = BDK_GPIO_PIN_SEL_E_USBX_VBUS_CTRL_CN81XX(usb_port);
+ }
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX)) {
+ gsrc = BDK_GPIO_PIN_SEL_E_USBX_VBUS_CTRL_CN83XX(usb_port);}
+ else {
+ bdk_error("USB_VBUS_CTRL GPIO: unknown chip model\n");
+ }
+
+ BDK_CSR_MODIFY(c,node,BDK_GPIO_BIT_CFGX(usb_gpio),
+ c.s.pin_sel = gsrc;
+ c.s.pin_xor = (usb_polarity) ? 0 : 1;
+ );
+
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_HOST_CFG(usb_port),
+ c.s.ppc_en = 1;
+ c.s.ppc_active_high_en = 1);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_HOST_CFG(usb_port),
+ c.s.ppc_en = 1;
+ c.s.ppc_active_high_en = 1);
+ }
+ bdk_wait_usec(100000);
+ }
+
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.uahc_rst = 0);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.uahc_rst = 0);
+ }
+
+ bdk_wait_usec(100000);
+ bdk_wait_usec(1);
+
+ /* 10. Enable conditional coprocessor clock of UCTL by writing USB-
+ DRD(0..1)_UCTL_CTL[CSCLK_EN] = 1. */
+ if (is_usbdrd)
+ {
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ /* CN9XXX make coprocessor clock automatic */
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.cn83xx.csclk_en = 1);
+ }
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.csclk_en = 1);
+ }
+
+ /* 11. Set USBDRD(0..1)_UCTL_CTL[DRD_MODE] to 1 for device mode, 0 for
+ host mode. */
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.drd_mode = 0);
+ }
+
+ /* 12. Soft reset the UPHY and UAHC logic via the UAHC controls:
+ a. USBDRD(0..1)_UAHC_GUSB2PHYCFG(0)[PHYSOFTRST] = 1
+ b. USBDRD(0..1)_UAHC_GUSB3PIPECTL(0)[PHYSOFTRST] = 1
+ c. USBDRD(0..1)_UAHC_GCTL[CORESOFTRESET] = 1 */
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UAHC_GUSB2PHYCFGX(usb_port, 0),
+ c.s.physoftrst = 1);
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UAHC_GUSB3PIPECTLX(usb_port, 0),
+ c.s.physoftrst = 1);
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UAHC_GCTL(usb_port),
+ c.s.coresoftreset = 1);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UAHC_GUSB2PHYCFGX(usb_port, 0),
+ c.s.physoftrst = 1);
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UAHC_GUSB3PIPECTLX(usb_port, 0),
+ c.s.physoftrst = 1);
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UAHC_GCTL(usb_port),
+ c.s.coresoftreset = 1);
+ }
+
+ /* 13. Program USBDRD(0..1)_UAHC_GCTL[PRTCAPDIR] to 0x2 for device mode
+ or 0x1 for host mode. */
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UAHC_GCTL(usb_port),
+ c.s.prtcapdir = 1);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UAHC_GCTL(usb_port),
+ c.s.prtcapdir = 1);
+ }
+
+ /* 14. Wait 10us after step 13. for the PHY to complete its reset. */
+ bdk_wait_usec(10);
+
+ /* 15. Deassert UPHY reset: USBDRD(0..1)_UCTL_CTL[UPHY_RST] = 0. */
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UCTL_CTL(usb_port),
+ c.s.uphy_rst = 0);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UCTL_CTL(usb_port),
+ c.s.uphy_rst = 0);
+ }
+
+ /* 16. Wait for at least 45us after step 15. for UPHY to output
+ stable PHYCLOCK. */
+ bdk_wait_usec(45);
+
+    /* Workaround for Errata USB-29206 */
+ if (dwc3_uphy_check_pll(node, usb_port))
+ return -1;
+
+ /* 17. Initialize any other strap signals necessary and make sure they
+ propagate by reading back the last register written.
+ a. UCTL
+ USBDRD(0..1)_UCTL_PORT0_CFG_*[*_TUNE]
+ USBDRD(0..1)_UCTL_PORT0_CFG_*[PCS_*]
+ USBDRD(0..1)_UCTL_PORT0_CFG_*[LANE0_TX_TERM_OFFSET]
+ USBDRD(0..1)_UCTL_PORT0_CFG_*[TX_VBOOST_LVL]
+ USBDRD(0..1)_UCTL__PORT0_CFG_*[LOS_BIAS]
+ USBDRD(0..1)_UCTL_HOST_CFG
+ USBDRD(0..1)_UCTL_SHIM_CFG
+ b. UAHC: only the following UAHC registers are accessible during
+ CoreSoftReset.
+ USBDRD(0..1)_UAHC_GCTL
+ USBDRD(0..1)_UAHC_GUCTL
+ USBDRD(0..1)_UAHC_GSTS
+ USBDRD(0..1)_UAHC_GUID
+ USBDRD(0..1)_UAHC_GUSB2PHYCFG(0)
+ USBDRD(0..1)_UAHC_GUSB3PIPECTL(0) */
+
+ /* 18. Release soft reset the UPHY and UAHC logic via the UAHC controls:
+ a. USBDRD(0..1)_UAHC_GUSB2PHYCFG(0)[PHYSOFTRST] = 0
+ b. USBDRD(0..1)_UAHC_GUSB3PIPECTL(0)[PHYSOFTRST] = 0
+ c. USBDRD(0..1)_UAHC_GCTL[CORESOFTRESET] = 0 */
+ if (is_usbdrd)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UAHC_GUSB2PHYCFGX(usb_port, 0),
+ c.s.physoftrst = 0);
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UAHC_GUSB3PIPECTLX(usb_port, 0),
+ c.s.physoftrst = 0);
+ BDK_CSR_MODIFY(c, node, BDK_USBDRDX_UAHC_GCTL(usb_port),
+ c.s.coresoftreset = 0);
+ }
+ else
+ {
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UAHC_GUSB2PHYCFGX(usb_port, 0),
+ c.s.physoftrst = 0);
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UAHC_GUSB3PIPECTLX(usb_port, 0),
+ c.s.physoftrst = 0);
+ BDK_CSR_MODIFY(c, node, BDK_USBHX_UAHC_GCTL(usb_port),
+ c.s.coresoftreset = 0);
+ }
+
+ /* 19. Configure the remaining UAHC_G* registers as needed, including
+ any that were not configured in step 17.-b. */
+
+ /* 20. Initialize the USB controller:
+ a. To initialize the UAHC as a USB host controller, the application
+ should perform the steps described in the xHCI specification
+ (UAHC_X* registers). The xHCI sequence starts with poll for a 0 in
+ USBDRD(0..1)_UAHC_USBSTS[CNR].
+ b. To initialize the UAHC as a device, the application should TBD. The
+ device initiation sequence starts with a device soft reset by
+ setting USBDRD(0..1)_UAHC_DCTL[CSFTRST] = 1 and wait for a read
+ operation to return 0. */
+ return 0;
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/device/bdk-device.c b/src/vendorcode/cavium/bdk/libbdk-hal/device/bdk-device.c
new file mode 100644
index 0000000000..a8e65f061a
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/device/bdk-device.c
@@ -0,0 +1,721 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <string.h>
+#include "libbdk-arch/bdk-csrs-ap.h"
+#include "libbdk-arch/bdk-csrs-pccpf.h"
+#include "libbdk-hal/bdk-ecam.h"
+#include "libbdk-hal/device/bdk-device.h"
+#include "libbdk-hal/bdk-config.h"
+#include "libbdk-driver/bdk-driver.h"
+#include "libbdk-hal/bdk-utils.h"
+
+static struct bdk_driver_s *driver_list = NULL;
+
+#define DEVICE_GROW 64
+static bdk_device_t *device_list = NULL;
+static int device_list_count = 0;
+static int device_list_max = 0;
+
+/**
+ * Called to register a new driver with the bdk-device system. Drivers are probed
+ * and initialized as device are found for them. If devices have already been
+ * added before the driver was registered, the driver will be probed and
+ * initialized before this function returns.
+ *
+ * @param driver Driver functions
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_device_add_driver(struct bdk_driver_s *driver)
+{
+ driver->next = driver_list;
+ driver_list = driver;
+ BDK_TRACE(DEVICE, "Added driver for %08x\n", driver->id);
+ return 0;
+}
+
+/**
+ * Lookup the correct driver for a device
+ *
+ * @param device Device to lookup
+ *
+ * @return Driver, or NULL on failure
+ */
+static const bdk_driver_t *lookup_driver(const bdk_device_t *device)
+{
+ const bdk_driver_t *drv = driver_list;
+ while (drv)
+ {
+ if (drv->id == device->id)
+ return drv;
+ drv = drv->next;
+ }
+ return NULL;
+}
+
+/**
+ * Populate the fields of a new device from the ECAM
+ *
+ * @param device Device to populate
+ */
+static void populate_device(bdk_device_t *device)
+{
+ /* The default name may be replaced by the driver with something easier to read */
+ snprintf(device->name, sizeof(device->name), "N%d.E%d:%d:%d.%d",
+ device->node, device->ecam, device->bus, device->dev, device->func);
+
+ BDK_TRACE(DEVICE_SCAN, "%s: Populating device\n", device->name);
+
+ /* Get the current chip ID and pass. We'll need this to fill in version
+ information for the device */
+ bdk_ap_midr_el1_t midr_el1;
+ BDK_MRS(MIDR_EL1, midr_el1.u);
+
+ /* PCCPF_XXX_VSEC_SCTL[RID] with the revision of the chip,
+ read from fuses */
+ BDK_CSR_DEFINE(sctl, BDK_PCCPF_XXX_VSEC_SCTL);
+ sctl.u = bdk_ecam_read32(device, BDK_PCCPF_XXX_VSEC_SCTL);
+ sctl.s.rid = midr_el1.s.revision | (midr_el1.s.variant<<3);
+ sctl.s.node = device->node; /* Program node bits */
+ sctl.s.ea = bdk_config_get_int(BDK_CONFIG_PCIE_EA);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+ sctl.s.ea = 0; /* EA is not supported on CN88XX pass 1.x */
+ else
+ sctl.s.ea = bdk_config_get_int(BDK_CONFIG_PCIE_EA);
+ bdk_ecam_write32(device, BDK_PCCPF_XXX_VSEC_SCTL, sctl.u);
+
+ /* Read the Device ID */
+ device->id = bdk_ecam_read32(device, BDK_PCCPF_XXX_ID);
+
+ /* Read the Device Type so we know how to handle BARs */
+ bdk_pccpf_xxx_clsize_t clsize;
+ clsize.u = bdk_ecam_read32(device, BDK_PCCPF_XXX_CLSIZE);
+ int isbridge = (clsize.s.hdrtype & 0x7f) == 1;
+
+ BDK_TRACE(DEVICE_SCAN, "%s: Device ID: 0x%08x%s\n", device->name, device->id,
+ (isbridge) ? " (Bridge)" : "");
+
+ /* Loop through all the BARs */
+ int max_bar = (isbridge) ? BDK_PCCPF_XXX_BAR0U : BDK_PCCPF_XXX_BAR4U;
+ int bar = BDK_PCCPF_XXX_BAR0L;
+ unsigned guess_instance = 0;
+ while (bar <= max_bar)
+ {
+ int bar_index = (bar - BDK_PCCPF_XXX_BAR0L) / 8;
+ /* Read the BAR address and config bits [3:0] */
+ uint64_t address = bdk_ecam_read32(device, bar);
+ int ismem = !(address & 1); /* Bit 0: 0 = mem, 1 = io */
+ int is64 = ismem && (address & 4); /* Bit 2: 0 = 32 bit, 1 = 64 bit if mem */
+ /* Bit 3: 1 = Is prefetchable. We on't care for now */
+
+ /* All internal BARs should be 64 bit. Skip if BAR isn't as that means
+ it is using Enhanced Allocation (EA) */
+ if (!is64)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: BAR%d Disabled or EA bar skipped (0x%08llx)\n", device->name, bar_index, address);
+ bar += 8;
+ continue;
+ }
+
+ /* Get the upper part of 64bit BARs */
+ address |= (uint64_t)bdk_ecam_read32(device, bar + 4) << 32;
+
+ /* Write the bits to determine the size */
+ bdk_ecam_write32(device, bar, -1);
+ bdk_ecam_write32(device, bar + 4, -1);
+ uint64_t size_mask = (uint64_t)bdk_ecam_read32(device, bar + 4) << 32;
+ size_mask |= bdk_ecam_read32(device, bar);
+ /* Make sure the node bits are correct in the address */
+ address = (address & ~(3UL << 44)) | ((uint64_t)device->node << 44);
+ /* Restore address value */
+ bdk_ecam_write32(device, bar, address);
+ bdk_ecam_write32(device, bar + 4, address >> 32);
+
+ /* Convert the size into a power of 2 bits */
+ int size_bits = bdk_dpop(~size_mask | 0xf);
+ if (size_bits <= 4)
+ size_bits = 0;
+
+ /* Store the BAR info */
+ device->bar[bar_index].address = address & ~0xfull;
+ device->bar[bar_index].size2 = size_bits;
+ device->bar[bar_index].flags = address & 0xf;
+ BDK_TRACE(DEVICE_SCAN, "%s: BAR%d 0x%llx/%d flags=0x%x\n",
+ device->name, bar_index, device->bar[bar_index].address,
+ device->bar[bar_index].size2, device->bar[bar_index].flags);
+ /* Move to the next BAR */
+ bar += 8;
+ }
+
+ /* Walk the PCI capabilities looking for PCIe support and EA headers */
+ BDK_TRACE(DEVICE_SCAN, "%s: Walking PCI capabilites\n", device->name);
+ int has_pcie = 0;
+ bdk_pccpf_xxx_cap_ptr_t cap_ptr;
+ cap_ptr.u = bdk_ecam_read32(device, BDK_PCCPF_XXX_CAP_PTR);
+ int cap_loc = cap_ptr.s.cp;
+ while (cap_loc)
+ {
+ uint32_t cap = bdk_ecam_read32(device, cap_loc);
+ int cap_id = cap & 0xff;
+ int cap_next = (cap >> 8) & 0xff;
+
+ BDK_TRACE(DEVICE_SCAN, "%s: PCI Capability 0x%02x ID:0x%02x Next:0x%02x\n",
+ device->name, cap_loc, cap_id, cap_next);
+
+ if (cap_id == 0x10)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: PCIe\n", device->name);
+ has_pcie = 1;
+ }
+ else if (cap_id == 0x01)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: PCI Power Management Interface\n", device->name);
+ /* Do nothing for now */
+ }
+ else if (cap_id == 0x11)
+ {
+ bdk_pccpf_xxx_msix_cap_hdr_t msix_cap_hdr;
+ bdk_pccpf_xxx_msix_table_t msix_table;
+ bdk_pccpf_xxx_msix_pba_t msix_pba;
+ msix_cap_hdr.u = cap;
+ msix_table.u = bdk_ecam_read32(device, cap_loc + 4);
+ msix_pba.u = bdk_ecam_read32(device, cap_loc + 8);
+ BDK_TRACE(DEVICE_SCAN, "%s: MSI-X Entries:%d, Func Mask:%d, Enable:%d\n",
+ device->name, msix_cap_hdr.s.msixts + 1, msix_cap_hdr.s.funm, msix_cap_hdr.s.msixen);
+ BDK_TRACE(DEVICE_SCAN, "%s: Table BAR%d, Offset:0x%x\n",
+ device->name, msix_table.s.msixtbir, msix_table.s.msixtoffs * 8);
+ BDK_TRACE(DEVICE_SCAN, "%s: PBA BAR%d, Offset:0x%x\n",
+ device->name, msix_pba.s.msixpbir, msix_pba.s.msixpoffs * 8);
+ }
+ else if (cap_id == 0x05)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: MSI\n", device->name);
+ /* Do nothing for now */
+ }
+ else if (cap_id == 0x14)
+ {
+ bdk_pccpf_xxx_ea_cap_hdr_t ea_cap_hdr;
+ ea_cap_hdr.u = cap;
+ cap_loc += 4;
+ BDK_TRACE(DEVICE_SCAN, "%s: Enhanced Allocation, %d entries\n",
+ device->name, ea_cap_hdr.s.num_entries);
+ if (isbridge)
+ {
+ cap = bdk_ecam_read32(device, cap_loc);
+ cap_loc += 4;
+ int fixed_secondary_bus = cap & 0xff;
+ int fixed_subordinate_bus = cap & 0xff;
+ BDK_TRACE(DEVICE_SCAN, "%s: Fixed Secondary Bus:0x%02x Fixed Subordinate Bus:0x%02x\n",
+ device->name, fixed_secondary_bus, fixed_subordinate_bus);
+ }
+ for (int entry = 0; entry < ea_cap_hdr.s.num_entries; entry++)
+ {
+ union bdk_pcc_ea_entry_s ea_entry;
+ memset(&ea_entry, 0, sizeof(ea_entry));
+ uint32_t *ptr = (uint32_t *)&ea_entry;
+ *ptr++ = bdk_ecam_read32(device, cap_loc);
+#if __BYTE_ORDER == __BIG_ENDIAN
+ /* For big endian we actually need the previous data
+ shifted 32 bits */
+ *ptr = ptr[-1];
+#endif
+ asm volatile ("" ::: "memory"); /* Needed by gcc 5.0 to detect aliases on ea_entry */
+ int entry_size = ea_entry.s.entry_size;
+ for (int i = 0; i < entry_size; i++)
+ {
+ *ptr++ = bdk_ecam_read32(device, cap_loc + 4*i + 4);
+ }
+#if __BYTE_ORDER == __BIG_ENDIAN
+ /* The upper and lower 32bits need to be swapped */
+ ea_entry.u[0] = (ea_entry.u[0] >> 32) | (ea_entry.u[0] << 32);
+ ea_entry.u[1] = (ea_entry.u[1] >> 32) | (ea_entry.u[1] << 32);
+ ea_entry.u[2] = (ea_entry.u[2] >> 32) | (ea_entry.u[2] << 32);
+#endif
+ asm volatile ("" ::: "memory"); /* Needed by gcc 5.0 to detect aliases on ea_entry */
+ BDK_TRACE(DEVICE_SCAN, "%s: Enable:%d Writeable:%d Secondary Prop:0x%02x Primary Prop:0x%02x BEI:%d Size:%d\n",
+ device->name, ea_entry.s.enable, ea_entry.s.w, ea_entry.s.sec_prop, ea_entry.s.pri_prop, ea_entry.s.bei, ea_entry.s.entry_size);
+ if (ea_entry.s.entry_size > 0)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: Base:0x%08x 64bit:%d\n",
+ device->name, ea_entry.s.basel << 2, ea_entry.s.base64);
+ }
+ if (ea_entry.s.entry_size > 1)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: MaxOffset:0x%08x 64bit:%d\n",
+ device->name, (ea_entry.s.offsetl << 2) | 3, ea_entry.s.offset64);
+ }
+ if (ea_entry.s.entry_size > 2)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: BaseUpper:0x%08x\n",
+ device->name, ea_entry.s.baseh);
+ }
+ if (ea_entry.s.entry_size > 3)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: MaxOffsetUpper:0x%08x\n",
+ device->name, ea_entry.s.offseth);
+ }
+ if (ea_entry.s.enable)
+ {
+ uint64_t base = (uint64_t)ea_entry.s.baseh << 32;
+ base |= (uint64_t)ea_entry.s.basel << 2;
+ /* Make sure the node bits are correct in the address */
+ base = (base & ~(3UL << 44)) | ((uint64_t)device->node << 44);
+ uint64_t offset = (uint64_t)ea_entry.s.offseth << 32;
+ offset |= ((uint64_t)ea_entry.s.offsetl << 2) | 3;
+ switch (ea_entry.s.bei)
+ {
+ case 0: /* BAR 0 */
+ case 2: /* BAR 1 */
+ case 4: /* BAR 2 */
+ {
+ int bar_index = ea_entry.s.bei/2;
+ device->bar[bar_index].address = base;
+ device->bar[bar_index].size2 = bdk_dpop(offset);
+ device->bar[bar_index].flags = ea_entry.s.base64 << 2;
+ BDK_TRACE(DEVICE_SCAN, "%s: Updated BAR%d 0x%llx/%d flags=0x%x\n",
+ device->name, bar_index, device->bar[bar_index].address,
+ device->bar[bar_index].size2, device->bar[bar_index].flags);
+ if (0 == ea_entry.s.bei) {
+ /* PEMs eg PCIEEP and PCIERC do not have instance id
+ ** We can calculate it for PCIERC based on BAR0 allocation.
+ ** PCIEEP will be dropped by probe
+ */
+ guess_instance = (device->bar[bar_index].address >> 24) & 7;
+ }
+ break;
+ }
+ case 9: /* SR-IOV BAR 0 */
+ case 11: /* SR-IOV BAR 1 */
+ case 13: /* SR-IOV BAR 2 */
+ // FIXME
+ break;
+ }
+ }
+ cap_loc += ea_entry.s.entry_size * 4 + 4;
+ }
+ }
+ else
+ {
+ /* Unknown PCI capability */
+ bdk_warn("%s: ECAM device unknown PCI capability 0x%x\n", device->name, cap_id);
+ }
+ cap_loc = cap_next;
+ }
+
+ /* Walk the PCIe capabilities looking for instance header */
+ if (has_pcie)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: Walking PCIe capabilites\n", device->name);
+ cap_loc = 0x100;
+ while (cap_loc)
+ {
+ uint32_t cap = bdk_ecam_read32(device, cap_loc);
+ int cap_id = cap & 0xffff;
+ int cap_ver = (cap >> 16) & 0xf;
+ int cap_next = cap >> 20;
+ BDK_TRACE(DEVICE_SCAN, "%s: PCIe Capability 0x%03x ID:0x%04x Version:0x%x Next:0x%03x\n",
+ device->name, cap_loc, cap_id, cap_ver, cap_next);
+ if (cap_id == 0xe)
+ {
+ /* ARI. Do nothing for now */
+ BDK_TRACE(DEVICE_SCAN, "%s: ARI\n", device->name);
+ }
+ else if (cap_id == 0xb)
+ {
+ /* Vendor specific*/
+ int vsec_id = bdk_ecam_read32(device, cap_loc + 4);
+ int vsec_id_id = vsec_id & 0xffff;
+ int vsec_id_rev = (vsec_id >> 16) & 0xf;
+ int vsec_id_len = vsec_id >> 20;
+ BDK_TRACE(DEVICE_SCAN, "%s: Vendor ID: 0x%04x Rev: 0x%x Size 0x%03x\n",
+ device->name, vsec_id_id, vsec_id_rev, vsec_id_len);
+ switch (vsec_id_id)
+ {
+ case 0x0001: /* RAS Data Path */
+ BDK_TRACE(DEVICE_SCAN, "%s: Vendor RAS Data Path\n", device->name);
+ break;
+
+ case 0x0002: /* RAS DES */
+ BDK_TRACE(DEVICE_SCAN, "%s: Vendor RAS DES\n", device->name);
+ break;
+
+ case 0x00a0: /* Cavium common */
+ case 0x00a1: /* Cavium CN88XX */
+ case 0x00a2: /* Cavium CN81XX */
+ case 0x00a3: /* Cavium CN83XX */
+ if ((vsec_id_rev == 1) || (vsec_id_rev == 2))
+ {
+ int vsec_ctl = bdk_ecam_read32(device, cap_loc + 8);
+ int vsec_ctl_inst_num = vsec_ctl & 0xff;
+ int vsec_ctl_subnum = (vsec_ctl >> 8) & 0xff;
+ BDK_TRACE(DEVICE_SCAN, "%s: Cavium Instance: 0x%02x Static Bus: 0x%02x\n",
+ device->name, vsec_ctl_inst_num, vsec_ctl_subnum);
+ int vsec_sctl = bdk_ecam_read32(device, cap_loc + 12);
+ int vsec_sctl_rid = (vsec_sctl >> 16) & 0xff;
+ if (vsec_id_rev == 2)
+ {
+ int vsec_sctl_pi = (vsec_sctl >> 24) & 0xff; /* Only in Rev 2 */
+ BDK_TRACE(DEVICE_SCAN, "%s: Revision ID: 0x%02x Programming Interface: 0x%02x\n",
+ device->name, vsec_sctl_rid, vsec_sctl_pi);
+ }
+ else
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: Revision ID: 0x%02x\n",
+ device->name, vsec_sctl_rid);
+ }
+ /* Record the device instance */
+ device->instance = vsec_ctl_inst_num;
+ }
+ else
+ {
+ bdk_warn("%s: ECAM device Unknown Cavium extension revision\n", device->name);
+ }
+ break;
+
+ default: /* Unknown Vendor extension */
+ bdk_warn("%s: ECAM device unknown vendor extension ID 0x%x\n", device->name, vsec_id_id);
+ break;
+ }
+ }
+ else if (cap_id == 0x10)
+ {
+ /* Single Root I/O Virtualization (SR-IOV) */
+ BDK_TRACE(DEVICE_SCAN, "%s: SR-IOV\n", device->name);
+ /* Loop through all the SR-IOV BARs */
+ bar = cap_loc + 0x24;
+ while (bar <= (cap_loc + 0x3c))
+ {
+ int bar_index = (bar - 0x24 - cap_loc) / 8;
+ /* Read the BAR address and config bits [3:0] */
+ uint64_t address = bdk_ecam_read32(device, bar);
+ int ismem = !(address & 1); /* Bit 0: 0 = mem, 1 = io */
+ int is64 = ismem && (address & 4); /* Bit 2: 0 = 32 bit, 1 = 64 bit if mem */
+ /* Bit 3: 1 = Is prefetchable. We don't care for now */
+
+ /* All internal BARs should be 64 bit. Skip if BAR isn't as that means
+ it is using Enhanced Allocation (EA) */
+ if (!is64)
+ {
+ BDK_TRACE(DEVICE_SCAN, "%s: SR-IOV BAR%d Disabled or EA bar skipped (0x%08llx)\n", device->name, bar_index, address);
+ bar += 8;
+ continue;
+ }
+
+ /* Get the upper part of 64bit BARs */
+ address |= (uint64_t)bdk_ecam_read32(device, bar + 4) << 32;
+
+ /* Write the bits to determine the size */
+ bdk_ecam_write32(device, bar, -1);
+ bdk_ecam_write32(device, bar + 4, -1);
+ uint64_t size_mask = (uint64_t)bdk_ecam_read32(device, bar + 4) << 32;
+ size_mask |= bdk_ecam_read32(device, bar);
+ /* Make sure the node bits are correct in the address */
+ address = (address & ~(3UL << 44)) | ((uint64_t)device->node << 44);
+ /* Restore address value */
+ bdk_ecam_write32(device, bar, address);
+ bdk_ecam_write32(device, bar + 4, address >> 32);
+
+ /* Convert the size into a power of 2 bits */
+ int size_bits = bdk_dpop(size_mask | 0xf);
+ if (size_bits <= 4)
+ size_bits = 0;
+
+ BDK_TRACE(DEVICE_SCAN, "%s: SR-IOV BAR%d 0x%llx/%d flags=0x%llx\n",
+ device->name, bar_index, address & ~0xfull,
+ size_bits, address & 0xf);
+ /* Move to the next BAR */
+ bar += 8;
+ }
+ }
+ else if (cap_id == 0x01)
+ {
+ /* Advanced Error Reporting Capability */
+ BDK_TRACE(DEVICE_SCAN, "%s: Advanced Error Reporting\n", device->name);
+ }
+ else if (cap_id == 0x19)
+ {
+ /* Secondary PCI Express Extended Capability */
+ BDK_TRACE(DEVICE_SCAN, "%s: Secondary PCI Express Extended\n", device->name);
+ }
+ else if (cap_id == 0x15)
+ {
+ /* PCI Express Resizable BAR (RBAR) Capability */
+ BDK_TRACE(DEVICE_SCAN, "%s: PCI Express Resizable BAR (RBAR)\n", device->name);
+ }
+ else if (cap_id == 0x0d)
+ {
+ /* Extended access control := ACS Extended Capability */
+ BDK_TRACE(DEVICE_SCAN, "%s: ACS\n", device->name);
+ }
+ else
+ {
+ /* Unknown PCIe capability */
+ bdk_warn("%s: ECAM device unknown PCIe capability 0x%x\n", device->name, cap_id);
+ }
+ cap_loc = cap_next;
+ }
+ }
+ else
+ {
+ bdk_error("%s: ECAM device didn't have a PCIe capability\n", device->name);
+ }
+ if (BDK_NO_DEVICE_INSTANCE == device->instance) {
+ device->instance = guess_instance;
+ }
+ BDK_TRACE(DEVICE_SCAN, "%s: Device populated\n", device->name);
+}
+
+/**
+ * Called by the ECAM code whan a new device is detected in the system
+ *
+ * @param node Node the ECAM is on
+ * @param ecam ECAM the device is on
+ * @param bus Bus number for the device
+ * @param dev Device number
+ * @param func Function number
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_device_add(bdk_node_t node, int ecam, int bus, int dev, int func)
+{
+ if (device_list_count == device_list_max)
+ {
+ int grow = device_list_max + DEVICE_GROW;
+ bdk_device_t *tmp = malloc(grow * sizeof(bdk_device_t));
+ if (!tmp)
+ memcpy(tmp, device_list, device_list_max * sizeof(bdk_device_t));
+ free(device_list);
+ if (tmp == NULL)
+ {
+ bdk_error("bdk-device: Failed to allocate space for device\n");
+ return -1;
+ }
+ device_list = tmp;
+ device_list_max = grow;
+ }
+
+ bdk_device_t *device = &device_list[device_list_count++];
+ memset(device, 0, sizeof(*device));
+
+ device->state = BDK_DEVICE_STATE_NOT_PROBED;
+ device->node = node;
+ device->ecam = ecam;
+ device->bus = bus;
+ device->dev = dev;
+ device->func = func;
+ device->instance = BDK_NO_DEVICE_INSTANCE;
+ populate_device(device);
+
+ const bdk_driver_t *drv = lookup_driver(device);
+ if (drv)
+ BDK_TRACE(DEVICE, "%s: Added device\n", device->name);
+ else
+ BDK_TRACE(DEVICE, "%s: Added device without driver (0x%08x)\n", device->name, device->id);
+ return 0;
+}
+
+/**
+ * Rename a device. Called by driver to give devices friendly names
+ *
+ * @param device Device to rename
+ * @param format Printf style format string
+ */
+void bdk_device_rename(bdk_device_t *device, const char *format, ...)
+{
+ char tmp[sizeof(device->name)];
+ va_list args;
+ va_start(args, format);
+ vsnprintf(tmp, sizeof(tmp), format, args);
+ va_end(args);
+ tmp[sizeof(tmp) - 1] = 0;
+ BDK_TRACE(DEVICE, "%s: Renamed to %s\n", device->name, tmp);
+ strcpy(device->name, tmp);
+}
+
+/**
+ * Called by the ECAM code once all devices have been added
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_device_init(void)
+{
+ /* Probe all devices first */
+ for (int i = 0; i < device_list_count; i++)
+ {
+ bdk_device_t *dev = &device_list[i];
+ const bdk_driver_t *drv = lookup_driver(dev);
+ if (drv == NULL)
+ continue;
+ if (dev->state == BDK_DEVICE_STATE_NOT_PROBED)
+ {
+ BDK_TRACE(DEVICE, "%s: Probing\n", dev->name);
+ if (drv->probe(dev))
+ {
+ BDK_TRACE(DEVICE, "%s: Probe failed\n", dev->name);
+ dev->state = BDK_DEVICE_STATE_PROBE_FAIL;
+ }
+ else
+ {
+ BDK_TRACE(DEVICE, "%s: Probe complete\n", dev->name);
+ dev->state = BDK_DEVICE_STATE_PROBED;
+ }
+ }
+ }
+
+ /* Do init() after all the probes. See comments in top of bdk-device.h */
+ for (int i = 0; i < device_list_count; i++)
+ {
+ bdk_device_t *dev = &device_list[i];
+ const bdk_driver_t *drv = lookup_driver(dev);
+ if (drv == NULL)
+ continue;
+ if (dev->state == BDK_DEVICE_STATE_PROBED)
+ {
+ BDK_TRACE(DEVICE, "%s: Initializing\n", dev->name);
+ if (drv->init(dev))
+ {
+ BDK_TRACE(DEVICE, "%s: Init failed\n", dev->name);
+ dev->state = BDK_DEVICE_STATE_INIT_FAIL;
+ }
+ else
+ {
+ BDK_TRACE(DEVICE, "%s: Init complete\n", dev->name);
+ dev->state = BDK_DEVICE_STATE_READY;
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * Lookup a device by ECAM ID and internal instance number. This can be used by
+ * one device to find a handle to an associated device. For example, PKI would
+ * use this function to get a handle to the FPA.
+ *
+ * @param node Node to lookup for
+ * @param id ECAM ID
+ * @param instance Cavium internal instance number
+ *
+ * @return Device pointer, or NULL if the device isn't found
+ */
+const bdk_device_t *bdk_device_lookup(bdk_node_t node, uint32_t id, int instance)
+{
+ for (int i = 0; i < device_list_count; i++)
+ {
+ bdk_device_t *dev = &device_list[i];
+ if ((dev->node == node) && (dev->id == id) && (dev->instance == instance))
+ return dev;
+ }
+ BDK_TRACE(DEVICE, "No device found for node %d, ID %08x, instance %d\n", node, id, instance);
+ return NULL;
+}
+
+/**
+ * Read from a device BAR
+ *
+ * @param device Device to read from
+ * @param bar Which BAR to read from (0-3)
+ * @param size Size of the read
+ * @param offset Offset into the BAR
+ *
+ * @return Value read
+ */
+uint64_t bdk_bar_read(const bdk_device_t *device, int bar, int size, uint64_t offset)
+{
+ uint64_t address = offset & bdk_build_mask(device->bar[bar/2].size2);
+ address += device->bar[bar/2].address;
+ if (offset+size > (1ULL << device->bar[bar/2].size2)) {
+ /* The CSR address passed in offset doesn't contain the node number. Copy it
+ from the BAR address */
+ offset |= address & (0x3ull << 44);
+ if (address != offset)
+ bdk_fatal("BAR read address 0x%llx doesn't match CSR address 0x%llx\n", address, offset);
+ }
+ switch (size)
+ {
+ case 1:
+ return bdk_read64_uint8(address);
+ case 2:
+ return bdk_le16_to_cpu(bdk_read64_uint16(address));
+ case 4:
+ return bdk_le32_to_cpu(bdk_read64_uint32(address));
+ case 8:
+ return bdk_le64_to_cpu(bdk_read64_uint64(address));
+ }
+ bdk_fatal("%s: Unexpected read size %d\n", device->name, size);
+}
+
+/**
+ * Write to a device BAR
+ *
+ * @param device Device to write to
+ * @param bar Which BAR to read from (0-3)
+ * @param size Size of the write
+ * @param offset Offset into the BAR
+ * @param value Value to write
+ */
+void bdk_bar_write(const bdk_device_t *device, int bar, int size, uint64_t offset, uint64_t value)
+{
+ uint64_t address = offset & bdk_build_mask(device->bar[bar/2].size2);
+ address += device->bar[bar/2].address;
+ if (offset+size > (1ULL << device->bar[bar/2].size2)) {
+ /* The CSR address passed in offset doesn't contain the node number. Copy it
+ from the BAR address */
+ offset |= address & (0x3ull << 44);
+ if (address != offset)
+ bdk_fatal("BAR write address 0x%llx doesn't match CSR address 0x%llx\n", address, offset);
+ }
+ switch (size)
+ {
+ case 1:
+ bdk_write64_uint8(address, value);
+ return;
+ case 2:
+ bdk_write64_uint16(address, bdk_cpu_to_le16(value));
+ return;
+ case 4:
+ bdk_write64_uint32(address, bdk_cpu_to_le32(value));
+ return;
+ case 8:
+ bdk_write64_uint64(address, bdk_cpu_to_le64(value));
+ return;
+ }
+ bdk_fatal("%s: Unexpected write size %d\n", device->name, size);
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-marvell.c b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-marvell.c
new file mode 100644
index 0000000000..e8f3a009dd
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-marvell.c
@@ -0,0 +1,115 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2016 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <libbdk-hal/bdk-mdio.h>
+#include <libbdk-hal/bdk-qlm.h>
+#include <libbdk-hal/if/bdk-if.h>
+
+/**
+ * Setup marvell PHYs
+ * This function sets up one port in a marvell 88E1512 in SGMII mode
+ */
+static void setup_marvell_phy(bdk_node_t node, int mdio_bus, int mdio_addr)
+{
+ int phy_status = 0;
+
+ BDK_TRACE(PHY, "%s In SGMII mode for Marvell PHY 88E1512\n", __FUNCTION__);
+ /* Switch to Page 18 */
+ phy_status = bdk_mdio_write(node, mdio_bus, mdio_addr, 22, 18);
+ if (phy_status < 0)
+ return;
+
+ phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 22);
+ if (phy_status < 0)
+ return;
+
+ /* Change the Phy System mode from RGMII(default hw reset mode) to SGMII */
+ phy_status = bdk_mdio_write(node, mdio_bus, mdio_addr, 20, 1);
+ if (phy_status < 0)
+ return;
+
+ /* Requires a Software reset */
+ phy_status = bdk_mdio_write(node, mdio_bus, mdio_addr, 20, 0x8001);
+ if (phy_status < 0)
+ return;
+
+ phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 20);
+ if (phy_status < 0)
+ return;
+
+ /* Change the Page back to 0 */
+ phy_status = bdk_mdio_write(node, mdio_bus, mdio_addr, 22, 0);
+ if (phy_status < 0)
+ return;
+
+ phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 22);
+ if (phy_status < 0)
+ return;
+
+ phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 17);
+ if (phy_status < 0)
+ return;
+}
+
+int bdk_if_phy_marvell_setup(bdk_node_t node, int qlm, int mdio_bus, int phy_addr)
+{
+ BDK_TRACE(PHY,"In %s\n",__FUNCTION__);
+
+ /* Check if the PHY is marvell PHY we expect */
+ int phy_status = bdk_mdio_read(node, mdio_bus, phy_addr, BDK_MDIO_PHY_REG_ID1);
+ if (phy_status != 0x0141)
+ return 0;
+
+ /* Check that the GSER mode is SGMII */
+ /* Switch the marvell PHY to the correct mode */
+ bdk_qlm_modes_t qlm_mode = bdk_qlm_get_mode(node, qlm);
+
+ BDK_TRACE(PHY,"%s: QLM:%d QLM_MODE:%d\n",__FUNCTION__, qlm, qlm_mode);
+
+ if ((qlm_mode != BDK_QLM_MODE_SGMII_1X1) &&
+ (qlm_mode != BDK_QLM_MODE_SGMII_2X1))
+ return 0;
+
+ BDK_TRACE(PHY,"%s: Detected Marvell Phy in SGMII mode\n", __FUNCTION__);
+ for (int port = 0; port < 2; port++)
+ {
+ setup_marvell_phy(node, mdio_bus, phy_addr + port);
+ }
+ return 0;
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse-8514.c b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse-8514.c
new file mode 100644
index 0000000000..4d6ef8d25d
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse-8514.c
@@ -0,0 +1,224 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2016 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <libbdk-hal/if/bdk-if.h>
+#include <libbdk-hal/bdk-mdio.h>
+#include <libbdk-hal/bdk-qlm.h>
+
+#define VSC_PHY_STD_PAGE (0x0)
+#define VSC_PHY_EXT1_PAGE (0x1)
+#define VSC_PHY_EXT2_PAGE (0x2)
+#define VSC_PHY_EXT3_PAGE (0x3)
+#define VSC_PHY_EXT4_PAGE (0x4)
+#define VSC_PHY_GPIO_PAGE (0x10)
+#define VSC_PHY_TEST_PAGE (0x2A30)
+#define VSC_PHY_TR_PAGE (0x52B5)
+
+/* VSC8514 vendor initialization script (rev A silicon), five uint16_t
+   fields per row.  vitesse_init_script() consumes only fields 2-4
+   (reg, value, mask): a mask of 0xffff means a plain MDIO write, any
+   other mask a read-modify-write.  A row whose first word is 0xf
+   terminates the table.
+   NOTE(review): looks like a candidate for 'static const' -- confirm no
+   other translation unit references it before changing linkage. */
+const uint16_t init_script_rev_a[] = {
+// Op, Page, Reg, Value, Mask
+// 0, 1, 2, 3, 4
+// --, ------, ----, ------, -----
+   0, 0x0000, 0x1f, 0x0000, 0xffff,
+   1, 0x0000, 0x16, 0x0001, 0x0001,
+   0, 0x0001, 0x1f, 0x2A30, 0xffff,
+   1, 0x2A30, 0x08, 0x8000, 0x8000,
+   0, 0x2A30, 0x1f, 0x52B5, 0xffff,
+   0, 0x52B5, 0x12, 0x0068, 0xffff,
+   0, 0x52B5, 0x11, 0x8980, 0xffff,
+   0, 0x52B5, 0x10, 0x8f90, 0xffff,
+   0, 0x52B5, 0x12, 0x0000, 0xffff,
+   0, 0x52B5, 0x11, 0x0003, 0xffff,
+   0, 0x52B5, 0x10, 0x8796, 0xffff,
+   0, 0x52B5, 0x12, 0x0050, 0xffff,
+   0, 0x52B5, 0x11, 0x100f, 0xffff,
+   0, 0x52B5, 0x10, 0x87fa, 0xffff,
+   0, 0x52B5, 0x1f, 0x2A30, 0xffff,
+   1, 0x2A30, 0x08, 0x0000, 0x8000,
+   0, 0x2A30, 0x1f, 0x0000, 0xffff,
+   1, 0x0000, 0x16, 0x0000, 0x0001,
+   0xf, 0xffff, 0xff, 0xffff, 0xffff
+};
+
+/* Update only the bits of PHY register 'reg' selected by 'mask' with the
+   corresponding bits of 'value'; all other bits are preserved. */
+static void wr_masked(bdk_node_t node, int mdio_bus, int phy_addr, int reg, int value, int mask)
+{
+    int current = bdk_mdio_read(node, mdio_bus, phy_addr, reg);
+    int merged = (current & ~mask) | (value & mask);
+    bdk_mdio_write(node, mdio_bus, phy_addr, reg, merged);
+}
+/* Replay the VSC8514 vendor init script over MDIO.  Each script row is
+   five words (op, page, reg, value, mask); only reg/value/mask are used
+   here, and a row starting with 0xf ends the script. */
+static void vitesse_init_script(bdk_node_t node, int mdio_bus, int phy_addr)
+{
+    BDK_TRACE(PHY,"In %s\n",__FUNCTION__);
+    BDK_TRACE(PHY,"Loading init script for VSC8514\n");
+
+    for (const uint16_t *row = init_script_rev_a; row[0] != 0xf; row += 5)
+    {
+        uint16_t reg = row[2];
+        uint16_t val = row[3];
+        uint16_t msk = row[4];
+
+        /* A full mask is a plain write; anything else is read-modify-write */
+        if (msk == 0xffff)
+            bdk_mdio_write(node,mdio_bus,phy_addr,reg,val);
+        else
+            wr_masked(node, mdio_bus, phy_addr, reg,val,msk);
+    }
+
+    BDK_TRACE(PHY,"loading init script is done\n");
+
+}
+
+/* 8051 firmware download hook for this PHY family.  Intentionally a
+   no-op for the VSC8514 -- only the init script is replayed -- but kept
+   so the setup flow mirrors the VSC8574 driver.
+   NOTE(review): presumably no firmware patch is required for this part;
+   confirm against the vendor init package. */
+static void vitesse_program(bdk_node_t node, int mdio_bus, int phy_addr)
+{
+    return;
+}
+
+/**
+ * Setup Vitesse PHYs
+ * This function sets up one port in a Vitesse VSC8514.
+ *
+ * @param node     Node the PHY is on
+ * @param mdio_bus MDIO bus the PHY responds on
+ * @param phy_addr MDIO address of the port being configured
+ */
+static void setup_vitesse_phy(bdk_node_t node, int mdio_bus, int phy_addr)
+{
+    /*setting MAC if*/
+    /* GPIO/global page: 19G[15:14] = 01b selects QSGMII on the MAC side
+       (same field the VSC8574 driver programs); 18G micro-command 0x80e0
+       then applies the MAC setting */
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, VSC_PHY_GPIO_PAGE);
+    wr_masked(node,mdio_bus,phy_addr, 19, 0x4000, 0xc000);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 18, 0x80e0);
+
+    /*Setting media if*/
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, VSC_PHY_STD_PAGE);
+    // Reg23, 10:8 Select copper, CAT5 copper only
+    wr_masked(node,mdio_bus,phy_addr, 23, 0x0000, 0x0700);
+
+    // Reg0:15, soft Reset
+    wr_masked(node,mdio_bus,phy_addr, 0, 0x8000, 0x8000);
+    /* Poll for the self-clearing reset bit; up to 100 * 100ms.  Note the
+       condition relies on '&' binding tighter than '&&'. */
+    int time_out = 100;
+    while (time_out && bdk_mdio_read(node,mdio_bus,phy_addr, 0) & 0x8000)
+    {
+        bdk_wait_usec(100000);
+        time_out--;
+    }
+
+    if (time_out == 0)
+    {
+        BDK_TRACE(PHY,"setting PHY TIME OUT\n");
+        return;
+    }
+    else
+    {
+        BDK_TRACE(PHY,"Setting a phy port is done\n");
+    }
+
+    /* Extended page 3, reg 16 bit 7: enable auto-negotiation */
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, VSC_PHY_EXT3_PAGE);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 16, 0x80);
+    // Select main registers
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, VSC_PHY_STD_PAGE);
+
+    /* Dead debug code below (loopback + register dump) retained from the
+       vendor drop; it references identifiers (reg0, reg23, qsgmii) that do
+       not exist in this function. */
+    /*
+
+    if (LOOP_INTERNAL)
+    {
+        reg0 = bdk_mdio_read(node, mdio_bus, phy_addr, 0);
+        reg0 = bdk_insert(reg0, 1, 14, 1);
+        bdk_mdio_write(node, mdio_bus, phy_addr, 0, reg0);
+    }
+
+    // Far end loopback (External side)
+    if (LOOP_EXTERNAL)
+    {
+        reg23 = bdk_mdio_read(node, mdio_bus, phy_addr, 23);
+        reg23 = bdk_insert(reg23, 1, 3, 1);
+        bdk_mdio_write(node, mdio_bus, phy_addr, 23, reg23);
+    }
+
+
+    // Dump registers
+    if (false)
+    {
+        printf("\nVitesse PHY register dump, PHY address %d, mode %s\n",
+               phy_addr, (qsgmii) ? "QSGMII" : "SGMII");
+        int phy_addr = 4;
+        for (int reg_set = 0; reg_set <= 0x10; reg_set += 0x10)
+        {
+            printf("\nDump registers with reg[31]=0x%x\n", reg_set);
+            bdk_mdio_write(node, mdio_bus, phy_addr, 31, reg_set);
+            for (int reg=0; reg < 32; reg++)
+                printf("reg[%02d]=0x%x\n", reg, bdk_mdio_read(node, mdio_bus, phy_addr, reg));
+        }
+    }
+    */
+}
+
+/**
+ * Detect and initialize a Vitesse VSC8514 QSGMII PHY.
+ *
+ * @param node     Node the PHY is on
+ * @param qlm      QLM the PHY is connected through
+ * @param mdio_bus MDIO bus the PHY responds on
+ * @param phy_addr MDIO address of the first of four QSGMII ports
+ *
+ * @return 1 on success, -1 if the PHY is not a VSC8514 or the GSER mode
+ *         is not QSGMII
+ */
+int bdk_if_phy_vsc8514_setup(bdk_node_t node, int qlm, int mdio_bus, int phy_addr)
+{
+    /* The ID registers must read back as a VSC8514 (ID1 0x0007, ID2 0x0670) */
+    int id1 = bdk_mdio_read(node, mdio_bus, phy_addr, BDK_MDIO_PHY_REG_ID1);
+    int id2 = bdk_mdio_read(node, mdio_bus, phy_addr, BDK_MDIO_PHY_REG_ID2);
+    if ((id1 != 0x0007) || (id2 != 0x0670))
+    {
+        bdk_error("The PHY on this board is NOT VSC8514.\n");
+        return -1;
+    }
+
+    /* The VSC8514 is only driven here behind a QSGMII GSER configuration */
+    if (bdk_qlm_get_mode(node, qlm) != BDK_QLM_MODE_QSGMII_4X1)
+        return -1;
+
+    vitesse_init_script(node, mdio_bus, phy_addr);
+    vitesse_program(node, mdio_bus, phy_addr);
+
+    /* One QSGMII lane carries four ports at consecutive MDIO addresses */
+    for (int port = 0; port < 4; port++)
+        setup_vitesse_phy(node, mdio_bus, phy_addr + port);
+
+    return 1;
+
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse-xfi.c b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse-xfi.c
new file mode 100644
index 0000000000..b6f052ad47
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse-xfi.c
@@ -0,0 +1,395 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <libbdk-hal/if/bdk-if.h>
+#include <libbdk-hal/bdk-mdio.h>
+#include <libbdk-hal/bdk-twsi.h>
+
+/* This code is an optional part of the BDK. It is only linked in
+ * if BDK_REQUIRE() needs it */
+//BDK_REQUIRE(TWSI);
+BDK_REQUIRE_DEFINE(XFI);
+
+/*
+Rate Select Settings
+Mode State : 6/8
+Rate Select State : 0
+RSEL1 : 0
+RSEL0 : 0
+Ref Clock Gen(MHz) : 156.25
+Data Rate(Gbps) : 10.3125
+Description : 10 GbE
+
+
+Data Rate Detection Configuration Registers
+
+Mode Pin Settings:
+Mode State : 0
+MODE1 : 0
+MODE0 : 0
+Mode : Two-wire serial interface mode
+
+LOS Pin Strap Mode Settings
+Mode State : 2/6/8
+State : 4
+LOS1 : Float
+LOS0 : Float
+LOS Amplitude(mVpp) : 20
+LOS Hysteresis(dB) : 2
+
+Input Equalization Retimer Mode Settings
+Mode State : 6/8
+EQ State : 0
+EQ1 : 0
+EQ0 : 0
+EQ(dB) : Auto
+DFE : Auto
+Comment : Full Auto
+
+Input Equalization Re-Driver Mode Settings
+Mode State :
+EQ State : 0
+EQ1 : 0
+EQ0 : 0
+EQ(dB) : Auto
+DFE : Powered Down
+Comment : Analog EQ Only
+
+
+
+Output De-Emphasis Retimer Mode Settings
+Mode State : 6/8
+DE State : 3
+TX1 : Float
+TX0 : 0
+PRE c(-1) mA : -1
+MAIN c( 0) mA : 15
+POST c(+1) mA : 4
+DC Amplitude(mV): 500
+De-Emphasis(dB) : -6.02
+Comment :
+
+
+Output De-Emphasis Re-Driver Mode Settings
+Mode State : 2
+DE State : 3
+TX1 : Float
+TX0 : 0
+Frequency(Gbps) : 10.3125
+DC Amplitude(mV): 600
+De-Emphasis(dB) : 4
+Comment : 10GbE
+
+
+*/
+
+/* Runtime trace switch for this driver; toggled via bdk_vsc7224_debug() */
+static int debug = 0;
+
+/* Trace helper: prints only when tracing is enabled.  Uses the GNU named
+   variadic-macro form (fmt, args...) with ##args so a bare format string
+   is accepted. */
+#define xfi_printf(fmt, args...) \
+    do { \
+        if(debug == 1){ \
+            printf(fmt, ##args); \
+        } \
+    } while(0)
+
+
+/**
+ * Debug aid: dump registers 0x80-0xFF of every known VSC7224 page to the
+ * console, formatted as C array initializers.
+ *
+ * @param twsi_id TWSI bus the device is on
+ * @param unit    Which VSC7224 (TWSI device address is 0x10 + unit)
+ *
+ * @return Zero
+ */
+int bdk_xfi_vsc7224_dump(int twsi_id, int unit){
+    bdk_node_t node = 0;
+    uint8_t dev_addr = 0x10 + unit;
+    uint16_t page_sel = 0x7F;    /* page-select register */
+    int num_bytes = 2;
+    int ia_width_bytes = 1;
+    uint64_t result[0x100] = {0};
+    uint64_t pagenum[9] = {0x00, 0x01, 0x02, 0x03, 0x20, 0x21, 0x30, 0x31, 0x40};
+
+    for(size_t p = 0; p < sizeof(pagenum)/sizeof(pagenum[0]); p++){
+        /* Select the page, then snapshot the upper half of its register space */
+        bdk_twsix_write_ia(node, twsi_id, dev_addr, page_sel, num_bytes, ia_width_bytes, pagenum[p]);
+        for(int i = 0x80; i <= 0xFF; i++)
+            result[i] = bdk_twsix_read_ia(node, twsi_id, dev_addr, (uint16_t)i, num_bytes, ia_width_bytes);
+
+        for(int i = 0x80; i <= 0xFF; i++){
+            if(i == 0x80)
+                printf("\npage_%02X[0x100] = {\n", (uint8_t)pagenum[p]);
+            if(i % 8 == 0)
+                printf("/* 0x%2X */", i);
+            printf(" 0x%04X,", (uint16_t)result[i]);
+            if(i == 0xFF)
+                printf("};");
+            if((i+1) % 8 == 0)
+                printf("\n");
+        }
+    }
+
+    return 0;
+}
+
+/* XFI ReTimer/ReDriver Mode Settings */
+
+/*
+power down regs:
+Page Reg Position Mask val RegFieldName
+0x00 0x89 b07 0x0080 1 PD_INBUF
+0x00 0x8A b10 0x0400 1 PD_DFECRU
+0x00 0x8A b01 0x0002 1 PD_DFE
+0x00 0x8A b00 0x0001 1 PD_DFEADAPT
+0x00 0x97 b15 0x8000 1 ASYN_SYNN
+0x00 0x97 b09 0x0200 1 PD_OD
+0x00 0xA0 b11 0x0800 1 PD_LOS
+0x00 0xA4 b15 0x8000 1 PD_CH
+0x00 0xB5 b07 0x0080 1 PD_INBUF
+0x00 0xB9 b15 0x8000 1 ASYN_SYNN
+0x00 0xB9 b09 0x0200 1 PD_OD
+0x00 0xBF b07 0x0080 1 PD_INBUF
+0x00 0xF0 b15 0x8000 1 ASYN_SYNN
+0x00 0xF0 b09 0x0200 1 PD_OD
+0x00 0xF6 b07 0x0080 1 PD_INBUF
+0x00 0xFA b15 0x8000 1 ASYN_SYNN
+0x00 0xFA b09 0x0200 1 PD_OD
+*/
+/* One row of the VSC7224 mode table: the value to program into
+   {page, reg} for each of the two supported equalization modes. */
+struct regmap{
+    short int page;               /* register page; -1 terminates the table */
+    unsigned char reg;            /* register offset within the page */
+    unsigned short int retimer;   /* value for retimer mode (xfi_mode == 0) */
+    unsigned short int redriver;  /* value for redriver mode (xfi_mode != 0) */
+};
+
+/* This table only applies to SFF8104.  Walked in order by
+   bdk_vsc7224_modeset()/bdk_vsc7224_regmap_modeget(); the {-1, 0, 0, 0}
+   row terminates the walk, so the oversized [64] bound merely leaves
+   zero padding at the end. */
+struct regmap xfiregmap[64] = {
+//CH 0
+{0x00, 0x84, 0x0800, 0x0000}, //EQTABLE_DCOFF0 (0n_84)
+{0x00, 0x8A, 0x7000, 0x0400}, //DFECRU_CTRL (0n_8A)
+{0x00, 0x8B, 0x4060, 0x0000}, //DFECRU_CFVF_CFAP (0n_8B)
+{0x00, 0x90, 0xDE85, 0x0000}, //DFECRU_DFEAUTO (0n_90)
+{0x00, 0x91, 0x2020, 0x0000}, //DFECRU_BTMX_BFMX (0n_91)
+{0x00, 0x92, 0x0860, 0x0000}, //DFECRU_DXMX_TRMX (0n_92)
+{0x00, 0x93, 0x6000, 0x0000}, //DFECRU_TRMN_ERRI (0n_93)
+{0x00, 0x94, 0x0001, 0x0000}, //DFECRU_DFEMODE (0n_94)
+{0x00, 0x95, 0x0008, 0x0000}, //DFECRU_RATESEL (0n_95)
+{0x00, 0x97, 0x0000, 0x8080}, //OUTDRVCTRL (0n_97)
+{0x00, 0x99, 0x001E, 0x0014}, //KR_MAINTAP (0n_99)
+{0x00, 0x9A, 0x000B, 0x0000}, //KR_PRETAP (0n_9A)
+{0x00, 0x9B, 0x0010, 0x0000}, //KR_POSTTAP (0n_9B)
+{0x00, 0x9E, 0x03E8, 0x07D0}, //LOSASSRT (0n_9E)
+{0x00, 0x9F, 0x04EA, 0x09D5}, //LOSDASSRT (0n_9F)
+{0x00, 0xB2, 0x0888, 0x0000}, //NA
+
+//CH 1
+{0x01, 0x84, 0x0800, 0x0000},
+{0x01, 0x8A, 0x7000, 0x0400},
+{0x01, 0x8B, 0x4060, 0x0000},
+{0x01, 0x90, 0xDE85, 0x0000},
+{0x01, 0x91, 0x2020, 0x0000},
+{0x01, 0x92, 0x0860, 0x0000},
+{0x01, 0x93, 0x6000, 0x0000},
+{0x01, 0x94, 0x0001, 0x0000},
+{0x01, 0x95, 0x0008, 0x0000},
+{0x01, 0x97, 0x0000, 0x8080},
+{0x01, 0x99, 0x001E, 0x0014},
+{0x01, 0x9A, 0x000B, 0x0000},
+{0x01, 0x9B, 0x0010, 0x0000},
+{0x01, 0x9E, 0x03E8, 0x07D0},
+{0x01, 0x9F, 0x04EA, 0x09D5},
+{0x01, 0xB2, 0x0888, 0x0000},
+
+//POWER_DOWN Channel 2 and 3
+{0x02, 0x8A, 0x0400, 0x0400},
+{0x02, 0xA4, 0x8000, 0x8000},
+{0x03, 0x8A, 0x0400, 0x0400},
+{0x03, 0xA4, 0x8000, 0x8000},
+
+{0x30, 0x80, 0x3453, 0x0000}, //FSYNM_NVAL (3f_80)
+{0x30, 0x81, 0x00F6, 0x0000}, //FSYNFVAL_MSB (3f_81)
+{0x30, 0x82, 0x8800, 0x0000}, //FSYNFVAL_LSB (3f_82)
+{0x30, 0x83, 0x000F, 0x0000}, //FSYNRVAL_MSB (3f_83)
+{0x30, 0x84, 0xB5E0, 0x0000}, //FSYNRVAL_LSB (3f_84)
+{0x30, 0x85, 0x0000, 0x0400}, //FSYNTST (3f_85)
+
+{0x40, 0x80, 0x4C00, 0x0000}, //ANMUXSEL (40_80)
+{0x40, 0x81, 0x4000, 0x0000}, //DGMUXCTRL (40_81)
+{0x40, 0x82, 0x7800, 0xC000}, //RCKINCTRL (40_82)
+{0x40, 0x84, 0x0020, 0x0000}, //CHRCKSEL (40_84)
+
+/* sentinel: marks the end of the programmed entries */
+{-1, 0, 0, 0},
+};
+
+/**
+ * Program the whole xfiregmap table into a VSC7224, selecting either the
+ * retimer or redriver value column, and read each register back for
+ * tracing.  Stops at the first TWSI failure.
+ *
+ * @param twsi_id  TWSI bus the device is on
+ * @param unit     Which VSC7224 (TWSI device address is 0x10 + unit)
+ * @param xfi_mode 0 for retimer values, anything else for redriver values
+ *
+ * @return Zero on success, the failing TWSI result otherwise
+ */
+int bdk_vsc7224_modeset(int twsi_id, int unit, int xfi_mode){
+    bdk_node_t node = 0;
+    uint8_t dev_addr = 0x10 + unit;
+    uint16_t page_sel = 0x7F;    /* page-select register */
+    int num_bytes = 2;
+    int ia_width_bytes = 1;
+    int ret = 0;
+
+    if (xfi_mode == 0)
+        printf("XFI Mode Retimer\n");
+    else
+        printf("XFI Mode Redriver\n");
+
+    for (const struct regmap *m = xfiregmap; m->page != -1; m++){
+        uint16_t page = m->page;
+        uint16_t reg = m->reg;
+        uint64_t data = (xfi_mode == 0) ? m->retimer : m->redriver;
+
+        /* Select the page, then write the mode value into the register */
+        ret = bdk_twsix_write_ia(node, twsi_id, dev_addr, page_sel, num_bytes, ia_width_bytes, (uint64_t)page);
+        if (ret != 0){
+            printf("XFI init Error\n");
+            break;
+        }
+        ret = bdk_twsix_write_ia(node, twsi_id, dev_addr, reg, num_bytes, ia_width_bytes, data);
+        if (ret != 0){
+            printf("XFI init Error\n");
+            break;
+        }
+        /* Read back for the optional trace output */
+        int val = bdk_twsix_read_ia(node, twsi_id, dev_addr, reg, num_bytes, ia_width_bytes);
+        if (val == -1){
+            printf("XFI Read Reg Failed @ page:reg :: %2X:%2X \n",page, reg);
+            break;
+        }
+        xfi_printf(" Page: reg: data: val :: %2X:%2X:%04X:%04X\n", page, reg, (uint16_t)data, val);
+    }
+
+    return ret;
+}
+
+
+/**
+ * Print the current value of every xfiregmap register alongside the
+ * retimer and redriver table values, for comparison/debug.
+ *
+ * @param twsi_id TWSI bus the device is on
+ * @param unit    Which VSC7224 (TWSI device address is 0x10 + unit)
+ *
+ * @return Zero on success, the failing TWSI result otherwise
+ */
+int bdk_vsc7224_regmap_modeget(int twsi_id, int unit){
+    bdk_node_t node = 0;
+    uint8_t dev_addr = 0x10 + unit;
+    uint16_t page_sel = 0x7F;    /* page-select register */
+    int num_bytes = 2;
+    int ia_width_bytes = 1;
+    int ret = 0;
+
+    printf("\n===========================================\n");
+    printf("Page :Reg :Value :Retimer :Redriver\n");
+    printf("===========================================\n");
+    for (const struct regmap *m = xfiregmap; m->page != -1; m++){
+        uint16_t page = m->page;
+        uint16_t reg = m->reg;
+
+        ret = bdk_twsix_write_ia(node, twsi_id, dev_addr, page_sel, num_bytes, ia_width_bytes, (uint64_t)page);
+        if (ret != 0){
+            printf("XFI init Error\n");
+            break;
+        }
+        int data = bdk_twsix_read_ia(node, twsi_id, dev_addr, reg, num_bytes, ia_width_bytes);
+        if (data == -1){
+            printf("XFI Read Reg Failed @ page:reg :: %2X:%2X \n",page, reg);
+            break;
+        }
+        printf(" %02X: %02X: %04X: %04X: %04X\n", page, reg, (uint16_t)data, m->retimer, m->redriver);
+    }
+    printf("=======================================\n");
+
+    return ret;
+}
+
+/**
+ * Engage or release the VSC7224 register write protection.
+ *
+ * @param twsi_id TWSI bus the device is on
+ * @param unit    Which VSC7224 (TWSI device address is 0x10 + unit)
+ * @param xfi_wp  1 to write-protect, anything else to unprotect
+ *
+ * @return Zero on success, the failing TWSI result otherwise
+ */
+int bdk_vsc7224_wp_regs(int twsi_id, int unit, int xfi_wp){
+    bdk_node_t node = 0;
+    uint8_t dev_addr = 0x10 + unit;
+    uint16_t wp_reg = 0x7E;    /* write-protect register */
+    int num_bytes = 2;
+    int ia_width_bytes = 1;
+
+    /* 0x0000 engages write protect, 0xFFFF releases it */
+    uint16_t data = (xfi_wp == 1) ? 0x0000 : 0xFFFF;
+
+    int ret = bdk_twsix_write_ia(node, twsi_id, dev_addr, wp_reg, num_bytes, ia_width_bytes, (uint64_t)data);
+    if (ret != 0)
+        printf("XFI VSC7224 Write Protect Error\n");
+
+    return ret;
+}
+
+/**
+ * Write one VSC7224 register on a given page.
+ *
+ * @param twsi_id TWSI bus the device is on
+ * @param unit    Which VSC7224 (TWSI device address is 0x10 + unit)
+ * @param page    Register page to select (via page-select register 0x7F)
+ * @param reg     Register offset within the page
+ * @param val     16-bit value to write
+ *
+ * @return Zero on success, the failing TWSI result otherwise
+ */
+int bdk_vsc7224_set_reg(int twsi_id, int unit, int page, int reg, int val){
+    bdk_node_t node = 0;
+    uint8_t dev_addr = 0x10 + unit;
+    uint16_t internal_addr = reg;
+    int num_bytes = 2;
+    int ia_width_bytes = 1;
+    int ret = 0;
+
+    xfi_printf(" Unit: Page: reg: val :: %02x:%2X:%2X:%04X\n", unit, page, reg, val & 0xFFFF);
+    /* Select the target page first */
+    ret = bdk_twsix_write_ia(node, twsi_id, dev_addr, 0x7F, num_bytes, ia_width_bytes, (uint64_t)(page & 0xFF));
+    if (ret) {
+        printf("XFI VSC7224 TWSI Set Page Register Error\n");
+        /* Bug fix: bail out here.  The old code fell through and wrote the
+           data register anyway -- hitting 'reg' on whatever page was
+           previously selected -- and its return value overwrote this
+           error code, silently masking the failure. */
+        return ret;
+    }
+
+    ret = bdk_twsix_write_ia(node, twsi_id, dev_addr, internal_addr, num_bytes, ia_width_bytes, (uint64_t)(val & 0xFFFF));
+    if (ret) {
+        printf("XFI VSC7224 TWSI Set Register Error\n");
+    }
+
+    return ret;
+}
+
+/* Enable (1) or disable (0) verbose xfi_printf tracing for this driver.
+   Always returns 0. */
+int bdk_vsc7224_debug(int _debug){
+    debug =_debug;
+    return 0;
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse.c b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse.c
new file mode 100644
index 0000000000..e1ef6b0f2d
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy-vetesse.c
@@ -0,0 +1,372 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <libbdk-hal/if/bdk-if.h>
+#include <libbdk-hal/bdk-mdio.h>
+#include <libbdk-hal/bdk-qlm.h>
+
+/* Debug knobs for setup_vitesse_phy(): force near-end (Thunder-side) or
+   far-end (line-side) loopback in the PHY.  Both off in normal operation. */
+static bool LOOP_INTERNAL = false;
+static bool LOOP_EXTERNAL = false;
+
+/* Vendor-supplied 8051 microcode patch, streamed into the PHY's internal
+   CPU by vitesse_program().  Opaque binary blob -- do not edit by hand.
+   The PHY's CRC command over this image (length sizeof(patch_arr) + 1)
+   is expected to return 0xB7C2; see the check in vitesse_program(). */
+static uint8_t patch_arr[] = {
+    0x44, 0x83, 0x02, 0x42, 0x12, 0x02, 0x44, 0x93, 0x02, 0x44,
+    0xca, 0x02, 0x44, 0x4d, 0x02, 0x43, 0xef, 0xed, 0xff, 0xe5,
+    0xfc, 0x54, 0x38, 0x64, 0x20, 0x70, 0x08, 0x65, 0xff, 0x70,
+    0x04, 0xed, 0x44, 0x80, 0xff, 0x22, 0x8f, 0x19, 0x7b, 0xbb,
+    0x7d, 0x0e, 0x7f, 0x04, 0x12, 0x3d, 0xd7, 0xef, 0x4e, 0x60,
+    0x03, 0x02, 0x41, 0xf9, 0xe4, 0xf5, 0x1a, 0x74, 0x01, 0x7e,
+    0x00, 0xa8, 0x1a, 0x08, 0x80, 0x05, 0xc3, 0x33, 0xce, 0x33,
+    0xce, 0xd8, 0xf9, 0xff, 0xef, 0x55, 0x19, 0x70, 0x03, 0x02,
+    0x41, 0xed, 0x85, 0x1a, 0xfb, 0x7b, 0xbb, 0xe4, 0xfd, 0xff,
+    0x12, 0x3d, 0xd7, 0xef, 0x4e, 0x60, 0x03, 0x02, 0x41, 0xed,
+    0xe5, 0x1a, 0x54, 0x02, 0x75, 0x1d, 0x00, 0x25, 0xe0, 0x25,
+    0xe0, 0xf5, 0x1c, 0xe4, 0x78, 0xc5, 0xf6, 0xd2, 0x02, 0x12,
+    0x41, 0xfa, 0x7b, 0xff, 0x7d, 0x12, 0x7f, 0x07, 0x12, 0x3d,
+    0xd7, 0xef, 0x4e, 0x60, 0x03, 0x02, 0x41, 0xe7, 0xc2, 0x02,
+    0x74, 0xc7, 0x25, 0x1a, 0xf9, 0x74, 0xe7, 0x25, 0x1a, 0xf8,
+    0xe6, 0x27, 0xf5, 0x1b, 0xe5, 0x1d, 0x24, 0x5b, 0x12, 0x44,
+    0x2a, 0x12, 0x3e, 0xda, 0x7b, 0xfc, 0x7d, 0x11, 0x7f, 0x07,
+    0x12, 0x3d, 0xd7, 0x78, 0xcc, 0xef, 0xf6, 0x78, 0xc1, 0xe6,
+    0xfe, 0xef, 0xd3, 0x9e, 0x40, 0x06, 0x78, 0xcc, 0xe6, 0x78,
+    0xc1, 0xf6, 0x12, 0x41, 0xfa, 0x7b, 0xec, 0x7d, 0x12, 0x7f,
+    0x07, 0x12, 0x3d, 0xd7, 0x78, 0xcb, 0xef, 0xf6, 0xbf, 0x07,
+    0x06, 0x78, 0xc3, 0x76, 0x1a, 0x80, 0x1f, 0x78, 0xc5, 0xe6,
+    0xff, 0x60, 0x0f, 0xc3, 0xe5, 0x1b, 0x9f, 0xff, 0x78, 0xcb,
+    0xe6, 0x85, 0x1b, 0xf0, 0xa4, 0x2f, 0x80, 0x07, 0x78, 0xcb,
+    0xe6, 0x85, 0x1b, 0xf0, 0xa4, 0x78, 0xc3, 0xf6, 0xe4, 0x78,
+    0xc2, 0xf6, 0x78, 0xc2, 0xe6, 0xff, 0xc3, 0x08, 0x96, 0x40,
+    0x03, 0x02, 0x41, 0xd1, 0xef, 0x54, 0x03, 0x60, 0x33, 0x14,
+    0x60, 0x46, 0x24, 0xfe, 0x60, 0x42, 0x04, 0x70, 0x4b, 0xef,
+    0x24, 0x02, 0xff, 0xe4, 0x33, 0xfe, 0xef, 0x78, 0x02, 0xce,
+    0xa2, 0xe7, 0x13, 0xce, 0x13, 0xd8, 0xf8, 0xff, 0xe5, 0x1d,
+    0x24, 0x5c, 0xcd, 0xe5, 0x1c, 0x34, 0xf0, 0xcd, 0x2f, 0xff,
+    0xed, 0x3e, 0xfe, 0x12, 0x44, 0x6a, 0x7d, 0x11, 0x80, 0x0b,
+    0x78, 0xc2, 0xe6, 0x70, 0x04, 0x7d, 0x11, 0x80, 0x02, 0x7d,
+    0x12, 0x7f, 0x07, 0x12, 0x3e, 0x9a, 0x8e, 0x1e, 0x8f, 0x1f,
+    0x80, 0x03, 0xe5, 0x1e, 0xff, 0x78, 0xc5, 0xe6, 0x06, 0x24,
+    0xcd, 0xf8, 0xa6, 0x07, 0x78, 0xc2, 0x06, 0xe6, 0xb4, 0x1a,
+    0x0a, 0xe5, 0x1d, 0x24, 0x5c, 0x12, 0x44, 0x2a, 0x12, 0x3e,
+    0xda, 0x78, 0xc5, 0xe6, 0x65, 0x1b, 0x70, 0x82, 0x75, 0xdb,
+    0x20, 0x75, 0xdb, 0x28, 0x12, 0x44, 0x42, 0x12, 0x44, 0x42,
+    0xe5, 0x1a, 0x12, 0x44, 0x35, 0xe5, 0x1a, 0xc3, 0x13, 0x12,
+    0x44, 0x35, 0x78, 0xc5, 0x16, 0xe6, 0x24, 0xcd, 0xf8, 0xe6,
+    0xff, 0x7e, 0x08, 0x1e, 0xef, 0xa8, 0x06, 0x08, 0x80, 0x02,
+    0xc3, 0x13, 0xd8, 0xfc, 0xfd, 0xc4, 0x33, 0x54, 0xe0, 0xf5,
+    0xdb, 0xef, 0xa8, 0x06, 0x08, 0x80, 0x02, 0xc3, 0x13, 0xd8,
+    0xfc, 0xfd, 0xc4, 0x33, 0x54, 0xe0, 0x44, 0x08, 0xf5, 0xdb,
+    0xee, 0x70, 0xd8, 0x78, 0xc5, 0xe6, 0x70, 0xc8, 0x75, 0xdb,
+    0x10, 0x02, 0x40, 0xfd, 0x78, 0xc2, 0xe6, 0xc3, 0x94, 0x17,
+    0x50, 0x0e, 0xe5, 0x1d, 0x24, 0x62, 0x12, 0x42, 0x08, 0xe5,
+    0x1d, 0x24, 0x5c, 0x12, 0x42, 0x08, 0x20, 0x02, 0x03, 0x02,
+    0x40, 0x76, 0x05, 0x1a, 0xe5, 0x1a, 0xc3, 0x94, 0x04, 0x50,
+    0x03, 0x02, 0x40, 0x3a, 0x22, 0xe5, 0x1d, 0x24, 0x5c, 0xff,
+    0xe5, 0x1c, 0x34, 0xf0, 0xfe, 0x12, 0x44, 0x6a, 0x22, 0xff,
+    0xe5, 0x1c, 0x34, 0xf0, 0xfe, 0x12, 0x44, 0x6a, 0x22, 0xd2,
+    0x00, 0x75, 0xfb, 0x03, 0xab, 0x7e, 0xaa, 0x7d, 0x7d, 0x19,
+    0x7f, 0x03, 0x12, 0x3e, 0xda, 0xe5, 0x7e, 0x54, 0x0f, 0x24,
+    0xf3, 0x60, 0x03, 0x02, 0x42, 0xb9, 0x12, 0x44, 0xa3, 0x12,
+    0x44, 0xaa, 0xd8, 0xfb, 0xff, 0x20, 0xe2, 0x2a, 0x13, 0x92,
+    0x04, 0xef, 0xa2, 0xe1, 0x92, 0x03, 0x30, 0x04, 0x1f, 0xe4,
+    0xf5, 0x10, 0xe5, 0x10, 0x24, 0x17, 0xfd, 0x7b, 0x54, 0x7f,
+    0x04, 0x12, 0x3d, 0xd7, 0x74, 0x25, 0x25, 0x10, 0xf8, 0xa6,
+    0x07, 0x05, 0x10, 0xe5, 0x10, 0xc3, 0x94, 0x02, 0x40, 0xe4,
+    0x12, 0x44, 0xa3, 0x12, 0x44, 0xaa, 0xd8, 0xfb, 0x54, 0x05,
+    0x64, 0x04, 0x70, 0x27, 0x78, 0xc4, 0xe6, 0x78, 0xc6, 0xf6,
+    0xe5, 0x7d, 0xff, 0x33, 0x95, 0xe0, 0xef, 0x54, 0x0f, 0x78,
+    0xc4, 0xf6, 0x12, 0x42, 0xcf, 0x20, 0x04, 0x0c, 0x12, 0x44,
+    0xa3, 0x12, 0x44, 0xaa, 0xd8, 0xfb, 0x13, 0x92, 0x05, 0x22,
+    0xc2, 0x05, 0x22, 0x12, 0x44, 0xa3, 0x12, 0x44, 0xaa, 0xd8,
+    0xfb, 0x54, 0x05, 0x64, 0x05, 0x70, 0x1e, 0x78, 0xc4, 0x7d,
+    0xb8, 0x12, 0x42, 0xc5, 0x78, 0xc1, 0x7d, 0x74, 0x12, 0x42,
+    0xc5, 0xe4, 0x78, 0xc1, 0xf6, 0x22, 0x7b, 0x01, 0x7a, 0x00,
+    0x7d, 0xee, 0x7f, 0x92, 0x12, 0x38, 0xbd, 0x22, 0xe6, 0xfb,
+    0x7a, 0x00, 0x7f, 0x92, 0x12, 0x38, 0xbd, 0x22, 0x78, 0xc1,
+    0xe6, 0xfb, 0x7a, 0x00, 0x7d, 0x74, 0x7f, 0x92, 0x12, 0x38,
+    0xbd, 0xe4, 0x78, 0xc1, 0xf6, 0xf5, 0x11, 0x74, 0x01, 0x7e,
+    0x00, 0xa8, 0x11, 0x08, 0x80, 0x05, 0xc3, 0x33, 0xce, 0x33,
+    0xce, 0xd8, 0xf9, 0xff, 0x78, 0xc4, 0xe6, 0xfd, 0xef, 0x5d,
+    0x60, 0x44, 0x85, 0x11, 0xfb, 0xe5, 0x11, 0x54, 0x02, 0x25,
+    0xe0, 0x25, 0xe0, 0xfe, 0xe4, 0x24, 0x5b, 0xfb, 0xee, 0x12,
+    0x44, 0x2d, 0x12, 0x3e, 0xda, 0x7b, 0x40, 0x7d, 0x11, 0x7f,
+    0x07, 0x12, 0x3d, 0xd7, 0x74, 0xc7, 0x25, 0x11, 0xf8, 0xa6,
+    0x07, 0x7b, 0x11, 0x7d, 0x12, 0x7f, 0x07, 0x12, 0x3d, 0xd7,
+    0xef, 0x4e, 0x60, 0x09, 0x74, 0xe7, 0x25, 0x11, 0xf8, 0x76,
+    0x04, 0x80, 0x07, 0x74, 0xe7, 0x25, 0x11, 0xf8, 0x76, 0x0a,
+    0x05, 0x11, 0xe5, 0x11, 0xc3, 0x94, 0x04, 0x40, 0x9a, 0x78,
+    0xc6, 0xe6, 0x70, 0x15, 0x78, 0xc4, 0xe6, 0x60, 0x10, 0x75,
+    0xd9, 0x38, 0x75, 0xdb, 0x10, 0x7d, 0xfe, 0x12, 0x43, 0x7d,
+    0x7d, 0x76, 0x12, 0x43, 0x7d, 0x79, 0xc6, 0xe7, 0x78, 0xc4,
+    0x66, 0xff, 0x60, 0x03, 0x12, 0x40, 0x25, 0x78, 0xc4, 0xe6,
+    0x70, 0x09, 0xfb, 0xfa, 0x7d, 0xfe, 0x7f, 0x8e, 0x12, 0x38,
+    0xbd, 0x22, 0x7b, 0x01, 0x7a, 0x00, 0x7f, 0x8e, 0x12, 0x38,
+    0xbd, 0x22, 0xe4, 0xf5, 0x19, 0x74, 0x25, 0x25, 0x19, 0xf8,
+    0xe6, 0x64, 0x03, 0x60, 0x51, 0xe5, 0x19, 0x24, 0x17, 0xfd,
+    0x7b, 0xeb, 0x7f, 0x04, 0x12, 0x3d, 0xd7, 0x8f, 0xfb, 0x7b,
+    0x22, 0x7d, 0x18, 0x7f, 0x06, 0x12, 0x3d, 0xd7, 0xef, 0x64,
+    0x01, 0x4e, 0x60, 0x1c, 0x7d, 0x1c, 0xe4, 0xff, 0x12, 0x3e,
+    0x9a, 0xef, 0x54, 0x1b, 0x64, 0x0a, 0x70, 0x15, 0x7b, 0xcc,
+    0x7d, 0x10, 0xff, 0x12, 0x3d, 0xd7, 0xef, 0x64, 0x01, 0x4e,
+    0x70, 0x07, 0x12, 0x44, 0xb1, 0x7b, 0x03, 0x80, 0x0a, 0x12,
+    0x44, 0xb1, 0x74, 0x25, 0x25, 0x19, 0xf8, 0xe6, 0xfb, 0x7a,
+    0x00, 0x7d, 0x54, 0x12, 0x38, 0xbd, 0x05, 0x19, 0xe5, 0x19,
+    0xc3, 0x94, 0x02, 0x40, 0x9c, 0x22, 0xe5, 0x7e, 0x30, 0xe5,
+    0x35, 0x30, 0xe4, 0x0b, 0x7b, 0x02, 0x7d, 0x33, 0x7f, 0x35,
+    0x12, 0x36, 0x29, 0x80, 0x10, 0x7b, 0x01, 0x7d, 0x33, 0x7f,
+    0x35, 0x12, 0x36, 0x29, 0x90, 0x47, 0xd2, 0xe0, 0x44, 0x04,
+    0xf0, 0x90, 0x47, 0xd2, 0xe0, 0x54, 0xf7, 0xf0, 0x90, 0x47,
+    0xd1, 0xe0, 0x44, 0x10, 0xf0, 0x7b, 0x05, 0x7d, 0x84, 0x7f,
+    0x86, 0x12, 0x36, 0x29, 0x22, 0xfb, 0xe5, 0x1c, 0x34, 0xf0,
+    0xfa, 0x7d, 0x10, 0x7f, 0x07, 0x22, 0x54, 0x01, 0xc4, 0x33,
+    0x54, 0xe0, 0xf5, 0xdb, 0x44, 0x08, 0xf5, 0xdb, 0x22, 0xf5,
+    0xdb, 0x75, 0xdb, 0x08, 0xf5, 0xdb, 0x75, 0xdb, 0x08, 0x22,
+    0xe5, 0x7e, 0x54, 0x0f, 0x64, 0x01, 0x70, 0x0d, 0xe5, 0x7e,
+    0x30, 0xe4, 0x08, 0x90, 0x47, 0xd0, 0xe0, 0x44, 0x02, 0xf0,
+    0x22, 0x90, 0x47, 0xd0, 0xe0, 0x54, 0xfd, 0xf0, 0x22, 0xab,
+    0x07, 0xaa, 0x06, 0x7d, 0x10, 0x7f, 0x07, 0x12, 0x3e, 0xda,
+    0x7b, 0xff, 0x7d, 0x10, 0x7f, 0x07, 0x12, 0x3d, 0xd7, 0xef,
+    0x4e, 0x60, 0xf3, 0x22, 0x12, 0x44, 0xc5, 0x12, 0x44, 0xbb,
+    0x90, 0x47, 0xfa, 0xe0, 0x54, 0xf8, 0x44, 0x02, 0xf0, 0x22,
+    0x30, 0x04, 0x03, 0x12, 0x43, 0x87, 0x78, 0xc4, 0xe6, 0xff,
+    0x60, 0x03, 0x12, 0x40, 0x25, 0x22, 0xe5, 0x7e, 0xae, 0x7d,
+    0x78, 0x04, 0x22, 0xce, 0xa2, 0xe7, 0x13, 0xce, 0x13, 0x22,
+    0xe5, 0x19, 0x24, 0x17, 0x54, 0x1f, 0x44, 0x80, 0xff, 0x22,
+    0xe4, 0x78, 0xc4, 0xf6, 0xc2, 0x05, 0x78, 0xc1, 0xf6, 0x22,
+    0xc2, 0x04, 0xc2, 0x03, 0x22, 0x22
+};
+
+/**
+ * Setup Vitesse PHYs
+ * This function sets up one port in a Vitesse VSC8574 for
+ * either SGMII or QSGMII.
+ *
+ * @param node     Node the PHY is on
+ * @param mdio_bus MDIO bus the PHY responds on
+ * @param phy_addr MDIO address of the port being configured
+ * @param qsgmii   true for a QSGMII MAC interface, false for SGMII
+ */
+static void setup_vitesse_phy(bdk_node_t node, int mdio_bus, int phy_addr, bool qsgmii)
+{
+    // Select "G" registers
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x10);
+    // Reg 19G, bit 15:14
+    // 0 = SGMII
+    // 1 = QSGMII
+    int reg19 = bdk_mdio_read(node, mdio_bus, phy_addr, 19);
+    int reg18;
+    if (qsgmii)
+    {
+        // QSGMII
+        reg19 = (reg19 & ~(3 << 14)) | (1 << 14);
+        reg18 = 0x80e0;
+    }
+    else
+    {
+        // SGMII
+        reg19 = (reg19 & ~(3 << 14)) | (0 << 14);
+        reg18 = 0x80f0;
+    }
+    bdk_mdio_write(node, mdio_bus, phy_addr, 19, reg19);
+    // Write 18G, change all 4 ports
+    bdk_mdio_write(node, mdio_bus, phy_addr, 18, reg18);
+    // Select main registers
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0);
+    // Reg23, 10:8 Select copper
+    int reg23 = bdk_mdio_read(node, mdio_bus, phy_addr, 23);
+    reg23 = (reg23 & ~(7 << 8)) | (0 << 8);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 23, reg23);
+    // Reg0, Reset (bit 15 is self-clearing; no completion poll here)
+    int reg0 = bdk_mdio_read(node, mdio_bus, phy_addr, 0);
+    reg0 |= (1 << 15);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 0, reg0);
+    // Reg 16E3, bit 7 auto negotiation
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 3);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 16, 0x80);
+    // Select main registers
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0);
+    // Near end loopback (Thunder side) -- debug only, off by default
+    if (LOOP_INTERNAL)
+    {
+        reg0 = bdk_mdio_read(node, mdio_bus, phy_addr, 0);
+        reg0 |= (1 << 14);
+        bdk_mdio_write(node, mdio_bus, phy_addr, 0, reg0);
+    }
+
+    // Far end loopback (External side) -- debug only, off by default
+    if (LOOP_EXTERNAL)
+    {
+        reg23 = bdk_mdio_read(node, mdio_bus, phy_addr, 23);
+        reg23 |= (1 << 3);
+        bdk_mdio_write(node, mdio_bus, phy_addr, 23, reg23);
+    }
+}
+
+/* Read-modify-write helper: bits selected by 'mask' take the value from
+   'value'; the rest of the register is left untouched. */
+static void wr_masked(bdk_node_t node, int mdio_bus, int phy_addr, int reg, int value, int mask)
+{
+    int previous = bdk_mdio_read(node, mdio_bus, phy_addr, reg);
+    bdk_mdio_write(node, mdio_bus, phy_addr, reg,
+                   (previous & ~mask) | (value & mask));
+}
+
+/* Poll micro-command register 18G until the PHY's 8051 reports the last
+   command complete (bit 15 clears), waiting up to 10 * 100ms.  Logs an
+   error on timeout.  Caller must already have page 0x10 selected.
+   (Extracted because this loop appeared twice, verbatim, below.) */
+static void vitesse_wait_micro_done(bdk_node_t node, int mdio_bus, int phy_addr)
+{
+    int reg18g = bdk_mdio_read(node, mdio_bus, phy_addr, 18);
+    int timeout = 10;
+    while ((reg18g & (1<<15)) && (timeout > 0))
+    {
+        bdk_wait_usec(100000);
+        reg18g = bdk_mdio_read(node, mdio_bus, phy_addr, 18);
+        timeout = timeout - 1;
+    }
+    if (timeout == 0)
+        bdk_error("Vetesse: Timeout waiting for complete\n");
+}
+
+/**
+ * Download the 8051 firmware patch (patch_arr) into a Vitesse VSC8574
+ * and verify it with the PHY's built-in 16-bit CRC command.  The
+ * register/value sequence comes from the vendor init script and is
+ * deliberately left as-is.
+ *
+ * @param node     Node the PHY is on
+ * @param mdio_bus MDIO bus the PHY responds on
+ * @param phy_addr MDIO address of the PHY
+ */
+static void vitesse_program(bdk_node_t node, int mdio_bus, int phy_addr)
+{
+    printf("Programming Vitesse PHY at address %d\n", phy_addr);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x0010);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 18, 0x800f);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x0010);
+
+    vitesse_wait_micro_done(node, mdio_bus, phy_addr);
+
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x0000);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x0010);
+    wr_masked(node, mdio_bus, phy_addr, 12, 0x0000, 0x0800);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 9, 0x005b);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 10, 0x005b);
+    wr_masked(node, mdio_bus, phy_addr, 12, 0x0800, 0x0800);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 18, 0x800f);
+    wr_masked(node, mdio_bus, phy_addr, 0, 0x0000, 0x8000);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 18, 0x0000);
+    wr_masked(node, mdio_bus, phy_addr, 12, 0x0000, 0x0800);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x10);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 0, 0x7009);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 12, 0x5002);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 11, 0x0000);
+
+    /* Stream the firmware image one byte at a time through register 12 */
+    for (unsigned int i=0; i<sizeof(patch_arr); i++)
+    {
+        int d = 0x5000 | patch_arr[i];
+        bdk_mdio_write(node, mdio_bus, phy_addr, 12, d);
+    }
+    bdk_mdio_write(node, mdio_bus, phy_addr, 12, 0x0000);
+
+    bdk_mdio_write(node, mdio_bus, phy_addr, 3, 0x3eb7);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 4, 0x4012);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 12, 0x0100);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 0, 0x4018);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 0, 0xc018);
+
+    // below verifies CRC is correct in 8051 RAM. CRC is 16-bit.
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x0001);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 25, 0x4000);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 26, sizeof(patch_arr) + 1);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x0010);
+    bdk_mdio_write(node, mdio_bus, phy_addr, 18, 0x8008);
+
+    vitesse_wait_micro_done(node, mdio_bus, phy_addr);
+
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x0001);
+
+    int crc_calculated = bdk_mdio_read(node, mdio_bus, phy_addr, 25);
+    if (crc_calculated != 0xB7C2)
+        printf("8051 crc_calculated = 0x%x, expected_crc = 0x%x\n", crc_calculated, 0xB7C2);
+
+    bdk_mdio_write(node, mdio_bus, phy_addr, 31, 0x0000);
+}
+
+/**
+ * Detect a Vitesse PHY (ID1 0x0007) on a QLM and configure it for the
+ * current GSER mode.  Non-Vitesse PHYs and unsupported QLM modes are
+ * silently skipped.
+ *
+ * @param node     Node the PHY is on
+ * @param qlm      QLM the PHY is connected through
+ * @param mdio_bus MDIO bus the PHY responds on
+ * @param phy_addr MDIO address of the first PHY port
+ *
+ * @return Zero in all cases
+ */
+int bdk_if_phy_vetesse_setup(bdk_node_t node, int qlm, int mdio_bus, int phy_addr)
+{
+    /* Only act on a PHY that identifies as a Vitesse part */
+    if (bdk_mdio_read(node, mdio_bus, phy_addr, BDK_MDIO_PHY_REG_ID1) != 0x0007)
+        return 0;
+
+    /* Only SGMII and QSGMII GSER configurations are handled here */
+    bdk_qlm_modes_t qlm_mode = bdk_qlm_get_mode(node, qlm);
+    switch (qlm_mode)
+    {
+        case BDK_QLM_MODE_SGMII_1X1:
+        case BDK_QLM_MODE_SGMII_2X1:
+        case BDK_QLM_MODE_SGMII_4X1:
+        case BDK_QLM_MODE_QSGMII_4X1:
+            break;
+        default:
+            return 0;
+    }
+
+    /* Load the 8051 firmware patch before touching the port config */
+    vitesse_program(node, mdio_bus, phy_addr);
+
+    if (qlm_mode == BDK_QLM_MODE_QSGMII_4X1)
+    {
+        /* QSGMII carries four ports at consecutive MDIO addresses */
+        for (int port = 0; port < 4; port++)
+            setup_vitesse_phy(node, mdio_bus, phy_addr + port, true);
+    }
+    else
+    {
+        setup_vitesse_phy(node, mdio_bus, phy_addr, false);
+    }
+    return 0;
+}
+
+#if 0
+/* Dead code retained from the vendor drop: an earlier autodetect wrapper
+   that scanned BDK_CONFIG_PHY_ADDRESS for every BGX and called the (since
+   renamed) vetesse_setup() on MDIO-attached PHYs.  Superseded by the
+   exported bdk_if_phy_vetesse_setup(node, qlm, mdio_bus, phy_addr). */
+int bdk_if_phy_vetesse_setup(bdk_node_t node)
+{
+    for (int bgx = 0; bgx < 4; bgx++)
+    {
+        int port = 0;
+        int phy_addr = bdk_config_get_int(BDK_CONFIG_PHY_ADDRESS, node, bgx, port);
+        if (phy_addr != -1)
+        {
+            int node = (phy_addr >> 24) & 0xff;
+            int mdio_bus = (phy_addr >> 8) & 0xff;
+            int mdio_addr = phy_addr & 0xff;
+            if (node == 0xff)
+                node = bdk_numa_local();
+            if ((phy_addr & BDK_IF_PHY_TYPE_MASK) == BDK_IF_PHY_MDIO)
+            {
+                int qlm = bdk_qlm_get(node, BDK_IF_BGX, bgx, port);
+                if (qlm != -1)
+                    vetesse_setup(node, qlm, mdio_bus, mdio_addr);
+            }
+        }
+    }
+    return 0;
+}
+#endif
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy.c b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy.c
new file mode 100644
index 0000000000..5b02eff0cd
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/if/bdk-if-phy.c
@@ -0,0 +1,445 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include <libbdk-hal/if/bdk-if.h>
+#include <libbdk-hal/bdk-config.h>
+#include <libbdk-hal/bdk-mdio.h>
+#include <libbdk-hal/bdk-qlm.h>
+#include <libbdk-hal/bdk-twsi.h>
+
+/**
+ * Called when the PHY is connected through TWSI
+ *
+ * @param dev_node Node the ethernet device is on
+ * @param phy_addr Encoded address, see bdk-if.h for format
+ *
+ * @return Link status
+ */
+static bdk_if_link_t __bdk_if_phy_get_twsi(bdk_node_t dev_node, int phy_addr)
+{
+    /* For TWSI:
+       Bits[31:24]: Node ID, 0xff for device node
+       Bits[23:16]: TWSI internal address width in bytes (0-2)
+       Bits[15:12]: 2=TWSI
+       Bits[11:8]: TWSI bus number
+       Bits[7:0]: TWSI address */
+    int node = (phy_addr >> 24) & 0xff;
+    int twsi_ia_width = (phy_addr >> 16) & 0xff;
+    int twsi_bus = (phy_addr >> 8) & 0xf;
+    int twsi_addr = phy_addr & 0xff;
+    if (node == 0xff)
+        node = dev_node; /* 0xff means "same node as the ethernet device" */
+
+    bdk_if_link_t result;
+    result.u64 = 0; /* Default: link down, no speed */
+
+    /* This is from the Avago SFP 1G Module data sheet
+       Register 17 (Extended Status 1) */
+    int64_t phy_status = bdk_twsix_read_ia(node, twsi_bus, twsi_addr, 17, 2, twsi_ia_width);
+    if (phy_status != -1) /* -1 = TWSI read failure; report link down */
+    {
+        /* Decode register 17 bit fields */
+        int speed = (phy_status >> 14)& 3;
+        int duplex = (phy_status >> 13)& 1;
+        int resolved = (phy_status >> 11)& 1;
+        int link = (phy_status >> 10)& 1;
+        /* Only trust the status once auto-negotiation has resolved */
+        if (resolved)
+        {
+            result.s.up = link;
+            result.s.full_duplex = duplex;
+            switch (speed)
+            {
+                case 0: /* 10 Mbps */
+                    result.s.speed = 10;
+                    break;
+                case 1: /* 100 Mbps */
+                    result.s.speed = 100;
+                    break;
+                case 2: /* 1 Gbps */
+                    result.s.speed = 1000;
+                    break;
+                case 3: /* Illegal */
+                    result.u64 = 0;
+                    break;
+            }
+        }
+    }
+
+    return result;
+}
+
+/**
+ * Read the link status of a PHY over MDIO (or TWSI for some SFP modules).
+ *
+ * Dispatches on the PHY's ID1 register to vendor-specific status decoding:
+ * Marvell (0x0141), Micrel/"Kendin" (0x0022), Vitesse (0x0007); anything
+ * else is decoded using the Broadcom register layout.
+ *
+ * @param dev_node Node the ethernet device is on
+ * @param phy_addr Encoded PHY address, see bdk-if.h for format
+ *
+ * @return Link status; all-zero (down) on any read failure
+ */
+bdk_if_link_t __bdk_if_phy_get(bdk_node_t dev_node, int phy_addr)
+{
+    int node = (phy_addr >> 24) & 0xff;
+    int mdio_bus = (phy_addr >> 8) & 0xff;
+    int mdio_addr = phy_addr & 0xff;
+    if (node == 0xff)
+        node = dev_node;
+    int phy_status;
+    bdk_if_link_t result;
+    result.u64 = 0;
+
+    /* PHY address of -1 means there is no PHY and we should have never
+       gotten here */
+    if (phy_addr == -1)
+        return result;
+
+    /* A PHY address with the special value 0x1000 represents a PHY we can't
+       connect to through MDIO which is assumed to be at 1Gbps */
+    if (phy_addr == BDK_IF_PHY_FIXED_1GB)
+    {
+        result.s.up = 1;
+        result.s.full_duplex = 1;
+        result.s.speed = 1000;
+        return result;
+    }
+
+    /* A PHY address with the special value 0x1001 represents a PHY we can't
+       connect to through MDIO which is assumed to be at 100Mbps */
+    if (phy_addr == BDK_IF_PHY_FIXED_100MB)
+    {
+        result.s.up = 1;
+        result.s.full_duplex = 1;
+        result.s.speed = 100;
+        return result;
+    }
+
+    /* Check for a PHY connected through TWSI */
+    if ((phy_addr & BDK_IF_PHY_TYPE_MASK) == BDK_IF_PHY_TWSI)
+        return __bdk_if_phy_get_twsi(dev_node, phy_addr);
+
+    phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, BDK_MDIO_PHY_REG_ID1);
+    if ((phy_status <= 0) || (phy_status == 0xffff))
+        return result; /* Read failed or no PHY present */
+
+    switch (phy_status)
+    {
+        case 0x0141: /* Marvell */
+        {
+
+            /* This code assumes we are using a Marvell Gigabit PHY. All the
+               speed information can be read from register 17 in one go. Somebody
+               using a different PHY will need to handle it above in the board
+               specific area */
+            phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 17);
+            if (phy_status < 0)
+                return result;
+
+            /* If the resolve bit 11 isn't set, see if autoneg is turned off
+               (bit 12, reg 0). The resolve bit doesn't get set properly when
+               autoneg is off, so force it */
+            if ((phy_status & (1<<11)) == 0)
+            {
+                bdk_mdio_phy_reg_control_t control;
+                int phy_c = bdk_mdio_read(node, mdio_bus, mdio_addr, BDK_MDIO_PHY_REG_CONTROL);
+                if (phy_c < 0)
+                    return result;
+                control.u16 = phy_c;
+                if (control.s.autoneg_enable == 0)
+                    phy_status |= 1<<11;
+            }
+
+            /* Only return a link if the PHY has finished auto negotiation
+               and set the resolved bit (bit 11) */
+            if (phy_status & (1<<11))
+            {
+                result.s.up = 1;
+                result.s.full_duplex = ((phy_status>>13)&1);
+                switch ((phy_status>>14)&3)
+                {
+                    case 0: /* 10 Mbps */
+                        result.s.speed = 10;
+                        break;
+                    case 1: /* 100 Mbps */
+                        result.s.speed = 100;
+                        break;
+                    case 2: /* 1 Gbps */
+                        result.s.speed = 1000;
+                        break;
+                    case 3: /* Illegal */
+                        result.u64 = 0;
+                        break;
+                }
+            }
+            break;
+        }
+        case 0x0022: /* Kendin */
+        {
+            /* Register 1Fh - PHY Control */
+            /* Micrel KSZ9031RNX, EBB8104 RGMII transceiver */
+            /* Reports as "Kendin" in BDK_MDIO_PHY_REG_ID1 */
+            phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 0x1F);
+            /* A failed read (-1) would have all bits set and report a bogus
+               1G full-duplex link; bail out like the other vendor branches */
+            if (phy_status < 0)
+                return result;
+            if (phy_status & (1 << 6)) // Speed Status - 1000Base-T
+            {
+                result.s.up = 1;
+                result.s.speed = 1000;
+            }
+            else if (phy_status & (1 << 5)) // Speed Status - 100Base-TX
+            {
+                result.s.up = 1;
+                result.s.speed = 100;
+            }
+            else if (phy_status & (1 << 4)) // Speed Status - 10Base-T
+            {
+                result.s.up = 1;
+                result.s.speed = 10;
+            }
+            if (phy_status & (1 << 3)) // Duplex Status
+            {
+                result.s.full_duplex = 1;
+            }
+            break;
+        }
+        case 0x0007: /* Vitesse */
+        {
+            /* Auxiliary Control and Status, Address 28 (0x1C) */
+            phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 0x1c);
+            if (phy_status < 0)
+                return result;
+            result.s.full_duplex = (phy_status>>5)&1;
+            switch ((phy_status>>3) & 3)
+            {
+                case 0:
+                    result.s.speed = 10;
+                    result.s.up = 1;
+                    break;
+                case 1:
+                    result.s.speed = 100;
+                    result.s.up = 1;
+                    break;
+                default:
+                    result.s.speed = 1000;
+                    break;
+            }
+            /* Link state is taken from the standard status register (reg 1,
+               bit 2); this overrides the 'up' set in the switch above */
+            phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 0x01);
+            if (phy_status < 0)
+                return result;
+            result.s.up = (phy_status>>2)&1;
+            break;
+        }
+        default: /* Treat like Broadcom */
+        {
+            /* Below we are going to read SMI/MDIO register 0x19 which works
+               on Broadcom parts */
+            phy_status = bdk_mdio_read(node, mdio_bus, mdio_addr, 0x19);
+            if (phy_status < 0)
+                return result;
+
+            /* Bits [10:8] encode speed/duplex together */
+            switch ((phy_status>>8) & 0x7)
+            {
+                case 0:
+                    result.u64 = 0;
+                    break;
+                case 1:
+                    result.s.up = 1;
+                    result.s.full_duplex = 0;
+                    result.s.speed = 10;
+                    break;
+                case 2:
+                    result.s.up = 1;
+                    result.s.full_duplex = 1;
+                    result.s.speed = 10;
+                    break;
+                case 3:
+                    result.s.up = 1;
+                    result.s.full_duplex = 0;
+                    result.s.speed = 100;
+                    break;
+                case 4:
+                    result.s.up = 1;
+                    result.s.full_duplex = 1;
+                    result.s.speed = 100;
+                    break;
+                case 5:
+                    result.s.up = 1;
+                    result.s.full_duplex = 1;
+                    result.s.speed = 100;
+                    break;
+                case 6:
+                    result.s.up = 1;
+                    result.s.full_duplex = 0;
+                    result.s.speed = 1000;
+                    break;
+                case 7:
+                    result.s.up = 1;
+                    result.s.full_duplex = 1;
+                    result.s.speed = 1000;
+                    break;
+            }
+            break;
+        }
+    }
+
+    /* If link is down, return all fields as zero. */
+    if (!result.s.up)
+        result.u64 = 0;
+
+    return result;
+}
+
+/**
+ * PHY XS initialization, primarily for RXAUI
+ *
+ * Identifies the PHY via its MDIO clause-45 PMA/PMD ID registers (1.2/1.3)
+ * and applies vendor-specific bring-up. Currently only Marvell 88X3140/3120
+ * RXAUI PHYs need work: a software reset and a TX level adjustment.
+ *
+ * @param dev_node Node the ethernet device is on
+ * @param phy_addr Encoded PHY address, see bdk-if.h for format
+ *
+ * @return none
+ */
+void __bdk_if_phy_xs_init(bdk_node_t dev_node, int phy_addr)
+{
+    /* This code only supports PHYs connected through MDIO */
+    if ((phy_addr & BDK_IF_PHY_TYPE_MASK) != BDK_IF_PHY_MDIO)
+        return;
+
+    int node = (phy_addr >> 24) & 0xff;
+    int mdio_bus = (phy_addr >> 8) & 0xff;
+    int mdio_addr = phy_addr & 0xff;
+    if (node == 0xff)
+        node = dev_node;
+
+    /* Read the PMA/PMD Device Identifier (1.2, 1.3)
+       OUI is spread across both registers */
+    int dev_addr = 1;
+    int reg_addr = 2;
+    int phy_id1 = bdk_mdio_45_read(node, mdio_bus, mdio_addr, dev_addr, reg_addr);
+    if (phy_id1 == -1)
+        return;
+    reg_addr = 3;
+    int phy_id2 = bdk_mdio_45_read(node, mdio_bus, mdio_addr, dev_addr, reg_addr);
+    if (phy_id2 == -1)
+        return;
+    int model_number = (phy_id2 >> 4) & 0x3F;
+    int oui = phy_id1;
+    oui <<= 6;
+    oui |= (phy_id2 >> 10) & 0x3F;
+    switch (oui)
+    {
+        case 0x5016: /* Marvell */
+            if (model_number == 9) /* 88X3140/3120 */
+            {
+                BDK_TRACE(BGX, "N%d.MDIO%d.%d: Performing PHY reset on Marvell RXAUI PHY\n",
+                    node, mdio_bus, mdio_addr);
+                dev_addr = 4;
+                reg_addr = 0;
+                /* Write bit 15, Software Reset, in PHY XS Control 1 (4.0). On CN78xx,
+                   sometimes the PHY/BGX gets stuck in local fault mode, link never comes up,
+                   and this appears to clear it up. Haven't seen this on CN81xx or T88,
+                   but the reset seems like cheap insurance. */
+                if (bdk_mdio_45_write(node, mdio_bus, mdio_addr, dev_addr, reg_addr, (1 << 15)))
+                {
+                    bdk_error("PHY XS: MDIO write to (%d.%d) failed\n", dev_addr, reg_addr);
+                    return;
+                }
+
+                /* Poll until the self-clearing reset bit drops. Abort on a
+                   failed read: -1 has bit 15 set, so the original loop would
+                   spin forever if the PHY stopped answering */
+                int reset_pending;
+                do
+                {
+                    reset_pending = bdk_mdio_45_read(node, mdio_bus, mdio_addr, dev_addr, reg_addr);
+                    if (reset_pending == -1)
+                    {
+                        bdk_error("PHY XS: MDIO read from (%d.%d) failed\n", dev_addr, reg_addr);
+                        return;
+                    }
+                } while (reset_pending & (1 << 15));
+
+                /* Adjust the RXAUI TX Level for Marvell PHY, per Brendan Metzner
+                   write 5 to register 4.49155 */
+                reg_addr = 49155;
+                if (bdk_mdio_45_write(node, mdio_bus, mdio_addr, dev_addr, reg_addr, 5))
+                {
+                    bdk_error("PHY XS: MDIO write to (%d.%d) failed\n", dev_addr, reg_addr);
+                    return;
+                }
+            }
+            break;
+
+        default: /* Unknown PHY, or no PHY present */
+            break;
+    }
+}
+
+/**
+ * Walk the BGX ports, look up each configured PHY and run the matching
+ * vendor-specific setup (Vitesse VSC8514, generic Vitesse, or Marvell).
+ *
+ * @param dev_node Node whose PHYs should be configured
+ *
+ * @return Zero
+ */
+int bdk_if_phy_setup(bdk_node_t dev_node)
+{
+    /* 81xx has only 2 BGX (BGX0-BGX1); BGX2 is RGMII */
+    for (int bgx = 0; bgx < 2; bgx++)
+    {
+        int port = 0;
+        int phy_addr = bdk_config_get_int(BDK_CONFIG_PHY_ADDRESS, dev_node, bgx, port);
+        if (phy_addr != -1)
+        {
+            /* Decode the packed address: node, MDIO bus, MDIO device address */
+            int node = (phy_addr >> 24) & 0xff;
+            int mdio_bus = (phy_addr >> 8) & 0xff;
+            int mdio_addr = phy_addr & 0xff;
+            if (node == 0xff)
+                node = bdk_numa_local();
+            if ((phy_addr & BDK_IF_PHY_TYPE_MASK) == BDK_IF_PHY_MDIO)
+            {
+                int qlm = bdk_qlm_get_qlm_num(node, BDK_IF_BGX, bgx, port);
+                if (qlm == -1)
+                    continue;
+
+                BDK_TRACE(PHY, "N%d.BGX%d.%d: Configuring ...\n", node, bgx, port);
+
+                /* Check PHY id. Use the decoded mdio_addr: the raw phy_addr
+                   still contains the node/bus/type bits and is not a valid
+                   MDIO device address (the calls below all use mdio_addr) */
+                int phy_status_1 = bdk_mdio_read(node, mdio_bus, mdio_addr, BDK_MDIO_PHY_REG_ID1);
+                int phy_status_2 = bdk_mdio_read(node, mdio_bus, mdio_addr, BDK_MDIO_PHY_REG_ID2);
+
+                /* Vitesse */
+                if (phy_status_1 == 0x0007)
+                {
+                    if (phy_status_2 == 0x0670)
+                    {
+                        /* VSC8514 QSGMII PHY has its own setup routine */
+                        bdk_if_phy_vsc8514_setup(node, qlm, mdio_bus, mdio_addr);
+                    }
+                    else
+                    {
+                        bdk_if_phy_vetesse_setup(node, qlm, mdio_bus, mdio_addr);
+                    }
+                }
+
+                /* Marvell */
+                else if (phy_status_1 == 0x0141)
+                    bdk_if_phy_marvell_setup(node, qlm, mdio_bus, mdio_addr);
+                else
+                    BDK_TRACE(PHY, "N%d.BGX%d.%d: Unknown PHY %x\n", node, bgx, port, phy_status_1);
+            }
+        }
+    }
+    return 0;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-cn81xx.c b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-cn81xx.c
new file mode 100644
index 0000000000..303b276a8b
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-cn81xx.c
@@ -0,0 +1,1003 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-hal/qlm/bdk-qlm-common.h"
+#include "libbdk-arch/bdk-csrs-bgx.h"
+#include "libbdk-arch/bdk-csrs-gser.h"
+#include "libbdk-arch/bdk-csrs-pem.h"
+#include "libbdk-arch/bdk-csrs-sata.h"
+#include "libbdk-arch/bdk-csrs-rst.h"
+#include "libbdk-hal/bdk-config.h"
+#include "libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.h"
+#include "libbdk-hal/bdk-gpio.h"
+
+/**
+ * Report how many QLMs this chip provides.
+ *
+ * @param node Node to query (CN81XX has the same count on every node)
+ *
+ * @return Number of QLMs
+ */
+int bdk_qlm_get_num(bdk_node_t node)
+{
+    /* CN81XX exposes four dual-lane modules (DLM0-DLM3) */
+    return 4;
+}
+
+/**
+ * Return the number of lanes in a QLM. QLMs normally contain
+ * 4 lanes, except for chips which only have half of a QLM.
+ *
+ * @param qlm QLM to get lanes number for
+ *
+ * @return Number of lanes on the QLM
+ */
+int bdk_qlm_get_lanes(bdk_node_t node, int qlm)
+{
+    /* On the CN80XX alternate package DLM0 and DLM1 carry a single lane;
+       every other DLM carries two */
+    bool single_lane = cavium_is_altpkg(CAVIUM_CN81XX) && (qlm < 2);
+    return single_lane ? 1 : 2;
+}
+
+/**
+ * Lookup the hardware QLM number for a given interface type and index. This
+ * function will fail with a fatal error if called on invalid interfaces for
+ * a chip. It returns the QLM number for an interface without checking to
+ * see if the QLM is in the correct mode.
+ *
+ * @param iftype Interface type
+ * @param interface Interface index number
+ *
+ * @return QLM number. Dies on a fatal error on failure.
+ */
+int bdk_qlm_get_qlm_num(bdk_node_t node, bdk_if_t iftype, int interface, int index)
+{
+    switch (iftype)
+    {
+        case BDK_IF_BGX:
+        {
+            int qlm;
+            switch (interface)
+            {
+                case 0: /* BGX0 sits on DLM0+DLM1 */
+                {
+                    /* This BGX spans two DLMs. The index must be used to
+                       figure out which DLM we are using */
+                    BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(0));
+                    if (gserx_cfg.s.bgx)
+                    {
+                        if (gserx_cfg.s.bgx_quad) /* 4 lanes together */
+                            qlm = 0;
+                        else if (gserx_cfg.s.bgx_dual) /* 2 lanes together */
+                            qlm = (index >= 1) ? 1 : 0;
+                        else /* All lanes independent */
+                        {
+                            bdk_qlm_modes_t mode = bdk_qlm_get_mode(node, 0);
+                            if (mode == BDK_QLM_MODE_QSGMII_4X1)
+                                qlm = 0; /* QSGMII: all 4 ports on DLM0 lane 0 */
+                            else if (mode <= BDK_QLM_MODE_PCIE_1X8)
+                                qlm = 1; /* DLM0 taken by PCIe; ports live on DLM1 */
+                            else if (cavium_is_altpkg(CAVIUM_CN81XX))
+                            {
+                                /* CN80XX: one lane per DLM, so at most two
+                                   ports unless DLM1 runs QSGMII */
+                                bdk_qlm_modes_t mode1 = bdk_qlm_get_mode(node, 1);
+                                if ((mode1 != BDK_QLM_MODE_QSGMII_4X1) && (index >= 2))
+                                    return -1;
+                                qlm = (index >= 1) ? 1 : 0;
+                            }
+                            else
+                                qlm = (index >= 2) ? 1 : 0; /* two lanes per DLM */
+                        }
+                    }
+                    else
+                        qlm = 1; /* DLM0 not in BGX mode; try DLM1 */
+                    break;
+                }
+                case 1: /* BGX1 sits on DLM2+DLM3 */
+                {
+                    /* This BGX spans two DLMs. The index must be used to
+                       figure out which DLM we are using */
+                    BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(2));
+                    if (gserx_cfg.s.bgx)
+                    {
+                        if (gserx_cfg.s.bgx_quad) /* 4 lanes together */
+                            qlm = 2;
+                        else if (gserx_cfg.s.bgx_dual) /* 2 lanes together */
+                            qlm = (index >= 1) ? 3 : 2;
+                        else /* All lanes independent */
+                        {
+                            bdk_qlm_modes_t mode = bdk_qlm_get_mode(node, 2);
+                            if (mode == BDK_QLM_MODE_QSGMII_4X1)
+                                qlm = 2;
+                            else if (mode <= BDK_QLM_MODE_PCIE_1X8)
+                                qlm = 1; /* NOTE(review): looks like a copy-paste
+                                            from the BGX0 branch -- for BGX1 the
+                                            non-PCIe sibling DLM would be 3, not 1.
+                                            Verify against the BDK upstream */
+                            else
+                                qlm = (index >= 2) ? 3 : 2;
+                        }
+                    }
+                    else
+                        qlm = 3;
+                    break;
+                }
+                default:
+                    return -1; /* Only BGX0/BGX1 exist on SerDes */
+            }
+            /* Make sure the QLM is powered up and out of reset */
+            BDK_CSR_INIT(phy_ctl, node, BDK_GSERX_PHY_CTL(qlm));
+            if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
+                return -1;
+            /* Make sure the QLM is in BGX mode */
+            BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(qlm));
+            if (gserx_cfg.s.bgx)
+                return qlm;
+            else
+                return -1;
+        }
+        case BDK_IF_PCIE: /* PCIe */
+        {
+            switch (interface)
+            {
+                case 0: /* PEM0 */
+                {
+                    BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(0));
+                    if (gserx_cfg.s.pcie)
+                        return 0; /* PEM0 is on DLM0 */
+                    else
+                        return -1; /* PEM0 is disabled */
+                }
+                case 1: /* PEM1 */
+                {
+                    BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(2));
+                    if (gserx_cfg.s.pcie)
+                        return 2; /* PEM1 is on DLM2 */
+                    else
+                        return -1; /* PEM1 is disabled */
+                }
+                case 2: /* PEM2 */
+                {
+                    /* DLM3 is PEM2 only when PEM1 is not using 4 lanes */
+                    BDK_CSR_INIT(pem1_cfg, node, BDK_PEMX_CFG(1));
+                    BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(3));
+                    if (!pem1_cfg.cn81xx.lanes4 && gserx_cfg.s.pcie)
+                        return 3; /* PEM2 is on DLM3 */
+                    else
+                        return -1; /* PEM2 is disabled */
+                }
+                default: /* Max of 3 PEMs, 0-2 */
+                    return -1;
+            }
+        }
+        default: /* Not supported by CN81XX */
+            return -1;
+    }
+}
+
+/**
+ * Get the mode of a QLM as a human readable string
+ *
+ * @param qlm QLM to examine
+ *
+ * @return String mode
+ */
+bdk_qlm_modes_t bdk_qlm_get_mode(bdk_node_t node, int qlm)
+{
+    /* GSERX_CFG tells us the protocol family; the details come from the
+       PEM (PCIe) or BGX (ethernet) configuration registers */
+    BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(qlm));
+    if (gserx_cfg.s.pcie)
+    {
+        switch (qlm)
+        {
+            case 0: /* PEM0 */
+            {
+                BDK_CSR_INIT(pemx_cfg, node, BDK_PEMX_CFG(0));
+                if (cavium_is_altpkg(CAVIUM_CN81XX))
+                    return BDK_QLM_MODE_PCIE_1X1; /* PEM0 x1 */
+                else if (pemx_cfg.cn81xx.lanes4)
+                    return BDK_QLM_MODE_PCIE_1X4; /* PEM0 x4 */
+                else
+                    return BDK_QLM_MODE_PCIE_1X2; /* PEM0 x2 */
+            }
+            case 1: /* PEM0 second two lanes */
+                return BDK_QLM_MODE_PCIE_1X4; /* PEM0 x4 */
+            case 2: /* Either PEM1 x4 or PEM1 x2 */
+            {
+                BDK_CSR_INIT(pemx_cfg, node, BDK_PEMX_CFG(1));
+                if (pemx_cfg.cn81xx.lanes4)
+                    return BDK_QLM_MODE_PCIE_1X4; /* PEM1 x4 */
+                else
+                    return BDK_QLM_MODE_PCIE_1X2; /* PEM1 x2 */
+            }
+            case 3: /* Either PEM1 x4 or PEM2 x2 */
+            {
+                /* Can be last 2 lanes of PEM1 */
+                BDK_CSR_INIT(pem1_cfg, node, BDK_PEMX_CFG(1));
+                if (pem1_cfg.cn81xx.lanes4)
+                    return BDK_QLM_MODE_PCIE_1X4; /* PEM1 x4 */
+                /* Can be 2 lanes of PEM2 */
+                return BDK_QLM_MODE_PCIE_1X2; /* PEM2 x2 */
+            }
+            default:
+                return BDK_QLM_MODE_DISABLED;
+        }
+    }
+    else if (gserx_cfg.s.bgx)
+    {
+        /* Map the DLM back to its BGX block and CMR index so we can read
+           the LMAC type that was programmed during setup */
+        int bgx;
+        int bgx_index;
+        switch (qlm)
+        {
+            case 0:
+            {
+                bgx = 0;
+                bgx_index = 0;
+                break;
+            }
+            case 1:
+                bgx = 0;
+                bgx_index = 2;
+                break;
+            case 2:
+            {
+                bgx = 1;
+                bgx_index = 0;
+                break;
+            }
+            case 3:
+                bgx = 1;
+                bgx_index = 2;
+                break;
+            default:
+                return BDK_QLM_MODE_DISABLED;
+        }
+        BDK_CSR_INIT(cmrx_config, node, BDK_BGXX_CMRX_CONFIG(bgx, bgx_index));
+        /* KR (backplane) and optical variants share an LMAC type; a helper
+           remembers which lanes were configured for KR */
+        bool is_kr = __bdk_qlm_is_lane_kr(node, qlm, 0);
+        switch (cmrx_config.s.lmac_type)
+        {
+            case BDK_BGX_LMAC_TYPES_E_SGMII:
+                if (cavium_is_altpkg(CAVIUM_CN81XX) && (qlm < 2))
+                    return BDK_QLM_MODE_SGMII_1X1; /* CN80XX: one lane per DLM */
+                else
+                    return BDK_QLM_MODE_SGMII_2X1;
+            case BDK_BGX_LMAC_TYPES_E_XAUI: return BDK_QLM_MODE_XAUI_1X4; /* Doesn't differntiate between XAUI and DXAUI */
+            case BDK_BGX_LMAC_TYPES_E_RXAUI: return BDK_QLM_MODE_RXAUI_1X2;
+            case BDK_BGX_LMAC_TYPES_E_TENG_R:
+                if (is_kr)
+                    return (cavium_is_altpkg(CAVIUM_CN81XX) && (qlm < 2)) ? BDK_QLM_MODE_10G_KR_1X1 : BDK_QLM_MODE_10G_KR_2X1;
+                else
+                    return (cavium_is_altpkg(CAVIUM_CN81XX) && (qlm < 2)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_XFI_2X1;
+            case BDK_BGX_LMAC_TYPES_E_FORTYG_R:
+                if (is_kr)
+                    return BDK_QLM_MODE_40G_KR4_1X4;
+                else
+                    return BDK_QLM_MODE_XLAUI_1X4;
+            case BDK_BGX_LMAC_TYPES_E_QSGMII: return BDK_QLM_MODE_QSGMII_4X1;
+            default: return BDK_QLM_MODE_DISABLED;
+        }
+    }
+    else if (gserx_cfg.s.sata)
+        return BDK_QLM_MODE_SATA_2X1;
+    else
+        return BDK_QLM_MODE_DISABLED;
+}
+
+/**
+ * Route a DLM into SATA mode. On CN81XX the mapping is fixed: only DLM3
+ * can carry SATA, and its two lanes become SATA ports 0 and 1.
+ */
+static int qlm_set_sata(bdk_node_t node, int qlm, bdk_qlm_modes_t mode, int baud_mhz, bdk_qlm_mode_flags_t flags)
+{
+    if (qlm != 3)
+    {
+        bdk_error("Attempted to configure SATA on QLM that doesn't support it\n");
+        return -1;
+    }
+    /* SATA 0-1 = DLM3 lanes 0-1 */
+    int first_port = 0;
+    return __bdk_qlm_set_sata_cn8xxx(node, qlm, baud_mhz, first_port, first_port + 1);
+}
+
+/**
+ * For chips that don't use pin strapping, this function programs
+ * the QLM to the specified mode
+ *
+ * @param node Node to use in a Numa setup
+ * @param qlm QLM to configure
+ * @param mode Desired mode
+ * @param baud_mhz Desired speed
+ * @param flags Flags to specify mode specific options
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_qlm_set_mode(bdk_node_t node, int qlm, bdk_qlm_modes_t mode, int baud_mhz, bdk_qlm_mode_flags_t flags)
+{
+    /* Protocol selection bits for GSERX_CFG, filled in per mode below */
+    int lane_mode = 0xf;
+    int lmac_type = -1;
+    int is_pcie = 0;
+    int is_sata = 0;
+    int is_ilk = 0;
+    /* is_bgx packs three GSERX_CFG bits: bit0=bgx, bit1=bgx_dual, bit2=bgx_quad */
+    int is_bgx = 0;
+    int bgx_block;
+    int bgx_index;
+
+    /* Fixed DLM -> BGX block/CMR index mapping on CN81XX */
+    switch (qlm)
+    {
+        case 0:
+            bgx_block = 0;
+            bgx_index = 0;
+            break;
+        case 1:
+            bgx_block = 0;
+            bgx_index = 2;
+            break;
+        case 2:
+            bgx_block = 1;
+            bgx_index = 0;
+            break;
+        case 3:
+            bgx_block = 1;
+            bgx_index = 2;
+            break;
+        default:
+            bgx_block = -1;
+            bgx_index = -1;
+            break;
+    }
+
+    /* Measure the actual reference clock and snap it to the nearest
+       supported nominal frequency */
+    int measured_ref = bdk_qlm_measure_clock(node, qlm);
+    int ref_clk = (mode == BDK_QLM_MODE_DISABLED) ? 0 : __bdk_qlm_round_refclock(node, qlm, measured_ref);
+    int kr_mode = 0;
+
+    switch (mode)
+    {
+        case BDK_QLM_MODE_PCIE_1X1:
+        case BDK_QLM_MODE_PCIE_1X2:
+        case BDK_QLM_MODE_PCIE_1X4:
+        {
+            /* Note: PCIe ignores baud_mhz. Use the GEN 1/2/3 flags
+               to control speed */
+            is_pcie = 1;
+            if (ref_clk == REF_100MHZ)
+            {
+                BDK_CSR_MODIFY(c, node, BDK_GSERX_REFCLK_SEL(qlm),
+                    c.s.pcie_refclk125 = 0);
+                if (baud_mhz == 2500)
+                    lane_mode = BDK_GSER_LMODE_E_R_25G_REFCLK100;
+                else if (baud_mhz == 5000)
+                    lane_mode = BDK_GSER_LMODE_E_R_5G_REFCLK100;
+                else
+                    lane_mode = BDK_GSER_LMODE_E_R_8G_REFCLK100;
+            }
+            else if (ref_clk == REF_125MHZ)
+            {
+                BDK_CSR_MODIFY(c, node, BDK_GSERX_REFCLK_SEL(qlm),
+                    c.s.pcie_refclk125 = 1);
+                if (baud_mhz == 2500)
+                    lane_mode = BDK_GSER_LMODE_E_R_25G_REFCLK125;
+                else if (baud_mhz == 5000)
+                    lane_mode = BDK_GSER_LMODE_E_R_5G_REFCLK125;
+                else
+                    lane_mode = BDK_GSER_LMODE_E_R_8G_REFCLK125;
+            }
+            else
+            {
+                bdk_error("Invalid reference clock for PCIe on QLM%d\n", qlm);
+                return -1;
+            }
+            /* PEMX_CFG[md] encodes the maximum PCIe generation */
+            int cfg_md;
+            if (baud_mhz == 2500)
+                cfg_md = 0; /* Gen1 Speed */
+            else if (baud_mhz == 5000)
+                cfg_md = 1; /* Gen2 Speed */
+            else
+                cfg_md = 2; /* Gen3 Speed */
+            switch (qlm)
+            {
+                case 0: /* Either PEM0 x4 or PEM0 x2 or PEM0 x1 */
+                    BDK_CSR_MODIFY(c, node, BDK_RST_SOFT_PRSTX(0),
+                        c.s.soft_prst = !(flags & BDK_QLM_MODE_FLAG_ENDPOINT));
+                    __bdk_qlm_setup_pem_reset(node, 0, flags & BDK_QLM_MODE_FLAG_ENDPOINT);
+                    BDK_CSR_MODIFY(c, node, BDK_PEMX_CFG(0),
+                        c.cn81xx.lanes4 = (mode == BDK_QLM_MODE_PCIE_1X4);
+                        //c.cn81xx.hostmd = !(flags & BDK_QLM_MODE_FLAG_ENDPOINT);
+                        c.cn81xx.md = cfg_md);
+                    break;
+                case 1: /* Second two lanes for PEM0 x4 */
+                    /* PEMX_CFG already setup */
+                    break;
+                case 2: /* Either PEM1 x4 or PEM1 x2 */
+                    BDK_CSR_MODIFY(c, node, BDK_RST_SOFT_PRSTX(1),
+                        c.s.soft_prst = !(flags & BDK_QLM_MODE_FLAG_ENDPOINT));
+                    __bdk_qlm_setup_pem_reset(node, 1, flags & BDK_QLM_MODE_FLAG_ENDPOINT);
+                    BDK_CSR_MODIFY(c, node, BDK_PEMX_CFG(1),
+                        c.cn81xx.lanes4 = (mode == BDK_QLM_MODE_PCIE_1X4);
+                        //c.cn81xx.hostmd = !(flags & BDK_QLM_MODE_FLAG_ENDPOINT);
+                        c.cn81xx.md = cfg_md);
+                    break;
+                case 3: /* Either PEM1 x4 or PEM2 x2 */
+                    if (mode == BDK_QLM_MODE_PCIE_1X4)
+                    {
+                        /* Last 2 lanes of PEM1 */
+                        /* PEMX_CFG already setup */
+                    }
+                    else
+                    {
+                        /* Two lanes for PEM2 */
+                        BDK_CSR_MODIFY(c, node, BDK_RST_SOFT_PRSTX(2),
+                            c.s.soft_prst = !(flags & BDK_QLM_MODE_FLAG_ENDPOINT));
+                        __bdk_qlm_setup_pem_reset(node, 2, flags & BDK_QLM_MODE_FLAG_ENDPOINT);
+                        BDK_CSR_MODIFY(c, node, BDK_PEMX_CFG(2),
+                            c.cn81xx.lanes4 = 0;
+                            //c.cn81xx.hostmd = !(flags & BDK_QLM_MODE_FLAG_ENDPOINT);
+                            c.cn81xx.md = cfg_md);
+                    }
+                    break;
+                default:
+                    return -1;
+            }
+            break;
+        }
+        case BDK_QLM_MODE_SGMII_4X1:
+        case BDK_QLM_MODE_SGMII_2X1:
+        case BDK_QLM_MODE_SGMII_1X1:
+            /* Disable port BGX ports 2-3 on CN80XX */
+            if ((qlm < 2) && cavium_is_altpkg(CAVIUM_CN81XX))
+            {
+                BDK_CSR_WRITE(node, BDK_BGXX_CMRX_RX_DMAC_CTL(0, 2), 0);
+                BDK_CSR_WRITE(node, BDK_BGXX_CMRX_RX_DMAC_CTL(0, 3), 0);
+            }
+            lmac_type = BDK_BGX_LMAC_TYPES_E_SGMII; /* SGMII */
+            is_bgx = 1;
+            lane_mode = __bdk_qlm_get_lane_mode_for_speed_and_ref_clk("SGMII", qlm, ref_clk, baud_mhz);
+            if (lane_mode == -1)
+                return -1;
+            break;
+        case BDK_QLM_MODE_XAUI_1X4:
+            lmac_type = BDK_BGX_LMAC_TYPES_E_XAUI; /* XAUI */
+            is_bgx = 5; /* bgx + bgx_quad: all 4 lanes ganged */
+            lane_mode = __bdk_qlm_get_lane_mode_for_speed_and_ref_clk("XAUI", qlm, ref_clk, baud_mhz);
+            if (lane_mode == -1)
+                return -1;
+            break;
+        case BDK_QLM_MODE_RXAUI_2X2:
+        case BDK_QLM_MODE_RXAUI_1X2:
+            lmac_type = BDK_BGX_LMAC_TYPES_E_RXAUI; /* RXAUI */
+            is_bgx = 3; /* bgx + bgx_dual: lanes paired */
+            lane_mode = __bdk_qlm_get_lane_mode_for_speed_and_ref_clk("RXAUI", qlm, ref_clk, baud_mhz);
+            if (lane_mode == -1)
+                return -1;
+            break;
+        case BDK_QLM_MODE_XFI_4X1:
+        case BDK_QLM_MODE_XFI_2X1:
+        case BDK_QLM_MODE_XFI_1X1:
+            /* Disable port BGX ports 2-3 on CN80XX */
+            if ((qlm < 2) && cavium_is_altpkg(CAVIUM_CN81XX))
+            {
+                BDK_CSR_WRITE(node, BDK_BGXX_CMRX_RX_DMAC_CTL(0, 2), 0);
+                BDK_CSR_WRITE(node, BDK_BGXX_CMRX_RX_DMAC_CTL(0, 3), 0);
+            }
+            lmac_type = BDK_BGX_LMAC_TYPES_E_TENG_R; /* 10G_R */
+            is_bgx = 1;
+            lane_mode = __bdk_qlm_get_lane_mode_for_speed_and_ref_clk("XFI", qlm, ref_clk, baud_mhz);
+            if (lane_mode == -1)
+                return -1;
+            break;
+        case BDK_QLM_MODE_XLAUI_1X4:
+            lmac_type = BDK_BGX_LMAC_TYPES_E_FORTYG_R; /* 40G_R */
+            is_bgx = 5;
+            lane_mode = __bdk_qlm_get_lane_mode_for_speed_and_ref_clk("XLAUI", qlm, ref_clk, baud_mhz);
+            if (lane_mode == -1)
+                return -1;
+            break;
+        case BDK_QLM_MODE_10G_KR_4X1:
+        case BDK_QLM_MODE_10G_KR_2X1:
+        case BDK_QLM_MODE_10G_KR_1X1:
+            /* Disable port BGX ports 2-3 on CN80XX */
+            if ((qlm < 2) && cavium_is_altpkg(CAVIUM_CN81XX))
+            {
+                BDK_CSR_WRITE(node, BDK_BGXX_CMRX_RX_DMAC_CTL(0, 2), 0);
+                BDK_CSR_WRITE(node, BDK_BGXX_CMRX_RX_DMAC_CTL(0, 3), 0);
+            }
+            lmac_type = BDK_BGX_LMAC_TYPES_E_TENG_R; /* 10G_R */
+            is_bgx = 1;
+            lane_mode = __bdk_qlm_get_lane_mode_for_speed_and_ref_clk("10G-KR", qlm, ref_clk, baud_mhz);
+            if (lane_mode == -1)
+                return -1;
+            kr_mode = 1;
+            break;
+        case BDK_QLM_MODE_40G_KR4_1X4:
+            lmac_type = BDK_BGX_LMAC_TYPES_E_FORTYG_R; /* 40G_R */
+            is_bgx = 5;
+            lane_mode = __bdk_qlm_get_lane_mode_for_speed_and_ref_clk("40G-KR", qlm, ref_clk, baud_mhz);
+            if (lane_mode == -1)
+                return -1;
+            kr_mode = 1;
+            break;
+        case BDK_QLM_MODE_QSGMII_4X1:
+            lmac_type = BDK_BGX_LMAC_TYPES_E_QSGMII; /* QSGMII */
+            is_bgx = 1;
+            lane_mode = BDK_GSER_LMODE_E_R_5G_REFCLK15625_QSGMII;
+            break;
+        case BDK_QLM_MODE_SATA_2X1:
+            /* NOTE(review): lmode is forced to the 100MHz-refclk setting
+               regardless of the measured ref_clk above -- presumably SATA
+               boards always use 100MHz; confirm */
+            BDK_CSR_MODIFY(c, node, BDK_GSERX_LANE_MODE(qlm), c.s.lmode = BDK_GSER_LMODE_E_R_8G_REFCLK100);
+            /* SATA initialization is different than BGX. Call its init function
+               and skip the rest of this routine */
+            return qlm_set_sata(node, qlm, mode, baud_mhz, flags);
+        case BDK_QLM_MODE_DISABLED:
+            /* Set gser for the interface mode */
+            BDK_CSR_MODIFY(c, node, BDK_GSERX_CFG(qlm),
+                c.u = 0);
+            /* Put the PHY in reset */
+            BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
+                c.s.phy_reset = 1);
+            return 0;
+        default:
+            bdk_error("Unsupported QLM mode %d\n", mode);
+            return -1;
+    }
+
+    BDK_TRACE(QLM, "N%u.QLM%u: Power up...\n", node, qlm);
+
+    /* Power up phy, but keep it in reset */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
+        c.s.phy_pd = 0;
+        c.s.phy_reset = 1);
+
+    /* Set gser for the interface mode */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_CFG(qlm),
+        c.s.sata = is_sata;
+        c.s.ila = is_ilk;
+        c.s.bgx = is_bgx & 1;
+        c.s.bgx_quad = (is_bgx >> 2) & 1;
+        c.s.bgx_dual = (is_bgx >> 1) & 1;
+        c.s.pcie = is_pcie);
+
+    /* Lane mode */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANE_MODE(qlm),
+        c.s.lmode = lane_mode);
+
+    /* LMAC type. We only program one port as the full setup is done in BGX */
+    if (lmac_type != -1)
+    {
+        BDK_CSR_MODIFY(c, node, BDK_BGXX_CMRX_CONFIG(bgx_block, bgx_index),
+            c.s.enable = 0;
+            c.s.lmac_type = lmac_type);
+    }
+
+    BDK_TRACE(QLM, "N%u.QLM%u: Deassert reset...\n", node, qlm);
+
+    /* Bring phy out of reset */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
+        c.s.phy_reset = 0);
+
+    /* Wait 1us until the management interface is ready to accept
+       read/write commands.*/
+    bdk_wait_usec(1);
+
+    /* Configure the gser pll */
+    __bdk_qlm_init_mode_table(node, qlm, ref_clk);
+
+    /* Remember which lanes are using KR over BGX */
+    if (is_bgx)
+    {
+        int num_lanes = bdk_qlm_get_lanes(node, qlm);
+        for (int lane = 0; lane < num_lanes; lane++)
+            __bdk_qlm_set_lane_kr(node, qlm, lane, kr_mode);
+    }
+
+    /* Wait for reset to complete and the PLL to lock */
+    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_GSERX_PLL_STAT(qlm), pll_lock, ==, 1, 10000))
+    {
+        bdk_error("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
+        return -1;
+    }
+
+    /* PCIe mode doesn't become ready until the PEM block attempts to bring
+       the interface up. Skip this check for PCIe */
+    if (!is_pcie && BDK_CSR_WAIT_FOR_FIELD(node, BDK_GSERX_QLM_STAT(qlm), rst_rdy, ==, 1, 10000))
+    {
+        bdk_error("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
+        return -1;
+    }
+
+    /* cdrlock will be checked in the BGX */
+
+    /* Errata (GSER-27140) SERDES temperature drift sensitivity in receiver */
+    int channel_loss = bdk_config_get_int(BDK_CONFIG_QLM_CHANNEL_LOSS, node, qlm);
+    __bdk_qlm_errata_gser_27140(node, qlm, baud_mhz, channel_loss);
+
+    /* Apply any custom tuning */
+    __bdk_qlm_tune(node, qlm, mode, baud_mhz);
+
+    /* Some modes require 4 lanes, which spans DLMs. For these modes, we need
+       to setup the second DLM at the same time we setup the first. The second
+       DLM also must use the same reference clock as the first */
+    bool paired_dlm = ((qlm & 1) == 0) && /* We're on the first (even) DLM */
+        ((mode == BDK_QLM_MODE_PCIE_1X4) || /* We're using a 4 lane mode */
+         (mode == BDK_QLM_MODE_XAUI_1X4) ||
+         (mode == BDK_QLM_MODE_XLAUI_1X4) ||
+         (mode == BDK_QLM_MODE_40G_KR4_1X4));
+    if (paired_dlm)
+    {
+        /* Use the same reference clock for the second QLM */
+        BDK_CSR_WRITE(node, BDK_GSERX_REFCLK_SEL(qlm + 1),
+            BDK_CSR_READ(node, BDK_GSERX_REFCLK_SEL(qlm)));
+        /* Recurse to configure the odd sibling DLM identically */
+        return bdk_qlm_set_mode(node, qlm + 1, mode, baud_mhz, flags);
+    }
+
+    return 0;
+}
+
+/**
+ * Get the speed (Gbaud) of the QLM in Mhz.
+ *
+ * @param qlm QLM to examine
+ *
+ * @return Speed in Mhz
+ */
+int bdk_qlm_get_gbaud_mhz(bdk_node_t node, int qlm)
+{
+    BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(qlm));
+    if (gserx_cfg.u == 0)
+        return 0; /* QLM disabled entirely */
+    if (gserx_cfg.s.pcie)
+    {
+        /* QLMs in PCIe mode ignore LMODE and get their speed from
+           the PEM block that controls them */
+        int pem;
+        switch (qlm)
+        {
+            case 0: /* PEM0 */
+            case 1: /* PEM0 */
+                pem = 0;
+                break;
+            case 2: /* PEM1 */
+                pem = 1;
+                break;
+            case 3: /* PEM1 or PEM2 */
+            {
+                /* DLM3 belongs to PEM1 when PEM1 is x4, else to PEM2 */
+                BDK_CSR_INIT(pemx_cfg, node, BDK_PEMX_CFG(1));
+                if (pemx_cfg.cn81xx.lanes4)
+                    pem = 1;
+                else
+                    pem = 2;
+                break;
+            }
+            default:
+                bdk_fatal("QLM%d: In PCIe mode, which shouldn't happen\n", qlm);
+        }
+        return __bdk_qlm_get_gbaud_mhz_pem(node, pem);
+    }
+    else if (gserx_cfg.s.sata)
+    {
+        /* Only DLM3 can run SATA on CN81XX */
+        int sata;
+        switch (qlm)
+        {
+            case 3:
+                sata = 0;
+                break;
+            default:
+                return 0;
+        }
+        BDK_CSR_INIT(sata_uctl_ctl, node, BDK_SATAX_UCTL_CTL(sata));
+        if (!sata_uctl_ctl.s.a_clk_en)
+            return 0; /* SATA controller clock is off; speed unknown */
+        /* SCTL[spd] limits the negotiated SATA generation */
+        BDK_CSR_INIT(sctl, node, BDK_SATAX_UAHC_P0_SCTL(sata));
+        switch (sctl.s.spd)
+        {
+            case 1: return 1500;
+            case 2: return 3000;
+            case 3: return 6000;
+            default: return 6000; /* No limit, assume 6G */
+        }
+    }
+    else
+        return __bdk_qlm_get_gbaud_mhz_lmode(node, qlm);
+}
+
+/**
+ * Initialize the QLM layer
+ */
+void bdk_qlm_init(bdk_node_t node)
+{
+ /* Program each PEM's PERST drive behavior based on whether
+ RST_CTL reports it as a host or an endpoint */
+ int p = 0;
+ while (p < 3)
+ {
+ BDK_CSR_INIT(ctl, node, BDK_RST_CTLX(p));
+ __bdk_qlm_setup_pem_reset(node, p, !ctl.s.host_mode);
+ p++;
+ }
+}
+
+static void __bdk_qlm_sff81xx_set_reference(bdk_node_t node, int qlm, int ref_clk)
+{
+ int use_clock;
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ {
+ /* Board reference clocks:
+ common clock 0 runs at 156MHz,
+ common clock 1 runs at 100MHz.
+ DLM0 is always on the 100MHz common clock; every other DLM
+ picks its clock from the requested reference */
+ if (qlm == 0)
+ use_clock = 1; /* DLMC_REF_CLK1 of 100MHz */
+ else if (ref_clk == REF_100MHZ)
+ use_clock = 1; /* DLMC_REF_CLK1 of 100MHz */
+ else
+ use_clock = 2; /* DLM1_REF_CLK of 156MHz */
+
+ BDK_TRACE(QLM, "Setting N%d.QLM%d to use ref clock %d\n", node, qlm, use_clock);
+ }
+ else
+ {
+ bdk_error("Update %s for qlm auto config of this chip\n",__FUNCTION__);
+ return;
+ }
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_REFCLK_SEL(qlm),
+ c.s.com_clk_sel = (use_clock != 2);
+ c.s.use_com1 = (use_clock == 1));
+}
+
+int bdk_qlm_auto_config(bdk_node_t node)
+{
+ /* Generic QLM auto-configuration is not implemented in this port;
+ always fail so callers fall back to explicit configuration */
+ return -1;
+}
+
+/**
+ * For Cavium SFF board, query the DIP switches in GPIO to determine the QLM setup.
+ * Configure the GPIOs to read the DLM settings
+ * SW1.1 -> DLM0_SEL -> GPIO_26
+ * SW1.2 -> DLM1_SEL -> GPIO_25
+ * SW1.3 -> DLM2_SEL -> GPIO_31
+ * SW1.4 -> DLM3_SEL -> GPIO_4
+ *V1.x boards SW3.8 -> QSGMII/XFI SEL ->GPIO_9
+ *V2.x boards SW3.7 -> QSGMII/XFI SEL ->GPIO_36
+*/
+int bdk_qlm_dip_auto_config(bdk_node_t node)
+{
+ /* Mode chosen for each of the four DLMs by the DIP switches */
+ bdk_qlm_modes_t dlm_mode[4];
+ /* Baud rate and reference clock for the DLM being configured */
+ int dlm_speed = 0;
+ int use_ref = 0;
+ bdk_qlm_mode_flags_t dlm_flags = 0;
+
+ unsigned int dlm_config, dlm3, dlm2, dlm1, dlm0;
+ uint64_t gpio = 0;
+
+ /* Configure the GPIOs to read the DLM settings */
+ /* SW1.1 -> DLM0_SEL -> GPIO_26 */
+ /* SW1.2 -> DLM1_SEL -> GPIO_25 */
+ /* SW1.3 -> DLM2_SEL -> GPIO_31 */
+ /* SW1.4 -> DLM3_SEL -> GPIO_4 */
+ //V1.x boards /* SW3.8 -> QSGMII/XFI SEL ->GPIO_9 */
+ //V2.x boards /* SW3.7 -> QSGMII/XFI SEL ->GPIO_36 */
+ /* Configure the GPIOs as inputs */
+ bdk_gpio_initialize(node, 26, 0, 0);
+ bdk_gpio_initialize(node, 25, 0, 0);
+ bdk_gpio_initialize(node, 31, 0, 0);
+ bdk_gpio_initialize(node, 4, 0, 0);
+ bdk_gpio_initialize(node, 36, 0, 0);
+
+
+ /* Read the GPIOs */
+ gpio = bdk_gpio_read(node, 0);
+
+ dlm3 = !!(gpio & (1ULL<<4));
+ dlm2 = !!(gpio & (1ULL<<31));
+ dlm1 = !!(gpio & (1ULL<<25));
+ dlm0 = !!(gpio & (1ULL<<26));
+
+
+ /* Pack the four switch bits into a single 4-bit selector */
+ dlm_config = (dlm0<<3)| (dlm1<<2) | (dlm2<<1) | (dlm3);
+
+ BDK_TRACE(QLM, "DLM CONFIG:%d gpio36: %d\n", dlm_config, !!(gpio & (1ULL<<36)));
+
+ /* Decode the selector into a mode per DLM. Where DLM1 carries
+ networking, GPIO_36 further selects XFI vs QSGMII */
+ switch(dlm_config)
+ {
+ case 0:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = BDK_QLM_MODE_DISABLED;
+ dlm_mode[2] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[3] = BDK_QLM_MODE_PCIE_1X2;
+ break;
+ case 1:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = BDK_QLM_MODE_DISABLED;
+ dlm_mode[2] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[3] = BDK_QLM_MODE_SATA_2X1;
+ break;
+ case 2:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = BDK_QLM_MODE_DISABLED;
+ dlm_mode[2] = BDK_QLM_MODE_XFI_2X1;
+ dlm_mode[3] = BDK_QLM_MODE_PCIE_1X2;
+ break;
+ case 3:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = BDK_QLM_MODE_DISABLED;
+ dlm_mode[2] = BDK_QLM_MODE_XFI_2X1;
+ dlm_mode[3] = BDK_QLM_MODE_SATA_2X1;
+ break;
+ case 4:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = (!!(gpio & (1ULL<<36)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_QSGMII_4X1);
+ dlm_mode[2] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[3] = BDK_QLM_MODE_PCIE_1X2;
+ break;
+ case 5:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = (!!(gpio & (1ULL<<36)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_QSGMII_4X1);
+ dlm_mode[2] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[3] = BDK_QLM_MODE_SATA_2X1;
+ break;
+ case 6:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = (!!(gpio & (1ULL<<36)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_QSGMII_4X1);
+ dlm_mode[2] = BDK_QLM_MODE_XFI_2X1;
+ dlm_mode[3] = BDK_QLM_MODE_PCIE_1X2;
+ break;
+ case 7:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = (!!(gpio & (1ULL<<36)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_QSGMII_4X1);
+ dlm_mode[2] = BDK_QLM_MODE_XFI_2X1;
+ dlm_mode[3] = BDK_QLM_MODE_SATA_2X1;
+ break;
+ case 8:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X4;
+ dlm_mode[1] = BDK_QLM_MODE_PCIE_1X4;
+ dlm_mode[2] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[3] = BDK_QLM_MODE_PCIE_1X2;
+ break;
+ case 9:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X4;
+ dlm_mode[1] = BDK_QLM_MODE_PCIE_1X4;
+ dlm_mode[2] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[3] = BDK_QLM_MODE_SATA_2X1;
+ break;
+ case 10:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X4;
+ dlm_mode[1] = BDK_QLM_MODE_PCIE_1X4;
+ dlm_mode[2] = BDK_QLM_MODE_XFI_2X1;
+ dlm_mode[3] = BDK_QLM_MODE_PCIE_1X2;
+ break;
+ case 11:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X4;
+ dlm_mode[1] = BDK_QLM_MODE_PCIE_1X4;
+ dlm_mode[2] = BDK_QLM_MODE_XFI_2X1;
+ dlm_mode[3] = BDK_QLM_MODE_SATA_2X1;
+ break;
+ case 12:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = (!!(gpio & (1ULL<<36)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_QSGMII_4X1);
+ dlm_mode[2] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[3] = BDK_QLM_MODE_PCIE_1X2;
+ break;
+ case 13:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = (!!(gpio & (1ULL<<36)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_QSGMII_4X1);
+ dlm_mode[2] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[3] = BDK_QLM_MODE_SATA_2X1;
+ break;
+ case 14:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = (!!(gpio & (1ULL<<36)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_QSGMII_4X1);
+ dlm_mode[2] = BDK_QLM_MODE_XFI_2X1;
+ dlm_mode[3] = BDK_QLM_MODE_PCIE_1X2;
+ break;
+ case 15:
+ dlm_mode[0] = BDK_QLM_MODE_PCIE_1X2;
+ dlm_mode[1] = (!!(gpio & (1ULL<<36)) ? BDK_QLM_MODE_XFI_1X1 : BDK_QLM_MODE_QSGMII_4X1);
+ dlm_mode[2] = BDK_QLM_MODE_XFI_2X1;
+ dlm_mode[3] = BDK_QLM_MODE_SATA_2X1;
+ break;
+ default:
+ return -1;
+ }
+
+ /* Apply each DLM's mode, first picking the reference clock and
+ baud rate the mode requires */
+ for(int dlm = 0; dlm < 4; dlm++)
+ {
+ const char *dlm_mode_str = bdk_qlm_mode_tostring(dlm_mode[dlm]);
+ switch(dlm_mode[dlm])
+ {
+ case BDK_QLM_MODE_DISABLED:
+ break;
+ case BDK_QLM_MODE_XFI_2X1:
+ case BDK_QLM_MODE_XFI_1X1:
+ use_ref = REF_156MHZ;
+ dlm_speed = 10312;
+ break;
+ case BDK_QLM_MODE_SATA_2X1:
+ dlm_speed = 6000;
+ use_ref = REF_100MHZ;
+ break;
+ case BDK_QLM_MODE_PCIE_1X2:
+ case BDK_QLM_MODE_PCIE_1X4:
+ dlm_speed = 8000;
+ use_ref =REF_100MHZ;
+ break;
+ case BDK_QLM_MODE_QSGMII_4X1:
+ use_ref = REF_100MHZ;
+ dlm_speed = 5000;
+ break;
+ default:
+ bdk_error("Unsupported N%d.QLM%d mode: %s(%d)",
+ node, dlm,
+ dlm_mode_str ? dlm_mode_str : "???",
+ dlm_mode[dlm]);
+ return -1;
+ }
+ if ((1 == dlm) && (dlm_mode[dlm] != BDK_QLM_MODE_QSGMII_4X1) && (dlm_mode[dlm] != BDK_QLM_MODE_DISABLED))
+ {
+ /* This code is specific to sff8104 board
+ ** QSGMII phy is wired to dlm1-gser lane 2
+ ** AQR-107 phy is wired to dlm1-gser lane 3
+ ** bdk always uses bgx0.port0 on that board
+ */
+ // If dlm1 is in XFI mode, change PHY address to mdio of aquantia phy
+ unsigned mdio_bus = 1;
+ unsigned mdio_addr = 0;
+ int phy_cfg = 0xff<<24 | ((mdio_bus& 0xf)<<8) | (mdio_addr & 0xff);
+ bdk_config_set_int((uint32_t) phy_cfg,BDK_CONFIG_PHY_ADDRESS, node, 0, 0);
+ /* Indicate serdes lane 3 , aquantia phy active */
+ int aq_phy = (0x3<<8) | 1;
+ bdk_config_set_int(aq_phy, BDK_CONFIG_AQUANTIA_PHY,node,0,0);
+ BDK_TRACE(QLM,"Disabling phys 0.1,0.2,0.3\n");
+ for (int i = 1; i<4; i++) {
+ bdk_config_set_int(-1,BDK_CONFIG_PHY_ADDRESS, node, 0, i);
+ bdk_config_set_int(0,BDK_CONFIG_BGX_ENABLE,node,0,i);
+ }
+ }
+
+ BDK_TRACE(QLM, "Setting N%d.QLM%d mode %s(%d), speed %d, flags 0x%x\n",
+ node, dlm, dlm_mode_str, dlm_mode[dlm], dlm_speed, dlm_flags);
+
+ /* Set the reference clock for this QLM */
+ __bdk_qlm_sff81xx_set_reference(node, dlm, use_ref);
+
+ if (bdk_qlm_set_mode(node, dlm, dlm_mode[dlm], dlm_speed, dlm_flags))
+ return -1;
+ }
+ return 0;
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-common-sata.c b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-common-sata.c
new file mode 100644
index 0000000000..9e31ad1dce
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-common-sata.c
@@ -0,0 +1,625 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-hal/if/bdk-if.h"
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-hal/qlm/bdk-qlm-common.h"
+#include "libbdk-arch/bdk-csrs-gser.h"
+#include "libbdk-arch/bdk-csrs-sata.h"
+
+/**
+ * Initialize a DLM/QLM for use with SATA controllers
+ *
+ * @param node      Node to initialize
+ * @param qlm Which DLM/QLM to init
+ * @param baud_mhz QLM speed in Gbaud
+ * @param sata_first First SATA controller connected to this DLM/QLM
+ * @param sata_last Last SATA controller connected to this DLM/QLM (inclusive)
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_set_sata_cn8xxx(bdk_node_t node, int qlm, int baud_mhz, int sata_first, int sata_last)
+{
+ /* One SATA controller (and one GSER lane) per port on this DLM/QLM */
+ const int NUM_LANES = sata_last - sata_first + 1;
+ const int MAX_A_CLK = 333000000; /* Max of 333Mhz */
+
+ /* 26.4.1 Cold Reset */
+ /* 1. Ensure that the SerDes reference clock is up and stable. */
+ /* Already done */
+
+ /* 2. Optionally program the GPIO CSRs for SATA features.
+ a. For cold-presence detect, select a GPIO for the input and program GPI-
+ O_BIT_CFG(0..50)[PIN_SEL] = GPIO_PIN_SEL_E::SATA(0..15)_CP_DET.
+ b. For mechanical-presence detect, select a GPIO for the input and program
+ GPIO_BIT_CFG(0..50)[PIN_SEL] = GPI-
+ O_PIN_SEL_E::SATA(0..15)_MP_SWITCH.
+ c. For BIST board-test loopback, select a GPIO for the input and program GPI-
+ O_BIT_CFG(0..50)[PIN_SEL] = GPIO_PIN_SEL_E:::SATA_LAB_LB.
+ d. For LED activity, select a GPIO for the output and program GPI-
+ O_BIT_CFG(0..50)[PIN_SEL] = GPIO_PIN_SEL_E:::SATA(0..15)_ACT_LED.
+ e. For cold-presence power-on-device, select a GPIO for the output and program
+ GPIO_BIT_CFG(0..50)[PIN_SEL] = GPIO_PIN_SEL_E:::SATA(0..15)_CP_-
+ POD. */
+ /* Skipping */
+
+ /* 3. Optionally program the SGPIO unit. */
+ /* Skipping */
+
+ /* 4. Assert all resets:
+ a. UAHC reset: SATA(0..15)_UCTL_CTL[SATA_UAHC_RST] = 1
+ b. UCTL reset: SATA(0..15)_UCTL_CTL[SATA_UCTL_RST] = 1 */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.sata_uahc_rst = 1;
+ c.s.sata_uctl_rst = 1);
+ }
+
+ /* 5. Configure the ACLK:
+ a. Reset the clock dividers: SATA(0..15)_UCTL_CTL[A_CLKDIV_RST] = 1.
+ b. Select the ACLK frequency (refer to maximum values in Table 26 1).
+ i. SATA(0..15)_UCTL_CTL[A_CLKDIV_SEL] = desired value,
+ ii. SATA(0..15)_UCTL_CTL[A_CLK_EN] = 1 to enable the ACLK.
+ c. Deassert the ACLK clock divider reset:
+ SATA(0..15)_UCTL_CTL[A_CLKDIV_RST] = 0. */
+ /* Round up so the resulting ACLK never exceeds MAX_A_CLK */
+ int divisor = (bdk_clock_get_rate(node, BDK_CLOCK_SCLK) + MAX_A_CLK - 1) / MAX_A_CLK;
+ int a_clkdiv;
+ /* This screwy if logic is from the description of
+ SATAX_UCTL_CTL[a_clkdiv_sel] in the CSR */
+ if (divisor <= 4)
+ {
+ a_clkdiv = divisor - 1;
+ /* Divisor matches calculated value */
+ }
+ else if (divisor <= 6)
+ {
+ a_clkdiv = 4;
+ divisor = 6;
+ }
+ else if (divisor <= 8)
+ {
+ a_clkdiv = 5;
+ divisor = 8;
+ }
+ else if (divisor <= 16)
+ {
+ a_clkdiv = 6;
+ divisor = 16;
+ }
+ else if (divisor <= 24)
+ {
+ a_clkdiv = 7;
+ divisor = 24;
+ }
+ else
+ {
+ bdk_error("Unable to determine SATA clock divisor\n");
+ return -1;
+ }
+ /* Calculate the final clock rate */
+ int a_clk = bdk_clock_get_rate(node, BDK_CLOCK_SCLK) / divisor;
+
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.a_clkdiv_rst = 1);
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.a_clk_byp_sel = 0;
+ c.s.a_clkdiv_sel = a_clkdiv;
+ c.s.a_clk_en = 1);
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.a_clkdiv_rst = 0);
+ }
+ bdk_wait_usec(1);
+
+ /* 8. Configure PHY for SATA. Refer to Section 21.1.2. */
+ /* Done below, section 24.1.2.3 */
+
+ /* 9. TBD: Poll QLM2_MPLL_STATUS for MPLL lock */
+ /* Not needed */
+
+ /* 10. Initialize UAHC as described in the AHCI specification
+ (UAHC_* registers). */
+ /* Done when a SATA driver is initialized */
+
+ /* 24.1.2.3 SATA Configuration
+ Software must perform the following steps to configure the GSER_WEST
+ for a SATA application. Note that the GSERW steps are on a QLM basis. */
+
+ /* 1. Configure the SATA controller (refer to Chapter 26). */
+ /* This is the code above */
+
+ /* 2. Configure the QLM Reference clock.
+ Set GSER(0..13)_REFCLK_SEL[COM_CLK_SEL] = 1 to source the reference
+ clock from the external clock multiplexer.
+ Configure GSER(0..13)_REFCLK_SEL[USE_COM1]:
+ 0 = use QLMC_REF_CLK0_P/N
+ 1 = use QLMC_REF_CLK1_P/N */
+ /* Already done */
+
+ /* Make sure the PHY is in reset before we reconfig */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
+ c.s.phy_reset = 1);
+ bdk_wait_usec(1);
+
+ /* 3. Configure the QLM for SATA mode: set GSER(0..13)_CFG[SATA] = 1. */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_CFG(qlm),
+ c.u = 0;
+ c.s.sata = 1);
+
+ /* 9. Clear the appropriate lane resets:
+ GSER(0..13)_SATA_LANE_RST[Ln_RST] = 0, where n is the lane number 0-3. */
+ BDK_CSR_WRITE(node, BDK_GSERX_SATA_LANE_RST(qlm), 0);
+ /* Read back to make sure the write has posted before the delay */
+ BDK_CSR_READ(node, BDK_GSERX_SATA_LANE_RST(qlm));
+
+ /* We'll check for the SATA_PCS Ready in step 8a below */
+ /* Short 1 usec wait */
+ bdk_wait_usec(1);
+
+ /* 4. Take the PHY out of reset: write GSER(0..13)_PHY_CTL[PHY_RESET] = 0. */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
+ c.s.phy_reset = 0);
+
+ /* 4a. Poll for PHY RST_RDY indicating the PHY has initialized before
+ trying to access internal registers to reconfigure for SATA */
+ /* 8. Wait for GSER(0..13)_QLM_STAT[RST_RDY] = 1, indicating that the PHY
+ has been reconfigured and PLLs are locked. */
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_GSERX_QLM_STAT(qlm), rst_rdy, ==, 1, 10000))
+ {
+ bdk_error("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
+ return -1;
+ }
+
+ /* Workaround for errata GSER-30310: SATA HDD Not Ready due to
+ PHY SDLL/LDLL lockup at 3GHz. Two lanes share each slice. */
+ for (int slice = 0; slice < NUM_LANES / 2; slice++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_SLICEX_PCIE1_MODE(qlm, slice),
+ c.s.rx_pi_bwsel = 1;
+ c.s.rx_ldll_bwsel = 1;
+ c.s.rx_sdll_bwsel = 1);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_SLICEX_PCIE2_MODE(qlm, slice),
+ c.s.rx_pi_bwsel = 1;
+ c.s.rx_ldll_bwsel = 1;
+ c.s.rx_sdll_bwsel = 1);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_SLICEX_PCIE3_MODE(qlm, slice),
+ c.s.rx_pi_bwsel = 1;
+ c.s.rx_ldll_bwsel = 1;
+ c.s.rx_sdll_bwsel = 1);
+ }
+
+ /* 5. Change the P2 termination
+ GSERn_RX_PWR_CTRL_P2[P2_RX_SUBBLK_PD<0>] = 0 (termination) */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_RX_PWR_CTRL_P2(qlm),
+ c.s.p2_rx_subblk_pd &= 0x1e);
+
+ /* 6. Modify the electrical IDLE detect on delay: set
+ GSER(0..13)_LANE(0..3)_MISC_CFG_0[EIE_DET_STL_ON_TIME] = 0x4 */
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_MISC_CFG_0(qlm, lane),
+ c.s.eie_det_stl_on_time = 4);
+ }
+
+ /* 7. Modify the PLL and lane-protocol-mode registers to configure the
+ PHY for SATA. P0/P1/P2 correspond to the three SATA speeds. */
+ /* Errata (GSER-26724) SATA never indicates GSER QLM_STAT[RST_RDY]
+ We program PLL_PX_MODE_0 last due to this errata */
+ for (int p=0; p<3; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_PLL_PX_MODE_1(qlm, p),
+ c.s.pll_16p5en = 0x0;
+ c.s.pll_cpadj = 0x2;
+ c.s.pll_pcie3en = 0;
+ c.s.pll_opr = 0x0;
+ c.s.pll_div = 0x1e);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANE_PX_MODE_0(qlm, p),
+ c.s.ctle = 0x0;
+ c.s.pcie = 0;
+ c.s.tx_ldiv = 0x0;
+ c.s.rx_ldiv = 2 - p;
+ c.s.srate = 0;
+ c.s.tx_mode = 3;
+ c.s.rx_mode = 3);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANE_PX_MODE_1(qlm, p),
+ c.s.vma_fine_cfg_sel = 0;
+ c.s.vma_mm = 1;
+ c.s.cdr_fgain = 0xa;
+ c.s.ph_acc_adj = 0x15);
+ }
+ for (int p=0; p<3; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_PLL_PX_MODE_0(qlm, p),
+ c.s.pll_icp = 0x1;
+ c.s.pll_rloop = 0x3;
+ c.s.pll_pcs_div = 0x5);
+ }
+
+ for (int s = 0; s < NUM_LANES / 2; s++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_SLICEX_RX_SDLL_CTRL(qlm, s),
+ c.s.pcs_sds_oob_clk_ctrl = 2;
+ c.s.pcs_sds_rx_sdll_tune = 0;
+ c.s.pcs_sds_rx_sdll_swsel = 0);
+ }
+
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_MISC_CFG_0(qlm, lane),
+ c.s.use_pma_polarity = 0;
+ c.s.cfg_pcs_loopback = 0;
+ c.s.pcs_tx_mode_ovrrd_en = 0;
+ c.s.pcs_rx_mode_ovrrd_en = 0;
+ c.s.cfg_eie_det_cnt = 0;
+ c.s.eie_det_stl_on_time = 4;
+ c.s.eie_det_stl_off_time = 0;
+ c.s.tx_bit_order = 1;
+ c.s.rx_bit_order = 1);
+ }
+
+ /* 8. Wait for GSER(0..13)_QLM_STAT[RST_RDY] = 1, indicating that the PHY
+ has been reconfigured and PLLs are locked. */
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_GSERX_QLM_STAT(qlm), rst_rdy, ==, 1, 10000))
+ {
+ bdk_error("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
+ return -1;
+ }
+ /* 8a. Check that the SATA_PCS is "Ready" here, should be but check it */
+ /* Poll GSERX_SATA_STATUS for PX_RDY = 1 */
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_GSERX_SATA_STATUS(qlm), p0_rdy, ==, 1, 10000))
+ {
+ bdk_error("QLM%d: Timeout waiting for GSERX_SATA_STATUS[p0_rdy]\n", qlm);
+ return -1;
+ }
+
+ /* Add 1ms delay for everything to stabilize */
+ bdk_wait_usec(1000);
+
+ /* Apply any custom tuning */
+ /* NOTE(review): tuning is always requested as SATA_4X1 regardless of
+ how many lanes this DLM/QLM actually has — confirm intended */
+ __bdk_qlm_tune(node, qlm, BDK_QLM_MODE_SATA_4X1, baud_mhz);
+ bdk_wait_usec(1000);
+
+
+ /* 6. Deassert UCTL and UAHC resets:
+ a. SATA(0..15)_UCTL_CTL[SATA_UAHC_RST] = 0
+ b. SATA(0..15)_UCTL_CTL[SATA_UCTL_RST] = 0
+ c. Wait 10 ACLK cycles before accessing any ACLK-only registers. */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.sata_uahc_rst = 0;
+ c.s.sata_uctl_rst = 0);
+ }
+ bdk_wait_usec(1);
+
+ /* 7. Enable conditional SCLK of UCTL by writing
+ SATA(0..15)_UCTL_CTL[CSCLK_EN] = 1. */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ /* Only CN8XXX has this control; per the BDK, CN9XXX makes
+ the coprocessor clock automatic */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.cn8.csclk_en = 1);
+ }
+ }
+
+ /* Check BIST on the SATA controller. Start BIST in parallel on the
+ controllers */
+
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ /* Make sure BIST is configured properly before we start it. We
+ want full BIST, not just CLEAR */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.clear_bist = 0;
+ c.s.start_bist = 0);
+ /* Start BIST */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.start_bist = 1);
+ }
+ bdk_wait_usec(1000);
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ /* Any non-zero BIST status bit means a failure */
+ BDK_CSR_INIT(bist, node, BDK_SATAX_UCTL_BIST_STATUS(p));
+ if (bist.u)
+ bdk_error("N%d.SATA%d: Controller failed BIST (0x%llx)\n", node, p, bist.u);
+ else
+ BDK_TRACE(SATA, "N%d.SATA%d: Passed BIST\n", node, p);
+ }
+ /* Clear start_bist so it is ready for the next run */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.start_bist = 0);
+ }
+
+ /* Translate the requested baud rate into the SCTL SPD code:
+ 1 = 1.5G, 2 = 3G, 3 = 6G */
+ int spd;
+ if (baud_mhz < 3000)
+ spd = 1;
+ else if (baud_mhz < 6000)
+ spd = 2;
+ else
+ spd = 3;
+
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ /* From the synopsis data book, SATAX_UAHC_GBL_TIMER1MS is the
+ AMBA clock in MHz * 1000, which is a_clk(Hz) / 1000 */
+ BDK_TRACE(QLM, "QLM%d: SATA%d set to %d Hz\n", qlm, p, a_clk);
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_TIMER1MS(p),
+ c.s.timv = a_clk / 1000);
+ /* Set speed */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_SCTL(p),
+ c.s.ipm = 3; /* Disable partial and slumber power management */
+ c.s.spd = spd);
+ /* The following SATA setup is from the AHCI 1.3 spec, section
+ 10.1.1, Firmware Specific Initialization. */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_CAP(p),
+ c.s.sss = 1; /* Support staggered spin-up */
+ c.s.smps = 1); /* Support mechanical presence switch */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_PI(p),
+ c.s.pi = 1); /* One port per controller */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_CMD(p),
+ c.s.hpcp = 1; /* Hot-plug-capable support */
+ c.s.mpsp = 1; /* Mechanical presence switch attached to port */
+ c.s.cpd = 1); /* Cold-presence detection */
+ }
+ return 0;
+}
+
+/**
+ * Initialize a DLM/QLM for use with SATA controllers
+ *
+ * @param node      Node to initialize
+ * @param qlm Which DLM/QLM to init
+ * @param baud_mhz QLM speed in Gbaud
+ * @param sata_first First SATA controller connected to this DLM/QLM
+ * @param sata_last Last SATA controller connected to this DLM/QLM (inclusive)
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_set_sata_cn9xxx(bdk_node_t node, int qlm, int baud_mhz, int sata_first, int sata_last)
+{
+ //const int NUM_LANES = sata_last - sata_first + 1;
+ const int MAX_A_CLK = 333000000; /* Max of 333Mhz */
+
+ /* 26.4.1 Cold Reset */
+ /* 1. Ensure that the SerDes reference clock is up and stable. */
+ /* Already done */
+
+ /* 2. Optionally program the GPIO CSRs for SATA features.
+ a. For cold-presence detect, select a GPIO for the input and program GPI-
+ O_BIT_CFG(0..50)[PIN_SEL] = GPIO_PIN_SEL_E::SATA(0..15)_CP_DET.
+ b. For mechanical-presence detect, select a GPIO for the input and program
+ GPIO_BIT_CFG(0..50)[PIN_SEL] = GPI-
+ O_PIN_SEL_E::SATA(0..15)_MP_SWITCH.
+ c. For BIST board-test loopback, select a GPIO for the input and program GPI-
+ O_BIT_CFG(0..50)[PIN_SEL] = GPIO_PIN_SEL_E:::SATA_LAB_LB.
+ d. For LED activity, select a GPIO for the output and program GPI-
+ O_BIT_CFG(0..50)[PIN_SEL] = GPIO_PIN_SEL_E:::SATA(0..15)_ACT_LED.
+ e. For cold-presence power-on-device, select a GPIO for the output and program
+ GPIO_BIT_CFG(0..50)[PIN_SEL] = GPIO_PIN_SEL_E:::SATA(0..15)_CP_-
+ POD. */
+ /* Skipping */
+
+ /* 3. Optionally program the SGPIO unit. */
+ /* Skipping */
+
+ /* 4. Assert all resets:
+ a. UAHC reset: SATA(0..15)_UCTL_CTL[SATA_UAHC_RST] = 1
+ b. UCTL reset: SATA(0..15)_UCTL_CTL[SATA_UCTL_RST] = 1 */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.sata_uahc_rst = 1;
+ c.s.sata_uctl_rst = 1);
+ }
+
+ /* 5. Configure the ACLK:
+ a. Reset the clock dividers: SATA(0..15)_UCTL_CTL[A_CLKDIV_RST] = 1.
+ b. Select the ACLK frequency (refer to maximum values in Table 26 1).
+ i. SATA(0..15)_UCTL_CTL[A_CLKDIV_SEL] = desired value,
+ ii. SATA(0..15)_UCTL_CTL[A_CLK_EN] = 1 to enable the ACLK.
+ c. Deassert the ACLK clock divider reset:
+ SATA(0..15)_UCTL_CTL[A_CLKDIV_RST] = 0. */
+ /* Round up so the resulting ACLK never exceeds MAX_A_CLK */
+ int divisor = (bdk_clock_get_rate(node, BDK_CLOCK_SCLK) + MAX_A_CLK - 1) / MAX_A_CLK;
+ int a_clkdiv;
+ /* This screwy if logic is from the description of
+ SATAX_UCTL_CTL[a_clkdiv_sel] in the CSR */
+ if (divisor <= 4)
+ {
+ a_clkdiv = divisor - 1;
+ /* Divisor matches calculated value */
+ }
+ else if (divisor <= 6)
+ {
+ a_clkdiv = 4;
+ divisor = 6;
+ }
+ else if (divisor <= 8)
+ {
+ a_clkdiv = 5;
+ divisor = 8;
+ }
+ else if (divisor <= 16)
+ {
+ a_clkdiv = 6;
+ divisor = 16;
+ }
+ else if (divisor <= 24)
+ {
+ a_clkdiv = 7;
+ divisor = 24;
+ }
+ else
+ {
+ bdk_error("Unable to determine SATA clock divisor\n");
+ return -1;
+ }
+ /* Calculate the final clock rate */
+ int a_clk = bdk_clock_get_rate(node, BDK_CLOCK_SCLK) / divisor;
+
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.a_clkdiv_rst = 1);
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.a_clk_byp_sel = 0;
+ c.s.a_clkdiv_sel = a_clkdiv;
+ c.s.a_clk_en = 1);
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.a_clkdiv_rst = 0);
+ }
+ bdk_wait_usec(1);
+
+ /* 8. Configure PHY for SATA. Refer to Section 21.1.2. */
+ /* Done below, section 24.1.2.3 */
+
+ /* 9. TBD: Poll QLM2_MPLL_STATUS for MPLL lock */
+ /* Not needed */
+
+ /* 10. Initialize UAHC as described in the AHCI specification
+ (UAHC_* registers). */
+ /* Done when a SATA driver is initialized */
+
+ /* 24.1.2.3 SATA Configuration
+ Software must perform the following steps to configure the GSER_WEST
+ for a SATA application. Note that the GSERW steps are on a QLM basis. */
+
+ /* 1. Configure the SATA controller (refer to Chapter 26). */
+ /* This is the code above */
+
+ /* 2. Configure the QLM Reference clock.
+ Set GSER(0..13)_REFCLK_SEL[COM_CLK_SEL] = 1 to source the reference
+ clock from the external clock multiplexer.
+ Configure GSER(0..13)_REFCLK_SEL[USE_COM1]:
+ 0 = use QLMC_REF_CLK0_P/N
+ 1 = use QLMC_REF_CLK1_P/N */
+ /* Already done */
+
+ /* GSERN (CN9XXX SerDes) programming is not implemented yet, so the
+ PHY side of this bring-up is missing */
+ // FIXME: GSERN setup
+
+ /* 6. Deassert UCTL and UAHC resets:
+ a. SATA(0..15)_UCTL_CTL[SATA_UAHC_RST] = 0
+ b. SATA(0..15)_UCTL_CTL[SATA_UCTL_RST] = 0
+ c. Wait 10 ACLK cycles before accessing any ACLK-only registers. */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.sata_uahc_rst = 0;
+ c.s.sata_uctl_rst = 0);
+ }
+ bdk_wait_usec(1);
+
+ /* 7. Enable conditional SCLK of UCTL by writing
+ SATA(0..15)_UCTL_CTL[CSCLK_EN] = 1. */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ {
+ /* Only CN8XXX has this control; per the BDK, CN9XXX makes
+ the coprocessor clock automatic */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.cn8.csclk_en = 1);
+ }
+ }
+
+ /* Check BIST on the SATA controller. Start BIST in parallel on the
+ controllers */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ /* Make sure BIST is configured properly before we start it. We
+ want full BIST, not just CLEAR */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.clear_bist = 0;
+ c.s.start_bist = 0);
+ /* Start BIST */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.start_bist = 1);
+ }
+ bdk_wait_usec(1000);
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ /* Any non-zero BIST status bit means a failure */
+ BDK_CSR_INIT(bist, node, BDK_SATAX_UCTL_BIST_STATUS(p));
+ if (bist.u)
+ bdk_error("N%d.SATA%d: Controller failed BIST (0x%llx)\n", node, p, bist.u);
+ else
+ BDK_TRACE(SATA, "N%d.SATA%d: Passed BIST\n", node, p);
+ }
+ /* Clear start_bist so it is ready for the next run */
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UCTL_CTL(p),
+ c.s.start_bist = 0);
+ }
+
+ /* Translate the requested baud rate into the SCTL SPD code:
+ 1 = 1.5G, 2 = 3G, 3 = 6G */
+ int spd;
+ if (baud_mhz < 3000)
+ spd = 1;
+ else if (baud_mhz < 6000)
+ spd = 2;
+ else
+ spd = 3;
+
+ for (int p = sata_first; p <= sata_last; p++)
+ {
+ /* From the synopsis data book, SATAX_UAHC_GBL_TIMER1MS is the
+ AMBA clock in MHz * 1000, which is a_clk(Hz) / 1000 */
+ BDK_TRACE(QLM, "QLM%d: SATA%d set to %d Hz\n", qlm, p, a_clk);
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_TIMER1MS(p),
+ c.s.timv = a_clk / 1000);
+ /* Set speed */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_SCTL(p),
+ c.s.ipm = 3; /* Disable partial and slumber power management */
+ c.s.spd = spd);
+ /* The following SATA setup is from the AHCI 1.3 spec, section
+ 10.1.1, Firmware Specific Initialization. */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_CAP(p),
+ c.s.sss = 1; /* Support staggered spin-up */
+ c.s.smps = 1); /* Support mechanical presence switch */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_GBL_PI(p),
+ c.s.pi = 1); /* One port per controller */
+ BDK_CSR_MODIFY(c, node, BDK_SATAX_UAHC_P0_CMD(p),
+ c.s.hpcp = 1; /* Hot-plug-capable support */
+ c.s.mpsp = 1; /* Mechanical presence switch attached to port */
+ c.s.cpd = 1); /* Cold-presence detection */
+ }
+ return 0;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-common.c b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-common.c
new file mode 100644
index 0000000000..2b3390228a
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-common.c
@@ -0,0 +1,1636 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-hal/if/bdk-if.h"
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-hal/qlm/bdk-qlm-common.h"
+#include "libbdk-arch/bdk-csrs-gser.h"
+#include "libbdk-arch/bdk-csrs-pem.h"
+#include "libbdk-hal/bdk-config.h"
+#include "libbdk-hal/bdk-utils.h"
+#include "libbdk-hal/bdk-twsi.h"
+
+/* Accumulated PRBS error counts, indexed by QLM number and lane (sized for
+   up to 14 QLMs of 4 lanes each). Cleared when PRBS is enabled or disabled
+   and accumulated by __bdk_qlm_get_prbs_errors(), so totals survive
+   multiple hardware counter reads */
+static uint64_t prbs_errors[14][4];
+
+/* Report an unusable reference clock for the requested mode/speed. Always
+   returns -1 so callers can "return __bdk_qlm_bad_ref_clk(...)". Factored
+   out because every speed bracket reports the identical message */
+static int __bdk_qlm_bad_ref_clk(const char *mode_name, int qlm, int ref_clk, int baud_mhz)
+{
+    bdk_error("Invalid reference clock for %s on QLM%d with speed %d, ref %d Mhz\n", mode_name, qlm, baud_mhz, ref_clk / 1000000);
+    return -1;
+}
+
+/**
+ * Figure out which lane mode to use for a given reference clock and GBaud
+ *
+ * @param mode_name String name for error messages
+ * @param qlm       QLM being configured
+ * @param ref_clk   Reference clock in hertz
+ * @param baud_mhz  Baud rate in Mhz
+ *
+ * @return Lane mode (BDK_GSER_LMODE_E_*) or -1 on failure
+ */
+int __bdk_qlm_get_lane_mode_for_speed_and_ref_clk(const char *mode_name, int qlm, int ref_clk, int baud_mhz)
+{
+    /* Note: a trailing "Invalid speed" error in the original was
+       unreachable because every bracket below returns; it was removed */
+    if (baud_mhz <= 1250)
+    {
+        /* 1.25G SGMII rate. The 156.25Mhz lane mode entry is also used
+           when running from a 100Mhz reference; see
+           __bdk_qlm_init_mode_table() which adjusts the PLL divider for
+           that case */
+        if ((ref_clk == REF_156MHZ) || (ref_clk == REF_100MHZ))
+            return BDK_GSER_LMODE_E_R_125G_REFCLK15625_SGMII;
+        return __bdk_qlm_bad_ref_clk(mode_name, qlm, ref_clk, baud_mhz);
+    }
+    if (baud_mhz <= 2500)
+    {
+        if (ref_clk == REF_100MHZ)
+            return BDK_GSER_LMODE_E_R_25G_REFCLK100;
+        if (ref_clk == REF_125MHZ)
+            return BDK_GSER_LMODE_E_R_25G_REFCLK125;
+        return __bdk_qlm_bad_ref_clk(mode_name, qlm, ref_clk, baud_mhz);
+    }
+    if (baud_mhz <= 3125)
+    {
+        if (ref_clk == REF_156MHZ)
+            return BDK_GSER_LMODE_E_R_3125G_REFCLK15625_XAUI;
+        return __bdk_qlm_bad_ref_clk(mode_name, qlm, ref_clk, baud_mhz);
+    }
+    if (baud_mhz <= 5000)
+    {
+        /* Unlike the other brackets, any reference other than 100/125Mhz
+           is assumed to be 156.25Mhz (QSGMII); no error is reported */
+        if (ref_clk == REF_100MHZ)
+            return BDK_GSER_LMODE_E_R_5G_REFCLK100;
+        if (ref_clk == REF_125MHZ)
+            return BDK_GSER_LMODE_E_R_5G_REFCLK125;
+        return BDK_GSER_LMODE_E_R_5G_REFCLK15625_QSGMII;
+    }
+    if (baud_mhz <= 6250)
+    {
+        if (ref_clk == REF_156MHZ)
+            return BDK_GSER_LMODE_E_R_625G_REFCLK15625_RXAUI;
+        return __bdk_qlm_bad_ref_clk(mode_name, qlm, ref_clk, baud_mhz);
+    }
+    if (baud_mhz <= 8000)
+    {
+        if (ref_clk == REF_100MHZ)
+            return BDK_GSER_LMODE_E_R_8G_REFCLK100;
+        if (ref_clk == REF_125MHZ)
+            return BDK_GSER_LMODE_E_R_8G_REFCLK125;
+        return __bdk_qlm_bad_ref_clk(mode_name, qlm, ref_clk, baud_mhz);
+    }
+    /* Baud 10312.5 (10G KR/XFI) */
+    if (ref_clk == REF_156MHZ)
+        return BDK_GSER_LMODE_E_R_103125G_REFCLK15625_KR;
+    return __bdk_qlm_bad_ref_clk(mode_name, qlm, ref_clk, baud_mhz);
+}
+
+/**
+ * Setup the PEM to either drive or receive reset from PRST based on RC or EP
+ *
+ * Note: BDK_CSR_MODIFY(c, ...) is a BDK macro; the trailing argument is a
+ * statement list applied to the temporary "c" before write-back.
+ *
+ * @param node Node to use in a Numa setup
+ * @param pem Which PEM to setup
+ * @param is_endpoint
+ * Non zero if PEM is a EP (endpoint); zero for root complex
+ */
+void __bdk_qlm_setup_pem_reset(bdk_node_t node, int pem, int is_endpoint)
+{
+ /* Make sure is_endpoint is either 0 or 1 */
+ is_endpoint = (is_endpoint != 0);
+ BDK_CSR_MODIFY(c, node, BDK_RST_CTLX(pem),
+ c.s.prst_link = 0; /* Link down doesn't automatically assert PERST */
+ c.s.rst_link = is_endpoint; /* Link down automatically assert soft reset for EP */
+ c.s.rst_drv = !is_endpoint; /* PERST is output for RC, input for EP */
+ c.s.rst_rcv = is_endpoint; /* Only read PERST in EP mode */
+ c.s.rst_chip = 0); /* PERST doesn't pull CHIP_RESET */
+
+ if (is_endpoint)
+ {
+ /* If we're configuring an endpoint manually the PEM will not
+ be turned on by default by the hardware. Turn it on now */
+ BDK_CSR_INIT(pemx_on, node, BDK_PEMX_ON(pem));
+ if (!pemx_on.s.pemon)
+ {
+ /* Ungate the PEM clocks before enabling the PEM itself */
+ BDK_CSR_MODIFY(c, node, BDK_PEMX_CLK_EN(pem),
+ c.cn83xx.pceclk_gate = 0;
+ c.cn83xx.csclk_gate = 0);
+ BDK_CSR_MODIFY(c, node, BDK_PEMX_ON(pem),
+ c.s.pemon = 1);
+ }
+ }
+}
+
+/**
+ * Measure the reference clock of a QLM
+ *
+ * Counts GSER reference clock events over a fixed 20ms window and scales
+ * by the time-clock rate to get Hz.
+ *
+ * @param node Node the QLM is on in a numa setup
+ * @param qlm QLM to measure
+ *
+ * @return Clock rate in Hz
+ */
+int __bdk_qlm_measure_refclock(bdk_node_t node, int qlm)
+{
+ /* Clear the counter */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_REFCLK_EVT_CTRL(qlm),
+ c.s.enb = 0;
+ c.s.clr = 1);
+ bdk_wait_usec(1); /* Give counter a chance to clear */
+ if (BDK_CSR_READ(node, BDK_GSERX_REFCLK_EVT_CNTR(qlm)))
+ bdk_error("GSER%d: Ref clock counter not zero\n", qlm);
+ /* Start counting */
+ uint64_t start = bdk_clock_get_count(BDK_CLOCK_TIME);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_REFCLK_EVT_CTRL(qlm),
+ c.s.enb = 1;
+ c.s.clr = 0);
+ /* Wait for a short time to get a number of counts */
+ bdk_wait_usec(20000); /* 20ms */
+ /* Stop counting */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_REFCLK_EVT_CTRL(qlm),
+ c.s.enb = 0);
+ uint64_t stop = bdk_clock_get_count(BDK_CLOCK_TIME);
+ bdk_wait_usec(1); /* Give counter a chance to stabalize */
+
+ /* Calculate the rate: events * time_clock_rate / elapsed_time_clocks.
+ NOTE(review): the rate is queried for bdk_numa_local() rather than
+ "node" - presumably BDK_CLOCK_TIME runs at the same rate on every
+ node; confirm for multi-node setups */
+ uint64_t count = BDK_CSR_READ(node, BDK_GSERX_REFCLK_EVT_CNTR(qlm));
+ count *= bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME);
+ count /= stop - start;
+ return count;
+}
+
+/**
+ * Put a QLM into hardware reset
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ *
+ * @return Zero on success, negative on failure (this implementation
+ * always returns zero)
+ */
+int __bdk_qlm_reset(bdk_node_t node, int qlm)
+{
+ /* Assert the PHY reset; it is deasserted elsewhere, e.g.
+ __bdk_qlm_enable_prbs() clears phy_reset before using the lanes */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
+ c.s.phy_reset = 1);
+ return 0;
+}
+
+/**
+ * Enable PRBS on a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ * @param prbs PRBS mode (31, etc). Values not in the known list are
+ * treated as raw: the low 8 bits become the LBERT mode and the
+ * upper bits are loaded as a custom pattern
+ * @param dir Directions to enable. This is so you can enable TX and later
+ * enable RX after TX has run for a time
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_enable_prbs(bdk_node_t node, int qlm, int prbs, bdk_qlm_direction_t dir)
+{
+ const int NUM_LANES = bdk_qlm_get_lanes(node, qlm);
+ int mode;
+ /* Map the requested PRBS polynomial to the LBERT mode encoding */
+ switch (prbs)
+ {
+ case 31:
+ mode = 1;
+ break;
+ case 23:
+ mode = 2; /* Or 3? */
+ break;
+ case 16:
+ mode = 4;
+ break;
+ case 15:
+ mode = 5;
+ break;
+ case 11:
+ mode = 6;
+ break;
+ case 7:
+ mode = 7;
+ break;
+ default:
+ /* Raw encoding: low byte is the mode, the rest a fixed pattern */
+ mode = prbs & 0xff;
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ BDK_CSR_WRITE(node, BDK_GSERX_LANEX_LBERT_PAT_CFG(qlm, lane), prbs >> 8);
+ BDK_TRACE(QLM, "Using mode 0x%x with custom pattern 0x%x\n", mode, prbs >> 8);
+ break;
+ }
+
+ /* For some reason PRBS doesn't work if GSER is configured for PCIe.
+ Disconnect PCIe when we start PRBS */
+ BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(qlm));
+ if (gserx_cfg.s.pcie)
+ {
+ gserx_cfg.s.pcie = 0;
+ BDK_CSR_WRITE(node, BDK_GSERX_CFG(qlm), gserx_cfg.u);
+ bdk_warn("N%d.QLM%d: Disabling PCIe for PRBS/pattern generation\n", node, qlm);
+ }
+ /* For some reason PRBS doesn't work if GSER is configured for SATA.
+ Disconnect SATA when we start PRBS */
+ if (gserx_cfg.s.sata)
+ {
+ gserx_cfg.s.sata = 0;
+ BDK_CSR_WRITE(node, BDK_GSERX_CFG(qlm), gserx_cfg.u);
+ bdk_warn("N%d.QLM%d: Disabling SATA for PRBS/pattern generation\n", node, qlm);
+ bdk_warn("N%d.QLM%d: SATA PRBS/patterns always run at 6G\n", node, qlm);
+ }
+
+ /* Release the PHY reset so the LBERT logic can run */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_PHY_CTL(qlm),
+ c.s.phy_reset = 0);
+
+ if (dir & BDK_QLM_DIRECTION_TX)
+ {
+ /* Disable first in case already running */
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_LBERT_CFG(qlm, lane),
+ c.s.lbert_pg_en = 0);
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_LBERT_CFG(qlm, lane),
+ c.s.lbert_pg_en = 1; /* Enable generator */
+ c.s.lbert_pg_width = 3; /* 20 bit */
+ c.s.lbert_pg_mode = mode);
+ }
+
+ if (dir & BDK_QLM_DIRECTION_RX)
+ {
+ /* Clear the error counter and Disable the matcher */
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ {
+ prbs_errors[qlm][lane] = 0; /* Also reset the software accumulator */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_LBERT_CFG(qlm, lane),
+ c.s.lbert_pm_en = 0);
+ }
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_LBERT_CFG(qlm, lane),
+ c.s.lbert_pm_en = 1; /* Enable matcher */
+ c.s.lbert_pm_width = 3; /* 20 bit */
+ c.s.lbert_pm_mode = mode);
+ }
+ /* Tell the matcher to start sync */
+ for (int retry=0; retry < 4; retry++)
+ {
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ {
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_LBERT_CFG(qlm, lane),
+ c.s.lbert_pm_sync_start = 1);
+ }
+ /* Wait 10ms */
+ bdk_wait_usec(10000);
+ }
+ }
+ return 0;
+}
+
+/**
+ * Disable PRBS on a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ *
+ * @return Zero on success, negative on failure (fails when the QLM is
+ * still held in PHY reset, so the LBERT config can't be touched)
+ */
+int __bdk_qlm_disable_prbs(bdk_node_t node, int qlm)
+{
+ const int NUM_LANES = bdk_qlm_get_lanes(node, qlm);
+ BDK_CSR_INIT(phy_ctl, node, BDK_GSERX_PHY_CTL(qlm));
+ if (phy_ctl.s.phy_reset)
+ return -1;
+
+ for (int lane = 0; lane < NUM_LANES; lane++)
+ {
+ /* Clear the software error accumulator along with the hardware
+ generator and matcher enables */
+ prbs_errors[qlm][lane] = 0;
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_LBERT_CFG(qlm, lane),
+ c.s.lbert_pg_en = 0;
+ c.s.lbert_pm_en = 0);
+ }
+ return 0;
+}
+
+/**
+ * Return the number of PRBS errors since PRBS started running
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param lane Which lane
+ * @param clear Clear counter after return the current value
+ *
+ * @return Number of errors
+ */
+uint64_t __bdk_qlm_get_prbs_errors(bdk_node_t node, int qlm, int lane, int clear)
+{
+ /* Restart synchronization */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_LBERT_CFG(qlm, lane),
+ c.s.lbert_pm_sync_start = 1);
+ /* This CSR is self clearing per the CSR description, but it doesn't
+ seem to do that. Instead it clears when we trigger sync again */
+ BDK_CSR_INIT(rx, node, BDK_GSERX_LANEX_LBERT_ECNT(qlm, lane));
+ uint64_t errors = rx.s.lbert_err_cnt;
+ /* When the overflow bit is set the hardware count is scaled by 128 */
+ if (rx.s.lbert_err_ovbit14)
+ errors <<= 7;
+ /* Accumulate into the software counter so totals survive multiple
+ hardware reads */
+ prbs_errors[qlm][lane] += errors;
+ uint64_t result = prbs_errors[qlm][lane];
+ if (clear)
+ prbs_errors[qlm][lane] = 0;
+ return result;
+}
+
+/**
+ * Inject an error into PRBS
+ *
+ * Requests the pattern generator to insert a single error into the
+ * outgoing stream, which the far-end matcher should then count.
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param lane Which lane
+ */
+void __bdk_qlm_inject_prbs_error(bdk_node_t node, int qlm, int lane)
+{
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_LBERT_CFG(qlm, lane),
+ c.s.lbert_pg_err_insert = 1);
+}
+
+/**
+ * Enable shallow loopback on a QLM
+ *
+ * This chip family has no shallow loopback support, so every request is
+ * rejected regardless of the arguments.
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ * @param loop Type of loopback. Not all QLMs support all modes
+ *
+ * @return Zero on success, negative on failure (always fails here)
+ */
+int __bdk_qlm_enable_loop(bdk_node_t node, int qlm, bdk_qlm_loop_t loop)
+{
+    (void)node; /* Unused: nothing to configure */
+    (void)qlm;
+    (void)loop;
+    bdk_error("Chip doesn't support shallow QLM loopback\n");
+    return -1;
+}
+
+/**
+ * Initialize the QLM mode table
+ *
+ * Programs the per-lane-mode PLL and lane parameter tables
+ * (GSERX_PLL_P*_MODE_* / GSERX_LANE_P*_MODE_*) for every lane mode of one
+ * QLM. PCIe modes are skipped because their hardware defaults are correct.
+ *
+ * @param node Node to initialize
+ * @param qlm Which QLM
+ * @param ref_clk Reference clock of the QLM in Hz
+ */
+void __bdk_qlm_init_mode_table(bdk_node_t node, int qlm, int ref_clk)
+{
+ /* The QLM PLLs are controlled by an array of parameters indexed
+ by the QLM mode for each QLM. We need to fill in these tables.
+ Also each lane has some mode parameters, again in a array index
+ by the lane_mode */
+ for (int lane_mode = 0; lane_mode < 12; lane_mode++)
+ {
+ /* The values used below are all from
+ http://mawiki.caveonetworks.com/wiki/78xx/GSER_WEST */
+ /* Read-modify-write: start from the current CSR values so fields
+ not assigned below keep their existing settings */
+ BDK_CSR_INIT(pll_mode_0 , node, BDK_GSERX_PLL_PX_MODE_0(qlm, lane_mode));
+ BDK_CSR_INIT(pll_mode_1 , node, BDK_GSERX_PLL_PX_MODE_1(qlm, lane_mode));
+ BDK_CSR_INIT(lane_mode_0, node, BDK_GSERX_LANE_PX_MODE_0(qlm, lane_mode));
+ BDK_CSR_INIT(lane_mode_1, node, BDK_GSERX_LANE_PX_MODE_1(qlm, lane_mode));
+ switch (lane_mode)
+ {
+ case BDK_GSER_LMODE_E_R_25G_REFCLK100:
+ case BDK_GSER_LMODE_E_R_5G_REFCLK100:
+ case BDK_GSER_LMODE_E_R_8G_REFCLK100:
+ /* These modes are used for PCIe where the defaults are
+ correct. Skip programming these */
+ continue;
+ case BDK_GSER_LMODE_E_R_125G_REFCLK15625_KX:
+ pll_mode_0.s.pll_icp = 0x1;
+ pll_mode_0.s.pll_rloop = 0x3;
+ pll_mode_0.s.pll_pcs_div = 0x28;
+
+ pll_mode_1.s.pll_16p5en = 0x1;
+ pll_mode_1.s.pll_cpadj = 0x3;
+ pll_mode_1.s.pll_pcie3en = 0x0;
+ pll_mode_1.s.pll_opr = 0x0;
+ pll_mode_1.s.pll_div = 0x10;
+
+ lane_mode_0.s.ctle = 0x0;
+ lane_mode_0.s.pcie = 0x0;
+ lane_mode_0.s.tx_ldiv = 0x2;
+ lane_mode_0.s.rx_ldiv = 0x2;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x3;
+ lane_mode_0.s.rx_mode = 0x3;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x0;
+ lane_mode_1.s.vma_mm = 0x1;
+ lane_mode_1.s.cdr_fgain = 0xc;
+ lane_mode_1.s.ph_acc_adj = 0x1e;
+ break;
+ case BDK_GSER_LMODE_E_R_3125G_REFCLK15625_XAUI:
+ pll_mode_0.s.pll_icp = 0x1;
+ pll_mode_0.s.pll_rloop = 0x3;
+ pll_mode_0.s.pll_pcs_div = 0x14;
+
+ pll_mode_1.s.pll_16p5en = 0x1;
+ pll_mode_1.s.pll_cpadj = 0x2;
+ pll_mode_1.s.pll_pcie3en = 0x0;
+ pll_mode_1.s.pll_opr = 0x0;
+ pll_mode_1.s.pll_div = 0x14;
+
+ lane_mode_0.s.ctle = 0x0;
+ lane_mode_0.s.pcie = 0x0;
+ lane_mode_0.s.tx_ldiv = 0x1;
+ lane_mode_0.s.rx_ldiv = 0x1;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x3;
+ lane_mode_0.s.rx_mode = 0x3;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x0;
+ lane_mode_1.s.vma_mm = 0x1;
+ lane_mode_1.s.cdr_fgain = 0xc;
+ lane_mode_1.s.ph_acc_adj = 0x1e;
+ break;
+ case BDK_GSER_LMODE_E_R_103125G_REFCLK15625_KR:
+ pll_mode_0.s.pll_icp = 0x1;
+ pll_mode_0.s.pll_rloop = 0x5;
+ pll_mode_0.s.pll_pcs_div = 0xa;
+
+ pll_mode_1.s.pll_16p5en = 0x1;
+ pll_mode_1.s.pll_cpadj = 0x2;
+ pll_mode_1.s.pll_pcie3en = 0x0;
+ pll_mode_1.s.pll_opr = 0x1;
+ pll_mode_1.s.pll_div = 0x21;
+
+ lane_mode_0.s.ctle = 0x3;
+ lane_mode_0.s.pcie = 0x0;
+ lane_mode_0.s.tx_ldiv = 0x0;
+ lane_mode_0.s.rx_ldiv = 0x0;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x3;
+ lane_mode_0.s.rx_mode = 0x3;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x1;
+ lane_mode_1.s.vma_mm = 0x0;
+ lane_mode_1.s.cdr_fgain = 0xa;
+ lane_mode_1.s.ph_acc_adj = 0xf;
+ break;
+ case BDK_GSER_LMODE_E_R_125G_REFCLK15625_SGMII:
+ pll_mode_0.s.pll_icp = 0x1;
+ pll_mode_0.s.pll_rloop = 0x3;
+ pll_mode_0.s.pll_pcs_div = 0x28;
+
+ pll_mode_1.s.pll_16p5en = 0x1;
+ pll_mode_1.s.pll_cpadj = 0x3;
+ pll_mode_1.s.pll_pcie3en = 0x0;
+ pll_mode_1.s.pll_opr = 0x0;
+ pll_mode_1.s.pll_div = 0x10;
+
+ lane_mode_0.s.ctle = 0x0;
+ lane_mode_0.s.pcie = 0x0;
+ lane_mode_0.s.tx_ldiv = 0x2;
+ lane_mode_0.s.rx_ldiv = 0x2;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x3;
+ lane_mode_0.s.rx_mode = 0x3;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x0;
+ lane_mode_1.s.vma_mm = 0x1;
+ lane_mode_1.s.cdr_fgain = 0xc;
+ lane_mode_1.s.ph_acc_adj = 0x1e;
+ /* SGMII from a 100Mhz reference needs different PLL dividers
+ than the default 156.25Mhz table entry */
+ if(ref_clk == REF_100MHZ)
+ {
+ pll_mode_0.s.pll_pcs_div = 0x28;
+ pll_mode_1.s.pll_div = 0x19;
+ pll_mode_1.s.pll_cpadj = 0x2;
+ }
+ break;
+ case BDK_GSER_LMODE_E_R_5G_REFCLK15625_QSGMII:
+ pll_mode_0.s.pll_icp = 0x1; /* Per Scott McIlhenny 5/17/2016 (t81) */
+ pll_mode_0.s.pll_rloop = 0x3;
+ pll_mode_0.s.pll_pcs_div = 0xa;
+
+ pll_mode_1.s.pll_16p5en = 0x0;
+ pll_mode_1.s.pll_cpadj = 0x2;
+ pll_mode_1.s.pll_pcie3en = 0x0;
+ pll_mode_1.s.pll_opr = 0x0;
+ /* QSGMII is a special case. We use the same table entry for
+ 100Mhz and 125Mhz clocks as the normal 156Mhz */
+ switch (ref_clk)
+ {
+ case REF_100MHZ:
+ pll_mode_1.s.pll_div = 0x19;
+ break;
+ case REF_125MHZ:
+ pll_mode_1.s.pll_div = 0x14;
+ break;
+ default: /* REF_156MHZ */
+ pll_mode_1.s.pll_div = 0x10;
+ break;
+ }
+
+ lane_mode_0.s.ctle = 0x0;
+ lane_mode_0.s.pcie = 0x0;
+ lane_mode_0.s.tx_ldiv = 0x0;
+ lane_mode_0.s.rx_ldiv = 0x0;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x3;
+ lane_mode_0.s.rx_mode = 0x3;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x0;
+ lane_mode_1.s.vma_mm = 0x1; /* Per Scott McIlhenny 5/17/2016 (t81) */
+ lane_mode_1.s.cdr_fgain = 0xc;
+ lane_mode_1.s.ph_acc_adj = 0x1e;
+ break;
+ case BDK_GSER_LMODE_E_R_625G_REFCLK15625_RXAUI:
+ pll_mode_0.s.pll_icp = 0x1;
+ pll_mode_0.s.pll_rloop = 0x3;
+ pll_mode_0.s.pll_pcs_div = 0xa;
+
+ pll_mode_1.s.pll_16p5en = 0x0;
+ pll_mode_1.s.pll_cpadj = 0x2;
+ pll_mode_1.s.pll_pcie3en = 0x0;
+ pll_mode_1.s.pll_opr = 0x0;
+ pll_mode_1.s.pll_div = 0x14;
+
+ lane_mode_0.s.ctle = 0x0;
+ lane_mode_0.s.pcie = 0x0;
+ lane_mode_0.s.tx_ldiv = 0x0;
+ lane_mode_0.s.rx_ldiv = 0x0;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x3;
+ lane_mode_0.s.rx_mode = 0x3;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x0;
+ lane_mode_1.s.vma_mm = 0x0;
+ lane_mode_1.s.cdr_fgain = 0xa;
+ lane_mode_1.s.ph_acc_adj = 0x14;
+ break;
+ case BDK_GSER_LMODE_E_R_25G_REFCLK125:
+ pll_mode_0.s.pll_icp = 0x3;
+ pll_mode_0.s.pll_rloop = 0x3;
+ pll_mode_0.s.pll_pcs_div = 0x5;
+
+ pll_mode_1.s.pll_16p5en = 0x0;
+ pll_mode_1.s.pll_cpadj = 0x1;
+ pll_mode_1.s.pll_pcie3en = 0x0;
+ pll_mode_1.s.pll_opr = 0x0;
+ pll_mode_1.s.pll_div = 0x14;
+
+ lane_mode_0.s.ctle = 0x0;
+ lane_mode_0.s.pcie = 0x1;
+ lane_mode_0.s.tx_ldiv = 0x1;
+ lane_mode_0.s.rx_ldiv = 0x1;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x3;
+ lane_mode_0.s.rx_mode = 0x3;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x0;
+ lane_mode_1.s.vma_mm = 0x1;
+ lane_mode_1.s.cdr_fgain = 0xa;
+ lane_mode_1.s.ph_acc_adj = 0x14;
+ break;
+ case BDK_GSER_LMODE_E_R_5G_REFCLK125:
+ pll_mode_0.s.pll_icp = 0x3;
+ pll_mode_0.s.pll_rloop = 0x3;
+ pll_mode_0.s.pll_pcs_div = 0xa;
+
+ pll_mode_1.s.pll_16p5en = 0x0;
+ pll_mode_1.s.pll_cpadj = 0x1;
+ pll_mode_1.s.pll_pcie3en = 0x0;
+ pll_mode_1.s.pll_opr = 0x0;
+ pll_mode_1.s.pll_div = 0x14;
+
+ lane_mode_0.s.ctle = 0x0;
+ lane_mode_0.s.pcie = 0x1;
+ lane_mode_0.s.tx_ldiv = 0x0;
+ lane_mode_0.s.rx_ldiv = 0x0;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x3;
+ lane_mode_0.s.rx_mode = 0x3;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x0;
+ lane_mode_1.s.vma_mm = 0x0;
+ lane_mode_1.s.cdr_fgain = 0xa;
+ lane_mode_1.s.ph_acc_adj = 0x14;
+ break;
+ case BDK_GSER_LMODE_E_R_8G_REFCLK125:
+ pll_mode_0.s.pll_icp = 0x2;
+ pll_mode_0.s.pll_rloop = 0x5;
+ pll_mode_0.s.pll_pcs_div = 0xa;
+
+ pll_mode_1.s.pll_16p5en = 0x0;
+ pll_mode_1.s.pll_cpadj = 0x1;
+ pll_mode_1.s.pll_pcie3en = 0x1;
+ pll_mode_1.s.pll_opr = 0x1;
+ pll_mode_1.s.pll_div = 0x20;
+
+ lane_mode_0.s.ctle = 0x3;
+ lane_mode_0.s.pcie = 0x0;
+ lane_mode_0.s.tx_ldiv = 0x0;
+ lane_mode_0.s.rx_ldiv = 0x0;
+ lane_mode_0.s.srate = 0x0;
+ lane_mode_0.s.tx_mode = 0x2;
+ lane_mode_0.s.rx_mode = 0x2;
+
+ lane_mode_1.s.vma_fine_cfg_sel = 0x0;
+ lane_mode_1.s.vma_mm = 0x0;
+ lane_mode_1.s.cdr_fgain = 0xb;
+ lane_mode_1.s.ph_acc_adj = 0x23;
+ break;
+ }
+ /* Commit the updated table entries for this lane mode */
+ BDK_CSR_WRITE(node, BDK_GSERX_PLL_PX_MODE_0(qlm, lane_mode), pll_mode_0.u);
+ BDK_CSR_WRITE(node, BDK_GSERX_PLL_PX_MODE_1(qlm, lane_mode), pll_mode_1.u);
+ BDK_CSR_WRITE(node, BDK_GSERX_LANE_PX_MODE_0(qlm, lane_mode), lane_mode_0.u);
+ BDK_CSR_WRITE(node, BDK_GSERX_LANE_PX_MODE_1(qlm, lane_mode), lane_mode_1.u);
+ }
+}
+
+/**
+ * Given a valid PEM number, return its speed in Gbaud
+ *
+ * @param node Node to use in numa setup
+ * @param pem PEM to get speed of
+ *
+ * @return Speed in Gbaud. Zero if disabled
+ */
+int __bdk_qlm_get_gbaud_mhz_pem(bdk_node_t node, int pem)
+{
+    /* PEMX_CFG[MD] encodes the PCIe generation: Gen1=2.5G, Gen2=5G, Gen3=8G */
+    static const int gen_gbaud[3] = { 2500, 5000, 8000 };
+    BDK_CSR_INIT(pem_cfg, node, BDK_PEMX_CFG(pem));
+    unsigned int md = pem_cfg.cn83xx.md;
+    /* Any other encoding means the PEM is disabled */
+    return (md < 3) ? gen_gbaud[md] : 0;
+}
+
+/**
+ * Get the speed of a QLM using its LMODE. This can't be used on PCIe QLMs.
+ *
+ * @param node Node to use in numa setup
+ * @param qlm Which QLM
+ *
+ * @return QLM speed on Gbaud, or zero for an unknown lane mode
+ */
+int __bdk_qlm_get_gbaud_mhz_lmode(bdk_node_t node, int qlm)
+{
+    /* QLM is not in PCIe, assume LMODE is good enough for determining
+       the speed. Lane modes sharing a baud rate share a case group */
+    BDK_CSR_INIT(lane_mode, node, BDK_GSERX_LANE_MODE(qlm));
+    switch (lane_mode.s.lmode)
+    {
+        case BDK_GSER_LMODE_E_R_125G_REFCLK15625_KX:
+        case BDK_GSER_LMODE_E_R_125G_REFCLK15625_SGMII:
+            return 1250;
+        case BDK_GSER_LMODE_E_R_25G_REFCLK100:
+        case BDK_GSER_LMODE_E_R_25G_REFCLK125:
+            return 2500;
+        case BDK_GSER_LMODE_E_R_3125G_REFCLK15625_XAUI:
+            return 3125;
+        case BDK_GSER_LMODE_E_R_5G_REFCLK100:
+        case BDK_GSER_LMODE_E_R_5G_REFCLK15625_QSGMII:
+        case BDK_GSER_LMODE_E_R_5G_REFCLK125:
+            return 5000;
+        case BDK_GSER_LMODE_E_R_625G_REFCLK15625_RXAUI:
+            return 6250;
+        case BDK_GSER_LMODE_E_R_8G_REFCLK100:
+        case BDK_GSER_LMODE_E_R_8G_REFCLK125:
+            return 8000;
+        case BDK_GSER_LMODE_E_R_103125G_REFCLK15625_KR:
+            return 10312;
+        default:
+            return 0;
+    }
+}
+
+/* Return non-zero when measured_hz lies strictly within +/-10% of ideal_hz */
+static int __bdk_qlm_refclk_near(int measured_hz, int ideal_hz)
+{
+    return (measured_hz > ideal_hz - ideal_hz / 10) &&
+           (measured_hz < ideal_hz + ideal_hz / 10);
+}
+
+/**
+ * Converts a measured reference clock to a likely ideal value. Rounds
+ * clock speed to the nearest REF_*Mhz define.
+ *
+ * @param node Node to use in numa setup
+ * @param qlm Which QLM
+ * @param measured_hz
+ * Measured value
+ *
+ * @return Value exactly matching a define
+ */
+int __bdk_qlm_round_refclock(bdk_node_t node, int qlm, int measured_hz)
+{
+    if (__bdk_qlm_refclk_near(measured_hz, REF_100MHZ))
+        return REF_100MHZ;
+    if (__bdk_qlm_refclk_near(measured_hz, REF_125MHZ))
+        return REF_125MHZ;
+    if (__bdk_qlm_refclk_near(measured_hz, REF_156MHZ))
+        return REF_156MHZ;
+    if (measured_hz < 1000000)
+        return 0; /* Used for disabled QLMs */
+    /* Nothing matched: complain, then hand back the raw measurement */
+    bdk_error("N%d.QLM%d: Unexpected reference clock speed of %d Mhz\n", node, qlm, measured_hz / 1000000);
+    return measured_hz;
+}
+
+/**
+ * TWSI reads from the MCU randomly timeout. Retry a few times on
+ * failure to try and recover
+ *
+ * @param node Node to use in a Numa setup. Can be an exact ID or a special
+ * value.
+ * @param twsi_id which TWSI bus to use
+ * @param dev_addr Device address (7 bit)
+ * @param internal_addr
+ * Internal address. Can be 0, 1 or 2 bytes in width
+ * @param num_bytes Number of data bytes to read (1-4)
+ * @param ia_width_bytes
+ * Internal address size in bytes (0, 1, or 2)
+ *
+ * @return Read data, or -1 on failure
+ */
+static int64_t mcu_read(bdk_node_t node, int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes)
+{
+    enum { MAX_ATTEMPTS = 3 };
+    int64_t result = -1;
+    /* Each failed attempt is traced and followed by a 100ms pause before
+       the next try */
+    for (int attempt = 1; attempt <= MAX_ATTEMPTS; attempt++)
+    {
+        result = bdk_twsix_read_ia(node, twsi_id, dev_addr, internal_addr, num_bytes, ia_width_bytes);
+        if (result >= 0)
+            break;
+        BDK_TRACE(QLM, "Timeout %d reading from MCU\n", attempt);
+        bdk_wait_usec(100000);
+    }
+    return result;
+}
+
+/**
+ * Select the reference clock source for a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to configure
+ * @param ref_clk Desired reference in Hz. REF_100MHZ/REF_156MHZ select the
+ * on-board common clocks 0/1; anything else selects the
+ * external reference input
+ */
+static void __bdk_qlm_set_reference(bdk_node_t node, int qlm, int ref_clk)
+{
+ int use_clock;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ {
+ switch (ref_clk)
+ {
+ case REF_100MHZ:
+ use_clock = 0; /* Common clock 0 */
+ BDK_TRACE(QLM, "Setting N%d.QLM%d to use common clock 0\n", node, qlm);
+ break;
+ case REF_156MHZ:
+ use_clock = 1; /* Common clock 1 */
+ BDK_TRACE(QLM, "Setting N%d.QLM%d to use common clock 1\n", node, qlm);
+ break;
+ default:
+ use_clock = 2; /* External clock */
+ BDK_TRACE(QLM, "Setting N%d.QLM%d to use external clock\n", node, qlm);
+ break;
+ }
+ }
+ else
+ {
+ bdk_error("Update __bdk_qlm_set_reference() for qlm auto config of this chip\n");
+ return;
+ }
+ /* com_clk_sel chooses common versus external; use_com1 picks which
+ common clock applies when com_clk_sel is set */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_REFCLK_SEL(qlm),
+ c.s.com_clk_sel = (use_clock != 2);
+ c.s.use_com1 = (use_clock == 1));
+}
+
+/**
+ * For Cavium EVB and EBB board, query the MCU to determine the QLM setup. Applying
+ * any configuration found.
+ *
+ * Talks to the board MCU over TWSI bus 0 at address 0x60, verifies its
+ * magic bytes and firmware version, then walks every SERDES lane asking
+ * the MCU how it is wired and applies the resulting QLM modes.
+ *
+ * @param node Node to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_qlm_mcu_auto_config(bdk_node_t node)
+{
+ const int MCU_TWSI_BUS = 0;
+ const int MCU_TWSI_ADDRESS = 0x60;
+ int64_t data;
+
+ /* Check the two magic number bytes the MCU should return */
+ data = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x00, 1, 1);
+ if (data != 0xa5)
+ {
+ printf("QLM Config: MCU not found, skipping auto configuration\n");
+ return -1;
+ }
+ data = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x01, 1, 1);
+ if (data != 0x5a)
+ {
+ bdk_error("QLM Config: MCU magic number incorrect\n");
+ return -1;
+ }
+
+ /* Read the MCU version. Firmware 2.30 or newer is required */
+ int mcu_major = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x02, 1, 1);
+ int mcu_minor = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x03, 1, 1);
+ BDK_TRACE(QLM, "MCU version %d.%d\n", mcu_major, mcu_minor);
+ if ((mcu_major < 2) || ((mcu_major == 2) && (mcu_minor < 30)))
+ {
+ bdk_error("QLM Config: Unexpected MCU version %d.%d\n", mcu_major, mcu_minor);
+ return -1;
+ }
+
+ /* Find out how many lanes the MCU thinks are available */
+ int lanes = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x16, 1, 1);
+ BDK_TRACE(QLM, "MCU says board has %d lanes\n", lanes);
+ int correct_lanes = 0;
+ if (cavium_is_altpkg(CAVIUM_CN88XX))
+ correct_lanes = 22;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ correct_lanes = 32;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ correct_lanes = 22;
+ else if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ correct_lanes = 8;
+ if (lanes != correct_lanes)
+ {
+ bdk_error("QLM Config: Unexpected number of lanes (%d) from MCU\n", lanes);
+ return -1;
+ }
+
+ /* Walk each lane group, asking the MCU how it should be configured */
+ int lane = 0;
+ int qlm = 0;
+ while (lane < lanes)
+ {
+ int write_status;
+ int width;
+ int mode;
+ int speed;
+ int refclk;
+ /* TWSI reads from the MCU randomly timeout. Retry a few times on
+ failure to try and recover */
+ int read_tries = 0;
+ do
+ {
+ read_tries++;
+ if (read_tries > 3)
+ {
+ bdk_error("QLM Config: Timeouts reading from MCU\n");
+ return -1;
+ }
+ /* Space request out 20ms */
+ bdk_wait_usec(20000);
+ /* Select the lane we are interested in */
+ write_status = bdk_twsix_write_ia(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x16, 1, 1, lane);
+ /* Space request out 20ms */
+ bdk_wait_usec(20000);
+ /* Get the mode */
+ width = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x17, 1, 1);
+ mode = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x18, 2, 1);
+ speed = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x19, 2, 1);
+ refclk = mcu_read(node, MCU_TWSI_BUS, MCU_TWSI_ADDRESS, 0x1a, 1, 1);
+ } while ((write_status < 0) || (width < 0) || (mode < 0) || (speed < 0) || (refclk < 0));
+
+ BDK_TRACE(QLM, "MCU lane %d, width %d, mode 0x%x, speed 0x%x, ref 0x%x\n",
+ lane, width, mode, speed, refclk);
+ if ((width != 0) && (width != 1) && (width != 2) && (width != 4) && (width != 8))
+ {
+ bdk_error("QLM Config: Unexpected interface width (%d) from MCU\n", width);
+ return -1;
+ }
+ /* MCU reports a width of 0 for unconfigured QLMs. It reports a width
+ of 1 for some combinations on CN80XX, and two on others. Convert
+ either 0 or 1 to the actual width, or 2 for CN80XX. Yuck */
+ if ((width == 0) || (width == 1))
+ {
+ if (cavium_is_altpkg(CAVIUM_CN81XX) && (qlm < 2))
+ width = 2;
+ else
+ width = bdk_qlm_get_lanes(node, qlm);
+ }
+ bdk_qlm_modes_t qlm_mode;
+ /* Speed is 8.8 fixed point in GBaud; convert to MBaud */
+ int qlm_speed = (speed >> 8) * 1000 + (speed & 0xff) * 1000 / 256;
+ int use_ref = 0;
+ bdk_qlm_mode_flags_t qlm_flags = 0;
+ /* Modes below 0x4000 share a decode table; SATA and 10G+ modes
+ are handled in the second table below */
+ if (mode < 0x4000)
+ {
+ switch (mode)
+ {
+ case 0x0000: /* No Configuration */
+ qlm_mode = BDK_QLM_MODE_DISABLED;
+ break;
+ case 0x0101: /* PCIe Host */
+ qlm_mode = (width == 8) ? BDK_QLM_MODE_PCIE_1X8 :
+ (width == 4) ? BDK_QLM_MODE_PCIE_1X4 :
+ BDK_QLM_MODE_PCIE_1X2;
+ use_ref = REF_100MHZ;
+ break;
+ case 0x0102: /* PCIe Endpoint */
+ qlm_mode = (width == 8) ? BDK_QLM_MODE_PCIE_1X8 :
+ (width == 4) ? BDK_QLM_MODE_PCIE_1X4 :
+ BDK_QLM_MODE_PCIE_1X2;
+ qlm_flags = BDK_QLM_MODE_FLAG_ENDPOINT;
+ use_ref = 0; /* Use the external reference for EP mode */
+ break;
+ case 0x1000: /* SGMII */
+ qlm_mode = (width == 4) ? BDK_QLM_MODE_SGMII_4X1 :
+ (width == 2) ? BDK_QLM_MODE_SGMII_2X1 :
+ BDK_QLM_MODE_SGMII_1X1;
+ use_ref = REF_156MHZ;
+ /* CN80XX parts on EBBs use phy port 2 for SGMII, while QSGMII
+ uses the correct port. Fix this for DLM1 and DLM3 */
+ if (cavium_is_altpkg(CAVIUM_CN81XX))
+ {
+ int bgx = (qlm == 3) ? 1 : 0;
+ /* Copy the PHY address configured for port 2 over to
+ port 1 of the same BGX */
+ uint64_t phy = bdk_config_get_int(BDK_CONFIG_PHY_ADDRESS, 0, bgx, 2);
+ bdk_config_set_int(phy, BDK_CONFIG_PHY_ADDRESS, 0, bgx, 1);
+ }
+ break;
+ case 0x1100: /* QSGMII */
+ qlm_mode = BDK_QLM_MODE_QSGMII_4X1;
+ use_ref = REF_100MHZ;
+ break;
+ case 0x2000: /* XAUI */
+ qlm_mode = BDK_QLM_MODE_XAUI_1X4;
+ use_ref = REF_156MHZ;
+ break;
+ case 0x2100: /* RXAUI */
+ qlm_mode = (width == 2) ? BDK_QLM_MODE_RXAUI_1X2 : BDK_QLM_MODE_RXAUI_2X2;
+ use_ref = REF_156MHZ;
+ break;
+ case 0x2200: /* DXAUI */
+ qlm_mode = BDK_QLM_MODE_XAUI_1X4;
+ use_ref = REF_156MHZ;
+ break;
+ case 0x3001: /* Interlaken */
+ qlm_mode = BDK_QLM_MODE_ILK;
+ use_ref = REF_156MHZ;
+ break;
+ default:
+ bdk_error("QLM Config: Unexpected interface mode (0x%x) from MCU\n", mode);
+ qlm_mode = BDK_QLM_MODE_DISABLED;
+ break;
+ }
+ }
+ else
+ {
+ switch (mode)
+ {
+ case 0x4000: /* SATA */
+ qlm_mode = (width == 2) ? BDK_QLM_MODE_SATA_2X1 : BDK_QLM_MODE_SATA_4X1;
+ use_ref = REF_100MHZ;
+ break;
+ case 0x5001: /* XFI */
+ qlm_mode = (width == 4) ? BDK_QLM_MODE_XFI_4X1 :
+ (width == 2) ? BDK_QLM_MODE_XFI_2X1 :
+ BDK_QLM_MODE_XFI_1X1;
+ use_ref = REF_156MHZ;
+ break;
+ case 0x5002: /* 10G-KR */
+ qlm_mode = (width == 4) ? BDK_QLM_MODE_10G_KR_4X1 :
+ (width == 2) ? BDK_QLM_MODE_10G_KR_2X1 :
+ BDK_QLM_MODE_10G_KR_1X1;
+ use_ref = REF_156MHZ;
+ break;
+ case 0x6001: /* XLAUI */
+ qlm_mode = BDK_QLM_MODE_XLAUI_1X4;
+ use_ref = REF_156MHZ;
+ break;
+ case 0x6002: /* 40G-KR4 */
+ qlm_mode = BDK_QLM_MODE_40G_KR4_1X4;
+ use_ref = REF_156MHZ;
+ break;
+ default:
+ bdk_error("QLM Config: Unexpected interface mode (0x%x) from MCU\n", mode);
+ qlm_mode = BDK_QLM_MODE_DISABLED;
+ break;
+ }
+ }
+ lane += width;
+ /* One MCU entry can span several QLMs/DLMs; apply the mode to each
+ QLM it covers */
+ do
+ {
+ int internal_qlm = qlm;
+ /* Alternate package parts have different QLM numbers for internal
+ versus external. The MCU uses the external numbers */
+ if (cavium_is_altpkg(CAVIUM_CN88XX))
+ {
+ switch (qlm)
+ {
+ case 0: /* QLM0 -> QLM4 */
+ internal_qlm = 4;
+ break;
+ case 1: /* QLM1 -> QLM5 */
+ internal_qlm = 5;
+ break;
+ case 2: /* QLM2 -> QLM0 */
+ internal_qlm = 0;
+ break;
+ case 3: /* QLM3 -> QLM1 */
+ internal_qlm = 1;
+ break;
+ case 4: /* DLM4 -> QLM2 */
+ internal_qlm = 2;
+ break;
+ case 5: /* DLM5 -> QLM6 */
+ internal_qlm = 6;
+ break;
+ case 6: /* DLM6 -> QLM7 */
+ internal_qlm = 7;
+ break;
+ default:
+ bdk_error("Invalid external QLM%d from MCU\n", qlm);
+ return -1;
+ }
+ }
+ if (qlm_flags & BDK_QLM_MODE_FLAG_ENDPOINT)
+ {
+ BDK_TRACE(QLM, "Skipping N%d.QLM%d mode %s(%d), speed %d, flags 0x%x (EP should already be setup)\n",
+ node, internal_qlm, bdk_qlm_mode_tostring(qlm_mode), qlm_mode, qlm_speed, qlm_flags);
+ }
+ else
+ {
+ BDK_TRACE(QLM, "Setting N%d.QLM%d mode %s(%d), speed %d, flags 0x%x\n",
+ node, internal_qlm, bdk_qlm_mode_tostring(qlm_mode), qlm_mode, qlm_speed, qlm_flags);
+ /* Set the reference clock for this QLM */
+ __bdk_qlm_set_reference(node, internal_qlm, use_ref);
+ if (bdk_qlm_set_mode(node, internal_qlm, qlm_mode, qlm_speed, qlm_flags))
+ return -1;
+ }
+ int num_lanes = bdk_qlm_get_lanes(node, internal_qlm);
+ /* CN86XX looks like two lanes each for DLM4-7 */
+ if (cavium_is_altpkg(CAVIUM_CN88XX) && (qlm >= 4))
+ num_lanes = 2;
+ if (qlm_mode == BDK_QLM_MODE_PCIE_1X8)
+ {
+ /* PCIe x8 is a special case as the QLM config function
+ actually configures both QLMs in one go */
+ qlm++;
+ width -= 8;
+ }
+ else if ((qlm_mode == BDK_QLM_MODE_PCIE_1X4) && (width > num_lanes))
+ {
+ /* PCIe x4 is a special case as the QLM config function
+ actually configures both QLMs in one go */
+ qlm++;
+ width -= 4;
+ }
+ else if (width >= num_lanes)
+ {
+ if (num_lanes == 1)
+ width -= 2; /* Special case for CN80XX */
+ else
+ width -= num_lanes;
+ }
+ else
+ width = 0;
+ qlm++;
+ } while (width > 0);
+ }
+ return 0;
+}
+
+/**
+ * Display the current settings of a QLM lane
+ *
+ * @param node Node the QLM is on
+ * @param qlm QLM to display
+ * @param qlm_lane Lane to use
+ * @param show_tx Display TX parameters
+ * @param show_rx Display RX parameters
+ */
+void bdk_qlm_display_settings(bdk_node_t node, int qlm, int qlm_lane, bool show_tx, bool show_rx)
+{
+    /* Labels for the 2-bit TX direction hint fields of GSERX_BR_RXX_EER:
+       index 0 and 3 both mean "hold", 1 = increment, 2 = decrement */
+    const char *dir_label[] = {"Hold", "Inc", "Dec", "Hold"};
+
+    /* Snapshot all the per-lane GSER status registers before printing */
+    uint64_t rx_aeq_out_0 = BDK_CSR_READ(node, BDK_GSERX_LANEX_RX_AEQ_OUT_0(qlm, qlm_lane));
+    uint64_t rx_aeq_out_1 = BDK_CSR_READ(node, BDK_GSERX_LANEX_RX_AEQ_OUT_1(qlm, qlm_lane));
+    uint64_t rx_aeq_out_2 = BDK_CSR_READ(node, BDK_GSERX_LANEX_RX_AEQ_OUT_2(qlm, qlm_lane));
+    uint64_t rx_vma_status_0 = BDK_CSR_READ(node, BDK_GSERX_LANEX_RX_VMA_STATUS_0(qlm, qlm_lane));
+    uint64_t rx_vma_status_1 = BDK_CSR_READ(node, BDK_GSERX_LANEX_RX_VMA_STATUS_1(qlm, qlm_lane));
+    uint64_t sds_pin_mon_1 = BDK_CSR_READ(node, BDK_GSERX_LANEX_SDS_PIN_MON_1(qlm, qlm_lane));
+    uint64_t sds_pin_mon_2 = BDK_CSR_READ(node, BDK_GSERX_LANEX_SDS_PIN_MON_2(qlm, qlm_lane));
+    uint64_t br_rxx_eer = BDK_CSR_READ(node, BDK_GSERX_BR_RXX_EER(qlm, qlm_lane));
+
+    printf("N%d.QLM%d Lane %d:\n", node, qlm, qlm_lane);
+    if (show_rx)
+    {
+        /* DFE tap values are sign-magnitude encoded except tap 1 */
+        printf("    DFE Tap 1: %llu, Tap 2: %lld, Tap 3: %lld, Tap 4: %lld, Tap 5: %lld\n",
+            bdk_extract(rx_aeq_out_1, 0, 5),
+            bdk_extract_smag(rx_aeq_out_1, 5, 9),
+            bdk_extract_smag(rx_aeq_out_1, 10, 14),
+            bdk_extract_smag(rx_aeq_out_0, 0, 4),
+            bdk_extract_smag(rx_aeq_out_0, 5, 9));
+        printf("    Pre-CTLE Gain: %llu, Post-CTLE Gain: %llu, CTLE Peak: %llu, CTLE Pole: %llu\n",
+            bdk_extract(rx_aeq_out_2, 4, 4),
+            bdk_extract(rx_aeq_out_2, 0, 4),
+            bdk_extract(rx_vma_status_0, 2, 4),
+            bdk_extract(rx_vma_status_0, 0, 2));
+        printf("    RX Equalization Tx Directions Hints TXPRE: %s, TXMAIN: %s, TXPOST: %s, Figure of Merit: %llu\n",
+            dir_label[bdk_extract(br_rxx_eer, 0, 2)],
+            dir_label[bdk_extract(br_rxx_eer, 2, 2)],
+            dir_label[bdk_extract(br_rxx_eer, 4, 2)],
+            bdk_extract(br_rxx_eer, 6, 8));
+    }
+    if (show_tx)
+    {
+        printf("    TX Swing: %llu, Pre-emphasis Pre-cursor: %llu, Post-cursor: %llu\n",
+            bdk_extract(sds_pin_mon_1, 1, 5),
+            bdk_extract(sds_pin_mon_2, 0, 4),
+            bdk_extract(sds_pin_mon_2, 4, 5));
+        printf("    TX Boost Enable: %llu, TX Turbo Mode: %llu\n",
+            bdk_extract(sds_pin_mon_2, 10, 1),
+            bdk_extract(sds_pin_mon_2, 9, 1));
+    }
+    printf("    Training-done: %llu\n",
+        bdk_extract(rx_vma_status_1, 7, 1));
+}
+
+/**
+ * Perform RX equalization on a QLM
+ *
+ * @param node     Node the QLM is on
+ * @param qlm      QLM to perform RX equalization on
+ * @param qlm_lane Lane to use, or -1 for all lanes
+ *
+ * @return Zero on success, negative if any lane failed RX equalization
+ */
+int __bdk_qlm_rx_equalization(bdk_node_t node, int qlm, int qlm_lane)
+{
+    /* Don't touch QLMs that are in reset or powered down */
+    BDK_CSR_INIT(phy_ctl, node, BDK_GSERX_PHY_CTL(qlm));
+    if (phy_ctl.s.phy_pd || phy_ctl.s.phy_reset)
+        return -1;
+    /* Don't run on PCIe links. NOTE(review): this comparison relies on all
+       PCIe modes sorting before the other modes in bdk_qlm_modes_t --
+       confirm the enum ordering if it ever changes */
+    if (bdk_qlm_get_mode(node, qlm) <= BDK_QLM_MODE_PCIE_1X8)
+        return -1;
+
+    int fail = 0; /* Bitmask of lanes that failed CDR Lock or Electrical Idle check */
+    int pending = 0; /* Bitmask of lanes that we're waiting for */
+    int MAX_LANES = bdk_qlm_get_lanes(node, qlm);
+
+    BDK_TRACE(QLM, "N%d.QLM%d: Starting RX equalization on lane %d\n", node, qlm, qlm_lane);
+    for (int lane = 0; lane < MAX_LANES; lane++)
+    {
+        /* Skip lanes we don't care about */
+        if ((qlm_lane != -1) && (qlm_lane != lane))
+            continue;
+        /* Check that the lane has completed CDR lock */
+        BDK_CSR_INIT(eie_detsts, node, BDK_GSERX_RX_EIE_DETSTS(qlm));
+        if (((1 << lane) & eie_detsts.s.cdrlock) == 0)
+        {
+            /* Mark bad so we skip this lane below */
+            fail |= 1 << lane;
+            continue;
+        }
+        /* Enable software control */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_BR_RXX_CTL(qlm, lane),
+            c.s.rxt_swm = 1);
+        /* Clear the completion flag and initiate a new request */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_BR_RXX_EER(qlm, lane),
+            c.s.rxt_esv = 0;
+            c.s.rxt_eer = 1);
+        /* Remember that we have to wait for this lane */
+        pending |= 1 << lane;
+    }
+
+    /* Timing a few of these over XFI on CN73XX, each takes 21-23ms. XLAUI
+       was about the same time. DXAUI and RXAUI both took 2-3ms. Put the
+       timeout at 250ms, which is roughly 10x my measurements. */
+    uint64_t timeout = bdk_clock_get_count(BDK_CLOCK_TIME) + bdk_clock_get_rate(node, BDK_CLOCK_TIME) / 4;
+    /* Poll until every pending lane either completes or fails, or we hit
+       the overall timeout */
+    while (pending)
+    {
+        for (int lane = 0; lane < MAX_LANES; lane++)
+        {
+            int lane_mask = 1 << lane;
+            /* Only check lanes that are pending */
+            if (!(pending & lane_mask))
+                continue;
+            /* Read the registers for checking Electrical Idle / CDR lock and
+               the status of the RX equalization */
+            BDK_CSR_INIT(eie_detsts, node, BDK_GSERX_RX_EIE_DETSTS(qlm));
+            BDK_CSR_INIT(gserx_br_rxx_eer, node, BDK_GSERX_BR_RXX_EER(qlm, lane));
+            /* Mark failure if lane entered Electrical Idle or lost CDR Lock. The
+               bit for the lane will have cleared in either EIESTS or CDRLOCK */
+            if (!(eie_detsts.s.eiests & eie_detsts.s.cdrlock & lane_mask))
+            {
+                fail |= lane_mask;
+                pending &= ~lane_mask;
+            }
+            else if (gserx_br_rxx_eer.s.rxt_esv)
+            {
+                /* Clear pending if RX equalization finished */
+                pending &= ~lane_mask;
+            }
+        }
+        /* Break out of the loop on timeout */
+        if (bdk_clock_get_count(BDK_CLOCK_TIME) > timeout)
+            break;
+    }
+
+    /* Cleanup and report status */
+    for (int lane = 0; lane < MAX_LANES; lane++)
+    {
+        /* Skip lanes we don't care about */
+        if ((qlm_lane != -1) && (qlm_lane != lane))
+            continue;
+        int lane_mask = 1 << lane;
+        /* Get the final RX equalization status */
+        BDK_CSR_INIT(gserx_br_rxx_eer, node, BDK_GSERX_BR_RXX_EER(qlm, lane));
+        /* Disable software control */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_BR_RXX_CTL(qlm, lane),
+            c.s.rxt_swm = 0);
+        /* Report status */
+        if (fail & lane_mask)
+        {
+            BDK_TRACE(QLM, "N%d.QLM%d: Lane %d RX equalization lost CDR Lock or entered Electrical Idle\n", node, qlm, lane);
+        }
+        else if ((pending & lane_mask) || !gserx_br_rxx_eer.s.rxt_esv)
+        {
+            BDK_TRACE(QLM, "N%d.QLM%d: Lane %d RX equalization timeout\n", node, qlm, lane);
+            fail |= 1 << lane;
+        }
+        else
+        {
+            bdk_qlm_display_settings(node, qlm, lane, false, true);
+        }
+    }
+
+    return (fail) ? -1 : 0;
+}
+
+/**
+ * Configure the TX tuning parameters for a QLM lane. The tuning parameters can
+ * be specified as -1 to maintain their current value
+ *
+ * @param node     Node to configure
+ * @param qlm      QLM to configure
+ * @param lane     Lane to configure
+ * @param tx_swing Transmit swing (coef 0) Range 0-31
+ * @param tx_pre   Pre cursor emphasis (Coef -1). Range 0-15
+ * @param tx_post  Post cursor emphasis (Coef +1). Range 0-31
+ * @param tx_gain  Transmit gain. Range 0-7
+ * @param tx_vboost Transmit voltage boost. Range 0-1
+ *
+ * NOTE(review): the runtime checks below enforce tighter limits than the
+ * ranges documented above (tx_swing <= 25, tx_pre <= 10, tx_post <= 15) --
+ * confirm which set of limits is authoritative.
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_tune_lane_tx(bdk_node_t node, int qlm, int lane, int tx_swing, int tx_pre, int tx_post, int tx_gain, int tx_vboost)
+{
+    /* Check tuning constraints */
+    if ((tx_swing < -1) || (tx_swing > 25))
+    {
+        bdk_error("N%d.QLM%d: Lane %d: Invalid TX_SWING(%d)\n", node, qlm, lane, tx_swing);
+        return -1;
+    }
+    if ((tx_pre < -1) || (tx_pre > 10))
+    {
+        bdk_error("N%d.QLM%d: Lane %d: Invalid TX_PRE(%d)\n", node, qlm, lane, tx_pre);
+        return -1;
+    }
+    if ((tx_post < -1) || (tx_post > 15))
+    {
+        bdk_error("N%d.QLM%d: Lane %d: Invalid TX_POST(%d)\n", node, qlm, lane, tx_post);
+        return -1;
+    }
+    /* Combined constraints only apply when all three values are specified */
+    if ((tx_pre >= 0) && (tx_post >= 0) && (tx_swing >= 0) && (tx_pre + tx_post - tx_swing > 2))
+    {
+        bdk_error("N%d.QLM%d: Lane %d: TX_PRE(%d) + TX_POST(%d) - TX_SWING(%d) must be less than or equal to 2\n", node, qlm, lane, tx_pre, tx_post, tx_swing);
+        return -1;
+    }
+    if ((tx_pre >= 0) && (tx_post >= 0) && (tx_swing >= 0) && (tx_pre + tx_post + tx_swing > 35))
+    {
+        bdk_error("N%d.QLM%d: Lane %d: TX_PRE(%d) + TX_POST(%d) + TX_SWING(%d) must be less than or equal to 35\n", node, qlm, lane, tx_pre, tx_post, tx_swing);
+        return -1;
+    }
+
+    if ((tx_gain < -1) || (tx_gain > 7))
+    {
+        bdk_error("N%d.QLM%d: Lane %d: Invalid TX_GAIN(%d). TX_GAIN must be between 0 and 7\n", node, qlm, lane, tx_gain);
+        return -1;
+    }
+
+    if ((tx_vboost < -1) || (tx_vboost > 1))
+    {
+        bdk_error("N%d.QLM%d: Lane %d: Invalid TX_VBOOST(%d). TX_VBOOST must be 0 or 1.\n", node, qlm, lane, tx_vboost);
+        return -1;
+    }
+
+    /* Pre and post cursor share one register field (cfg_tx_premptap). If only
+       one was specified, read back the current value of the other half so the
+       combined write below doesn't clobber it */
+    if ((tx_pre != -1) && (tx_post == -1))
+    {
+        BDK_CSR_INIT(emphasis, node, BDK_GSERX_LANEX_TX_PRE_EMPHASIS(qlm, lane));
+        tx_post = emphasis.s.cfg_tx_premptap >> 4;
+    }
+
+    if ((tx_post != -1) && (tx_pre == -1))
+    {
+        BDK_CSR_INIT(emphasis, node, BDK_GSERX_LANEX_TX_PRE_EMPHASIS(qlm, lane));
+        tx_pre = emphasis.s.cfg_tx_premptap & 0xf;
+    }
+
+    BDK_TRACE(QLM, "N%d.QLM%d: Lane %d: TX_SWING=%d, TX_PRE=%d, TX_POST=%d, TX_GAIN=%d, TX_VBOOST=%d\n",
+        node, qlm, lane, tx_swing, tx_pre, tx_post, tx_gain, tx_vboost);
+
+    /* Manual Tx Swing and Tx Equalization Programming Steps */
+
+    /* 1) Enable Tx swing and Tx emphasis overrides */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_TX_CFG_1(qlm, lane),
+        c.s.tx_swing_ovrrd_en = (tx_swing != -1);
+        c.s.tx_premptap_ovrrd_val = (tx_pre != -1) && (tx_post != -1);
+        c.s.tx_vboost_en_ovrrd_en = (tx_vboost != -1)); /* Vboost override */
+    /* 2) Program the Tx swing and Tx emphasis Pre-cursor and Post-cursor values */
+    if (tx_swing != -1)
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_TX_CFG_0(qlm, lane),
+            c.s.cfg_tx_swing = tx_swing);
+    if ((tx_pre != -1) && (tx_post != -1))
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_TX_PRE_EMPHASIS(qlm, lane),
+            c.s.cfg_tx_premptap = (tx_post << 4) | tx_pre);
+    /* Apply TX gain settings */
+    if (tx_gain != -1)
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_TX_CFG_3(qlm, lane),
+            c.s.pcs_sds_tx_gain = tx_gain);
+    /* Apply TX vboost settings */
+    if (tx_vboost != -1)
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_TX_CFG_3(qlm, lane),
+            c.s.cfg_tx_vboost_en = tx_vboost);
+    /* 3) Program override for the Tx coefficient request */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_0(qlm, lane),
+        if (((tx_pre != -1) && (tx_post != -1)) || (tx_swing != -1))
+            c.s.cfg_tx_coeff_req_ovrrd_val = 1;
+        if (tx_vboost != -1)
+            c.s.cfg_tx_vboost_en_ovrrd_val = 1;
+        );
+    /* 4) Enable the Tx coefficient request override enable */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, lane),
+        if (((tx_pre != -1) && (tx_post != -1)) || (tx_swing != -1))
+            c.s.cfg_tx_coeff_req_ovrrd_en = 1;
+        if (tx_vboost != -1)
+            c.s.cfg_tx_vboost_en_ovrrd_en = 1
+        );
+    /* 5) Issue a Control Interface Configuration Override request to start
+       the Tx equalizer Optimization cycle which applies the new Tx swing
+       and equalization settings */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, lane),
+        c.s.ctlifc_ovrrd_req = 1);
+
+    /* 6) Prepare for a subsequent Tx swing and Tx equalization adjustment:
+       a) Disable the Tx coefficient request override enable */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, lane),
+        c.s.cfg_tx_coeff_req_ovrrd_en = 0);
+    /* b) Issue a Control Interface Configuration Override request */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, lane),
+        c.s.ctlifc_ovrrd_req = 1);
+    /* The new Tx swing and Pre-cursor and Post-cursor settings will now take
+       effect. */
+    return 0;
+}
+
+/**
+ * Some QLM speeds need to override the default tuning parameters
+ *
+ * @param node     Node to use in a Numa setup
+ * @param qlm      QLM to configure
+ * @param mode     Desired mode
+ * @param baud_mhz Desired speed
+ */
+void __bdk_qlm_tune(bdk_node_t node, int qlm, bdk_qlm_modes_t mode, int baud_mhz)
+{
+    /* Note: This function is not called for CCPI. For CCPI tuning, see
+       bdk-init-nz-node.c */
+    /* Tuning parameters override the KR training. Don't apply them for KR links */
+    switch (mode)
+    {
+        case BDK_QLM_MODE_10G_KR_1X1:
+        case BDK_QLM_MODE_10G_KR_2X1:
+        case BDK_QLM_MODE_10G_KR_4X1:
+        case BDK_QLM_MODE_40G_KR4_1X4:
+            return;
+        case BDK_QLM_MODE_PCIE_1X1:
+        case BDK_QLM_MODE_PCIE_2X1:
+        case BDK_QLM_MODE_PCIE_1X2:
+        case BDK_QLM_MODE_PCIE_1X4:
+        case BDK_QLM_MODE_PCIE_1X8:
+            /* Don't tune PCIe Gen3 as it has its own builtin, similar to KR */
+            if (baud_mhz > 5000)
+                return;
+            break;
+        default:
+            break;
+    }
+
+    /* We're applying tuning for all lanes on this QLM */
+    int num_lanes = bdk_qlm_get_lanes(node, qlm);
+    for (int lane = 0; lane < num_lanes; lane++)
+    {
+        /* TX Swing: First read any board specific setting from the environment */
+        int swing = bdk_config_get_int(BDK_CONFIG_QLM_TUNING_TX_SWING, node, qlm, lane);
+        /* If no setting, use hard coded generic defaults */
+        if (swing == -1)
+        {
+            if (baud_mhz == 6250)
+            {
+                /* Email from Brendan Metzner about RXAUI around 2/7/2016 */
+                swing = 0x12;
+            }
+            else if (baud_mhz == 10312)
+            {
+                /* From lab measurements of EBB8800 at 10.3125G */
+                swing = 0xd;
+            }
+        }
+
+        /* TX Premptap: First read any board specific setting from the environment */
+        int premptap = bdk_config_get_int(BDK_CONFIG_QLM_TUNING_TX_PREMPTAP, node, qlm, lane);
+        /* If no setting, use hard coded generic defaults */
+        if (premptap == -1)
+        {
+            if (baud_mhz == 6250)
+            {
+                /* From lab measurements of EBB8800 at 6.25G */
+                premptap = 0xa0;
+            }
+            else if (baud_mhz == 10312)
+            {
+                /* From lab measurements of EBB8800 at 10.3125G */
+                premptap = 0xd0;
+            }
+        }
+
+        /* Premptap packs pre-cursor in the low nibble and post-cursor in the
+           high nibble; -1 means "leave unchanged" for both halves */
+        int tx_pre = (premptap == -1) ? -1 : premptap & 0xf;
+        int tx_post = (premptap == -1) ? -1 : premptap >> 4;
+        int gain = bdk_config_get_int(BDK_CONFIG_QLM_TUNING_TX_GAIN, node, qlm, lane);
+        int vboost = bdk_config_get_int(BDK_CONFIG_QLM_TUNING_TX_VBOOST, node, qlm, lane);
+
+        __bdk_qlm_tune_lane_tx(node, qlm, lane, swing, tx_pre, tx_post, gain, vboost);
+
+        /* Email from Brendan Metzner about RXAUI around 2/7/2016 suggested the
+           following setting for RXAUI at 6.25G with both PHY or cable. I'm
+           applying it to all lanes running at 6.25G */
+        if (baud_mhz == 6250)
+        {
+            /* This is changing the Q/QB error sampler 0 threshold from 0xD
+               to 0xF */
+            BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_CFG_4(qlm, lane),
+                c.s.cfg_rx_errdet_ctrl = 0xcf6f);
+        }
+    }
+}
+
+/**
+ * Disables DFE for the specified QLM lane(s).
+ * This function should only be called for low-loss channels.
+ *
+ * @param node   Node to configure
+ * @param qlm    QLM to configure
+ * @param lane   Lane to configure, or -1 for all lanes
+ */
+void __bdk_qlm_dfe_disable(int node, int qlm, int lane)
+{
+    int num_lanes = bdk_qlm_get_lanes(node, qlm);
+    int l;
+
+    /* Apply the four-step DFE disable sequence to each selected lane */
+    for (l = 0; l < num_lanes; l++) {
+        if ((lane != -1) && (lane != l))
+            continue;
+        /* 1. Write GSERX_LANEx_RX_LOOP_CTRL = 0x0270 (var "loop_ctrl" with bits 8 & 1 cleared).
+         *  bit<1> dfe_en_byp = 1'b0 */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_LOOP_CTRL(qlm, l),
+            c.s.cfg_rx_lctrl = c.s.cfg_rx_lctrl & 0x3fd);
+
+        /* 2. Write GSERX_LANEx_RX_VALBBD_CTRL_1 = 0x0000 (var "ctrl1" with all bits cleared)
+         *  bits<14:11> CFG_RX_DFE_C3_MVAL = 4'b0000
+         *  bit<10> CFG_RX_DFE_C3_MSGN = 1'b0
+         *  bits<9:6> CFG_RX_DFE_C2_MVAL = 4'b0000
+         *  bit<5> CFG_RX_DFE_C2_MSGN = 1'b0
+         *  bits<4:1> CFG_RX_DFE_C1_MVAL = 5'b0000
+         *  bits<0> CFG_RX_DFE_C1_MSGN = 1'b0 */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(qlm, l),
+            c.s.dfe_c3_mval = 0;
+            c.s.dfe_c3_msgn = 0;
+            c.s.dfe_c2_mval = 0;
+            c.s.dfe_c2_msgn = 0;
+            c.s.dfe_c1_mval = 0;
+            c.s.dfe_c1_msgn = 0);
+
+        /* 3. Write GSERX_LANEx_RX_VALBBD_CTRL_0 = 0x2400 (var "ctrl0" with following bits set/cleared)
+         *  bits<11:10> CFG_RX_DFE_GAIN = 0x1
+         *  bits<9:6> CFG_RX_DFE_C5_MVAL = 4'b0000
+         *  bit<5> CFG_RX_DFE_C5_MSGN = 1'b0
+         *  bits<4:1> CFG_RX_DFE_C4_MVAL = 4'b0000
+         *  bit<0> CFG_RX_DFE_C4_MSGN = 1'b0 */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(qlm, l),
+            c.s.dfe_gain = 0x1;
+            c.s.dfe_c5_mval = 0;
+            c.s.dfe_c5_msgn = 0;
+            c.s.dfe_c4_mval = 0;
+            c.s.dfe_c4_msgn = 0);
+
+        /* 4. Write GSER(0..13)_LANE(0..3)_RX_VALBBD_CTRL_2 = 0x003F //enable DFE tap overrides
+         *  bit<5> dfe_ovrd_en = 1
+         *  bit<4> dfe_c5_ovrd_val = 1
+         *  bit<3> dfe_c4_ovrd_val = 1
+         *  bit<2> dfe_c3_ovrd_val = 1
+         *  bit<1> dfe_c2_ovrd_val = 1
+         *  bit<0> dfe_c1_ovrd_val = 1
+         */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(qlm, l),
+            c.s.dfe_ovrd_en = 0x1;
+            c.s.dfe_c5_ovrd_val = 0x1;
+            c.s.dfe_c4_ovrd_val = 0x1;
+            c.s.dfe_c3_ovrd_val = 0x1;
+            c.s.dfe_c2_ovrd_val = 0x1;
+            c.s.dfe_c1_ovrd_val = 0x1);
+
+    }
+}
+
+/**
+ * Check if a specific lane is using KR training. This is used by low level GSER
+ * code to remember which QLMs and lanes need to support KR training for BGX. The
+ * hardware doesn't have a bit set aside to record this, so we repurpose the
+ * register GSERX_SCRATCH.
+ *
+ * @param node   Node to check
+ * @param qlm    QLM to check
+ * @param lane   Lane to check
+ *
+ * @return True if this lane uses KR with BGX, false otherwise
+ */
+bool __bdk_qlm_is_lane_kr(bdk_node_t node, int qlm, int lane)
+{
+    /* GSERX_SCRATCH holds a software-defined bitmask: bit N set means
+       lane N uses KR (set by __bdk_qlm_set_lane_kr) */
+    uint64_t mask = BDK_CSR_READ(node, BDK_GSERX_SCRATCH(qlm));
+    return 1 & (mask >> lane);
+}
+
+/**
+ * Set if a specific lane is using KR training. This is used by low level GSER
+ * code to remember which QLMs and lanes need to support KR training for BGX. The
+ * hardware doesn't have a bit set aside to record this, so we repurpose the
+ * register GSERX_SCRATCH.
+ *
+ * @param node   Node to set
+ * @param qlm    QLM to set
+ * @param lane   Lane to set
+ * @param is_kr  KR (true) or XFI/XLAUI (false)
+ */
+void __bdk_qlm_set_lane_kr(bdk_node_t node, int qlm, int lane, bool is_kr)
+{
+    /* Read-modify-write the per-QLM scratch bitmask: bit N tracks whether
+       lane N uses KR (read back by __bdk_qlm_is_lane_kr) */
+    uint64_t mask = BDK_CSR_READ(node, BDK_GSERX_SCRATCH(qlm));
+    if (is_kr)
+        mask |= 1 << lane;
+    else
+        mask &= ~(1 << lane);
+    BDK_CSR_WRITE(node, BDK_GSERX_SCRATCH(qlm), mask);
+}
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.c b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.c
new file mode 100644
index 0000000000..a7602de758
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.c
@@ -0,0 +1,398 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-gser.h"
+#include "libbdk-arch/bdk-csrs-rst.h"
+#include "libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.h"
+
+/**
+ * Delay for the specified microseconds. When this code runs on secondary nodes
+ * before full init, the normal bdk-clock functions do not work. This function
+ * serves as a replacement that runs everywhere.
+ *
+ * @param usec Microseconds to wait
+ */
+static void wait_usec(bdk_node_t node, uint64_t usec)
+{
+    const uint64_t REF_CLOCK = 50000000; /* This is currently defined to be 50Mhz */
+    /* Busy-wait by polling the free-running reference counter.
+       NOTE(review): assumes RST_REF_CNTR does not wrap during the wait --
+       confirm counter width vs. maximum delay used */
+    uint64_t refclock = BDK_CSR_READ(node, BDK_RST_REF_CNTR);
+    uint64_t timeout = refclock + REF_CLOCK * usec / 1000000;
+    while (refclock < timeout)
+    {
+        refclock = BDK_CSR_READ(node, BDK_RST_REF_CNTR);
+    }
+}
+
+/**
+ * Errata GSER-25992 - RX EQ Default Settings Update<p>
+ * For all GSER and all lanes when not PCIe EP:
+ *   set GSER()_LANE()_RX_CFG_4[CFG_RX_ERRDET_CTRL<13:8>] = 13 (decimal)
+ *   set GSER()_LANE()_RX_CTLE_CTRL[PCS_SDS_RX_CTLE_BIAS_CTRL] = 3
+ * Applied when SERDES are configured for 8G and 10G.<p>
+ * Applies to:
+ *     CN88XX pass 1.x
+ * Fixed in hardware:
+ *     CN88XX pass 2.x
+ *     CN81XX
+ *     CN83XX
+ *
+ * @param node     Node to apply errata fix for
+ * @param qlm      QLM to apply errata fix to
+ * @param baud_mhz QLM speed in Mhz
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_25992(bdk_node_t node, int qlm, int baud_mhz)
+{
+    /* Errata only applies to CN88XX pass 1.x running at 8G or faster */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+        return 0;
+    if (baud_mhz < 8000)
+        return 0;
+
+    int num_lanes = 4; /* Only applies to CN88XX, where always 4 lanes */
+    for (int lane = 0; lane < num_lanes; lane++)
+    {
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_CTLE_CTRL(qlm, lane),
+            c.s.pcs_sds_rx_ctle_bias_ctrl = 3);
+        /* 0xcd6f puts 13 (decimal) in CFG_RX_ERRDET_CTRL<13:8> per the errata */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_CFG_4(qlm, lane),
+            c.s.cfg_rx_errdet_ctrl = 0xcd6f);
+    }
+    return 0;
+}
+
+/**
+ * (GSER-26150) 10G PHY PLL Temperature Failure
+ *
+ * 10 Gb temperature excursions can cause lock failure. Change
+ * the calibration point of the VCO at start up to shift some
+ * available range of the VCO from -deltaT direction to the
+ * +deltaT ramp direction allowing a greater range of VCO
+ * temperatures before experiencing the failure.
+ *
+ * Applies to:
+ *     CN88XX pass 1.x
+ * Fix in hardware:
+ *     CN88XX pass 2.x
+ *     CN81XX
+ *     CN83XX
+ *
+ * Only applies to QLMs running 8G and 10G
+ *
+ * @param node     Node to apply errata to
+ * @param qlm      QLM to apply errata fix to
+ * @param baud_mhz QLM speed in Mhz
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_26150(bdk_node_t node, int qlm, int baud_mhz)
+{
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+        return 0;
+    if (baud_mhz < 8000)
+        return 0;
+
+    int num_lanes = 4; /* Only applies to CN88XX, where always 4 lanes */
+
+    /* PCIe links only need the PLL parameter update, not the power state
+       dance below */
+    BDK_CSR_INIT(gserx_cfg, node, BDK_GSERX_CFG(qlm));
+    if (gserx_cfg.s.pcie)
+    {
+        /* Update PLL parameters */
+        /* Step 1: Set GSER()_GLBL_PLL_CFG_3[PLL_VCTRL_SEL_LCVCO_VAL] = 0x2, and
+           GSER()_GLBL_PLL_CFG_3[PCS_SDS_PLL_VCO_AMP] = 0 */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_GLBL_PLL_CFG_3(qlm),
+            c.s.pll_vctrl_sel_lcvco_val = 0x2;
+            c.s.pcs_sds_pll_vco_amp = 0);
+        /* Step 2: Set GSER()_GLBL_MISC_CONFIG_1[PCS_SDS_TRIM_CHP_REG] = 0x2. */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_GLBL_MISC_CONFIG_1(qlm),
+            c.s.pcs_sds_trim_chp_reg = 0x2);
+        return 0;
+    }
+
+    /* Applying this errata twice causes problems */
+    BDK_CSR_INIT(pll_cfg_3, node, BDK_GSERX_GLBL_PLL_CFG_3(qlm));
+    if (pll_cfg_3.s.pll_vctrl_sel_lcvco_val == 0x2)
+        return 0;
+
+    /* Put PHY in P2 Power-down state Need to Power down all lanes in a
+       QLM/DLM to force PHY to P2 state */
+    for (int i=0; i<num_lanes; i++)
+    {
+        /* Step 1: Set GSER()_LANE(lane_n)_PCS_CTLIFC_0[CFG_TX_PSTATE_REQ_OVERRD_VAL] = 0x3
+           Select P2 power state for Tx lane */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_0(qlm, i),
+            c.s.cfg_tx_pstate_req_ovrrd_val = 0x3);
+        /* Step 2: Set GSER()_LANE(lane_n)_PCS_CTLIFC_1[CFG_RX_PSTATE_REQ_OVERRD_VAL] = 0x3
+           Select P2 power state for Rx lane */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_1(qlm, i),
+            c.s.cfg_rx_pstate_req_ovrrd_val = 0x3);
+        /* Step 3: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN] = 1
+           Enable Tx power state override and Set
+           GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_EN] = 1
+           Enable Rx power state override */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, i),
+            c.s.cfg_tx_pstate_req_ovrrd_en = 0x1;
+            c.s.cfg_rx_pstate_req_ovrrd_en = 0X1);
+        /* Step 4: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ] = 1
+           Start the CTLIFC override state machine */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, i),
+            c.s.ctlifc_ovrrd_req = 0x1);
+    }
+
+    /* Update PLL parameters */
+    /* Step 5: Set GSER()_GLBL_PLL_CFG_3[PLL_VCTRL_SEL_LCVCO_VAL] = 0x2, and
+       GSER()_GLBL_PLL_CFG_3[PCS_SDS_PLL_VCO_AMP] = 0 */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_GLBL_PLL_CFG_3(qlm),
+        c.s.pll_vctrl_sel_lcvco_val = 0x2;
+        c.s.pcs_sds_pll_vco_amp = 0);
+    /* Step 6: Set GSER()_GLBL_MISC_CONFIG_1[PCS_SDS_TRIM_CHP_REG] = 0x2. */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_GLBL_MISC_CONFIG_1(qlm),
+        c.s.pcs_sds_trim_chp_reg = 0x2);
+    /* Wake up PHY and transition to P0 Power-up state to bring-up the lanes,
+       need to wake up all PHY lanes */
+    for (int i=0; i<num_lanes; i++)
+    {
+        /* Step 7: Set GSER()_LANE(lane_n)_PCS_CTLIFC_0[CFG_TX_PSTATE_REQ_OVERRD_VAL] = 0x0
+           Select P0 power state for Tx lane */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_0(qlm, i),
+            c.s.cfg_tx_pstate_req_ovrrd_val = 0x0);
+        /* Step 8: Set GSER()_LANE(lane_n)_PCS_CTLIFC_1[CFG_RX_PSTATE_REQ_OVERRD_VAL] = 0x0
+           Select P0 power state for Rx lane */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_1(qlm, i),
+            c.s.cfg_rx_pstate_req_ovrrd_val = 0x0);
+        /* Step 9: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN] = 1
+           Enable Tx power state override and Set
+           GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_EN] = 1
+           Enable Rx power state override */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, i),
+            c.s.cfg_tx_pstate_req_ovrrd_en = 0x1;
+            c.s.cfg_rx_pstate_req_ovrrd_en = 0X1);
+        /* Step 10: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ] = 1
+           Start the CTLIFC override state machine */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, i),
+            c.s.ctlifc_ovrrd_req = 0x1);
+    }
+
+    /* Step 11: Wait 10 msec */
+    wait_usec(node, 10000);
+
+    /* Release Lane Tx/Rx Power state override enables. */
+    for (int i=0; i<num_lanes; i++)
+    {
+        /* Step 12: Set GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN] = 0
+           Disable Tx power state override and Set
+           GSER()_LANE(lane_n)_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_EN] = 0
+           Disable Rx power state override */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, i),
+            c.s.cfg_tx_pstate_req_ovrrd_en = 0x0;
+            c.s.cfg_rx_pstate_req_ovrrd_en = 0X0);
+    }
+    /* Step 13: Poll GSER()_PLL_STAT.[PLL_LOCK] = 1
+       Poll and check that PLL is locked */
+    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_GSERX_PLL_STAT(qlm), pll_lock, ==, 1, 10000))
+    {
+        bdk_error("QLM%d: Timeout waiting for GSERX_PLL_STAT[pll_lock]\n", qlm);
+        return -1;
+    }
+
+    /* Step 14: Poll GSER()_QLM_STAT.[RST_RDY] = 1
+       Poll and check that QLM/DLM is Ready */
+    if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_GSERX_QLM_STAT(qlm), rst_rdy, ==, 1, 10000))
+    {
+        bdk_error("QLM%d: Timeout waiting for GSERX_QLM_STAT[rst_rdy]\n", qlm);
+        return -1;
+    }
+    return 0;
+}
+
+/**
+ * Errata (GSER-26636) 10G-KR/40G-KR - Inverted Tx Coefficient Direction Change
+ * Applied to all 10G standards (required for KR) but also applied to other
+ * standards in case software training is used.
+ * Applies to:
+ *     CN88XX pass 1.x
+ * Fixed in hardware:
+ *     CN88XX pass 2.x
+ *     CN81XX
+ *     CN83XX
+ *
+ * @param node     Node to apply errata fix for
+ * @param qlm      QLM to apply errata fix to
+ * @param baud_mhz QLM speed in Mhz
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_26636(bdk_node_t node, int qlm, int baud_mhz)
+{
+    /* Fixed in hardware on everything except CN88XX pass 1.x */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
+        return 0;
+
+    /* Invert the pre-correlation and tap1 change directions to compensate
+       for the inverted Tx coefficient direction described by the errata */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_RX_TXDIR_CTRL_1(qlm),
+        c.s.rx_precorr_chg_dir = 1;
+        c.s.rx_tap1_chg_dir = 1);
+    return 0;
+}
+
+/**
+ * (GSER-27140) SERDES has temperature drift sensitivity in the RX EQ<p>
+ * SERDES temperature drift sensitivity in receiver. Issues have
+ * been found with the Bit Error Rate (BER) reliability of
+ * 10GBASE-KR links over the commercial temperature range (0 to 100C),
+ * especially when subjected to rapid thermal ramp stress testing.
+ * (See HRM for corresponding case temperature requirements for each speed grade.)<p>
+ * Applies to:
+ *     CN88XX pass 1.x
+ *     CN88XX pass 2.x
+ *     CN83XX pass 1.x
+ *     CN81XX pass 1.x
+ * Fixed in hardware:
+ *     TBD<p>
+ * Only applies to QLMs running 10G
+ *
+ * @param node     Node to apply errata fix to
+ * @param qlm      QLM to apply errata fix to
+ * @param baud_mhz QLM baud rate in Mhz
+ * @param channel_loss
+ *                 Insertion loss at Nyquist rate (e.g. 5.125Ghz for XFI/XLAUI) in dB
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_27140(bdk_node_t node, int qlm, int baud_mhz, int channel_loss)
+{
+    /* Only applies to 10.3125 Gbaud (XFI/10G-KR class) links */
+    if (baud_mhz != 10312)
+        return 0;
+
+    /* A channel loss of -1 means the loss is unknown. A short channel is
+       considered to have loss between 0 and 10 dB */
+    bool short_channel = (channel_loss >= 0) && (channel_loss <= 10);
+
+    /* I. For each GSER QLM: */
+    /* Workaround GSER-27140: */
+    /* (1) GSER-26150 = Applied by the caller */
+    /* (2) Write GSER()_LANE_VMA_FINE_CTRL_0[RX_SDLL_IQ_MAX_FINE] = 0xE */
+    /* (3) Write GSER()_LANE_VMA_FINE_CTRL_0[RX_SDLL_IQ_MIN_FINE] = 0x8 */
+    /* (4) Write GSER()_LANE_VMA_FINE_CTRL_0[RX_SDLL_IQ_STEP_FINE] = 0x2 */
+    /* (5) Write GSER()_LANE_VMA_FINE_CTRL_0[VMA_WINDOW_WAIT_FINE] = 0x5 */
+    /* (6) Write GSER()_LANE_VMA_FINE_CTRL_0[LMS_WAIT_TIME_FINE] = 0x5 */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANE_VMA_FINE_CTRL_0(qlm),
+        c.s.rx_sdll_iq_max_fine = 0xE;
+        c.s.rx_sdll_iq_min_fine = 0x8;
+        c.s.rx_sdll_iq_step_fine = 0x2;
+        c.s.vma_window_wait_fine = 0x5;
+        c.s.lms_wait_time_fine = 0x5);
+    /* (7) Write GSER()_LANE_VMA_FINE_CTRL_2[RX_PRECTLE_GAIN_MAX_FINE] = 0xB */
+    /* (8) Write GSER()_LANE_VMA_FINE_CTRL_2[RX_PRECTLE_GAIN_MIN_FINE] = 0x6(long) or 0x0(short) */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANE_VMA_FINE_CTRL_2(qlm),
+        c.s.rx_prectle_gain_max_fine = 0xB;
+        c.s.rx_prectle_gain_min_fine = short_channel ? 0x0 : 0x6);
+    /* (9) Write GSER()_RX_TXDIR_CTRL_0[RX_BOOST_LO_THRES] = 0x4 */
+    /* (10) Write GSER()_RX_TXDIR_CTRL_0[RX_BOOST_HI_THRES] = 0xB */
+    /* (11) Write GSER()_RX_TXDIR_CTRL_0[RX_BOOST_HI_VAL] = 0xF */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_RX_TXDIR_CTRL_0(qlm),
+        c.s.rx_boost_lo_thrs = 0x4;
+        c.s.rx_boost_hi_thrs = 0xB;
+        c.s.rx_boost_hi_val = 0xF);
+    /* (12) Write GSER()_RX_TXDIR_CTRL_1[RX_TAP1_LO_THRS] = 0x8 */
+    /* (13) Write GSER()_RX_TXDIR_CTRL_1[RX_TAP1_HI_THRS] = 0x17 */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_RX_TXDIR_CTRL_1(qlm),
+        c.s.rx_tap1_lo_thrs = 0x8;
+        c.s.rx_tap1_hi_thrs = 0x17);
+
+    /* (14) Write GSER()_EQ_WAIT_TIME[RXEQ_WAIT_CNT] = 0x6 */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_EQ_WAIT_TIME(qlm),
+        c.s.rxeq_wait_cnt = 0x6);
+    /* (15) Write GSER()_RX_TXDIR_CTRL_2[RX_PRECORR_HI_THRS] = 0xC0 */
+    /* (16) Write GSER()_RX_TXDIR_CTRL_2[RX_PRECORR_LO_THRS] = 0x40 */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_RX_TXDIR_CTRL_2(qlm),
+        c.s.rx_precorr_hi_thrs = 0xc0;
+        c.s.rx_precorr_lo_thrs = 0x40);
+
+    /* We can't call the normal bdk-qlm function as it uses pointers that
+       don't work when running in secondary nodes before CCPI is up */
+    int num_lanes = 4;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (qlm >= 4)))
+        num_lanes = 2;
+
+    /* II. For each GSER QLM SerDes lane: */
+    /* Establish typical values, which are already reset values in pass 2: */
+    for (int lane = 0; lane < num_lanes; lane++)
+    {
+        /* (17) For each GSER lane in the 10GBASE-KR link: */
+        /* (a) Write GSER()_LANE()_RX_VALBBD_CTRL_0[AGC_GAIN] = 0x3 */
+        /* (b) Write GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_GAIN] = 0x2 */
+        BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(qlm, lane),
+            c.s.agc_gain = 0x3;
+            c.s.dfe_gain = 0x2);
+    }
+
+    /* III. The GSER QLM SerDes Lanes are now ready. */
+    return 0;
+}
+
+/**
+ * Errata GSER-27882 -GSER 10GBASE-KR Transmit Equalizer
+ * Training may not update PHY Tx Taps. This function is not static
+ * so we can share it with BGX KR
+ * Applies to:
+ *     CN88XX pass 1.x, 2.0, 2.1
+ * Fixed in hardware:
+ *     CN88XX pass 2.2 and higher
+ *     CN81XX
+ *     CN83XX
+ *
+ * @param node   Node to apply errata fix for
+ * @param qlm    QLM to apply errata fix to
+ * @param lane   Lane to apply errata fix to
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_27882(bdk_node_t node, int qlm, int lane)
+{
+    /* Toggle the Tx Coeff Req override (set value, enable override, request,
+       disable override, request again) to force the PHY Tx taps to update */
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_0(qlm, lane),
+        c.s.cfg_tx_coeff_req_ovrrd_val = 1);
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, lane),
+        c.s.cfg_tx_coeff_req_ovrrd_en = 1);
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, lane),
+        c.s.ctlifc_ovrrd_req = 1);
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, lane),
+        c.s.cfg_tx_coeff_req_ovrrd_en = 0);
+    BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PCS_CTLIFC_2(qlm, lane),
+        c.s.ctlifc_ovrrd_req = 1);
+    return 0;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-margin-cn8xxx.c b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-margin-cn8xxx.c
new file mode 100644
index 0000000000..c970f2189e
--- /dev/null
+++ b/src/vendorcode/cavium/bdk/libbdk-hal/qlm/bdk-qlm-margin-cn8xxx.c
@@ -0,0 +1,271 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#include <bdk.h>
+#include "libbdk-arch/bdk-csrs-gser.h"
+#include "libbdk-hal/if/bdk-if.h"
+#include "libbdk-hal/bdk-qlm.h"
+#include "libbdk-hal/bdk-utils.h"
+
+/* This code is an optional part of the BDK. It is only linked in
+ if BDK_REQUIRE() needs it */
+BDK_REQUIRE_DEFINE(QLM_MARGIN);
+
+typedef union
+{
+ struct
+ {
+ uint64_t rx_os_mvalbbd_2 :16;
+ uint64_t rx_os_mvalbbd_1 :16;
+ uint64_t reserved_63_32 :32;
+
+ } s;
+ struct
+ {
+ uint64_t Qb :6;
+ uint64_t Q :6;
+ uint64_t Lb :6; // Spans the two registers
+ uint64_t L :6;
+ uint64_t qerr0 :6;
+ int64_t reserved_63_30 :34;
+ } f;
+ uint64_t u;
+} rx_os_mvalbbd_t;
+
+int __bdk_disable_ccpi_error_report = 0;
+
+static int convert_to_signed_mag(int source)
+{
+    /* Synopsys encodes the sign in an unexpected way: 0=negative and 1=positive.
+       So bit 5 should be 0 for negative numbers, 1 for positive numbers */
+ if (source < 0)
+ source = -source;
+ else
+ source |= 0x20;
+ return source;
+}
+
+static rx_os_mvalbbd_t get_current_settings(bdk_node_t node, int qlm, int qlm_lane)
+{
+ rx_os_mvalbbd_t mvalbbd;
+ mvalbbd.u = 0;
+
+ BDK_CSR_INIT(rx_cfg_1, node, BDK_GSERX_LANEX_RX_CFG_1(qlm, qlm_lane));
+ if (!rx_cfg_1.s.pcs_sds_rx_os_men)
+ {
+ /* Get the current settings */
+ BDK_CSR_INIT(rx_os_out_1, node, BDK_GSERX_LANEX_RX_OS_OUT_1(qlm, qlm_lane));
+ BDK_CSR_INIT(rx_os_out_2, node, BDK_GSERX_LANEX_RX_OS_OUT_2(qlm, qlm_lane));
+ BDK_CSR_INIT(rx_os_out_3, node, BDK_GSERX_LANEX_RX_OS_OUT_3(qlm, qlm_lane));
+ int qerr0 = bdk_extracts(rx_os_out_1.u, 0, 6);
+ int lb = bdk_extracts(rx_os_out_2.u, 0, 6);
+ int l = bdk_extracts(rx_os_out_2.u, 6, 6);
+ int qb = bdk_extracts(rx_os_out_3.u, 0, 6);
+ int q = bdk_extracts(rx_os_out_3.u, 6, 6);
+ /* Enable the override with the current values */
+ mvalbbd.f.Qb = convert_to_signed_mag(qb);
+ mvalbbd.f.Q = convert_to_signed_mag(q);
+ mvalbbd.f.Lb = convert_to_signed_mag(lb);
+ mvalbbd.f.L = convert_to_signed_mag(l);
+ mvalbbd.f.qerr0 = convert_to_signed_mag(qerr0);
+ }
+ else
+ {
+ BDK_CSR_INIT(mvalbbd_1, node, BDK_GSERX_LANEX_RX_OS_MVALBBD_1(qlm, qlm_lane));
+ mvalbbd.s.rx_os_mvalbbd_1 = mvalbbd_1.s.pcs_sds_rx_os_mval;
+ BDK_CSR_INIT(mvalbbd_2, node, BDK_GSERX_LANEX_RX_OS_MVALBBD_2(qlm, qlm_lane));
+ mvalbbd.s.rx_os_mvalbbd_2 = mvalbbd_2.s.pcs_sds_rx_os_mval;
+ }
+ //printf("qerr0=%d, lb=%d, l=%d, qb=%d, q=%d\n",
+ // mvalbbd.f.qerr0, mvalbbd.f.Lb, mvalbbd.f.L, mvalbbd.f.Qb, mvalbbd.f.Q);
+ return mvalbbd;
+}
+
+/**
+ * Get the current RX margining parameter
+ *
+ * @param node Node to read margin value from
+ * @param qlm QLM to read from
+ * @param qlm_lane Lane to read
+ * @param margin_type
+ * Type of margining parameter to read
+ *
+ * @return Current margining parameter value
+ */
+int64_t bdk_qlm_margin_rx_get(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type)
+{
+ rx_os_mvalbbd_t mvalbbd = get_current_settings(node, qlm, qlm_lane);
+
+ switch (margin_type)
+ {
+ case BDK_QLM_MARGIN_VERTICAL:
+ if (mvalbbd.f.Q & 0x20) /* Check if sign bit says positive */
+ return mvalbbd.f.Q & 0x1f; /* positive, strip off sign */
+ else
+ return -mvalbbd.f.Q; /* negative */
+ case BDK_QLM_MARGIN_HORIZONTAL:
+ return 0;
+ }
+ return 0;
+}
+
+/**
+ * Get the current RX margining parameter minimum value
+ *
+ * @param node Node to read margin value from
+ * @param qlm QLM to read from
+ * @param qlm_lane Lane to read
+ * @param margin_type
+ * Type of margining parameter to read
+ *
+ * @return Current margining parameter minimum value
+ */
+int64_t bdk_qlm_margin_rx_get_min(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type)
+{
+ switch (margin_type)
+ {
+ case BDK_QLM_MARGIN_VERTICAL:
+ return -31;
+ case BDK_QLM_MARGIN_HORIZONTAL:
+ return 0;
+ }
+ return 0;
+}
+
+/**
+ * Get the current RX margining parameter maximum value
+ *
+ * @param node Node to read margin value from
+ * @param qlm QLM to read from
+ * @param qlm_lane Lane to read
+ * @param margin_type
+ * Type of margining parameter to read
+ *
+ * @return Current margining parameter maximum value
+ */
+int64_t bdk_qlm_margin_rx_get_max(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type)
+{
+ switch (margin_type)
+ {
+ case BDK_QLM_MARGIN_VERTICAL:
+ return 31;
+ case BDK_QLM_MARGIN_HORIZONTAL:
+ return 0;
+ }
+ return 0;
+}
+
+/**
+ * Set the current RX margining parameter value
+ *
+ * @param node Node to set margin value on
+ * @param qlm QLM to set
+ * @param qlm_lane Lane to set
+ * @param margin_type
+ * Type of margining parameter to set
+ * @param value Value of margining parameter
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_qlm_margin_rx_set(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type, int value)
+{
+ rx_os_mvalbbd_t mvalbbd = get_current_settings(node, qlm, qlm_lane);
+
+ switch (margin_type)
+ {
+ case BDK_QLM_MARGIN_VERTICAL:
+ if (value < 0)
+ mvalbbd.f.Q = -value; /* Sign bit is zero, weird Synopsys */
+ else
+ mvalbbd.f.Q = value | 0x20; /* Sign bit is one, weird Synopsys */
+ break;
+ case BDK_QLM_MARGIN_HORIZONTAL:
+ return -1;
+ }
+
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_OS_MVALBBD_1(qlm, qlm_lane),
+ c.s.pcs_sds_rx_os_mval = mvalbbd.s.rx_os_mvalbbd_1);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_OS_MVALBBD_2(qlm, qlm_lane),
+ c.s.pcs_sds_rx_os_mval = mvalbbd.s.rx_os_mvalbbd_2);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_CFG_1(qlm, qlm_lane),
+ c.s.pcs_sds_rx_os_men = 1);
+
+ /* Disable the DFE(s), gives a better eye measurement */
+ BDK_CSR_INIT(pwr_ctrl, node, BDK_GSERX_LANEX_PWR_CTRL(qlm, qlm_lane));
+ if (!pwr_ctrl.s.rx_lctrl_ovrrd_en)
+ {
+ BDK_CSR_WRITE(node, BDK_GSERX_LANEX_RX_LOOP_CTRL(qlm, qlm_lane), 0xF1);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PWR_CTRL(qlm, qlm_lane),
+ c.s.rx_lctrl_ovrrd_en = 1);
+ }
+
+ if (qlm >= 8)
+ __bdk_disable_ccpi_error_report = 1;
+
+ return 0;
+}
+
+/**
+ * Restore the supplied RX margining parameter value as if it was never set. This
+ * disables any overrides in the SERDES needed to perform margining
+ *
+ * @param node Node to restore margin value on
+ * @param qlm QLM to restore
+ * @param qlm_lane Lane to restore
+ * @param margin_type
+ * Type of margining parameter to restore
+ * @param value Value of margining parameter
+ *
+ * @return Zero on success, negative on failure
+ */
+int bdk_qlm_margin_rx_restore(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_margin_t margin_type, int value)
+{
+ BDK_CSR_INIT(rx_cfg_1, node, BDK_GSERX_LANEX_RX_CFG_1(qlm, qlm_lane));
+ /* Return if no overrides have been applied */
+ if (!rx_cfg_1.s.pcs_sds_rx_os_men)
+ return 0;
+ bdk_qlm_margin_rx_set(node, qlm, qlm_lane, margin_type, value);
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_RX_CFG_1(qlm, qlm_lane),
+ c.s.pcs_sds_rx_os_men = 0);
+ /* Enable the DFE(s) */
+ BDK_CSR_MODIFY(c, node, BDK_GSERX_LANEX_PWR_CTRL(qlm, qlm_lane),
+ c.s.rx_lctrl_ovrrd_en = 0);
+ __bdk_disable_ccpi_error_report = 0;
+ return 0;
+}
+
diff --git a/src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c b/src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c
index 25d6b9eed3..1594beaf3b 100644
--- a/src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c
+++ b/src/vendorcode/cavium/bdk/libbdk-os/bdk-init.c
@@ -37,501 +37,7 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
-#include <stdio.h>
-#include <unistd.h>
-#include "libbdk-arch/bdk-csrs-ap.h"
-#include "libbdk-arch/bdk-csrs-l2c.h"
-#include "libbdk-arch/bdk-csrs-l2c_tad.h"
-#include "libbdk-arch/bdk-csrs-mio_boot.h"
-#include "libbdk-arch/bdk-csrs-rom.h"
-#include "libbdk-arch/bdk-csrs-uaa.h"
-
-uint64_t __bdk_init_reg_x0; /* The contents of X0 when this image started */
-uint64_t __bdk_init_reg_x1; /* The contents of X1 when this image started */
-uint64_t __bdk_init_reg_pc; /* The contents of PC when this image started */
-static int64_t __bdk_alive_coremask[BDK_NUMA_MAX_NODES];
-
-/**
- * Set the baud rate on a UART
- *
- * @param uart uart to set
- * @param baudrate Baud rate (9600, 19200, 115200, etc)
- * @param use_flow_control
- * Non zero if hardware flow control should be enabled
- */
-void bdk_set_baudrate(bdk_node_t node, int uart, int baudrate, int use_flow_control)
-{
- /* 1.2.1 Initialization Sequence (Power-On/Hard/Cold Reset) */
- /* 1. Wait for IOI reset (srst_n) to deassert. */
- /* 2. Assert all resets:
- a. UAA reset: UCTL_CTL[UAA_RST] = 1
- b. UCTL reset: UCTL_CTL[UCTL_RST] = 1 */
- BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
- c.s.uaa_rst = 1;
- c.s.uctl_rst = 1);
-
- /* 3. Configure the HCLK:
- a. Reset the clock dividers: UCTL_CTL[H_CLKDIV_RST] = 1.
- b. Select the HCLK frequency
- i. UCTL_CTL[H_CLKDIV] = desired value,
- ii. UCTL_CTL[H_CLKDIV_EN] = 1 to enable the HCLK.
- iii. Readback UCTL_CTL to ensure the values take effect.
- c. Deassert the HCLK clock divider reset: UCTL_CTL[H_CLKDIV_RST] = 0. */
- BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
- c.s.h_clkdiv_sel = 3; /* Run at SCLK / 6, matches emulator */
- c.s.h_clk_byp_sel = 0;
- c.s.h_clk_en = 1);
- BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart),
- c.s.h_clkdiv_rst = 0);
-
- /* 4. Wait 20 HCLK cycles from step 3 for HCLK to start and async fifo
- to properly reset. */
- bdk_wait(200); /* Overkill */
-
- /* 5. Deassert UCTL and UAHC resets:
- a. UCTL_CTL[UCTL_RST] = 0
- b. Wait 10 HCLK cycles.
- c. UCTL_CTL[UAHC_RST] = 0
- d. You will have to wait 10 HCLK cycles before accessing any
- HCLK-only registers. */
- BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart), c.s.uctl_rst = 0);
- bdk_wait(100); /* Overkill */
- BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart), c.s.uaa_rst = 0);
- bdk_wait(100); /* Overkill */
-
- /* 6. Enable conditional SCLK of UCTL by writing UCTL_CTL[CSCLK_EN] = 1. */
- BDK_CSR_MODIFY(c, node, BDK_UAAX_UCTL_CTL(uart), c.s.csclk_en = 1);
-
- /* 7. Initialize the integer and fractional baud rate divider registers
- UARTIBRD and UARTFBRD as follows:
- a. Baud Rate Divisor = UARTCLK/(16xBaud Rate) = BRDI + BRDF
- b. The fractional register BRDF, m is calculated as integer(BRDF x 64 + 0.5)
- Example calculation:
- If the required baud rate is 230400 and hclk = 4MHz then:
- Baud Rate Divisor = (4x10^6)/(16x230400) = 1.085
- This means BRDI = 1 and BRDF = 0.085.
- Therefore, fractional part, BRDF = integer((0.085x64)+0.5) = 5
- Generated baud rate divider = 1+5/64 = 1.078 */
- uint64_t divisor_x_64 = bdk_clock_get_rate(node, BDK_CLOCK_SCLK) / (baudrate * 16 * 6 / 64);
- if (bdk_is_platform(BDK_PLATFORM_EMULATOR))
- {
- /* The hardware emulator currently fixes the uart at a fixed rate */
- divisor_x_64 = 64;
- }
- BDK_CSR_MODIFY(c, node, BDK_UAAX_IBRD(uart),
- c.s.baud_divint = divisor_x_64 >> 6);
- BDK_CSR_MODIFY(c, node, BDK_UAAX_FBRD(uart),
- c.s.baud_divfrac = divisor_x_64 & 0x3f);
-
- /* 8. Program the line control register UAA(0..1)_LCR_H and the control
- register UAA(0..1)_CR */
- BDK_CSR_MODIFY(c, node, BDK_UAAX_LCR_H(uart),
- c.s.sps = 0; /* No parity */
- c.s.wlen = 3; /* 8 bits */
- c.s.fen = 1; /* FIFOs enabled */
- c.s.stp2 = 0; /* Use one stop bit, not two */
- c.s.eps = 0; /* No parity */
- c.s.pen = 0; /* No parity */
- c.s.brk = 0); /* Don't send a break */
- BDK_CSR_MODIFY(c, node, BDK_UAAX_CR(uart),
- c.s.ctsen = use_flow_control;
- c.s.rtsen = use_flow_control;
- c.s.out1 = 1; /* Drive data carrier detect */
- c.s.rts = 0; /* Don't override RTS */
- c.s.dtr = 0; /* Don't override DTR */
- c.s.rxe = 1; /* Enable receive */
- c.s.txe = 1; /* Enable transmit */
- c.s.lbe = 0; /* Disable loopback */
- c.s.uarten = 1); /* Enable uart */
-}
-
-/**
- * First C code run when a BDK application starts. It is called by bdk-start.S.
- *
- * @param image_crc A CRC32 of the entire image before any variables might have been updated by C.
- * This should match the CRC32 in the image header.
- * @param reg_x0 The contents of the X0 register when the image started. In images loaded after
- * the boot stub, this contains a "environment" string containing "BOARD=xxx". The
- * use of this is deprecated as it has been replaced with a expandable device tree
- * in X1.
- * @param reg_x1 The contents of the X1 register when the image started. For all images after the
- * boot stub, this contains a physical address of a device tree in memory. This
- * should be used by all images to identify and configure the board we are running
- * on.
- * @param reg_pc This is the PC the code started at before relocation. This is useful for
- * the first stage to determine if it from trusted or non-trusted code.
- */
-void __bdk_init(uint32_t image_crc, uint64_t reg_x0, uint64_t reg_x1, uint64_t reg_pc) __attribute((noreturn));
-void __bdk_init(uint32_t image_crc, uint64_t reg_x0, uint64_t reg_x1, uint64_t reg_pc)
-{
- extern void __bdk_exception_current_el_sync_sp0();
- BDK_MSR(VBAR_EL3, __bdk_exception_current_el_sync_sp0);
- BDK_MSR(VBAR_EL2, __bdk_exception_current_el_sync_sp0);
- BDK_MSR(VBAR_EL1, __bdk_exception_current_el_sync_sp0);
-
- /* Use Cavium specific function to change memory to normal instead of
- device attributes. DCVA47=1 makes unmapped addresses behave as
- non-shared memory (not inner or outer shared in ARM speak) */
- bdk_ap_cvmmemctl0_el1_t cvmmemctl0_el1;
- BDK_MRS(s3_0_c11_c0_4, cvmmemctl0_el1.u);
- cvmmemctl0_el1.s.dcva47 = 1;
- BDK_MSR(s3_0_c11_c0_4, cvmmemctl0_el1.u);
-
-
- /* Setup running with no mmu */
- bdk_ap_sctlr_el3_t sctlr_el3;
- BDK_MRS(SCTLR_EL3, sctlr_el3.u);
- sctlr_el3.s.wxn = 0; /* No write perm changes */
- sctlr_el3.s.i = 1; /* Enable Icache */
- sctlr_el3.s.sa = 1; /* Enable stack alignment checking */
- sctlr_el3.s.cc = 1; /* Enable Dcache */
- sctlr_el3.s.aa = 0; /* Allow unaligned accesses */
- sctlr_el3.s.m = 0; /* Disable MMU */
- BDK_MSR(SCTLR_EL3, sctlr_el3.u);
-
- bdk_node_t node = bdk_numa_local();
- bdk_numa_set_exists(node);
-
- /* Default color, Reset scroll region and goto bottom */
- static const char BANNER_1[] = "\33[0m\33[1;r\33[100;1H"
- "\n\n\nCavium SOC\n";
- static const char BANNER_2[] = "Locking L2 cache\n";
- static const char BANNER_CRC_RIGHT[] = "PASS: CRC32 verification\n";
- static const char BANNER_CRC_WRONG[] = "FAIL: CRC32 verification\n";
- static const char BANNER_3[] = "Transferring to thread scheduler\n";
-
- BDK_MSR(TPIDR_EL3, 0);
-
- if (bdk_is_boot_core())
- {
- /* Initialize the platform */
- __bdk_platform_init();
- if (!bdk_is_platform(BDK_PLATFORM_EMULATOR) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
- {
- BDK_CSR_INIT(l2c_oci_ctl, node, BDK_L2C_OCI_CTL);
- if (l2c_oci_ctl.s.iofrcl)
- {
- /* CCPI isn't being used, so don't reset if the links change */
- BDK_CSR_WRITE(node, BDK_RST_OCX, 0);
- BDK_CSR_READ(node, BDK_RST_OCX);
- /* Force CCPI links down so they aren't trying to run while
- we're configuring the QLMs */
- __bdk_init_ccpi_early(1);
- }
- }
-
- /* AP-23192: The DAP in pass 1.0 has an issue where its state isn't cleared for
- cores in reset. Put the DAPs in reset as their associated cores are
- also in reset */
- if (!bdk_is_platform(BDK_PLATFORM_EMULATOR) && CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_0))
- BDK_CSR_WRITE(node, BDK_RST_DBG_RESET, BDK_CSR_READ(node, BDK_RST_PP_RESET));
-
- /* Enable the timer */
- BDK_MSR(CNTFRQ_EL0, BDK_GTI_RATE); /* Needed for Asim */
- bdk_clock_setup(node);
-
- /* Only setup the uarts if they haven't been already setup */
- BDK_CSR_INIT(uctl_ctl0, node, BDK_UAAX_UCTL_CTL(0));
- if (!uctl_ctl0.s.h_clk_en)
- bdk_set_baudrate(node, 0, BDK_UART_BAUDRATE, 0);
- BDK_CSR_INIT(uctl_ctl1, node, BDK_UAAX_UCTL_CTL(1));
- if (!uctl_ctl1.s.h_clk_en)
- bdk_set_baudrate(node, 1, BDK_UART_BAUDRATE, 0);
-
- __bdk_fs_init_early();
- if (BDK_SHOW_BOOT_BANNERS)
- write(1, BANNER_1, sizeof(BANNER_1)-1);
-
- /* Only lock L2 if DDR3 isn't initialized */
- if (bdk_is_platform(BDK_PLATFORM_HW) && !__bdk_is_dram_enabled(node))
- {
- if (BDK_TRACE_ENABLE_INIT)
- write(1, BANNER_2, sizeof(BANNER_2)-1);
- /* Lock the entire cache for chips with less than 4MB of
- L2/LLC. Larger chips can use the 1/4 of the cache to
- speed up DRAM init and testing */
- int lock_size = bdk_l2c_get_cache_size_bytes(node);
- if (lock_size >= (4 << 20))
- lock_size = lock_size * 3 / 4;
- bdk_l2c_lock_mem_region(node, bdk_numa_get_address(node, 0), lock_size);
- /* The locked region isn't considered dirty by L2. Do read
- read/write of each cache line to force each to be dirty. This
- is needed across the whole line to make sure the L2 dirty bits
- are all up to date */
- volatile uint64_t *ptr = bdk_phys_to_ptr(bdk_numa_get_address(node, 8));
- /* The above pointer got address 8 to avoid NULL pointer checking
- in bdk_phys_to_ptr(). Correct it here */
- ptr--;
- uint64_t *end = bdk_phys_to_ptr(bdk_numa_get_address(node, bdk_l2c_get_cache_size_bytes(node)));
- while (ptr < end)
- {
- *ptr = *ptr;
- ptr++;
- }
- /* The above locking will cause L2 to load zeros without DRAM setup.
- This will cause L2C_TADX_INT[rddislmc], which we suppress below */
- BDK_CSR_DEFINE(l2c_tadx_int, BDK_L2C_TADX_INT_W1C(0));
- l2c_tadx_int.u = 0;
- l2c_tadx_int.s.wrdislmc = 1;
- l2c_tadx_int.s.rddislmc = 1;
- l2c_tadx_int.s.rdnxm = 1;
-
- BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(0), l2c_tadx_int.u);
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX))
- {
- BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(1), l2c_tadx_int.u);
- BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(2), l2c_tadx_int.u);
- BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(3), l2c_tadx_int.u);
- }
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
- {
- BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(4), l2c_tadx_int.u);
- BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(5), l2c_tadx_int.u);
- BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(6), l2c_tadx_int.u);
- BDK_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(7), l2c_tadx_int.u);
- }
- }
-
- /* Validate the image CRC */
- extern void _start();
- uint32_t *ptr_crc32 = (uint32_t *)(_start + 16);
- uint32_t correct_crc = bdk_le32_to_cpu(*ptr_crc32);
- if (correct_crc == image_crc)
- write(1, BANNER_CRC_RIGHT, sizeof(BANNER_CRC_RIGHT) - 1);
- else
- write(1, BANNER_CRC_WRONG, sizeof(BANNER_CRC_WRONG) - 1);
-
- if (BDK_TRACE_ENABLE_INIT)
- write(1, BANNER_3, sizeof(BANNER_3)-1);
- bdk_thread_initialize();
- }
-
- /* Enable the core timer */
- BDK_MSR(CNTFRQ_EL0, BDK_GTI_RATE); /* Needed for Asim */
- bdk_ap_cntps_ctl_el1_t cntps_ctl_el1;
- cntps_ctl_el1.u = 0;
- cntps_ctl_el1.s.imask = 1;
- cntps_ctl_el1.s.enable = 1;
- BDK_MSR(CNTPS_CTL_EL1, cntps_ctl_el1.u);
-
- /* Setup an exception stack in case we crash */
- int EX_STACK_SIZE = 16384;
- void *exception_stack = malloc(EX_STACK_SIZE);
- extern void __bdk_init_exception_stack(void *ptr);
- __bdk_init_exception_stack(exception_stack + EX_STACK_SIZE);
-
- bdk_atomic_add64(&__bdk_alive_coremask[node], bdk_core_to_mask());
-
- /* Record our input registers for use later */
- __bdk_init_reg_x0 = reg_x0;
- __bdk_init_reg_x1 = reg_x1;
- __bdk_init_reg_pc = reg_pc;
- bdk_thread_first(__bdk_init_main, 0, NULL, 0);
-}
-
-/**
- * Call this function to take secondary cores out of reset and have
- * them start running threads
- *
- * @param node Node to use in a Numa setup. Can be an exact ID or a special
- * value.
- * @param coremask Cores to start. Zero is a shortcut for all.
- *
- * @return Zero on success, negative on failure.
- */
-int bdk_init_cores(bdk_node_t node, uint64_t coremask)
-{
- extern void __bdk_start_cores();
- if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
- {
- /* Write the address of the main entry point */
- BDK_TRACE(INIT, "N%d: Setting address for boot jump\n", node);
- BDK_CSR_WRITE(node, BDK_MIO_BOOT_AP_JUMP, (uint64_t)__bdk_start_cores);
- }
- else
- {
- BDK_TRACE(INIT, "N%d: Setting ROM boot code\n", node);
- /* Assembly for ROM memory:
- d508711f ic ialluis
- d503201f nop
- 58000040 ldr x0, 328 <branch_addr>
- d61f0000 br x0
- branch_addr:
- Memory is little endain, so 64 bit constants have the first
- instruction in the low word */
- BDK_CSR_WRITE(node, BDK_ROM_MEMX(0), 0xd503201fd508711f);
- BDK_CSR_WRITE(node, BDK_ROM_MEMX(1), 0xd61f000058000040);
- BDK_CSR_WRITE(node, BDK_ROM_MEMX(2), (uint64_t)__bdk_start_cores);
- }
-
- /* Choose all cores by default */
- if (coremask == 0)
- coremask = -1;
-
- /* Limit to the cores that aren't already running */
- coremask &= ~__bdk_alive_coremask[node];
-
- /* Limit to the cores that are specified in configuration menu */
- uint64_t config_coremask = bdk_config_get_int(BDK_CONFIG_COREMASK);
- if (config_coremask)
- coremask &= config_coremask;
-
- /* Limit to the cores that exist */
- coremask &= (1ull<<bdk_get_num_cores(node)) - 1;
-
- uint64_t reset = BDK_CSR_READ(node, BDK_RST_PP_RESET);
- BDK_TRACE(INIT, "N%d: Cores currently in reset: 0x%lx\n", node, reset);
- uint64_t need_reset_off = reset & coremask;
- if (need_reset_off)
- {
- BDK_TRACE(INIT, "N%d: Taking cores out of reset (0x%lx)\n", node, need_reset_off);
- BDK_CSR_WRITE(node, BDK_RST_PP_RESET, reset & ~need_reset_off);
- /* Wait for cores to finish coming out of reset */
- bdk_wait_usec(1);
- if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_RST_PP_PENDING, pend, ==, 0, 100000))
- bdk_error("Timeout wating for reset pending to clear");
- /* AP-23192: The DAP in pass 1.0 has an issue where its state isn't cleared for
- cores in reset. Put the DAPs in reset as their associated cores are
- also in reset */
- if (!bdk_is_platform(BDK_PLATFORM_EMULATOR) && CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_0))
- BDK_CSR_WRITE(node, BDK_RST_DBG_RESET, reset & ~need_reset_off);
- }
-
- BDK_TRACE(INIT, "N%d: Wait up to 1s for the cores to boot\n", node);
- uint64_t timeout = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) + bdk_clock_get_count(BDK_CLOCK_TIME);
- while ((bdk_clock_get_count(BDK_CLOCK_TIME) < timeout) && ((bdk_atomic_get64(&__bdk_alive_coremask[node]) & coremask) != coremask))
- {
- /* Tight spin, no thread schedules */
- }
-
- if ((bdk_atomic_get64(&__bdk_alive_coremask[node]) & coremask) != coremask)
- {
- bdk_error("Node %d: Some cores failed to start. Alive mask 0x%lx, requested 0x%lx\n",
- node, __bdk_alive_coremask[node], coremask);
- return -1;
- }
- BDK_TRACE(INIT, "N%d: All cores booted\n", node);
- return 0;
-}
-
-/**
- * Put cores back in reset and power them down
- *
- * @param node Node to update
- * @param coremask Each bit will be a core put in reset. Cores already in reset are unaffected
- *
- * @return Zero on success, negative on failure
- */
-int bdk_reset_cores(bdk_node_t node, uint64_t coremask)
-{
- extern void __bdk_reset_thread(int arg1, void *arg2);
-
- /* Limit to the cores that exist */
- coremask &= (1ull<<bdk_get_num_cores(node)) - 1;
-
- /* Update which cores are in reset */
- uint64_t reset = BDK_CSR_READ(node, BDK_RST_PP_RESET);
- BDK_TRACE(INIT, "N%d: Cores currently in reset: 0x%lx\n", node, reset);
- coremask &= ~reset;
- BDK_TRACE(INIT, "N%d: Cores to put into reset: 0x%lx\n", node, coremask);
-
- /* Check if everything is already done */
- if (coremask == 0)
- return 0;
-
- int num_cores = bdk_get_num_cores(node);
- for (int core = 0; core < num_cores; core++)
- {
- uint64_t my_mask = 1ull << core;
- /* Skip cores not in mask */
- if ((coremask & my_mask) == 0)
- continue;
- BDK_TRACE(INIT, "N%d: Telling core %d to go into reset\n", node, core);
- if (bdk_thread_create(node, my_mask, __bdk_reset_thread, 0, NULL, 0))
- {
- bdk_error("Failed to create thread for putting core in reset");
- continue;
- }
- /* Clear the core in the alive mask */
- bdk_atomic_fetch_and_bclr64_nosync((uint64_t*)&__bdk_alive_coremask[node], my_mask);
- }
-
- BDK_TRACE(INIT, "N%d: Waiting for all reset bits to be set\n", node);
- uint64_t timeout = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) + bdk_clock_get_count(BDK_CLOCK_TIME);
- while (bdk_clock_get_count(BDK_CLOCK_TIME) < timeout)
- {
- reset = BDK_CSR_READ(node, BDK_RST_PP_RESET);
- if ((reset & coremask) == coremask)
- break;
- bdk_thread_yield();
- }
- /* AP-23192: The DAP in pass 1.0 has an issue where its state isn't cleared for
- cores in reset. Put the DAPs in reset as their associated cores are
- also in reset */
- if (!bdk_is_platform(BDK_PLATFORM_EMULATOR) && CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_0))
- BDK_CSR_WRITE(node, BDK_RST_DBG_RESET, BDK_CSR_READ(node, BDK_RST_PP_RESET));
-
- BDK_TRACE(INIT, "N%d: Cores now in reset: 0x%lx\n", node, reset);
-
- return ((reset & coremask) == coremask) ? 0 : -1;
-}
-
-/**
- * Call this function to take secondary nodes and cores out of
- * reset and have them start running threads
- *
- * @param skip_cores If non-zero, cores are not started. Only the nodes are setup
- * @param ccpi_sw_gbaud
- * If CCPI is in software mode, this is the speed the CCPI QLMs will be configured
- * for
- *
- * @return Zero on success, negative on failure.
- */
-int bdk_init_nodes(int skip_cores, int ccpi_sw_gbaud)
-{
- int result = 0;
- int do_oci_init = (__bdk_init_ccpi_links != NULL);
-
- /* Only init OCI/CCPI on chips that support it */
- do_oci_init &= CAVIUM_IS_MODEL(CAVIUM_CN88XX);
-
- /* Check that the BDK config says multi-node is enabled */
- if (bdk_config_get_int(BDK_CONFIG_MULTI_NODE) == 0)
- do_oci_init = 0;
-
- /* Simulation under Asim is a special case. Multi-node is simulaoted, but
- not the details of the low level link */
- if (do_oci_init && bdk_is_platform(BDK_PLATFORM_ASIM))
- {
- bdk_numa_set_exists(0);
- bdk_numa_set_exists(1);
- /* Skip the rest in simulation */
- do_oci_init = 0;
- }
-
- if (do_oci_init)
- {
- if (__bdk_init_ccpi_links(ccpi_sw_gbaud) == 0)
- {
- /* Don't run node init if L2C_OCI_CTL shows that it has already
- been done */
- BDK_CSR_INIT(l2c_oci_ctl, bdk_numa_local(), BDK_L2C_OCI_CTL);
- if (l2c_oci_ctl.s.enaoci == 0)
- result |= __bdk_init_ccpi_multinode();
- }
- }
-
- /* Start cores on all node unless it was disabled */
- if (!skip_cores)
- {
- for (bdk_node_t node=0; node<BDK_NUMA_MAX_NODES; node++)
- {
- if (bdk_numa_exists(node))
- result |= bdk_init_cores(node, 0);
- }
- }
- return result;
-}
+#include <libbdk-os/bdk-init.h>
/**
* Get the coremask of the cores actively running the BDK. Doesn't count cores
@@ -543,7 +49,7 @@ int bdk_init_nodes(int skip_cores, int ccpi_sw_gbaud)
*/
uint64_t bdk_get_running_coremask(bdk_node_t node)
{
- return __bdk_alive_coremask[node];
+ return 0x1;
}
/**
@@ -556,6 +62,6 @@ uint64_t bdk_get_running_coremask(bdk_node_t node)
*/
int bdk_get_num_running_cores(bdk_node_t node)
{
- return __builtin_popcountl(bdk_get_running_coremask(node));
+ return bdk_pop(bdk_get_running_coremask(node));
}
diff --git a/src/vendorcode/cavium/bdk/libbdk-os/bdk-thread.c b/src/vendorcode/cavium/bdk/libbdk-os/bdk-thread.c
deleted file mode 100644
index df1d02864b..0000000000
--- a/src/vendorcode/cavium/bdk/libbdk-os/bdk-thread.c
+++ /dev/null
@@ -1,384 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-#include <bdk.h>
-#include <stdio.h>
-#include <malloc.h>
-
-#define STACK_CANARY 0x0BADBADBADBADBADull
-
-typedef struct bdk_thread
-{
- struct bdk_thread *next;
- uint64_t coremask;
- uint64_t gpr[32]; /* Reg 31 is SP */
- struct _reent lib_state;
- uint64_t stack_canary;
- uint64_t stack[0];
-} bdk_thread_t;
-
-typedef struct
-{
- bdk_thread_t* head;
- bdk_thread_t* tail;
- bdk_spinlock_t lock;
- int64_t __padding1[16-3]; /* Stats in different cache line for speed */
- int64_t stat_num_threads;
- int64_t stat_no_schedulable_threads;
- int64_t stat_next_calls;
- int64_t stat_next_walks;
- int64_t __padding2[16-4];
-} bdk_thread_node_t;
-
-static bdk_thread_node_t bdk_thread_node[BDK_NUMA_MAX_NODES];
-
-extern void __bdk_thread_switch(bdk_thread_t* next_context, int delete_old);
-
-/**
- * Main thread body for all threads
- *
- * @param func User function to call
- * @param arg0 First argument to the user function
- * @param arg1 Second argument to the user function
- */
-static void __bdk_thread_body(bdk_thread_func_t func, int arg0, void *arg1)
-{
- func(arg0, arg1);
- bdk_thread_destroy();
-}
-
-
-/**
- * Initialize the BDK thread library
- *
- * @return Zero on success, negative on failure
- */
-int bdk_thread_initialize(void)
-{
- bdk_zero_memory(bdk_thread_node, sizeof(bdk_thread_node));
- _REENT_INIT_PTR(&__bdk_thread_global_reent);
- return 0;
-}
-
-static bdk_thread_t *__bdk_thread_next(void)
-{
- bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
- uint64_t coremask = bdk_core_to_mask();
-
- bdk_atomic_add64_nosync(&t_node->stat_next_calls, 1);
- bdk_thread_t *prev = NULL;
- bdk_thread_t *next = t_node->head;
- int walks = 0;
- while (next && !(next->coremask & coremask))
- {
- prev = next;
- next = next->next;
- walks++;
- }
- if (walks)
- bdk_atomic_add64_nosync(&t_node->stat_next_walks, walks);
-
- if (next)
- {
- if (t_node->tail == next)
- t_node->tail = prev;
- if (prev)
- prev->next = next->next;
- else
- t_node->head = next->next;
- next->next = NULL;
- }
- else
- bdk_atomic_add64_nosync(&t_node->stat_no_schedulable_threads, 1);
-
- return next;
-}
-
-/**
- * Yield the current thread and run a new one
- */
-void bdk_thread_yield(void)
-{
- if (BDK_DBG_MAGIC_ENABLE && (bdk_numa_local() == bdk_numa_master()))
- bdk_dbg_check_magic();
- bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
- bdk_thread_t *current;
- BDK_MRS_NV(TPIDR_EL3, current);
-
- /* Yield can be called without a thread context during core init. The
- cores call bdk_wait_usec(), which yields. In this case yielding
- does nothing */
- if (bdk_unlikely(!current))
- return;
-
- if (bdk_unlikely(current->stack_canary != STACK_CANARY))
- bdk_fatal("bdk_thread_yield() detected a stack overflow\n");
-
- if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
- bdk_sso_process_work();
-
- if (t_node->head == NULL)
- return;
-
- bdk_spinlock_lock(&t_node->lock);
-
- /* Find the first thread that can run on this core */
- bdk_thread_t *next = __bdk_thread_next();
-
- /* If next is NULL then there are no other threads ready to run and we
- will continue without doing anything */
- if (next)
- {
- __bdk_thread_switch(next, 0);
- /* Unlock performed in __bdk_thread_switch_complete */
- return;
- }
- bdk_spinlock_unlock(&t_node->lock);
-}
-
-
-/**
- * Create a new thread and return it. The thread will not be scheduled
- * as it isn't put in the thread list.
- *
- * @param coremask Mask of cores the thread can run on. Each set bit is an allowed
- * core. Zero and -1 are both shortcuts for all cores.
- * @param func Function to run as a thread
- * @param arg0 First argument to the function
- * @param arg1 Second argument to the function
- * @param stack_size Stack size for the new thread. Set to zero for the system default.
- *
- * @return Thread or NULL on failure
- */
-static void *__bdk_thread_create(uint64_t coremask, bdk_thread_func_t func, int arg0, void *arg1, int stack_size)
-{
- bdk_thread_t *thread;
- if (!stack_size)
- stack_size = BDK_THREAD_DEFAULT_STACK_SIZE;
-
- thread = memalign(16, sizeof(bdk_thread_t) + stack_size);
- if (thread == NULL)
- {
- bdk_error("Unable to allocate memory for new thread\n");
- return NULL;
- }
- memset(thread, 0, sizeof(bdk_thread_t) + stack_size);
- if (coremask == 0)
- coremask = -1;
- thread->coremask = coremask;
- thread->gpr[0] = (uint64_t)func; /* x0 = Argument 0 to __bdk_thread_body */
- thread->gpr[1] = arg0; /* x1 = Argument 1 to __bdk_thread_body */
- thread->gpr[2] = (uint64_t)arg1; /* x2 = Argument 2 to __bdk_thread_body */
- thread->gpr[29] = 0; /* x29 = Frame pointer */
- thread->gpr[30] = (uint64_t)__bdk_thread_body; /* x30 = Link register */
- thread->gpr[31] = (uint64_t)thread->stack + stack_size; /* x31 = Stack pointer */
- if (thread->gpr[31] & 0xf)
- bdk_fatal("Stack not aligned 0x%lx\n", thread->gpr[31]);
- _REENT_INIT_PTR(&thread->lib_state);
- extern void __sinit(struct _reent *);
- __sinit(&thread->lib_state);
- thread->stack_canary = STACK_CANARY;
- thread->next = NULL;
- return thread;
-}
-
-
-/**
- * Create a new thread. The thread may be scheduled to any of the
- * cores supplied in the coremask. Note that a single thread is
- * created and may only run on one core at a time. The thread may
- * not start executing until the next yield call if all cores in
- * the coremask are currently busy.
- *
- * @param node Node to use in a Numa setup. Can be an exact ID or a
- * special value.
- * @param coremask Mask of cores the thread can run on. Each set bit is an allowed
- * core. Zero and -1 are both shortcuts for all cores.
- * @param func Function to run as a thread
- * @param arg0 First argument to the function
- * @param arg1 Second argument to the function
- * @param stack_size Stack size for the new thread. Set to zero for the system default.
- *
- * @return Zero on success, negative on failure
- */
-int bdk_thread_create(bdk_node_t node, uint64_t coremask, bdk_thread_func_t func, int arg0, void *arg1, int stack_size)
-{
- bdk_thread_node_t *t_node = &bdk_thread_node[node];
- bdk_thread_t *thread = __bdk_thread_create(coremask, func, arg0, arg1, stack_size);
- if (thread == NULL)
- return -1;
-
- bdk_atomic_add64_nosync(&t_node->stat_num_threads, 1);
- bdk_spinlock_lock(&t_node->lock);
- if (t_node->tail)
- t_node->tail->next = thread;
- else
- t_node->head = thread;
- t_node->tail = thread;
- bdk_spinlock_unlock(&t_node->lock);
- BDK_SEV;
- return 0;
-}
-
-
-/**
- * Destroy the currently running thread. This never returns.
- */
-void bdk_thread_destroy(void)
-{
- bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
- bdk_thread_t *current;
- BDK_MRS_NV(TPIDR_EL3, current);
- if (bdk_unlikely(!current))
- bdk_fatal("bdk_thread_destroy() called without thread context\n");
- if (bdk_unlikely(current->stack_canary != STACK_CANARY))
- bdk_fatal("bdk_thread_destroy() detected a stack overflow\n");
-
- fflush(NULL);
- bdk_atomic_add64_nosync(&t_node->stat_num_threads, -1);
-
- while (1)
- {
- if (BDK_DBG_MAGIC_ENABLE && (bdk_numa_local() == bdk_numa_master()))
- bdk_dbg_check_magic();
- if (t_node->head)
- {
- bdk_spinlock_lock(&t_node->lock);
- /* Find the first thread that can run on this core */
- bdk_thread_t *next = __bdk_thread_next();
-
- /* If next is NULL then there are no other threads ready to run and we
- will continue without doing anything */
- if (next)
- {
- __bdk_thread_switch(next, 1);
- bdk_fatal("bdk_thread_destroy() should never get here\n");
- }
- bdk_spinlock_unlock(&t_node->lock);
- }
- if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
- bdk_sso_process_work();
- BDK_WFE;
- }
-}
-
-struct _reent __bdk_thread_global_reent;
-struct _reent *__bdk_thread_getreent(void)
-{
- bdk_thread_t *current;
- BDK_MRS_NV(TPIDR_EL3, current);
- if (current)
- return &current->lib_state;
- else
- return &__bdk_thread_global_reent;
-}
-
-void __bdk_thread_switch_complete(bdk_thread_t* old_context, int delete_old)
-{
- bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
- if (bdk_unlikely(delete_old))
- {
- bdk_spinlock_unlock(&t_node->lock);
- free(old_context);
- }
- else
- {
- if (bdk_likely(old_context))
- {
- if (t_node->tail)
- t_node->tail->next = old_context;
- else
- t_node->head = old_context;
- t_node->tail = old_context;
- }
- bdk_spinlock_unlock(&t_node->lock);
- if (bdk_likely(old_context))
- BDK_SEV;
- }
-}
-
-
-/**
- * Called to create the initial thread for a CPU. Must be called
- * once for each CPU.
- *
- * @param func Function to run as new thread. It is guaranteed that this will
- * be the next thread run by the core.
- * @param arg0 First thread argument
- * @param arg1 Second thread argument
- * @param stack_size Initial stack size, or zero for the default
- */
-void bdk_thread_first(bdk_thread_func_t func, int arg0, void *arg1, int stack_size)
-{
- bdk_thread_node_t *t_node = &bdk_thread_node[bdk_numa_local()];
- void *thread = __bdk_thread_create(bdk_core_to_mask(), func, arg0, arg1, stack_size);
- if (thread)
- {
- bdk_atomic_add64_nosync(&t_node->stat_num_threads, 1);
- bdk_spinlock_lock(&t_node->lock);
- __bdk_thread_switch(thread, 0);
- }
- bdk_fatal("Create of __bdk_init_main thread failed\n");
-}
-
-/**
- * Display statistics about the number of threads and scheduling
- */
-void bdk_thread_show_stats()
-{
- for (bdk_node_t node = BDK_NODE_0; node < BDK_NUMA_MAX_NODES; node++)
- {
- if (!bdk_numa_exists(node))
- continue;
- bdk_thread_node_t *t_node = &bdk_thread_node[node];
- printf("Node %d\n", node);
- printf(" Active threads: %ld\n", t_node->stat_num_threads);
- printf(" Schedule checks: %ld\n", t_node->stat_next_calls);
- int64_t div = t_node->stat_next_calls;
- if (!div)
- div = 1;
- printf(" Average walk depth: %ld\n",
- t_node->stat_next_walks / div);
- printf(" Not switching: %ld (%ld%%)\n",
- t_node->stat_no_schedulable_threads,
- t_node->stat_no_schedulable_threads * 100 / div);
- bdk_atomic_set64(&t_node->stat_next_calls, 0);
- bdk_atomic_set64(&t_node->stat_next_walks, 0);
- bdk_atomic_set64(&t_node->stat_no_schedulable_threads, 0);
- }
-}
diff --git a/src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c b/src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c
index 27c3294479..52dd702e3b 100644
--- a/src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c
+++ b/src/vendorcode/cavium/bdk/libbdk-trust/bdk-trust.c
@@ -37,225 +37,8 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
-#include "libbdk-arch/bdk-csrs-fusf.h"
-#include "libbdk-arch/bdk-csrs-rom.h"
-/* The define BDK_TRUST_HARD_BLOW_NV controls whether the BDK will
- hard blow the secure NV counter on boot. This is needed for a
- production system, but can be dangerous in a development
- environment. The default value of 0 is to prevent bricking of
- chips due to CSIB[NVCOUNT] mistakes. BDK_TRUST_HARD_BLOW_NV must
- be changed to a 1 for production. The code below will display a
- warning if BDK_TRUST_HARD_BLOW_NV=0 in a trusted boot to remind
- you */
-#define BDK_TRUST_HARD_BLOW_NV 0
-
-/* The CSIB used to boot will be stored here by bsk-start.S */
-union bdk_rom_csib_s __bdk_trust_csib __attribute__((section("init")));
-static bdk_trust_level_t __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
-
-/**
- * Update the fused secure NV counter to reflect the CSIB[NVCOUNT] value. In
- * production systems, be sure to set BDK_TRUST_HARD_BLOW_NV=1.
- */
-static void __bdk_program_nv_counter(void)
-{
- int hw_nv = bdk_trust_get_nv_counter();
- int csib_nv = __bdk_trust_csib.s.nvcnt;
-
- if (!BDK_TRUST_HARD_BLOW_NV)
- {
- printf("\33[1m"); /* Bold */
- bdk_warn("\n");
- bdk_warn("********************************************************\n");
- bdk_warn("* Configured for soft blow of secure NV counter. This\n");
- bdk_warn("* build is not suitable for production trusted boot.\n");
- bdk_warn("********************************************************\n");
- bdk_warn("\n");
- printf("\33[0m"); /* Normal */
- }
-
- /* Check if the CSIB NV counter is less than the HW fused values.
- This means the image is an old rollback. Refuse to run */
- if (csib_nv < hw_nv)
- bdk_fatal("CSIB[NVCOUNT] is less than FUSF_CTL[ROM_T_CNT]. Image rollback not allowed\n");
- /* If the CSIB NV counter matches the HW fuses, everything is
- good */
- if (csib_nv == hw_nv)
- return;
- /* CSIB NV counter is larger than the HW fuses. We must blow
- fuses to move the hardware counter forward, protecting from
- image rollback */
- if (BDK_TRUST_HARD_BLOW_NV)
- {
- BDK_TRACE(INIT, "Trust: Hard blow secure NV counter to %d\n", csib_nv);
- uint64_t v = 1ull << BDK_FUSF_FUSE_NUM_E_ROM_T_CNTX(csib_nv - 1);
- bdk_fuse_field_hard_blow(bdk_numa_master(), BDK_FUSF_FUSE_NUM_E_FUSF_LCK, v, 0);
- }
- else
- {
- BDK_TRACE(INIT, "Trust: Soft blow secure NV counter to %d\n", csib_nv);
- bdk_fuse_field_soft_blow(bdk_numa_master(), BDK_FUSF_FUSE_NUM_E_ROM_T_CNTX(csib_nv - 1));
- }
-}
-
-/**
- * Called by boot stub (TBL1FW) to initialize the state of trust
- */
-void __bdk_trust_init(void)
-{
- extern uint64_t __bdk_init_reg_pc; /* The contents of PC when this image started */
- const bdk_node_t node = bdk_numa_local();
- volatile uint64_t *huk = bdk_phys_to_ptr(bdk_numa_get_address(node, BDK_FUSF_HUKX(0)));
-
- /* Non-trusted boot address */
- if (__bdk_init_reg_pc == 0x120000)
- {
- __bdk_trust_level = BDK_TRUST_LEVEL_NONE;
- if (huk[0] | huk[1])
- {
- BDK_TRACE(INIT, "Trust: Initial image, Non-trusted boot with HUK\n");
- goto fail_trust;
- }
- else
- {
- BDK_TRACE(INIT, "Trust: Initial image, Non-trusted boot without HUK\n");
- goto skip_trust;
- }
- }
-
- if (__bdk_init_reg_pc != 0x150000)
- {
- /* Not the first image */
- BDK_CSR_INIT(rst_boot, node, BDK_RST_BOOT);
- if (!rst_boot.s.trusted_mode)
- {
- __bdk_trust_level = BDK_TRUST_LEVEL_NONE;
- BDK_TRACE(INIT, "Trust: Secondary image, non-trusted boot\n");
- goto skip_trust;
- }
- int csibsize = 0;
- const union bdk_rom_csib_s *csib = bdk_config_get_blob(&csibsize, BDK_CONFIG_TRUST_CSIB);
- if (!csib)
- {
- __bdk_trust_level = BDK_TRUST_LEVEL_NONE;
- BDK_TRACE(INIT, "Trust: Secondary image, non-trusted boot\n");
- goto skip_trust;
- }
- if (csibsize != sizeof(__bdk_trust_csib))
- {
- BDK_TRACE(INIT, "Trust: Secondary image, Trusted boot with corrupt CSIB, trust broken\n");
- goto fail_trust;
- }
- /* Record our trust level */
- switch (csib->s.crypt)
- {
- case 0:
- __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED;
- BDK_TRACE(INIT, "Trust: Secondary image, Trused boot, no encryption\n");
- goto success_trust;
- case 1:
- __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED_SSK;
- BDK_TRACE(INIT, "Trust: Secondary image, Trused boot, SSK encryption\n");
- goto success_trust;
- case 2:
- __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED_BSSK;
- BDK_TRACE(INIT, "Trust: Secondary image, Trused boot, BSSK encryption\n");
- goto success_trust;
- default:
- __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
- BDK_TRACE(INIT, "Trust: Secondary image, Trusted boot, Corrupt CSIB[crypt], trust broken\n");
- goto fail_trust;
- }
- }
-
- /* Copy the Root of Trust public key out of the CSIB */
- volatile uint64_t *rot_pub_key = bdk_key_alloc(node, 64);
- if (!rot_pub_key)
- {
- __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
- BDK_TRACE(INIT, "Trust: Failed to allocate ROT memory, trust broken\n");
- goto fail_trust;
- }
- rot_pub_key[0] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk0);
- rot_pub_key[1] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk1);
- rot_pub_key[2] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk2);
- rot_pub_key[3] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk3);
- rot_pub_key[4] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk4);
- rot_pub_key[5] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk5);
- rot_pub_key[6] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk6);
- rot_pub_key[7] = bdk_le64_to_cpu(__bdk_trust_csib.s.rotpk7);
- bdk_config_set_int(bdk_ptr_to_phys((void*)rot_pub_key), BDK_CONFIG_TRUST_ROT_ADDR);
- BDK_TRACE(INIT, "Trust: ROT %016lx %016lx %016lx %016lx %016lx %016lx %016lx %016lx\n",
- bdk_cpu_to_be64(rot_pub_key[0]), bdk_cpu_to_be64(rot_pub_key[1]),
- bdk_cpu_to_be64(rot_pub_key[2]), bdk_cpu_to_be64(rot_pub_key[3]),
- bdk_cpu_to_be64(rot_pub_key[4]), bdk_cpu_to_be64(rot_pub_key[5]),
- bdk_cpu_to_be64(rot_pub_key[6]), bdk_cpu_to_be64(rot_pub_key[7]));
-
- /* Update the secure NV counter with the value in the CSIB */
- __bdk_program_nv_counter();
-
- /* Create the BSSK */
- if (huk[0] | huk[1])
- {
- uint64_t iv[2] = {0, 0};
- volatile uint64_t *bssk = bdk_key_alloc(node, 16);
- if (!bssk)
- {
- __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
- BDK_TRACE(INIT, "Trust: Failed to allocate BSSK memory, trust broken\n");
- goto fail_trust;
- }
- BDK_TRACE(INIT, "Trust: Calculating BSSK\n");
- uint64_t tmp_bssk[2];
- tmp_bssk[0] = __bdk_trust_csib.s.fs0;
- tmp_bssk[1] = __bdk_trust_csib.s.fs1;
- bdk_aes128cbc_decrypt((void*)huk, (void*)tmp_bssk, 16, iv);
- bssk[0] = tmp_bssk[0];
- bssk[1] = tmp_bssk[1];
- tmp_bssk[0] = 0;
- tmp_bssk[1] = 0;
- bdk_config_set_int(bdk_ptr_to_phys((void*)bssk), BDK_CONFIG_TRUST_BSSK_ADDR);
- //BDK_TRACE(INIT, "Trust: BSSK %016lx %016lx\n", bdk_cpu_to_be64(bssk[0]), bdk_cpu_to_be64(bssk[1]));
- }
-
- /* Record our trust level */
- switch (__bdk_trust_csib.s.crypt)
- {
- case 0:
- __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED;
- BDK_TRACE(INIT, "Trust: Trused boot, no encryption\n");
- break;
- case 1:
- __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED_SSK;
- BDK_TRACE(INIT, "Trust: Trused boot, SSK encryption\n");
- break;
- case 2:
- __bdk_trust_level = BDK_TRUST_LEVEL_SIGNED_BSSK;
- BDK_TRACE(INIT, "Trust: Trused boot, BSSK encryption\n");
- break;
- default:
- __bdk_trust_level = BDK_TRUST_LEVEL_BROKEN;
- goto fail_trust;
- }
-
- /* We started at the trusted boot address, CSIB should be
- valid */
- bdk_config_set_blob(sizeof(__bdk_trust_csib), &__bdk_trust_csib, BDK_CONFIG_TRUST_CSIB);
-success_trust:
- bdk_signed_load_public();
- return;
-
-fail_trust:
- /* Hide secrets */
- BDK_CSR_MODIFY(c, node, BDK_RST_BOOT,
- c.s.dis_huk = 1);
- BDK_TRACE(INIT, "Trust: Secrets Hidden\n");
-skip_trust:
- /* Erase CSIB as it is invalid */
- memset(&__bdk_trust_csib, 0, sizeof(__bdk_trust_csib));
- bdk_config_set_blob(0, NULL, BDK_CONFIG_TRUST_CSIB);
-}
+#include <libbdk-trust/bdk-trust.h>
/**
* Returns the current level of trust. Must be called after
@@ -265,22 +48,7 @@ skip_trust:
*/
bdk_trust_level_t bdk_trust_get_level(void)
{
- return __bdk_trust_level;
-}
-
-/**
- * Return the current secure NV counter stored in the fuses
- *
- * @return NV counter (0-31)
- */
-int bdk_trust_get_nv_counter(void)
-{
- /* Count leading zeros in FUSF_CTL[ROM_T_CNT] to dermine the
- hardware NV value */
- BDK_CSR_INIT(fusf_ctl, bdk_numa_master(), BDK_FUSF_CTL);
- int hw_nv = 0;
- if (fusf_ctl.s.rom_t_cnt)
- hw_nv = 32 - __builtin_clz(fusf_ctl.s.rom_t_cnt);
- return hw_nv;
+ // FIXME
+ return BDK_TRUST_LEVEL_NONE;
}
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-env.c b/src/vendorcode/cavium/bdk/libdram/dram-env.c
index f25e6bdb26..61cce78d3c 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-env.c
+++ b/src/vendorcode/cavium/bdk/libdram/dram-env.c
@@ -39,6 +39,9 @@
#include <bdk.h>
#include "dram-internal.h"
+#include <string.h>
+#include <lame_string.h>
+
const char* lookup_env_parameter(const char *format, ...)
{
const char *s;
@@ -55,7 +58,7 @@ const char* lookup_env_parameter(const char *format, ...)
{
value = strtoul(s, NULL, 0);
error_print("Parameter found in environment: %s = \"%s\" 0x%lx (%ld)\n",
- buffer, s, value, value);
+ buffer, s, value, value);
}
return s;
}
@@ -76,7 +79,7 @@ const char* lookup_env_parameter_ull(const char *format, ...)
{
value = strtoull(s, NULL, 0);
error_print("Parameter found in environment: %s = 0x%016llx\n",
- buffer, value);
+ buffer, value);
}
return s;
}
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-env.h b/src/vendorcode/cavium/bdk/libdram/dram-env.h
index 0f100e1b25..f190c29290 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-env.h
+++ b/src/vendorcode/cavium/bdk/libdram/dram-env.h
@@ -42,7 +42,9 @@
* Intenral use only.
*/
-
+#ifndef __DRAM_ENV_H_
+#define __DRAM_ENV_H_
extern const char *lookup_env_parameter(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
extern const char *lookup_env_parameter_ull(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
+#endif
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c b/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c
index edb42312f1..1c988088e9 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c
+++ b/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.c
@@ -41,6 +41,14 @@
#include "libbdk-arch/bdk-csrs-mio_fus.h"
#include "dram-internal.h"
+#include <stdlib.h>
+#include <string.h>
+#include <libbdk-hal/bdk-config.h>
+#include <libbdk-hal/bdk-l2c.h>
+#include <libbdk-hal/bdk-rng.h>
+#include <libbdk-trust/bdk-trust.h>
+#include <lame_string.h>
+
#define WODT_MASK_2R_1S 1 // FIXME: did not seem to make much difference with #152 1-slot?
#define DESKEW_RODT_CTL 1
@@ -130,37 +138,37 @@ Get_Deskew_Settings(bdk_node_t node, int ddr_interface_num, deskew_data_t *dskda
memset(dskdat, 0, sizeof(*dskdat));
- BDK_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
- phy_ctl.s.dsk_dbg_clk_scaler = 3);
+ BDK_CSR_MODIFY(_phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ _phy_ctl.s.dsk_dbg_clk_scaler = 3);
for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) {
bit_index = 0;
- for (bit_num = 0; bit_num <= bit_end; ++bit_num) { // NOTE: this is for pass 2.x
+ for (bit_num = 0; bit_num <= bit_end; ++bit_num) { // NOTE: this is for pass 2.x
- if (bit_num == 4) continue;
- if ((bit_num == 5) && is_t88p2) continue; // NOTE: this is for pass 2.x
+ if (bit_num == 4) continue;
+ if ((bit_num == 5) && is_t88p2) continue; // NOTE: this is for pass 2.x
// set byte lane and bit to read
- BDK_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
- (phy_ctl.s.dsk_dbg_bit_sel = bit_num,
- phy_ctl.s.dsk_dbg_byte_sel = byte_lane));
+ BDK_CSR_MODIFY(_phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ (_phy_ctl.s.dsk_dbg_bit_sel = bit_num,
+ _phy_ctl.s.dsk_dbg_byte_sel = byte_lane));
// start read sequence
- BDK_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
- phy_ctl.s.dsk_dbg_rd_start = 1);
+ BDK_CSR_MODIFY(_phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
+ _phy_ctl.s.dsk_dbg_rd_start = 1);
// poll for read sequence to complete
do {
phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
} while (phy_ctl.s.dsk_dbg_rd_complete != 1);
-
+
// record the data
dskdat->bytes[byte_lane].bits[bit_index] = phy_ctl.s.dsk_dbg_rd_data & 0x3ff;
bit_index++;
} /* for (bit_num = 0; bit_num <= bit_end; ++bit_num) */
} /* for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) */
-
+
return;
}
@@ -181,7 +189,7 @@ Display_Deskew_Data(bdk_node_t node, int ddr_interface_num,
if (print_enable) {
VB_PRT(print_enable, "N%d.LMC%d: Deskew Data: Bit => :",
node, ddr_interface_num);
- for (bit_num = 7; bit_num >= 0; --bit_num)
+ for (bit_num = 7; bit_num >= 0; --bit_num)
VB_PRT(print_enable, " %3d ", bit_num);
VB_PRT(print_enable, "\n");
}
@@ -194,8 +202,8 @@ Display_Deskew_Data(bdk_node_t node, int ddr_interface_num,
for (bit_num = 7; bit_num >= 0; --bit_num) {
- flags = dskdat->bytes[byte_lane].bits[bit_num] & 7;
- deskew = dskdat->bytes[byte_lane].bits[bit_num] >> 3;
+ flags = dskdat->bytes[byte_lane].bits[bit_num] & 7;
+ deskew = dskdat->bytes[byte_lane].bits[bit_num] >> 3;
if (print_enable)
VB_PRT(print_enable, " %3d %c", deskew, fc[flags^1]);
@@ -206,7 +214,7 @@ Display_Deskew_Data(bdk_node_t node, int ddr_interface_num,
VB_PRT(print_enable, "\n");
} /* for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) */
-
+
return;
}
@@ -281,7 +289,7 @@ Validate_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_
if (print_enable) {
VB_PRT(print_enable, "N%d.LMC%d: Deskew Settings: Bit => :",
node, ddr_interface_num);
- for (bit_num = 7; bit_num >= 0; --bit_num)
+ for (bit_num = 7; bit_num >= 0; --bit_num)
VB_PRT(print_enable, " %3d ", bit_num);
VB_PRT(print_enable, "\n");
}
@@ -292,10 +300,10 @@ Validate_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_
node, ddr_interface_num, byte_lane,
(print_enable >= VBL_TME) ? "FINAL" : " ");
- nib_min[0] = 127; nib_min[1] = 127;
- nib_max[0] = 0; nib_max[1] = 0;
- nib_unl[0] = 0; nib_unl[1] = 0;
- //nib_sat[0] = 0; nib_sat[1] = 0;
+ nib_min[0] = 127; nib_min[1] = 127;
+ nib_max[0] = 0; nib_max[1] = 0;
+ nib_unl[0] = 0; nib_unl[1] = 0;
+ //nib_sat[0] = 0; nib_sat[1] = 0;
#if LOOK_FOR_STUCK_BYTE
bl_mask[0] = bl_mask[1] = 0;
@@ -311,14 +319,14 @@ Validate_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_
bit_last = bit_start;
}
- for (bit_num = bit_last; bit_num >= 0; --bit_num) { // NOTE: this is for pass 2.x
- if (bit_num == 4) continue;
- if ((bit_num == 5) && is_t88p2) continue; // NOTE: this is for pass 2.x
+ for (bit_num = bit_last; bit_num >= 0; --bit_num) { // NOTE: this is for pass 2.x
+ if (bit_num == 4) continue;
+ if ((bit_num == 5) && is_t88p2) continue; // NOTE: this is for pass 2.x
- nib_num = (bit_num > 4) ? 1 : 0;
+ nib_num = (bit_num > 4) ? 1 : 0;
- flags = dskdat.bytes[byte_lane].bits[bit_index] & 7;
- deskew = dskdat.bytes[byte_lane].bits[bit_index] >> 3;
+ flags = dskdat.bytes[byte_lane].bits[bit_index] & 7;
+ deskew = dskdat.bytes[byte_lane].bits[bit_index] >> 3;
bit_index--;
counts->saturated += !!(flags & 6);
@@ -341,42 +349,42 @@ Validate_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_
} /* for (bit_num = bit_last; bit_num >= 0; --bit_num) */
- /*
+ /*
Now look for nibble errors:
- For bit 55, it looks like a bit deskew problem. When the upper nibble of byte 6
- needs to go to saturation, bit 7 of byte 6 locks prematurely at 64.
- For DIMMs with raw card A and B, can we reset the deskew training when we encounter this case?
- The reset criteria should be looking at one nibble at a time for raw card A and B;
- if the bit-deskew setting within a nibble is different by > 33, we'll issue a reset
- to the bit deskew training.
-
- LMC0 Bit Deskew Byte(6): 64 0 - 0 - 0 - 26 61 35 64
- */
- // upper nibble range, then lower nibble range
- nibrng_errs = ((nib_max[1] - nib_min[1]) > 33) ? 1 : 0;
- nibrng_errs |= ((nib_max[0] - nib_min[0]) > 33) ? 1 : 0;
-
- // check for nibble all unlocked
- nibunl_errs = ((nib_unl[0] == 4) || (nib_unl[1] == 4)) ? 1 : 0;
-
- // check for nibble all saturated
- //nibsat_errs = ((nib_sat[0] == 4) || (nib_sat[1] == 4)) ? 1 : 0;
-
- // check for bit value errors, ie < 17 or > 110
+ For bit 55, it looks like a bit deskew problem. When the upper nibble of byte 6
+ needs to go to saturation, bit 7 of byte 6 locks prematurely at 64.
+ For DIMMs with raw card A and B, can we reset the deskew training when we encounter this case?
+ The reset criteria should be looking at one nibble at a time for raw card A and B;
+ if the bit-deskew setting within a nibble is different by > 33, we'll issue a reset
+ to the bit deskew training.
+
+ LMC0 Bit Deskew Byte(6): 64 0 - 0 - 0 - 26 61 35 64
+ */
+ // upper nibble range, then lower nibble range
+ nibrng_errs = ((nib_max[1] - nib_min[1]) > 33) ? 1 : 0;
+ nibrng_errs |= ((nib_max[0] - nib_min[0]) > 33) ? 1 : 0;
+
+ // check for nibble all unlocked
+ nibunl_errs = ((nib_unl[0] == 4) || (nib_unl[1] == 4)) ? 1 : 0;
+
+ // check for nibble all saturated
+ //nibsat_errs = ((nib_sat[0] == 4) || (nib_sat[1] == 4)) ? 1 : 0;
+
+ // check for bit value errors, ie < 17 or > 110
// FIXME? assume max always > MIN_BITVAL and min < MAX_BITVAL
- bitval_errs = ((nib_max[1] > MAX_BITVAL) || (nib_max[0] > MAX_BITVAL)) ? 1 : 0;
- bitval_errs |= ((nib_min[1] < MIN_BITVAL) || (nib_min[0] < MIN_BITVAL)) ? 1 : 0;
-
- if (((nibrng_errs != 0) || (nibunl_errs != 0) /*|| (nibsat_errs != 0)*/ || (bitval_errs != 0))
+ bitval_errs = ((nib_max[1] > MAX_BITVAL) || (nib_max[0] > MAX_BITVAL)) ? 1 : 0;
+ bitval_errs |= ((nib_min[1] < MIN_BITVAL) || (nib_min[0] < MIN_BITVAL)) ? 1 : 0;
+
+ if (((nibrng_errs != 0) || (nibunl_errs != 0) /*|| (nibsat_errs != 0)*/ || (bitval_errs != 0))
&& print_enable)
{
- VB_PRT(print_enable, " %c%c%c%c",
+ VB_PRT(print_enable, " %c%c%c%c",
(nibrng_errs)?'R':' ',
(nibunl_errs)?'U':' ',
(bitval_errs)?'V':' ',
/*(nibsat_errs)?'S':*/' ');
- }
+ }
#if LOOK_FOR_STUCK_BYTE
bit_values = __builtin_popcountl(bl_mask[0]) + __builtin_popcountl(bl_mask[1]);
@@ -389,10 +397,10 @@ Validate_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_
if (print_enable)
VB_PRT(print_enable, "\n");
- counts->nibrng_errs |= (nibrng_errs << byte_lane);
- counts->nibunl_errs |= (nibunl_errs << byte_lane);
- //counts->nibsat_errs |= (nibsat_errs << byte_lane);
- counts->bitval_errs |= (bitval_errs << byte_lane);
+ counts->nibrng_errs |= (nibrng_errs << byte_lane);
+ counts->nibunl_errs |= (nibunl_errs << byte_lane);
+ //counts->nibsat_errs |= (nibsat_errs << byte_lane);
+ counts->bitval_errs |= (bitval_errs << byte_lane);
#if LOOK_FOR_STUCK_BYTE
// just for completeness, allow print of the stuck values bitmask after the bytelane print
@@ -404,7 +412,7 @@ Validate_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_
#endif
} /* for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) */
-
+
// restore original WR_DESKEW_ENA setting
change_wr_deskew_ena(node, ddr_interface_num, saved_wr_deskew_ena);
@@ -412,7 +420,7 @@ Validate_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_
}
unsigned short load_dac_override(int node, int ddr_interface_num,
- int dac_value, int byte)
+ int dac_value, int byte)
{
bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
int bytex = (byte == 0x0A) ? byte : byte + 1; // single bytelanes incr by 1; A is for ALL
@@ -455,30 +463,30 @@ int read_DAC_DBI_settings(int node, int ddr_interface_num,
bit_num = (dac_or_dbi) ? 4 : 5;
if ((bit_num == 5) && !is_t88p2) { // NOTE: this is for pass 1.x
- return -1;
+ return -1;
}
for (byte_lane = 8; byte_lane >= 0 ; --byte_lane) { // FIXME: always assume ECC is available
- //set byte lane and bit to read
+ //set byte lane and bit to read
phy_ctl.s.dsk_dbg_bit_sel = bit_num;
phy_ctl.s.dsk_dbg_byte_sel = byte_lane;
DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), phy_ctl.u);
- //start read sequence
+ //start read sequence
phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
phy_ctl.s.dsk_dbg_rd_start = 1;
DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), phy_ctl.u);
- //poll for read sequence to complete
- do {
- phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
- } while (phy_ctl.s.dsk_dbg_rd_complete != 1);
-
- deskew = phy_ctl.s.dsk_dbg_rd_data /*>> 3*/; // leave the flag bits for DBI
- dac_value = phy_ctl.s.dsk_dbg_rd_data & 0xff;
+ //poll for read sequence to complete
+ do {
+ phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(ddr_interface_num));
+ } while (phy_ctl.s.dsk_dbg_rd_complete != 1);
- settings[byte_lane] = (dac_or_dbi) ? dac_value : deskew;
+ deskew = phy_ctl.s.dsk_dbg_rd_data /*>> 3*/; // leave the flag bits for DBI
+ dac_value = phy_ctl.s.dsk_dbg_rd_data & 0xff;
+
+ settings[byte_lane] = (dac_or_dbi) ? dac_value : deskew;
} /* for (byte_lane = 8; byte_lane >= 0 ; --byte_lane) { */
@@ -489,7 +497,7 @@ int read_DAC_DBI_settings(int node, int ddr_interface_num,
// arg dac_or_dbi is 1 for DAC, 0 for DBI
void
display_DAC_DBI_settings(int node, int lmc, int dac_or_dbi,
- int ecc_ena, int *settings, char *title)
+ int ecc_ena, int *settings, const char *title)
{
int byte;
int flags;
@@ -552,7 +560,7 @@ Perform_Offset_Training(bdk_node_t node, int rank_mask, int ddr_interface_num)
// do not print or write if CSR does not change...
if (lmc_phy_ctl.u != orig_phy_ctl) {
- ddr_print("PHY_CTL : 0x%016lx\n", lmc_phy_ctl.u);
+ ddr_print("PHY_CTL : 0x%016llx\n", lmc_phy_ctl.u);
DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), lmc_phy_ctl.u);
}
@@ -680,14 +688,14 @@ Perform_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_n
int loops, normal_loops = 1; // default to 1 NORMAL deskew training op...
const char *s;
if ((s = getenv("ddr_deskew_normal_loops")) != NULL) {
- normal_loops = strtoul(s, NULL, 0);
+ normal_loops = strtoul(s, NULL, 0);
}
#if LOOK_FOR_STUCK_BYTE
// provide override for STUCK BYTE RESETS
int do_stuck_reset = ENABLE_STUCK_BYTE_RESET;
if ((s = getenv("ddr_enable_stuck_byte_reset")) != NULL) {
- do_stuck_reset = !!strtoul(s, NULL, 0);
+ do_stuck_reset = !!strtoul(s, NULL, 0);
}
#endif
@@ -714,10 +722,10 @@ Perform_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_n
*
* 1. Write LMC(0)_EXT_CONFIG[VREFINT_SEQ_DESKEW] = 1.
*/
- VB_PRT(VBL_SEQ, "N%d.LMC%d: Performing LMC sequence: Set vrefint_seq_deskew = 1\n",
+ VB_PRT(VBL_SEQ, "N%d.LMC%d: Performing LMC sequence: Set vrefint_seq_deskew = 1\n",
node, ddr_interface_num);
DRAM_CSR_MODIFY(ext_config, node, BDK_LMCX_EXT_CONFIG(ddr_interface_num),
- ext_config.s.vrefint_seq_deskew = 1); /* Set Deskew sequence */
+ ext_config.s.vrefint_seq_deskew = 1); /* Set Deskew sequence */
/*
* 2. Write LMC(0)_SEQ_CTL[SEQ_SEL] = 0x0A and
@@ -726,10 +734,10 @@ Perform_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_n
* 3. Wait for LMC(0)_SEQ_CTL[SEQ_COMPLETE] to be set to 1.
*/
DRAM_CSR_MODIFY(phy_ctl, node, BDK_LMCX_PHY_CTL(ddr_interface_num),
- phy_ctl.s.phy_dsk_reset = 1); /* RESET Deskew sequence */
+ phy_ctl.s.phy_dsk_reset = 1); /* RESET Deskew sequence */
perform_octeon3_ddr3_sequence(node, rank_mask, ddr_interface_num, 0x0A); /* LMC Deskew Training */
- lock_retries = 0;
+ lock_retries = 0;
perform_read_deskew_training:
// maybe perform the NORMAL deskew training sequence multiple times before looking at lock status
@@ -744,30 +752,30 @@ Perform_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_n
// Now go look at lock and saturation status...
Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &dsk_counts, print_first);
- if (print_first && !print_them_all) // after printing the first and not doing them all, no more
- print_first = 0;
+ if (print_first && !print_them_all) // after printing the first and not doing them all, no more
+ print_first = 0;
unsaturated = (dsk_counts.saturated == 0);
- locked = (dsk_counts.unlocked == 0);
+ locked = (dsk_counts.unlocked == 0);
//nibble_sat = (dsk_counts.nibsat_errs != 0);
- // only do locking retries if unsaturated or rawcard A or B, otherwise full SAT retry
- if (unsaturated || (spd_rawcard_AorB && !is_t88p2 /*&& !nibble_sat*/)) {
- if (!locked) { // and not locked
- lock_retries++;
- lock_retries_total++;
- if (lock_retries <= lock_retries_limit) {
- goto perform_read_deskew_training;
- } else {
- VB_PRT(VBL_TME, "N%d.LMC%d: LOCK RETRIES failed after %d retries\n",
+ // only do locking retries if unsaturated or rawcard A or B, otherwise full SAT retry
+ if (unsaturated || (spd_rawcard_AorB && !is_t88p2 /*&& !nibble_sat*/)) {
+ if (!locked) { // and not locked
+ lock_retries++;
+ lock_retries_total++;
+ if (lock_retries <= lock_retries_limit) {
+ goto perform_read_deskew_training;
+ } else {
+ VB_PRT(VBL_TME, "N%d.LMC%d: LOCK RETRIES failed after %d retries\n",
node, ddr_interface_num, lock_retries_limit);
- }
- } else {
- if (lock_retries_total > 0) // only print if we did try
- VB_PRT(VBL_TME, "N%d.LMC%d: LOCK RETRIES successful after %d retries\n",
+ }
+ } else {
+ if (lock_retries_total > 0) // only print if we did try
+ VB_PRT(VBL_TME, "N%d.LMC%d: LOCK RETRIES successful after %d retries\n",
node, ddr_interface_num, lock_retries);
- }
- } /* if (unsaturated || spd_rawcard_AorB) */
+ }
+ } /* if (unsaturated || spd_rawcard_AorB) */
++sat_retries;
@@ -789,14 +797,14 @@ Perform_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_n
}
}
#endif
- /*
- * At this point, check for a DDR4 RDIMM that will not benefit from SAT retries; if so, no retries
- */
- if (spd_rawcard_AorB && !is_t88p2 /*&& !nibble_sat*/) {
- VB_PRT(VBL_TME, "N%d.LMC%d: Read Deskew Training Loop: Exiting for RAWCARD == A or B.\n",
+ /*
+ * At this point, check for a DDR4 RDIMM that will not benefit from SAT retries; if so, no retries
+ */
+ if (spd_rawcard_AorB && !is_t88p2 /*&& !nibble_sat*/) {
+ VB_PRT(VBL_TME, "N%d.LMC%d: Read Deskew Training Loop: Exiting for RAWCARD == A or B.\n",
node, ddr_interface_num);
- break; // no sat or lock retries
- }
+ break; // no sat or lock retries
+ }
} while (!unsaturated && (sat_retries < DEFAULT_SAT_RETRY_LIMIT));
@@ -817,9 +825,9 @@ Perform_Read_Deskew_Training(bdk_node_t node, int rank_mask, int ddr_interface_n
change_wr_deskew_ena(node, ddr_interface_num, saved_wr_deskew_ena);
if ((dsk_counts.nibrng_errs != 0) || (dsk_counts.nibunl_errs != 0)) {
- debug_print("N%d.LMC%d: NIBBLE ERROR(S) found, returning FAULT\n",
- node, ddr_interface_num);
- return -1; // we did retry locally, they did not help
+ debug_print("N%d.LMC%d: NIBBLE ERROR(S) found, returning FAULT\n",
+ node, ddr_interface_num);
+ return -1; // we did retry locally, they did not help
}
// NOTE: we (currently) always print one last training validation before starting Read Leveling...
@@ -837,7 +845,7 @@ do_write_deskew_op(bdk_node_t node, int ddr_interface_num,
SET_DDR_DLL_CTL3(bit_select, bit_sel);
SET_DDR_DLL_CTL3(byte_sel, byte_sel);
SET_DDR_DLL_CTL3(wr_deskew_ena, ena);
- DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
}
@@ -852,11 +860,11 @@ set_write_deskew_offset(bdk_node_t node, int ddr_interface_num,
SET_DDR_DLL_CTL3(bit_select, bit_sel);
SET_DDR_DLL_CTL3(byte_sel, byte_sel);
SET_DDR_DLL_CTL3(offset, offset);
- DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
SET_DDR_DLL_CTL3(wr_deskew_ld, 1);
- DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
}
@@ -872,14 +880,14 @@ Update_Write_Deskew_Settings(bdk_node_t node, int ddr_interface_num, deskew_data
byte_limit = ((lmc_config.s.mode32b) ? 4 : 8) + lmc_config.s.ecc_ena;
for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) {
- for (bit_num = 0; bit_num <= 7; ++bit_num) {
+ for (bit_num = 0; bit_num <= 7; ++bit_num) {
set_write_deskew_offset(node, ddr_interface_num, bit_num, byte_lane + 1,
dskdat->bytes[byte_lane].bits[bit_num]);
} /* for (bit_num = 0; bit_num <= 7; ++bit_num) */
} /* for (byte_lane = 0; byte_lane < byte_limit; byte_lane++) */
-
+
return;
}
@@ -1014,7 +1022,7 @@ Perform_Write_Deskew_Training(bdk_node_t node, int ddr_interface_num)
if (errors & (1 << byte)) { // yes, error(s) in the byte lane in this rank
bit_errs = ((byte == 8) ? bad_bits[1] : bad_bits[0] >> (8 * byte)) & 0xFFULL;
- VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: Byte %d Value %d: Address 0x%012lx errors 0x%x/0x%x\n",
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: Byte %d Value %d: Address 0x%012llx errors 0x%x/0x%x\n",
node, ddr_interface_num, rankx, byte,
dskval, phys_addr, errors, bit_errs);
@@ -1148,15 +1156,15 @@ static int compute_Vref_1slot_2rank(int rtt_wr, int rtt_park, int dqx_ctl, int r
Dprintf("Vrefpc = %d\n", Vrefpc);
if (Vrefpc < Rangepc) { // < range1 base, use range2
- Vref_range = 1 << 6; // set bit A6 for range2
- Rangepc = 4500; // range2 base is 45%
+ Vref_range = 1 << 6; // set bit A6 for range2
+ Rangepc = 4500; // range2 base is 45%
}
Vref_value = divide_nint(Vrefpc - Rangepc, 65);
if (Vref_value < 0)
- Vref_value = Vref_range; // set to base of range as lowest value
+ Vref_value = Vref_range; // set to base of range as lowest value
else
- Vref_value |= Vref_range;
+ Vref_value |= Vref_range;
Dprintf("Vref_value = %d (0x%02x)\n", Vref_value, Vref_value);
debug_print("rtt_wr:%d, rtt_park:%d, dqx_ctl:%d, Vref_value:%d (0x%x)\n",
@@ -1210,19 +1218,19 @@ static int compute_Vref_2slot_2rank(int rtt_wr, int rtt_park_00, int rtt_park_01
//printf("Vrefpc = %ld\n", Vrefpc);
if (Vrefpc < Rangepc) { // < range1 base, use range2
- Vref_range = 1 << 6; // set bit A6 for range2
- Rangepc = 4500; // range2 base is 45%
+ Vref_range = 1 << 6; // set bit A6 for range2
+ Rangepc = 4500; // range2 base is 45%
}
Vref_value = divide_nint(Vrefpc - Rangepc, 65);
if (Vref_value < 0)
- Vref_value = Vref_range; // set to base of range as lowest value
+ Vref_value = Vref_range; // set to base of range as lowest value
else
- Vref_value |= Vref_range;
+ Vref_value |= Vref_range;
//printf("Vref_value = %d (0x%02x)\n", Vref_value, Vref_value);
debug_print("rtt_wr:%d, rtt_park_00:%d, rtt_park_01:%d, dqx_ctl:%d, rtt_nom:%d, Vref_value:%d (0x%x)\n",
- rtt_wr, rtt_park_00, rtt_park_01, dqx_ctl, rtt_nom, Vref_value, Vref_value);
+ rtt_wr, rtt_park_00, rtt_park_01, dqx_ctl, rtt_nom, Vref_value, Vref_value);
return Vref_value;
}
@@ -1230,8 +1238,8 @@ static int compute_Vref_2slot_2rank(int rtt_wr, int rtt_park_00, int rtt_park_01
// NOTE: only call this for DIMMs with 1 or 2 ranks, not 4.
int
compute_vref_value(bdk_node_t node, int ddr_interface_num,
- int rankx, int dimm_count, int rank_count,
- impedence_values_t *imp_values, int is_stacked_die)
+ int rankx, int dimm_count, int rank_count,
+ impedence_values_t *imp_values, int is_stacked_die)
{
int computed_final_vref_value = 0;
@@ -1260,56 +1268,56 @@ compute_vref_value(bdk_node_t node, int ddr_interface_num,
// separate calculations for 1 vs 2 DIMMs per LMC
if (dimm_count == 1) {
- // PARK comes from this rank if 1-rank, otherwise other rank
- index = (lmc_modereg_params2.u >> ((rankx ^ (rank_count - 1)) * 10 + 0)) & 0x07;
- int rtt_park = imp_values->rtt_nom_ohms[index];
- computed_final_vref_value = compute_Vref_1slot_2rank(rtt_wr, rtt_park, dqx_ctl, rank_count);
+ // PARK comes from this rank if 1-rank, otherwise other rank
+ index = (lmc_modereg_params2.u >> ((rankx ^ (rank_count - 1)) * 10 + 0)) & 0x07;
+ int rtt_park = imp_values->rtt_nom_ohms[index];
+ computed_final_vref_value = compute_Vref_1slot_2rank(rtt_wr, rtt_park, dqx_ctl, rank_count);
} else {
- // get both PARK values from the other DIMM
- index = (lmc_modereg_params2.u >> ((rankx ^ 0x02) * 10 + 0)) & 0x07;
- int rtt_park_00 = imp_values->rtt_nom_ohms[index];
- index = (lmc_modereg_params2.u >> ((rankx ^ 0x03) * 10 + 0)) & 0x07;
- int rtt_park_01 = imp_values->rtt_nom_ohms[index];
- // NOM comes from this rank if 1-rank, otherwise other rank
- index = (lmc_modereg_params1.u >> ((rankx ^ (rank_count - 1)) * 12 + 9)) & 0x07;
- rtt_nom = imp_values->rtt_nom_ohms[index];
- computed_final_vref_value = compute_Vref_2slot_2rank(rtt_wr, rtt_park_00, rtt_park_01, dqx_ctl, rtt_nom);
+ // get both PARK values from the other DIMM
+ index = (lmc_modereg_params2.u >> ((rankx ^ 0x02) * 10 + 0)) & 0x07;
+ int rtt_park_00 = imp_values->rtt_nom_ohms[index];
+ index = (lmc_modereg_params2.u >> ((rankx ^ 0x03) * 10 + 0)) & 0x07;
+ int rtt_park_01 = imp_values->rtt_nom_ohms[index];
+ // NOM comes from this rank if 1-rank, otherwise other rank
+ index = (lmc_modereg_params1.u >> ((rankx ^ (rank_count - 1)) * 12 + 9)) & 0x07;
+ rtt_nom = imp_values->rtt_nom_ohms[index];
+ computed_final_vref_value = compute_Vref_2slot_2rank(rtt_wr, rtt_park_00, rtt_park_01, dqx_ctl, rtt_nom);
}
#if ENABLE_COMPUTED_VREF_ADJUSTMENT
{
int saved_final_vref_value = computed_final_vref_value;
- BDK_CSR_INIT(lmc_config, node, BDK_LMCX_CONFIG(ddr_interface_num));
- /*
- New computed Vref = existing computed Vref – X
+ BDK_CSR_INIT(lmc_config, node, BDK_LMCX_CONFIG(ddr_interface_num));
+ /*
+ New computed Vref = existing computed Vref – X
- The value of X is depending on different conditions. Both #122 and #139 are 2Rx4 RDIMM,
- while #124 is stacked die 2Rx4, so I conclude the results into two conditions:
+ The value of X is depending on different conditions. Both #122 and #139 are 2Rx4 RDIMM,
+ while #124 is stacked die 2Rx4, so I conclude the results into two conditions:
- 1. Stacked Die: 2Rx4
- 1-slot: offset = 7. i, e New computed Vref = existing computed Vref – 7
- 2-slot: offset = 6
+ 1. Stacked Die: 2Rx4
+ 1-slot: offset = 7. i, e New computed Vref = existing computed Vref – 7
+ 2-slot: offset = 6
2. Regular: 2Rx4
1-slot: offset = 3
- 2-slot: offset = 2
- */
- // we know we never get called unless DDR4, so test just the other conditions
- if((!!__bdk_dram_is_rdimm(node, 0)) &&
- (rank_count == 2) &&
- (lmc_config.s.mode_x4dev))
- { // it must first be RDIMM and 2-rank and x4
- if (is_stacked_die) { // now do according to stacked die or not...
- computed_final_vref_value -= (dimm_count == 1) ? 7 : 6;
- } else {
- computed_final_vref_value -= (dimm_count == 1) ? 3 : 2;
- }
+ 2-slot: offset = 2
+ */
+ // we know we never get called unless DDR4, so test just the other conditions
+ if((!!__bdk_dram_is_rdimm(node, 0)) &&
+ (rank_count == 2) &&
+ (lmc_config.s.mode_x4dev))
+ { // it must first be RDIMM and 2-rank and x4
+ if (is_stacked_die) { // now do according to stacked die or not...
+ computed_final_vref_value -= (dimm_count == 1) ? 7 : 6;
+ } else {
+ computed_final_vref_value -= (dimm_count == 1) ? 3 : 2;
+ }
// we have adjusted it, so print it out if verbosity is right
VB_PRT(VBL_TME, "N%d.LMC%d.R%d: adjusting computed vref from %2d (0x%02x) to %2d (0x%02x)\n",
node, ddr_interface_num, rankx,
saved_final_vref_value, saved_final_vref_value,
computed_final_vref_value, computed_final_vref_value);
- }
+ }
}
#endif
return computed_final_vref_value;
@@ -1376,7 +1384,7 @@ static int encode_pbank_lsb_ddr3(int pbank_lsb, int ddr_interface_wide)
static uint64_t octeon_read_lmcx_ddr3_rlevel_dbg(bdk_node_t node, int ddr_interface_num, int idx)
{
DRAM_CSR_MODIFY(c, node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num),
- c.s.byte = idx);
+ c.s.byte = idx);
BDK_CSR_READ(node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num));
BDK_CSR_INIT(rlevel_dbg, node, BDK_LMCX_RLEVEL_DBG(ddr_interface_num));
return rlevel_dbg.s.bitmask;
@@ -1463,29 +1471,29 @@ validate_ddr3_rlevel_bitmask(rlevel_bitmask_t *rlevel_bitmask_p, int ddr_type)
uint64_t temp;
if (bitmask == 0) {
- blank += RLEVEL_BITMASK_BLANK_ERROR;
+ blank += RLEVEL_BITMASK_BLANK_ERROR;
} else {
- /* Look for fb, the first bit */
+ /* Look for fb, the first bit */
temp = bitmask;
while (!(temp & 1)) {
firstbit++;
temp >>= 1;
}
- /* Look for lb, the last bit */
+ /* Look for lb, the last bit */
lastbit = firstbit;
while ((temp >>= 1))
lastbit++;
- /* Start with the max range to try to find the largest mask within the bitmask data */
+ /* Start with the max range to try to find the largest mask within the bitmask data */
width = MASKRANGE_BITS;
for (mask = MASKRANGE; mask > 0; mask >>= 1, --width) {
- for (mstart = lastbit - width + 1; mstart >= firstbit; --mstart) {
+ for (mstart = lastbit - width + 1; mstart >= firstbit; --mstart) {
temp = mask << mstart;
- if ((bitmask & temp) == temp)
+ if ((bitmask & temp) == temp)
goto done_now;
- }
+ }
}
done_now:
/* look for any more contiguous 1's to the right of mstart */
@@ -1501,16 +1509,16 @@ validate_ddr3_rlevel_bitmask(rlevel_bitmask_t *rlevel_bitmask_p, int ddr_type)
if (extras > 0)
toolong = RLEVEL_BITMASK_TOOLONG_ERROR * ((1 << extras) - 1);
- /* Detect if bitmask is too narrow. */
- if (width < 4)
- narrow = (4 - width) * RLEVEL_BITMASK_NARROW_ERROR;
+ /* Detect if bitmask is too narrow. */
+ if (width < 4)
+ narrow = (4 - width) * RLEVEL_BITMASK_NARROW_ERROR;
- /* detect leading bubble bits, that is, any 0's between first and mstart */
+ /* detect leading bubble bits, that is, any 0's between first and mstart */
temp = bitmask >> (firstbit + 1);
i = mstart - firstbit - 1;
while (--i >= 0) {
- if ((temp & 1) == 0)
- bubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
+ if ((temp & 1) == 0)
+ bubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
temp >>= 1;
}
@@ -1520,7 +1528,7 @@ validate_ddr3_rlevel_bitmask(rlevel_bitmask_t *rlevel_bitmask_p, int ddr_type)
if (temp & 1) { /* Detect 1 bits after the trailing end of the mask, including last. */
trailing += RLEVEL_BITMASK_TRAILING_BITS_ERROR;
} else { /* Detect trailing bubble bits, that is, any 0's between end-of-mask and last */
- tbubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
+ tbubble += RLEVEL_BITMASK_BUBBLE_BITS_ERROR;
}
temp >>= 1;
}
@@ -1532,7 +1540,7 @@ validate_ddr3_rlevel_bitmask(rlevel_bitmask_t *rlevel_bitmask_p, int ddr_type)
rlevel_bitmask_p->mstart = mstart;
rlevel_bitmask_p->width = width;
- VB_PRT(VBL_DEV2, "bm:%08lx mask:%02lx, width:%2u, mstart:%2d, fb:%2u, lb:%2u"
+ VB_PRT(VBL_DEV2, "bm:%08lx mask:%02llx, width:%2u, mstart:%2d, fb:%2u, lb:%2u"
" (bu:%2d, tb:%2d, bl:%2d, n:%2d, t:%2d, x:%2d) errors:%3d %s\n",
(unsigned long) bitmask, mask, width, mstart,
firstbit, lastbit, bubble, tbubble, blank, narrow,
@@ -1548,17 +1556,17 @@ static int compute_ddr3_rlevel_delay(uint8_t mstart, uint8_t width, bdk_lmcx_rle
debug_bitmask_print(" offset_en:%d", rlevel_ctl.cn8.offset_en);
if (rlevel_ctl.s.offset_en) {
- delay = max(mstart, mstart + width - 1 - rlevel_ctl.s.offset);
+ delay = max(mstart, mstart + width - 1 - rlevel_ctl.s.offset);
} else {
- /* if (rlevel_ctl.s.offset) { */ /* Experimental */
- if (0) {
- delay = max(mstart + rlevel_ctl.s.offset, mstart + 1);
- /* Insure that the offset delay falls within the bitmask */
- delay = min(delay, mstart + width-1);
- } else {
- delay = (width - 1) / 2 + mstart; /* Round down */
- /* delay = (width/2) + mstart; */ /* Round up */
- }
+ /* if (rlevel_ctl.s.offset) { */ /* Experimental */
+ if (0) {
+ delay = max(mstart + rlevel_ctl.s.offset, mstart + 1);
+ /* Insure that the offset delay falls within the bitmask */
+ delay = min(delay, mstart + width-1);
+ } else {
+ delay = (width - 1) / 2 + mstart; /* Round down */
+ /* delay = (width/2) + mstart; */ /* Round up */
+ }
}
return delay;
@@ -1568,23 +1576,23 @@ static int compute_ddr3_rlevel_delay(uint8_t mstart, uint8_t width, bdk_lmcx_rle
#define WLEVEL_BYTE_MSK ((1UL << 5) - 1)
static void update_wlevel_rank_struct(bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank,
- int byte, int delay)
+ int byte, int delay)
{
bdk_lmcx_wlevel_rankx_t temp_wlevel_rank;
if (byte >= 0 && byte <= 8) {
- temp_wlevel_rank.u = lmc_wlevel_rank->u;
- temp_wlevel_rank.u &= ~(WLEVEL_BYTE_MSK << (WLEVEL_BYTE_BITS * byte));
- temp_wlevel_rank.u |= ((delay & WLEVEL_BYTE_MSK) << (WLEVEL_BYTE_BITS * byte));
- lmc_wlevel_rank->u = temp_wlevel_rank.u;
+ temp_wlevel_rank.u = lmc_wlevel_rank->u;
+ temp_wlevel_rank.u &= ~(WLEVEL_BYTE_MSK << (WLEVEL_BYTE_BITS * byte));
+ temp_wlevel_rank.u |= ((delay & WLEVEL_BYTE_MSK) << (WLEVEL_BYTE_BITS * byte));
+ lmc_wlevel_rank->u = temp_wlevel_rank.u;
}
}
static int get_wlevel_rank_struct(bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank,
- int byte)
+ int byte)
{
int delay = 0;
if (byte >= 0 && byte <= 8) {
- delay = ((lmc_wlevel_rank->u) >> (WLEVEL_BYTE_BITS * byte)) & WLEVEL_BYTE_MSK;
+ delay = ((lmc_wlevel_rank->u) >> (WLEVEL_BYTE_BITS * byte)) & WLEVEL_BYTE_MSK;
}
return delay;
}
@@ -1593,9 +1601,9 @@ static int get_wlevel_rank_struct(bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank,
// entry = 1 is valid, entry = 0 is invalid
static int
validity_matrix[4][4] = {[0] {1,1,1,0}, // valid pairs when cv == 0: 0,0 + 0,1 + 0,2 == "7"
- [1] {0,1,1,1}, // valid pairs when cv == 1: 1,1 + 1,2 + 1,3 == "E"
- [2] {1,0,1,1}, // valid pairs when cv == 2: 2,2 + 2,3 + 2,0 == "D"
- [3] {1,1,0,1}}; // valid pairs when cv == 3: 3,3 + 3,0 + 3,1 == "B"
+ [1] {0,1,1,1}, // valid pairs when cv == 1: 1,1 + 1,2 + 1,3 == "E"
+ [2] {1,0,1,1}, // valid pairs when cv == 2: 2,2 + 2,3 + 2,0 == "D"
+ [3] {1,1,0,1}}; // valid pairs when cv == 3: 3,3 + 3,0 + 3,1 == "B"
#endif
static int
validate_seq(int *wl, int *seq)
@@ -1604,23 +1612,23 @@ validate_seq(int *wl, int *seq)
int bitnum;
seqx = 0;
while (seq[seqx+1] >= 0) { // stop on next seq entry == -1
- // but now, check current versus next
+ // but now, check current versus next
#if 0
- if ( !validity_matrix [wl[seq[seqx]]] [wl[seq[seqx+1]]] )
- return 1;
+ if ( !validity_matrix [wl[seq[seqx]]] [wl[seq[seqx+1]]] )
+ return 1;
#else
- bitnum = (wl[seq[seqx]] << 2) | wl[seq[seqx+1]];
- if (!((1 << bitnum) & 0xBDE7)) // magic validity number (see matrix above)
- return 1;
+ bitnum = (wl[seq[seqx]] << 2) | wl[seq[seqx+1]];
+ if (!((1 << bitnum) & 0xBDE7)) // magic validity number (see matrix above)
+ return 1;
#endif
- seqx++;
+ seqx++;
}
return 0;
}
static int
Validate_HW_WL_Settings(bdk_node_t node, int ddr_interface_num,
- bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank,
+ bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank,
int ecc_ena)
{
int wl[9], byte, errors;
@@ -1634,15 +1642,15 @@ Validate_HW_WL_Settings(bdk_node_t node, int ddr_interface_num,
// in the CSR, bytes 0-7 are always data, byte 8 is ECC
for (byte = 0; byte < 8+ecc_ena; byte++) {
- wl[byte] = (get_wlevel_rank_struct(lmc_wlevel_rank, byte) >> 1) & 3; // preprocess :-)
+ wl[byte] = (get_wlevel_rank_struct(lmc_wlevel_rank, byte) >> 1) & 3; // preprocess :-)
}
errors = 0;
if (__bdk_dram_is_rdimm(node, 0) != 0) { // RDIMM order
- errors = validate_seq(wl, (ecc_ena) ? rseq1 : rseq1no);
- errors += validate_seq(wl, rseq2);
+ errors = validate_seq(wl, (ecc_ena) ? rseq1 : rseq1no);
+ errors += validate_seq(wl, rseq2);
} else { // UDIMM order
- errors = validate_seq(wl, (ecc_ena) ? useq : useqno);
+ errors = validate_seq(wl, (ecc_ena) ? useq : useqno);
}
return errors;
@@ -1652,51 +1660,51 @@ Validate_HW_WL_Settings(bdk_node_t node, int ddr_interface_num,
#define RLEVEL_BYTE_MSK ((1UL << 6) - 1)
static void update_rlevel_rank_struct(bdk_lmcx_rlevel_rankx_t *lmc_rlevel_rank,
- int byte, int delay)
+ int byte, int delay)
{
bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
if (byte >= 0 && byte <= 8) {
- temp_rlevel_rank.u = lmc_rlevel_rank->u & ~(RLEVEL_BYTE_MSK << (RLEVEL_BYTE_BITS * byte));
- temp_rlevel_rank.u |= ((delay & RLEVEL_BYTE_MSK) << (RLEVEL_BYTE_BITS * byte));
- lmc_rlevel_rank->u = temp_rlevel_rank.u;
+ temp_rlevel_rank.u = lmc_rlevel_rank->u & ~(RLEVEL_BYTE_MSK << (RLEVEL_BYTE_BITS * byte));
+ temp_rlevel_rank.u |= ((delay & RLEVEL_BYTE_MSK) << (RLEVEL_BYTE_BITS * byte));
+ lmc_rlevel_rank->u = temp_rlevel_rank.u;
}
}
#if RLEXTRAS_PATCH || !DISABLE_SW_WL_PASS_2
static int get_rlevel_rank_struct(bdk_lmcx_rlevel_rankx_t *lmc_rlevel_rank,
- int byte)
+ int byte)
{
int delay = 0;
if (byte >= 0 && byte <= 8) {
- delay = ((lmc_rlevel_rank->u) >> (RLEVEL_BYTE_BITS * byte)) & RLEVEL_BYTE_MSK;
+ delay = ((lmc_rlevel_rank->u) >> (RLEVEL_BYTE_BITS * byte)) & RLEVEL_BYTE_MSK;
}
return delay;
}
#endif
static void unpack_rlevel_settings(int ddr_interface_bytemask, int ecc_ena,
- rlevel_byte_data_t *rlevel_byte,
- bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank)
+ rlevel_byte_data_t *rlevel_byte,
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank)
{
if ((ddr_interface_bytemask & 0xff) == 0xff) {
- if (ecc_ena) {
- rlevel_byte[8].delay = lmc_rlevel_rank.cn83xx.byte7;
- rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte6;
- rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte5;
- rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte4;
- rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte8; /* ECC */
- } else {
- rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte7;
- rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte6;
- rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte5;
- rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte4;
- }
+ if (ecc_ena) {
+ rlevel_byte[8].delay = lmc_rlevel_rank.cn83xx.byte7;
+ rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte6;
+ rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte5;
+ rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte4;
+ rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte8; /* ECC */
+ } else {
+ rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte7;
+ rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte6;
+ rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte5;
+ rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte4;
+ }
} else {
- rlevel_byte[8].delay = lmc_rlevel_rank.cn83xx.byte8; /* unused */
- rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte7; /* unused */
- rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte6; /* unused */
- rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte5; /* unused */
- rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte4; /* ECC */
+ rlevel_byte[8].delay = lmc_rlevel_rank.cn83xx.byte8; /* unused */
+ rlevel_byte[7].delay = lmc_rlevel_rank.cn83xx.byte7; /* unused */
+ rlevel_byte[6].delay = lmc_rlevel_rank.cn83xx.byte6; /* unused */
+ rlevel_byte[5].delay = lmc_rlevel_rank.cn83xx.byte5; /* unused */
+ rlevel_byte[4].delay = lmc_rlevel_rank.cn83xx.byte4; /* ECC */
}
rlevel_byte[3].delay = lmc_rlevel_rank.cn83xx.byte3;
rlevel_byte[2].delay = lmc_rlevel_rank.cn83xx.byte2;
@@ -1705,30 +1713,30 @@ static void unpack_rlevel_settings(int ddr_interface_bytemask, int ecc_ena,
}
static void pack_rlevel_settings(int ddr_interface_bytemask, int ecc_ena,
- rlevel_byte_data_t *rlevel_byte,
- bdk_lmcx_rlevel_rankx_t *final_rlevel_rank)
+ rlevel_byte_data_t *rlevel_byte,
+ bdk_lmcx_rlevel_rankx_t *final_rlevel_rank)
{
bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank = *final_rlevel_rank;
if ((ddr_interface_bytemask & 0xff) == 0xff) {
- if (ecc_ena) {
- lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[8].delay;
- lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[7].delay;
- lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[6].delay;
- lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[5].delay;
- lmc_rlevel_rank.cn83xx.byte8 = rlevel_byte[4].delay; /* ECC */
- } else {
- lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[7].delay;
- lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[6].delay;
- lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[5].delay;
- lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[4].delay;
- }
+ if (ecc_ena) {
+ lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[8].delay;
+ lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[7].delay;
+ lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[6].delay;
+ lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[5].delay;
+ lmc_rlevel_rank.cn83xx.byte8 = rlevel_byte[4].delay; /* ECC */
+ } else {
+ lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[7].delay;
+ lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[6].delay;
+ lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[5].delay;
+ lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[4].delay;
+ }
} else {
- lmc_rlevel_rank.cn83xx.byte8 = rlevel_byte[8].delay;
- lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[7].delay;
- lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[6].delay;
- lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[5].delay;
- lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[4].delay;
+ lmc_rlevel_rank.cn83xx.byte8 = rlevel_byte[8].delay;
+ lmc_rlevel_rank.cn83xx.byte7 = rlevel_byte[7].delay;
+ lmc_rlevel_rank.cn83xx.byte6 = rlevel_byte[6].delay;
+ lmc_rlevel_rank.cn83xx.byte5 = rlevel_byte[5].delay;
+ lmc_rlevel_rank.cn83xx.byte4 = rlevel_byte[4].delay;
}
lmc_rlevel_rank.cn83xx.byte3 = rlevel_byte[3].delay;
lmc_rlevel_rank.cn83xx.byte2 = rlevel_byte[2].delay;
@@ -1740,7 +1748,7 @@ static void pack_rlevel_settings(int ddr_interface_bytemask, int ecc_ena,
#if !DISABLE_SW_WL_PASS_2
static void rlevel_to_wlevel(bdk_lmcx_rlevel_rankx_t *lmc_rlevel_rank,
- bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank, int byte)
+ bdk_lmcx_wlevel_rankx_t *lmc_wlevel_rank, int byte)
{
int byte_delay = get_rlevel_rank_struct(lmc_rlevel_rank, byte);
@@ -1756,9 +1764,9 @@ static void rlevel_to_wlevel(bdk_lmcx_rlevel_rankx_t *lmc_rlevel_rank,
static int calc_delay_trend(int v)
{
if (v == 0)
- return (0);
+ return (0);
if (v < 0)
- return (-1);
+ return (-1);
return 1;
}
@@ -1770,7 +1778,7 @@ static int calc_delay_trend(int v)
// NOTE: "max_adj_delay_inc" argument is, by default, 1 for DDR3 and 2 for DDR4
static int nonsequential_delays(rlevel_byte_data_t *rlevel_byte,
- int start, int end, int max_adj_delay_inc)
+ int start, int end, int max_adj_delay_inc)
{
int error = 0;
int delay_trend, prev_trend = 0;
@@ -1782,36 +1790,36 @@ static int nonsequential_delays(rlevel_byte_data_t *rlevel_byte,
for (byte_idx = start; byte_idx < end; ++byte_idx) {
byte_err = 0;
- delay_diff = rlevel_byte[byte_idx+1].delay - rlevel_byte[byte_idx].delay;
- delay_trend = calc_delay_trend(delay_diff);
-
- debug_bitmask_print("Byte %d: %2d, Byte %d: %2d, delay_trend: %2d, prev_trend: %2d",
- byte_idx+0, rlevel_byte[byte_idx+0].delay,
- byte_idx+1, rlevel_byte[byte_idx+1].delay,
- delay_trend, prev_trend);
+ delay_diff = rlevel_byte[byte_idx+1].delay - rlevel_byte[byte_idx].delay;
+ delay_trend = calc_delay_trend(delay_diff);
+
+ debug_bitmask_print("Byte %d: %2d, Byte %d: %2d, delay_trend: %2d, prev_trend: %2d",
+ byte_idx+0, rlevel_byte[byte_idx+0].delay,
+ byte_idx+1, rlevel_byte[byte_idx+1].delay,
+ delay_trend, prev_trend);
/* Increment error each time the trend changes to the opposite direction.
*/
- if ((prev_trend != 0) && (delay_trend != 0) && (prev_trend != delay_trend)) {
- byte_err += RLEVEL_NONSEQUENTIAL_DELAY_ERROR;
- prev_trend = delay_trend;
- debug_bitmask_print(" => Nonsequential byte delay");
- }
+ if ((prev_trend != 0) && (delay_trend != 0) && (prev_trend != delay_trend)) {
+ byte_err += RLEVEL_NONSEQUENTIAL_DELAY_ERROR;
+ prev_trend = delay_trend;
+ debug_bitmask_print(" => Nonsequential byte delay");
+ }
- delay_inc = _abs(delay_diff); // how big was the delay change, if any
+ delay_inc = _abs(delay_diff); // how big was the delay change, if any
/* Even if the trend did not change to the opposite direction, check for
the magnitude of the change, and scale the penalty by the amount that
the size is larger than the provided limit.
*/
- if ((max_adj_delay_inc != 0) && (delay_inc > max_adj_delay_inc)) {
- byte_err += (delay_inc - max_adj_delay_inc) * RLEVEL_ADJACENT_DELAY_ERROR;
- debug_bitmask_print(" => Adjacent delay error");
- }
+ if ((max_adj_delay_inc != 0) && (delay_inc > max_adj_delay_inc)) {
+ byte_err += (delay_inc - max_adj_delay_inc) * RLEVEL_ADJACENT_DELAY_ERROR;
+ debug_bitmask_print(" => Adjacent delay error");
+ }
- debug_bitmask_print("\n");
- if (delay_trend != 0)
- prev_trend = delay_trend;
+ debug_bitmask_print("\n");
+ if (delay_trend != 0)
+ prev_trend = delay_trend;
rlevel_byte[byte_idx+1].sqerrs = byte_err;
error += byte_err;
@@ -1826,15 +1834,15 @@ static int roundup_ddr3_wlevel_bitmask(int bitmask)
int delay;
for (leader=0; leader<8; ++leader) {
- shifted_bitmask = (bitmask>>leader);
- if ((shifted_bitmask&1) == 0)
- break;
+ shifted_bitmask = (bitmask>>leader);
+ if ((shifted_bitmask&1) == 0)
+ break;
}
for (/*leader=leader*/; leader<16; ++leader) {
- shifted_bitmask = (bitmask>>(leader%8));
- if (shifted_bitmask&1)
- break;
+ shifted_bitmask = (bitmask>>(leader%8));
+ if (shifted_bitmask&1)
+ break;
}
delay = (leader & 1) ? leader + 1 : leader;
@@ -1848,10 +1856,10 @@ static int is_dll_offset_provided(const int8_t *dll_offset_table)
{
int i;
if (dll_offset_table != NULL) {
- for (i=0; i<9; ++i) {
- if (dll_offset_table[i] != 0)
- return (1);
- }
+ for (i=0; i<9; ++i) {
+ if (dll_offset_table[i] != 0)
+ return (1);
+ }
}
return (0);
}
@@ -1865,27 +1873,27 @@ static int is_dll_offset_provided(const int8_t *dll_offset_table)
#define WITH_FINAL 4
#define WITH_COMPUTE 8
static void do_display_RL(bdk_node_t node, int ddr_interface_num,
- bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank,
- int rank, int flags, int score)
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank,
+ int rank, int flags, int score)
{
char score_buf[16];
if (flags & WITH_SCORE)
- snprintf(score_buf, sizeof(score_buf), "(%d)", score);
+ snprintf(score_buf, sizeof(score_buf), "(%d)", score);
else {
- score_buf[0] = ' '; score_buf[1] = 0;
+ score_buf[0] = ' '; score_buf[1] = 0;
}
- char *msg_buf;
+ const char *msg_buf;
char hex_buf[20];
if (flags & WITH_AVERAGE) {
- msg_buf = " DELAY AVERAGES ";
+ msg_buf = " DELAY AVERAGES ";
} else if (flags & WITH_FINAL) {
- msg_buf = " FINAL SETTINGS ";
+ msg_buf = " FINAL SETTINGS ";
} else if (flags & WITH_COMPUTE) {
- msg_buf = " COMPUTED DELAYS ";
+ msg_buf = " COMPUTED DELAYS ";
} else {
- snprintf(hex_buf, sizeof(hex_buf), "0x%016lX", lmc_rlevel_rank.u);
- msg_buf = hex_buf;
+ snprintf(hex_buf, sizeof(hex_buf), "0x%016lX", lmc_rlevel_rank.u);
+ msg_buf = hex_buf;
}
ddr_print("N%d.LMC%d.R%d: Rlevel Rank %#4x, %s : %5d %5d %5d %5d %5d %5d %5d %5d %5d %s\n",
@@ -1901,7 +1909,7 @@ static void do_display_RL(bdk_node_t node, int ddr_interface_num,
lmc_rlevel_rank.cn83xx.byte2,
lmc_rlevel_rank.cn83xx.byte1,
lmc_rlevel_rank.cn83xx.byte0,
- score_buf
+ score_buf
);
}
@@ -1948,8 +1956,8 @@ display_RL_with_computed(bdk_node_t node, int ddr_interface_num, bdk_lmcx_rlevel
static const char *with_rodt_canned_msgs[4] = { " ", "SKIPPING ", "BEST ROW ", "BEST SCORE" };
static void display_RL_with_RODT(bdk_node_t node, int ddr_interface_num,
- bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank, int rank, int score,
- int nom_ohms, int rodt_ohms, int flag)
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank, int rank, int score,
+ int nom_ohms, int rodt_ohms, int flag)
{
const char *msg_buf;
char set_buf[20];
@@ -1987,15 +1995,15 @@ static void display_RL_with_RODT(bdk_node_t node, int ddr_interface_num,
static void
do_display_WL(bdk_node_t node, int ddr_interface_num, bdk_lmcx_wlevel_rankx_t lmc_wlevel_rank, int rank, int flags)
{
- char *msg_buf;
+ const char *msg_buf;
char hex_buf[20];
int vbl;
if (flags & WITH_FINAL) {
- msg_buf = " FINAL SETTINGS ";
+ msg_buf = " FINAL SETTINGS ";
vbl = VBL_NORM;
} else {
- snprintf(hex_buf, sizeof(hex_buf), "0x%016lX", lmc_wlevel_rank.u);
- msg_buf = hex_buf;
+ snprintf(hex_buf, sizeof(hex_buf), "0x%016lX", lmc_wlevel_rank.u);
+ msg_buf = hex_buf;
vbl = VBL_FAE;
}
@@ -2070,7 +2078,7 @@ do_display_BM(bdk_node_t node, int ddr_interface_num, int rank, void *bm, int fl
} else
if (flags == WITH_RL_BITMASKS) { // rlevel_bitmask array in PACKED index order, so just print them
rlevel_bitmask_t *rlevel_bitmask = (rlevel_bitmask_t *)bm;
- ddr_print("N%d.LMC%d.R%d: Rlevel Debug Bitmasks 8:0 : %05lx %05lx %05lx %05lx %05lx %05lx %05lx %05lx %05lx\n",
+ ddr_print("N%d.LMC%d.R%d: Rlevel Debug Bitmasks 8:0 : %05llx %05llx %05llx %05llx %05llx %05llx %05llx %05llx %05llx\n",
node, ddr_interface_num, rank,
PPBM(rlevel_bitmask[8].bm),
PPBM(rlevel_bitmask[7].bm),
@@ -2140,7 +2148,7 @@ display_RL_SEQ_scores(bdk_node_t node, int ddr_interface_num, int rank, rlevel_b
}
unsigned short load_dll_offset(bdk_node_t node, int ddr_interface_num,
- int dll_offset_mode, int byte_offset, int byte)
+ int dll_offset_mode, int byte_offset, int byte)
{
bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
/* byte_sel:
@@ -2150,17 +2158,17 @@ unsigned short load_dll_offset(bdk_node_t node, int ddr_interface_num,
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
SET_DDR_DLL_CTL3(load_offset, 0);
- DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
SET_DDR_DLL_CTL3(mode_sel, dll_offset_mode);
SET_DDR_DLL_CTL3(offset, (_abs(byte_offset)&0x3f) | (_sign(byte_offset) << 6)); /* Always 6-bit field? */
SET_DDR_DLL_CTL3(byte_sel, byte_sel);
- DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
SET_DDR_DLL_CTL3(load_offset, 1);
- DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
return ((unsigned short) GET_DDR_DLL_CTL3(offset));
@@ -2172,33 +2180,33 @@ void change_dll_offset_enable(bdk_node_t node, int ddr_interface_num, int change
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
SET_DDR_DLL_CTL3(offset_ena, !!change);
- DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
}
static void process_custom_dll_offsets(bdk_node_t node, int ddr_interface_num, const char *enable_str,
- const int8_t *offsets, const char *byte_str, int mode)
+ const int8_t *offsets, const char *byte_str, int mode)
{
const char *s;
int enabled;
int provided;
if ((s = lookup_env_parameter("%s", enable_str)) != NULL) {
- enabled = !!strtol(s, NULL, 0);
+ enabled = !!strtol(s, NULL, 0);
} else
- enabled = -1;
+ enabled = -1;
// enabled == -1: no override, do only configured offsets if provided
// enabled == 0: override OFF, do NOT do it even if configured offsets provided
// enabled == 1: override ON, do it for overrides plus configured offsets
if (enabled == 0)
- return;
+ return;
provided = is_dll_offset_provided(offsets);
if (enabled < 0 && !provided)
- return;
+ return;
int byte_offset;
unsigned short offset[9] = {0};
@@ -2209,27 +2217,27 @@ static void process_custom_dll_offsets(bdk_node_t node, int ddr_interface_num, c
for (byte = 0; byte < 9; ++byte) {
- // always take the provided, if available
- byte_offset = (provided) ? offsets[byte] : 0;
+ // always take the provided, if available
+ byte_offset = (provided) ? offsets[byte] : 0;
- // then, if enabled, use any overrides present
- if (enabled > 0) {
- if ((s = lookup_env_parameter(byte_str, ddr_interface_num, byte)) != NULL) {
+ // then, if enabled, use any overrides present
+ if (enabled > 0) {
+ if ((s = lookup_env_parameter(byte_str, ddr_interface_num, byte)) != NULL) {
byte_offset = strtol(s, NULL, 0);
}
- }
+ }
- offset[byte] = load_dll_offset(node, ddr_interface_num, mode, byte_offset, byte);
+ offset[byte] = load_dll_offset(node, ddr_interface_num, mode, byte_offset, byte);
}
// re-enable offsets after loading
change_dll_offset_enable(node, ddr_interface_num, 1);
ddr_print("N%d.LMC%d: DLL %s Offset 8:0 :"
- " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
- node, ddr_interface_num, (mode == 2) ? "Read " : "Write",
- offset[8], offset[7], offset[6], offset[5], offset[4],
- offset[3], offset[2], offset[1], offset[0]);
+ " 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x 0x%02x\n",
+ node, ddr_interface_num, (mode == 2) ? "Read " : "Write",
+ offset[8], offset[7], offset[6], offset[5], offset[4],
+ offset[3], offset[2], offset[1], offset[0]);
}
void perform_octeon3_ddr3_sequence(bdk_node_t node, int rank_mask, int ddr_interface_num, int sequence)
@@ -2277,18 +2285,18 @@ void perform_octeon3_ddr3_sequence(bdk_node_t node, int rank_mask, int ddr_inter
const char *s;
static const char *sequence_str[] = {
- "Power-up/init",
- "Read-leveling",
- "Self-refresh entry",
- "Self-refresh exit",
- "Illegal",
- "Illegal",
- "Write-leveling",
- "Init Register Control Words",
- "Mode Register Write",
- "MPR Register Access",
- "LMC Deskew/Internal Vref training",
- "Offset Training"
+ "Power-up/init",
+ "Read-leveling",
+ "Self-refresh entry",
+ "Self-refresh exit",
+ "Illegal",
+ "Illegal",
+ "Write-leveling",
+ "Init Register Control Words",
+ "Mode Register Write",
+ "MPR Register Access",
+ "LMC Deskew/Internal Vref training",
+ "Offset Training"
};
bdk_lmcx_seq_ctl_t seq_ctl;
@@ -2307,9 +2315,13 @@ void perform_octeon3_ddr3_sequence(bdk_node_t node, int rank_mask, int ddr_inter
node, ddr_interface_num, sequence, rank_mask, sequence_str[sequence]);
if ((s = lookup_env_parameter("ddr_trigger_sequence%d", sequence)) != NULL) {
- int trigger = strtoul(s, NULL, 0);
- if (trigger)
- pulse_gpio_pin(node, 1, 2);
+ /* FIXME(dhendrix): this appears to be meant for the eval board */
+#if 0
+ int trigger = strtoul(s, NULL, 0);
+ if (trigger)
+ pulse_gpio_pin(node, 1, 2);
+#endif
+ error_print("env parameter ddr_trigger_sequence%d not found\n", sequence);
}
DRAM_CSR_WRITE(node, BDK_LMCX_SEQ_CTL(ddr_interface_num), seq_ctl.u);
@@ -2317,14 +2329,13 @@ void perform_octeon3_ddr3_sequence(bdk_node_t node, int rank_mask, int ddr_inter
/* Wait 100us minimum before checking for sequence complete */
bdk_wait_usec(100);
- if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
- BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_SEQ_CTL(ddr_interface_num), seq_complete, ==, 1, 1000000))
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_SEQ_CTL(ddr_interface_num), seq_complete, ==, 1, 1000000))
{
- error_print("N%d.LMC%d: Timeout waiting for LMC sequence=%x, rank_mask=0x%02x, ignoring...\n",
- node, ddr_interface_num, sequence, rank_mask);
+ error_print("N%d.LMC%d: Timeout waiting for LMC sequence=%x, rank_mask=0x%02x, ignoring...\n",
+ node, ddr_interface_num, sequence, rank_mask);
}
else {
- VB_PRT(VBL_SEQ, "N%d.LMC%d: LMC sequence=%x: Completed.\n", node, ddr_interface_num, sequence);
+ VB_PRT(VBL_SEQ, "N%d.LMC%d: LMC sequence=%x: Completed.\n", node, ddr_interface_num, sequence);
}
}
@@ -2406,7 +2417,7 @@ static void do_ddr4_mpr_read(bdk_node_t node, int ddr_interface_num, int rank,
}
#endif
-int set_rdimm_mode(bdk_node_t node, int ddr_interface_num, int enable)
+static int set_rdimm_mode(bdk_node_t node, int ddr_interface_num, int enable)
{
bdk_lmcx_control_t lmc_control;
int save_rdimm_mode;
@@ -2422,7 +2433,7 @@ int set_rdimm_mode(bdk_node_t node, int ddr_interface_num, int enable)
#if ENABLE_DISPLAY_MPR_PAGE
static void ddr4_mpr_read(bdk_node_t node, int ddr_interface_num, int rank,
- int page, int location, uint64_t *mpr_data)
+ int page, int location, uint64_t *mpr_data)
{
do_ddr4_mpr_read(node, ddr_interface_num, rank, page, location);
@@ -2464,7 +2475,7 @@ static void Display_MPR_Page(bdk_node_t node, int rank_mask,
}
#endif
-void ddr4_mpr_write(bdk_node_t node, int ddr_interface_num, int rank,
+static void ddr4_mpr_write(bdk_node_t node, int ddr_interface_num, int rank,
int page, int location, uint8_t mpr_data)
{
bdk_lmcx_mr_mpr_ctl_t lmc_mr_mpr_ctl;
@@ -2495,7 +2506,7 @@ void ddr4_mpr_write(bdk_node_t node, int ddr_interface_num, int rank,
}
void set_vref(bdk_node_t node, int ddr_interface_num, int rank,
- int range, int value)
+ int range, int value)
{
bdk_lmcx_mr_mpr_ctl_t lmc_mr_mpr_ctl;
bdk_lmcx_modereg_params3_t lmc_modereg_params3;
@@ -2562,7 +2573,7 @@ static void set_DRAM_output_inversion (bdk_node_t node,
lmc_dimm_ctl.s.dimm0_wmask = 0x1;
lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0x0001 : 0x0000;
- ddr_print("LMC DIMM_CTL : 0x%016lx\n",
+ ddr_print("LMC DIMM_CTL : 0x%016llx\n",
lmc_dimm_ctl.u);
DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
@@ -2984,8 +2995,8 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#if SWL_TRY_HWL_ALT
typedef struct {
- uint16_t hwl_alt_mask; // mask of bytelanes with alternate
- uint16_t hwl_alt_delay[9]; // bytelane alternate avail if mask=1
+ uint16_t hwl_alt_mask; // mask of bytelanes with alternate
+ uint16_t hwl_alt_delay[9]; // bytelane alternate avail if mask=1
} hwl_alt_by_rank_t;
hwl_alt_by_rank_t hwl_alts[4];
memset(hwl_alts, 0, sizeof(hwl_alts));
@@ -3065,12 +3076,12 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
initialize_ddr_clock(node,
- ddr_configuration,
- cpu_hertz,
- ddr_hertz,
- ddr_ref_hertz,
- ddr_interface_num,
- ddr_interface_mask);
+ ddr_configuration,
+ cpu_hertz,
+ ddr_hertz,
+ ddr_ref_hertz,
+ ddr_interface_num,
+ ddr_interface_mask);
if (!odt_1rank_config)
odt_1rank_config = disable_odt_config;
@@ -3136,7 +3147,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
bank_bits = (2 + ((spd_banks >> 4) & 0x3)) + ((spd_banks >> 6) & 0x3);
bank_bits = min((int)bank_bits, 4); /* Controller can only address 4 bits. */
- spd_package = 0XFF & read_spd(node, &dimm_config_table[0], DDR4_SPD_PACKAGE_TYPE);
+ spd_package = 0XFF & read_spd(node, &dimm_config_table[0], DDR4_SPD_PACKAGE_TYPE);
if (spd_package & 0x80) { // non-monolithic device
is_stacked_die = (!disable_stacked_die) ? ((spd_package & 0x73) == 0x11) : 0;
ddr_print("DDR4: Package Type 0x%x (%s), %d die\n", spd_package,
@@ -3174,7 +3185,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
/* no. pkg ranks*/(1UL + ((spd_org >> 3) & 0x7));
if (is_3ds_dimm) // is it 3DS?
module_cap *= /* die_count */(uint64_t)(((spd_package >> 4) & 7) + 1);
- ddr_print("DDR4: Module Organization: SYMMETRICAL: capacity per module %ld GB\n",
+ ddr_print("DDR4: Module Organization: SYMMETRICAL: capacity per module %lld GB\n",
module_cap >> 30);
}
@@ -3189,16 +3200,16 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
spd_dimm_type = spd_module_type & 0x0F;
spd_rdimm = (spd_dimm_type == 1) || (spd_dimm_type == 5) || (spd_dimm_type == 8);
- if (spd_rdimm) {
- int spd_mfgr_id = read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_MANUFACTURER_ID_LSB) |
- (read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_MANUFACTURER_ID_MSB) << 8);
- int spd_register_rev = read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_REVISION_NUMBER);
- ddr_print("DDR4: RDIMM Register Manufacturer ID 0x%x Revision 0x%x\n",
- spd_mfgr_id, spd_register_rev);
+ if (spd_rdimm) {
+ int spd_mfgr_id = read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_MANUFACTURER_ID_LSB) |
+ (read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_MANUFACTURER_ID_MSB) << 8);
+ int spd_register_rev = read_spd(node, &dimm_config_table[0], DDR4_SPD_REGISTER_REVISION_NUMBER);
+ ddr_print("DDR4: RDIMM Register Manufacturer ID 0x%x Revision 0x%x\n",
+ spd_mfgr_id, spd_register_rev);
// RAWCARD A or B must be bit 7=0 and bits 4-0 either 00000(A) or 00001(B)
- spd_rawcard_AorB = ((spd_rawcard & 0x9fUL) <= 1);
- }
+ spd_rawcard_AorB = ((spd_rawcard & 0x9fUL) <= 1);
+ }
} else {
imp_values = &ddr3_impedence_values;
dimm_type_name = ddr3_dimm_types[spd_dimm_type];
@@ -3254,17 +3265,13 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
++fatal_error;
}
- if (bdk_is_platform(BDK_PLATFORM_ASIM))
- wlevel_loops = 0;
- else {
- wlevel_loops = WLEVEL_LOOPS_DEFAULT;
- // accept generic or interface-specific override but not for ASIM...
- if ((s = lookup_env_parameter("ddr_wlevel_loops")) == NULL)
- s = lookup_env_parameter("ddr%d_wlevel_loops", ddr_interface_num);
- if (s != NULL) {
- wlevel_loops = strtoul(s, NULL, 0);
- }
- }
+ wlevel_loops = WLEVEL_LOOPS_DEFAULT;
+ // accept generic or interface-specific override but not for ASIM...
+ if ((s = lookup_env_parameter("ddr_wlevel_loops")) == NULL)
+ s = lookup_env_parameter("ddr%d_wlevel_loops", ddr_interface_num);
+ if (s != NULL) {
+ wlevel_loops = strtoul(s, NULL, 0);
+ }
bunk_enable = (num_ranks > 1);
@@ -3360,13 +3367,13 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
if (ddr_type == DDR4_DRAM) {
- spd_cas_latency = ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE0)) << 0);
- spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE1)) << 8);
- spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE2)) << 16);
- spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE3)) << 24);
+ spd_cas_latency = ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE0)) << 0);
+ spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE1)) << 8);
+ spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE2)) << 16);
+ spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR4_SPD_CAS_LATENCIES_BYTE3)) << 24);
} else {
- spd_cas_latency = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_CAS_LATENCIES_LSB);
- spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_CAS_LATENCIES_MSB)) << 8);
+ spd_cas_latency = 0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_CAS_LATENCIES_LSB);
+ spd_cas_latency |= ((0xff & read_spd(node, &dimm_config_table[0], DDR3_SPD_CAS_LATENCIES_MSB)) << 8);
}
debug_print("spd_cas_latency : %#06x\n", spd_cas_latency );
@@ -3380,37 +3387,37 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
int spdFTB = 1;
tAAmin
- = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_LATENCY_TAAMIN)
- + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_LATENCY_FINE_TAAMIN);
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_LATENCY_TAAMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_LATENCY_FINE_TAAMIN);
ddr4_tCKAVGmin
- = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MINIMUM_CYCLE_TIME_TCKAVGMIN)
- + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CYCLE_TIME_FINE_TCKAVGMIN);
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MINIMUM_CYCLE_TIME_TCKAVGMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CYCLE_TIME_FINE_TCKAVGMIN);
ddr4_tCKAVGmax
- = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MAXIMUM_CYCLE_TIME_TCKAVGMAX)
- + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MAX_CYCLE_TIME_FINE_TCKAVGMAX);
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MAXIMUM_CYCLE_TIME_TCKAVGMAX)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MAX_CYCLE_TIME_FINE_TCKAVGMAX);
ddr4_tRCDmin
- = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_RAS_CAS_DELAY_TRCDMIN)
- + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_RAS_TO_CAS_DELAY_FINE_TRCDMIN);
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_RAS_CAS_DELAY_TRCDMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_RAS_TO_CAS_DELAY_FINE_TRCDMIN);
ddr4_tRPmin
- = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN)
- + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN);
+ = spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_TRPMIN)
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ROW_PRECHARGE_DELAY_FINE_TRPMIN);
ddr4_tRASmin
- = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_UPPER_NIBBLES_TRAS_TRC) & 0xf) << 8) +
- ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN) & 0xff));
+ = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_UPPER_NIBBLES_TRAS_TRC) & 0xf) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACTIVE_PRECHARGE_LSB_TRASMIN) & 0xff));
ddr4_tRCmin
- = spdMTB * ((((read_spd(node, &dimm_config_table[0], DDR4_SPD_UPPER_NIBBLES_TRAS_TRC) >> 4) & 0xf) << 8) +
- ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN) & 0xff))
- + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACT_TO_ACT_REFRESH_DELAY_FINE_TRCMIN);
+ = spdMTB * ((((read_spd(node, &dimm_config_table[0], DDR4_SPD_UPPER_NIBBLES_TRAS_TRC) >> 4) & 0xf) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACTIVE_REFRESH_LSB_TRCMIN) & 0xff))
+ + spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_ACT_TO_ACT_REFRESH_DELAY_FINE_TRCMIN);
ddr4_tRFC1min
- = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC1MIN) & 0xff) << 8) +
- ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC1MIN) & 0xff));
+ = spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC1MIN) & 0xff) << 8) +
+ ( read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_LSB_TRFC1MIN) & 0xff));
ddr4_tRFC2min
= spdMTB * (((read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_REFRESH_RECOVERY_MSB_TRFC2MIN) & 0xff) << 8) +
@@ -3436,14 +3443,14 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
= spdMTB * read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_TO_CAS_DELAY_TCCD_LMIN)
+ spdFTB * (SC_t) read_spd(node, &dimm_config_table[0], DDR4_SPD_MIN_CAS_TO_CAS_DELAY_FINE_TCCD_LMIN);
- ddr_print("%-45s : %6d ps\n", "Medium Timebase (MTB)", spdMTB);
- ddr_print("%-45s : %6d ps\n", "Fine Timebase (FTB)", spdFTB);
+ ddr_print("%-45s : %6d ps\n", "Medium Timebase (MTB)", spdMTB);
+ ddr_print("%-45s : %6d ps\n", "Fine Timebase (FTB)", spdFTB);
#define DDR4_TWR 15000
#define DDR4_TWTR_S 2500
- tCKmin = ddr4_tCKAVGmin;
+ tCKmin = ddr4_tCKAVGmin;
twr = DDR4_TWR;
trcd = ddr4_tRCDmin;
trrd = ddr4_tRRD_Smin;
@@ -3455,9 +3462,9 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
tfaw = ddr4_tFAWmin;
if (spd_rdimm) {
- spd_addr_mirror = read_spd(node, &dimm_config_table[0], DDR4_SPD_RDIMM_ADDR_MAPPING_FROM_REGISTER_TO_DRAM) & 0x1;
+ spd_addr_mirror = read_spd(node, &dimm_config_table[0], DDR4_SPD_RDIMM_ADDR_MAPPING_FROM_REGISTER_TO_DRAM) & 0x1;
} else {
- spd_addr_mirror = read_spd(node, &dimm_config_table[0], DDR4_SPD_UDIMM_ADDR_MAPPING_FROM_EDGE) & 0x1;
+ spd_addr_mirror = read_spd(node, &dimm_config_table[0], DDR4_SPD_UDIMM_ADDR_MAPPING_FROM_EDGE) & 0x1;
}
debug_print("spd_addr_mirror : %#06x\n", spd_addr_mirror );
@@ -3535,20 +3542,20 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
ddr_print("%-45s : %6d ps\n", "Minimum Act. to Act. Delay (tRRD_Lmin)", ddr4_tRRD_Lmin);
ddr_print("%-45s : %6d ps\n", "Minimum CAS to CAS Delay Time (tCCD_Lmin)", ddr4_tCCD_Lmin);
} else {
- ddr_print("Medium Timebase (MTB) : %6d ps\n", mtb_psec);
- ddr_print("Minimum Cycle Time (tCKmin) : %6d ps (%ld MT/s)\n", tCKmin,
+ ddr_print("Medium Timebase (MTB) : %6d ps\n", mtb_psec);
+ ddr_print("Minimum Cycle Time (tCKmin) : %6d ps (%ld MT/s)\n", tCKmin,
pretty_psecs_to_mts(tCKmin));
- ddr_print("Minimum CAS Latency Time (tAAmin) : %6d ps\n", tAAmin);
- ddr_print("Write Recovery Time (tWR) : %6d ps\n", twr);
- ddr_print("Minimum RAS to CAS delay (tRCD) : %6d ps\n", trcd);
- ddr_print("Minimum Row Active to Row Active delay (tRRD) : %6d ps\n", trrd);
- ddr_print("Minimum Row Precharge Delay (tRP) : %6d ps\n", trp);
- ddr_print("Minimum Active to Precharge (tRAS) : %6d ps\n", tras);
- ddr_print("Minimum Active to Active/Refresh Delay (tRC) : %6d ps\n", trc);
- ddr_print("Minimum Refresh Recovery Delay (tRFC) : %6d ps\n", trfc);
- ddr_print("Internal write to read command delay (tWTR) : %6d ps\n", twtr);
- ddr_print("Min Internal Rd to Precharge Cmd Delay (tRTP) : %6d ps\n", trtp);
- ddr_print("Minimum Four Activate Window Delay (tFAW) : %6d ps\n", tfaw);
+ ddr_print("Minimum CAS Latency Time (tAAmin) : %6d ps\n", tAAmin);
+ ddr_print("Write Recovery Time (tWR) : %6d ps\n", twr);
+ ddr_print("Minimum RAS to CAS delay (tRCD) : %6d ps\n", trcd);
+ ddr_print("Minimum Row Active to Row Active delay (tRRD) : %6d ps\n", trrd);
+ ddr_print("Minimum Row Precharge Delay (tRP) : %6d ps\n", trp);
+ ddr_print("Minimum Active to Precharge (tRAS) : %6d ps\n", tras);
+ ddr_print("Minimum Active to Active/Refresh Delay (tRC) : %6d ps\n", trc);
+ ddr_print("Minimum Refresh Recovery Delay (tRFC) : %6d ps\n", trfc);
+ ddr_print("Internal write to read command delay (tWTR) : %6d ps\n", twtr);
+ ddr_print("Min Internal Rd to Precharge Cmd Delay (tRTP) : %6d ps\n", trtp);
+ ddr_print("Minimum Four Activate Window Delay (tFAW) : %6d ps\n", tfaw);
}
@@ -3559,13 +3566,13 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
tclk_psecs = tCKmin;
if (tclk_psecs < (uint64_t)tCKmin) {
- ddr_print("WARNING!!!!: DDR Clock Rate (tCLK: %ld) exceeds DIMM specifications (tCKmin: %ld)!!!!\n",
+ ddr_print("WARNING!!!!: DDR Clock Rate (tCLK: %lld) exceeds DIMM specifications (tCKmin: %lld)!!!!\n",
tclk_psecs, (uint64_t)tCKmin);
}
- ddr_print("DDR Clock Rate (tCLK) : %6lu ps\n", tclk_psecs);
- ddr_print("Core Clock Rate (eCLK) : %6lu ps\n", eclk_psecs);
+ ddr_print("DDR Clock Rate (tCLK) : %6llu ps\n", tclk_psecs);
+ ddr_print("Core Clock Rate (eCLK) : %6llu ps\n", eclk_psecs);
if ((s = lookup_env_parameter("ddr_use_ecc")) != NULL) {
use_ecc = !!strtoul(s, NULL, 0);
@@ -3633,7 +3640,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if (CL < min_cas_latency) {
uint64_t adjusted_tclk = tAAmin / min_cas_latency;
CL = min_cas_latency;
- ddr_print("Slow clock speed. Adjusting timing: tClk = %lu, Adjusted tClk = %ld\n",
+ ddr_print("Slow clock speed. Adjusting timing: tClk = %llu, Adjusted tClk = %lld\n",
tclk_psecs, adjusted_tclk);
tclk_psecs = adjusted_tclk;
}
@@ -3832,12 +3839,12 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
VB_PRT(VBL_DEV, "%-45s : %d\n", "MODE32B (init)", lmc_config.s.mode32b);
lmc_config.s.mode_x4dev = (dram_width == 4) ? 1 : 0;
- lmc_config.s.bg2_enable = ((ddr_type == DDR4_DRAM) && (dram_width == 16)) ? 0 : 1;
+ lmc_config.s.bg2_enable = ((ddr_type == DDR4_DRAM) && (dram_width == 16)) ? 0 : 1;
if ((s = lookup_env_parameter_ull("ddr_config")) != NULL) {
lmc_config.u = strtoull(s, NULL, 0);
}
- ddr_print("LMC_CONFIG : 0x%016lx\n", lmc_config.u);
+ ddr_print("LMC_CONFIG : 0x%016llx\n", lmc_config.u);
DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
}
@@ -3905,7 +3912,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if ((s = lookup_env_parameter_ull("ddr_control")) != NULL) {
lmc_control.u = strtoull(s, NULL, 0);
}
- ddr_print("LMC_CONTROL : 0x%016lx\n", lmc_control.u);
+ ddr_print("LMC_CONTROL : 0x%016llx\n", lmc_control.u);
DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
}
@@ -3934,23 +3941,23 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if (ddr_type == DDR4_DRAM) {
lmc_timing_params0.s.tzqcs = divide_roundup(128*tclk_psecs, (16*tclk_psecs)); /* Always 8. */
- lmc_timing_params0.s.tcke = divide_roundup(max(3*tclk_psecs, (uint64_t) DDR3_tCKE), tclk_psecs) - 1;
- lmc_timing_params0.s.tmrd = divide_roundup((DDR4_tMRD*tclk_psecs), tclk_psecs) - 1;
- //lmc_timing_params0.s.tmod = divide_roundup(max(24*tclk_psecs, 15000ull), tclk_psecs) - 1;
- lmc_timing_params0.s.tmod = 25; /* 25 is the max allowed */
- lmc_timing_params0.s.tdllk = divide_roundup(DDR4_tDLLK, 256);
+ lmc_timing_params0.s.tcke = divide_roundup(max(3*tclk_psecs, (uint64_t) DDR3_tCKE), tclk_psecs) - 1;
+ lmc_timing_params0.s.tmrd = divide_roundup((DDR4_tMRD*tclk_psecs), tclk_psecs) - 1;
+ //lmc_timing_params0.s.tmod = divide_roundup(max(24*tclk_psecs, 15000ull), tclk_psecs) - 1;
+ lmc_timing_params0.s.tmod = 25; /* 25 is the max allowed */
+ lmc_timing_params0.s.tdllk = divide_roundup(DDR4_tDLLK, 256);
} else {
lmc_timing_params0.s.tzqcs = divide_roundup(max(64*tclk_psecs, DDR3_ZQCS), (16*tclk_psecs));
- lmc_timing_params0.s.tcke = divide_roundup(DDR3_tCKE, tclk_psecs) - 1;
- lmc_timing_params0.s.tmrd = divide_roundup((DDR3_tMRD*tclk_psecs), tclk_psecs) - 1;
- lmc_timing_params0.s.tmod = divide_roundup(max(12*tclk_psecs, 15000ull), tclk_psecs) - 1;
- lmc_timing_params0.s.tdllk = divide_roundup(DDR3_tDLLK, 256);
+ lmc_timing_params0.s.tcke = divide_roundup(DDR3_tCKE, tclk_psecs) - 1;
+ lmc_timing_params0.s.tmrd = divide_roundup((DDR3_tMRD*tclk_psecs), tclk_psecs) - 1;
+ lmc_timing_params0.s.tmod = divide_roundup(max(12*tclk_psecs, 15000ull), tclk_psecs) - 1;
+ lmc_timing_params0.s.tdllk = divide_roundup(DDR3_tDLLK, 256);
}
if ((s = lookup_env_parameter_ull("ddr_timing_params0")) != NULL) {
lmc_timing_params0.u = strtoull(s, NULL, 0);
}
- ddr_print("TIMING_PARAMS0 : 0x%016lx\n", lmc_timing_params0.u);
+ ddr_print("TIMING_PARAMS0 : 0x%016llx\n", lmc_timing_params0.u);
DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS0(ddr_interface_num), lmc_timing_params0.u);
}
@@ -3964,26 +3971,26 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_timing_params1.s.tras = divide_roundup(tras, tclk_psecs) - 1;
- // NOTE: this is reworked for pass 2.x
- temp_trcd = divide_roundup(trcd, tclk_psecs);
+ // NOTE: this is reworked for pass 2.x
+ temp_trcd = divide_roundup(trcd, tclk_psecs);
#if 1
if (temp_trcd > 15)
ddr_print("TIMING_PARAMS1[trcd]: need extension bit for 0x%x\n", temp_trcd);
#endif
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_trcd > 15)) {
- /* Let .trcd=0 serve as a flag that the field has
- overflowed. Must use Additive Latency mode as a
- workaround. */
- temp_trcd = 0;
- }
- lmc_timing_params1.s.trcd = temp_trcd & 0x0f;
- lmc_timing_params1.s.trcd_ext = (temp_trcd >> 4) & 1;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_trcd > 15)) {
+ /* Let .trcd=0 serve as a flag that the field has
+ overflowed. Must use Additive Latency mode as a
+ workaround. */
+ temp_trcd = 0;
+ }
+ lmc_timing_params1.s.trcd = temp_trcd & 0x0f;
+ lmc_timing_params1.s.trcd_ext = (temp_trcd >> 4) & 1;
lmc_timing_params1.s.twtr = divide_roundup(twtr, tclk_psecs) - 1;
lmc_timing_params1.s.trfc = divide_roundup(trfc, 8*tclk_psecs);
- // workaround needed for all THUNDER chips thru T88 Pass 2.0,
- // but not 81xx and 83xx...
+ // workaround needed for all THUNDER chips thru T88 Pass 2.0,
+ // but not 81xx and 83xx...
if ((ddr_type == DDR4_DRAM) && CAVIUM_IS_MODEL(CAVIUM_CN88XX)) {
/* Workaround bug 24006. Use Trrd_l. */
lmc_timing_params1.s.trrd = divide_roundup(ddr4_tRRD_Lmin, tclk_psecs) - 2;
@@ -3999,17 +4006,17 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
** tXP = max( 3nCK, 6.0 ns) DDR3-2133 tCLK = 937 psec
*/
txp = (tclk_psecs < 1875) ? 6000 : 7500;
- // NOTE: this is reworked for pass 2.x
- int temp_txp = divide_roundup(max(3*tclk_psecs, (unsigned)txp), tclk_psecs) - 1;
+ // NOTE: this is reworked for pass 2.x
+ int temp_txp = divide_roundup(max(3*tclk_psecs, (unsigned)txp), tclk_psecs) - 1;
#if 1
if (temp_txp > 7)
ddr_print("TIMING_PARAMS1[txp]: need extension bit for 0x%x\n", temp_txp);
#endif
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_txp > 7)) {
- temp_txp = 7; // max it out
- }
- lmc_timing_params1.s.txp = temp_txp & 7;
- lmc_timing_params1.s.txp_ext = (temp_txp >> 3) & 1;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_txp > 7)) {
+ temp_txp = 7; // max it out
+ }
+ lmc_timing_params1.s.txp = temp_txp & 7;
+ lmc_timing_params1.s.txp_ext = (temp_txp >> 3) & 1;
lmc_timing_params1.s.twlmrd = divide_roundup(DDR3_tWLMRD*tclk_psecs, 4*tclk_psecs);
lmc_timing_params1.s.twldqsen = divide_roundup(DDR3_tWLDQSEN*tclk_psecs, 4*tclk_psecs);
@@ -4045,7 +4052,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if ((s = lookup_env_parameter_ull("ddr_timing_params1")) != NULL) {
lmc_timing_params1.u = strtoull(s, NULL, 0);
}
- ddr_print("TIMING_PARAMS1 : 0x%016lx\n", lmc_timing_params1.u);
+ ddr_print("TIMING_PARAMS1 : 0x%016llx\n", lmc_timing_params1.u);
DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num), lmc_timing_params1.u);
}
@@ -4055,34 +4062,34 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
bdk_lmcx_timing_params2_t lmc_timing_params2;
lmc_timing_params1.u = BDK_CSR_READ(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num));
lmc_timing_params2.u = BDK_CSR_READ(node, BDK_LMCX_TIMING_PARAMS2(ddr_interface_num));
- ddr_print("TIMING_PARAMS2 : 0x%016lx\n", lmc_timing_params2.u);
+ ddr_print("TIMING_PARAMS2 : 0x%016llx\n", lmc_timing_params2.u);
//lmc_timing_params2.s.trrd_l = divide_roundup(ddr4_tRRD_Lmin, tclk_psecs) - 1;
- // NOTE: this is reworked for pass 2.x
- int temp_trrd_l = divide_roundup(ddr4_tRRD_Lmin, tclk_psecs) - 2;
+ // NOTE: this is reworked for pass 2.x
+ int temp_trrd_l = divide_roundup(ddr4_tRRD_Lmin, tclk_psecs) - 2;
#if 1
if (temp_trrd_l > 7)
ddr_print("TIMING_PARAMS2[trrd_l]: need extension bit for 0x%x\n", temp_trrd_l);
#endif
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_trrd_l > 7)) {
- temp_trrd_l = 7; // max it out
- }
- lmc_timing_params2.s.trrd_l = temp_trrd_l & 7;
- lmc_timing_params2.s.trrd_l_ext = (temp_trrd_l >> 3) & 1;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && (temp_trrd_l > 7)) {
+ temp_trrd_l = 7; // max it out
+ }
+ lmc_timing_params2.s.trrd_l = temp_trrd_l & 7;
+ lmc_timing_params2.s.trrd_l_ext = (temp_trrd_l >> 3) & 1;
lmc_timing_params2.s.twtr_l = divide_nint(max(4*tclk_psecs, 7500ull), tclk_psecs) - 1; // correct for 1600-2400
lmc_timing_params2.s.t_rw_op_max = 7;
lmc_timing_params2.s.trtp = divide_roundup(max(4*tclk_psecs, 7500ull), tclk_psecs) - 1;
- ddr_print("TIMING_PARAMS2 : 0x%016lx\n", lmc_timing_params2.u);
+ ddr_print("TIMING_PARAMS2 : 0x%016llx\n", lmc_timing_params2.u);
DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS2(ddr_interface_num), lmc_timing_params2.u);
/* Workaround Errata 25823 - LMC: Possible DDR4 tWTR_L not met
for Write-to-Read operations to the same Bank Group */
if (lmc_timing_params1.s.twtr < (lmc_timing_params2.s.twtr_l - 4)) {
lmc_timing_params1.s.twtr = lmc_timing_params2.s.twtr_l - 4;
- ddr_print("ERRATA 25823: NEW: TWTR: %d, TWTR_L: %d\n", lmc_timing_params1.s.twtr, lmc_timing_params2.s.twtr_l);
- ddr_print("TIMING_PARAMS1 : 0x%016lx\n", lmc_timing_params1.u);
+ ddr_print("ERRATA 25823: NEW: TWTR: %d, TWTR_L: %d\n", lmc_timing_params1.s.twtr, lmc_timing_params2.s.twtr_l);
+ ddr_print("TIMING_PARAMS1 : 0x%016llx\n", lmc_timing_params1.u);
DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num), lmc_timing_params1.u);
}
}
@@ -4095,17 +4102,17 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
if (ddr_type == DDR4_DRAM) {
- lmc_modereg_params0.s.cwl = 0; /* 1600 (1250ps) */
- if (tclk_psecs < 1250)
- lmc_modereg_params0.s.cwl = 1; /* 1866 (1072ps) */
- if (tclk_psecs < 1072)
- lmc_modereg_params0.s.cwl = 2; /* 2133 (938ps) */
- if (tclk_psecs < 938)
- lmc_modereg_params0.s.cwl = 3; /* 2400 (833ps) */
- if (tclk_psecs < 833)
- lmc_modereg_params0.s.cwl = 4; /* 2666 (750ps) */
- if (tclk_psecs < 750)
- lmc_modereg_params0.s.cwl = 5; /* 3200 (625ps) */
+ lmc_modereg_params0.s.cwl = 0; /* 1600 (1250ps) */
+ if (tclk_psecs < 1250)
+ lmc_modereg_params0.s.cwl = 1; /* 1866 (1072ps) */
+ if (tclk_psecs < 1072)
+ lmc_modereg_params0.s.cwl = 2; /* 2133 (938ps) */
+ if (tclk_psecs < 938)
+ lmc_modereg_params0.s.cwl = 3; /* 2400 (833ps) */
+ if (tclk_psecs < 833)
+ lmc_modereg_params0.s.cwl = 4; /* 2666 (750ps) */
+ if (tclk_psecs < 750)
+ lmc_modereg_params0.s.cwl = 5; /* 3200 (625ps) */
} else {
/*
** CSR CWL CAS write Latency
@@ -4135,7 +4142,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_modereg_params0.s.cwl = 6;
if (tclk_psecs < 833)
lmc_modereg_params0.s.cwl = 7;
- }
+ }
if ((s = lookup_env_parameter("ddr_cwl")) != NULL) {
lmc_modereg_params0.s.cwl = strtoul(s, NULL, 0) - 5;
@@ -4167,54 +4174,54 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
if (ddr_type == DDR4_DRAM) {
- lmc_modereg_params0.s.cl = 0x0;
- if (CL > 9)
- lmc_modereg_params0.s.cl = 0x1;
- if (CL > 10)
- lmc_modereg_params0.s.cl = 0x2;
- if (CL > 11)
- lmc_modereg_params0.s.cl = 0x3;
- if (CL > 12)
- lmc_modereg_params0.s.cl = 0x4;
- if (CL > 13)
- lmc_modereg_params0.s.cl = 0x5;
- if (CL > 14)
- lmc_modereg_params0.s.cl = 0x6;
- if (CL > 15)
- lmc_modereg_params0.s.cl = 0x7;
- if (CL > 16)
- lmc_modereg_params0.s.cl = 0x8;
- if (CL > 18)
- lmc_modereg_params0.s.cl = 0x9;
- if (CL > 20)
- lmc_modereg_params0.s.cl = 0xA;
- if (CL > 24)
- lmc_modereg_params0.s.cl = 0xB;
+ lmc_modereg_params0.s.cl = 0x0;
+ if (CL > 9)
+ lmc_modereg_params0.s.cl = 0x1;
+ if (CL > 10)
+ lmc_modereg_params0.s.cl = 0x2;
+ if (CL > 11)
+ lmc_modereg_params0.s.cl = 0x3;
+ if (CL > 12)
+ lmc_modereg_params0.s.cl = 0x4;
+ if (CL > 13)
+ lmc_modereg_params0.s.cl = 0x5;
+ if (CL > 14)
+ lmc_modereg_params0.s.cl = 0x6;
+ if (CL > 15)
+ lmc_modereg_params0.s.cl = 0x7;
+ if (CL > 16)
+ lmc_modereg_params0.s.cl = 0x8;
+ if (CL > 18)
+ lmc_modereg_params0.s.cl = 0x9;
+ if (CL > 20)
+ lmc_modereg_params0.s.cl = 0xA;
+ if (CL > 24)
+ lmc_modereg_params0.s.cl = 0xB;
} else {
- lmc_modereg_params0.s.cl = 0x2;
- if (CL > 5)
- lmc_modereg_params0.s.cl = 0x4;
- if (CL > 6)
- lmc_modereg_params0.s.cl = 0x6;
- if (CL > 7)
- lmc_modereg_params0.s.cl = 0x8;
- if (CL > 8)
- lmc_modereg_params0.s.cl = 0xA;
- if (CL > 9)
- lmc_modereg_params0.s.cl = 0xC;
- if (CL > 10)
- lmc_modereg_params0.s.cl = 0xE;
- if (CL > 11)
- lmc_modereg_params0.s.cl = 0x1;
- if (CL > 12)
- lmc_modereg_params0.s.cl = 0x3;
- if (CL > 13)
- lmc_modereg_params0.s.cl = 0x5;
- if (CL > 14)
- lmc_modereg_params0.s.cl = 0x7;
- if (CL > 15)
- lmc_modereg_params0.s.cl = 0x9;
- }
+ lmc_modereg_params0.s.cl = 0x2;
+ if (CL > 5)
+ lmc_modereg_params0.s.cl = 0x4;
+ if (CL > 6)
+ lmc_modereg_params0.s.cl = 0x6;
+ if (CL > 7)
+ lmc_modereg_params0.s.cl = 0x8;
+ if (CL > 8)
+ lmc_modereg_params0.s.cl = 0xA;
+ if (CL > 9)
+ lmc_modereg_params0.s.cl = 0xC;
+ if (CL > 10)
+ lmc_modereg_params0.s.cl = 0xE;
+ if (CL > 11)
+ lmc_modereg_params0.s.cl = 0x1;
+ if (CL > 12)
+ lmc_modereg_params0.s.cl = 0x3;
+ if (CL > 13)
+ lmc_modereg_params0.s.cl = 0x5;
+ if (CL > 14)
+ lmc_modereg_params0.s.cl = 0x7;
+ if (CL > 15)
+ lmc_modereg_params0.s.cl = 0x9;
+ }
lmc_modereg_params0.s.rbt = 0; /* Read Only. */
lmc_modereg_params0.s.tm = 0;
@@ -4223,34 +4230,34 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
param = divide_roundup(twr, tclk_psecs);
if (ddr_type == DDR4_DRAM) { /* DDR4 */
- lmc_modereg_params0.s.wrp = 1;
- if (param > 12)
- lmc_modereg_params0.s.wrp = 2;
- if (param > 14)
- lmc_modereg_params0.s.wrp = 3;
- if (param > 16)
- lmc_modereg_params0.s.wrp = 4;
- if (param > 18)
- lmc_modereg_params0.s.wrp = 5;
- if (param > 20)
- lmc_modereg_params0.s.wrp = 6;
- if (param > 24) /* RESERVED in DDR4 spec */
- lmc_modereg_params0.s.wrp = 7;
+ lmc_modereg_params0.s.wrp = 1;
+ if (param > 12)
+ lmc_modereg_params0.s.wrp = 2;
+ if (param > 14)
+ lmc_modereg_params0.s.wrp = 3;
+ if (param > 16)
+ lmc_modereg_params0.s.wrp = 4;
+ if (param > 18)
+ lmc_modereg_params0.s.wrp = 5;
+ if (param > 20)
+ lmc_modereg_params0.s.wrp = 6;
+ if (param > 24) /* RESERVED in DDR4 spec */
+ lmc_modereg_params0.s.wrp = 7;
} else { /* DDR3 */
- lmc_modereg_params0.s.wrp = 1;
- if (param > 5)
- lmc_modereg_params0.s.wrp = 2;
- if (param > 6)
- lmc_modereg_params0.s.wrp = 3;
- if (param > 7)
- lmc_modereg_params0.s.wrp = 4;
- if (param > 8)
- lmc_modereg_params0.s.wrp = 5;
- if (param > 10)
- lmc_modereg_params0.s.wrp = 6;
- if (param > 12)
- lmc_modereg_params0.s.wrp = 7;
- }
+ lmc_modereg_params0.s.wrp = 1;
+ if (param > 5)
+ lmc_modereg_params0.s.wrp = 2;
+ if (param > 6)
+ lmc_modereg_params0.s.wrp = 3;
+ if (param > 7)
+ lmc_modereg_params0.s.wrp = 4;
+ if (param > 8)
+ lmc_modereg_params0.s.wrp = 5;
+ if (param > 10)
+ lmc_modereg_params0.s.wrp = 6;
+ if (param > 12)
+ lmc_modereg_params0.s.wrp = 7;
+ }
lmc_modereg_params0.s.ppd = 0;
@@ -4264,7 +4271,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if ((s = lookup_env_parameter_ull("ddr_modereg_params0")) != NULL) {
lmc_modereg_params0.u = strtoull(s, NULL, 0);
}
- ddr_print("MODEREG_PARAMS0 : 0x%016lx\n", lmc_modereg_params0.u);
+ ddr_print("MODEREG_PARAMS0 : 0x%016llx\n", lmc_modereg_params0.u);
DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
}
@@ -4408,61 +4415,61 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_modereg_params1.s.dic_01,
lmc_modereg_params1.s.dic_00);
- ddr_print("MODEREG_PARAMS1 : 0x%016lx\n", lmc_modereg_params1.u);
+ ddr_print("MODEREG_PARAMS1 : 0x%016llx\n", lmc_modereg_params1.u);
DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num), lmc_modereg_params1.u);
} /* LMC(0)_MODEREG_PARAMS1 */
/* LMC(0)_MODEREG_PARAMS2 */
if (ddr_type == DDR4_DRAM) {
- bdk_lmcx_modereg_params2_t lmc_modereg_params2;
- lmc_modereg_params2.u = odt_config[odt_idx].odt_mask2.u;
-
- for (i=0; i<4; ++i) {
- uint64_t value;
- if ((s = lookup_env_parameter("ddr_rtt_park_%1d%1d", !!(i&2), !!(i&1))) != NULL) {
- value = strtoul(s, NULL, 0);
- lmc_modereg_params2.u &= ~((uint64_t)0x7 << (i*10+0));
- lmc_modereg_params2.u |= ( (value & 0x7) << (i*10+0));
- }
- }
-
- if ((s = lookup_env_parameter("ddr_rtt_park")) != NULL) {
- uint64_t value = strtoul(s, NULL, 0);
- for (i=0; i<4; ++i) {
- lmc_modereg_params2.u &= ~((uint64_t)0x7 << (i*10+0));
- lmc_modereg_params2.u |= ( (value & 0x7) << (i*10+0));
- }
- }
-
- if ((s = lookup_env_parameter_ull("ddr_modereg_params2")) != NULL) {
- lmc_modereg_params2.u = strtoull(s, NULL, 0);
- }
-
- ddr_print("RTT_PARK %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
- imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_11],
- imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_10],
- imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_01],
- imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_00],
- lmc_modereg_params2.s.rtt_park_11,
- lmc_modereg_params2.s.rtt_park_10,
- lmc_modereg_params2.s.rtt_park_01,
- lmc_modereg_params2.s.rtt_park_00);
-
- ddr_print("%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_RANGE",
- lmc_modereg_params2.s.vref_range_11,
- lmc_modereg_params2.s.vref_range_10,
- lmc_modereg_params2.s.vref_range_01,
- lmc_modereg_params2.s.vref_range_00);
-
- ddr_print("%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_VALUE",
- lmc_modereg_params2.s.vref_value_11,
- lmc_modereg_params2.s.vref_value_10,
- lmc_modereg_params2.s.vref_value_01,
- lmc_modereg_params2.s.vref_value_00);
-
- ddr_print("MODEREG_PARAMS2 : 0x%016lx\n", lmc_modereg_params2.u);
- DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num), lmc_modereg_params2.u);
+ bdk_lmcx_modereg_params2_t lmc_modereg_params2;
+ lmc_modereg_params2.u = odt_config[odt_idx].odt_mask2.u;
+
+ for (i=0; i<4; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_rtt_park_%1d%1d", !!(i&2), !!(i&1))) != NULL) {
+ value = strtoul(s, NULL, 0);
+ lmc_modereg_params2.u &= ~((uint64_t)0x7 << (i*10+0));
+ lmc_modereg_params2.u |= ( (value & 0x7) << (i*10+0));
+ }
+ }
+
+ if ((s = lookup_env_parameter("ddr_rtt_park")) != NULL) {
+ uint64_t value = strtoul(s, NULL, 0);
+ for (i=0; i<4; ++i) {
+ lmc_modereg_params2.u &= ~((uint64_t)0x7 << (i*10+0));
+ lmc_modereg_params2.u |= ( (value & 0x7) << (i*10+0));
+ }
+ }
+
+ if ((s = lookup_env_parameter_ull("ddr_modereg_params2")) != NULL) {
+ lmc_modereg_params2.u = strtoull(s, NULL, 0);
+ }
+
+ ddr_print("RTT_PARK %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_11],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_10],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_01],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_00],
+ lmc_modereg_params2.s.rtt_park_11,
+ lmc_modereg_params2.s.rtt_park_10,
+ lmc_modereg_params2.s.rtt_park_01,
+ lmc_modereg_params2.s.rtt_park_00);
+
+ ddr_print("%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_RANGE",
+ lmc_modereg_params2.s.vref_range_11,
+ lmc_modereg_params2.s.vref_range_10,
+ lmc_modereg_params2.s.vref_range_01,
+ lmc_modereg_params2.s.vref_range_00);
+
+ ddr_print("%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_VALUE",
+ lmc_modereg_params2.s.vref_value_11,
+ lmc_modereg_params2.s.vref_value_10,
+ lmc_modereg_params2.s.vref_value_01,
+ lmc_modereg_params2.s.vref_value_00);
+
+ ddr_print("MODEREG_PARAMS2 : 0x%016llx\n", lmc_modereg_params2.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num), lmc_modereg_params2.u);
} /* LMC(0)_MODEREG_PARAMS2 */
@@ -4503,7 +4510,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_modereg_params3.s.xrank_add_tccd_s = delay;
}
- ddr_print("MODEREG_PARAMS3 : 0x%016lx\n", lmc_modereg_params3.u);
+ ddr_print("MODEREG_PARAMS3 : 0x%016llx\n", lmc_modereg_params3.u);
DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS3(ddr_interface_num), lmc_modereg_params3.u);
} /* LMC(0)_MODEREG_PARAMS3 */
@@ -4527,7 +4534,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if ((s = lookup_env_parameter_ull("ddr_nxm")) != NULL) {
lmc_nxm.u = strtoull(s, NULL, 0);
}
- ddr_print("LMC_NXM : 0x%016lx\n", lmc_nxm.u);
+ ddr_print("LMC_NXM : 0x%016llx\n", lmc_nxm.u);
DRAM_CSR_WRITE(node, BDK_LMCX_NXM(ddr_interface_num), lmc_nxm.u);
}
@@ -4540,7 +4547,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_wodt_mask.u = strtoull(s, NULL, 0);
}
- ddr_print("WODT_MASK : 0x%016lx\n", lmc_wodt_mask.u);
+ ddr_print("WODT_MASK : 0x%016llx\n", lmc_wodt_mask.u);
DRAM_CSR_WRITE(node, BDK_LMCX_WODT_MASK(ddr_interface_num), lmc_wodt_mask.u);
}
@@ -4554,8 +4561,8 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_rodt_mask.u = strtoull(s, NULL, 0);
}
- ddr_print("%-45s : 0x%016lx\n", "RODT_MASK", lmc_rodt_mask.u);
- DRAM_CSR_WRITE(node, BDK_LMCX_RODT_MASK(ddr_interface_num), lmc_rodt_mask.u);
+ ddr_print("%-45s : 0x%016llx\n", "RODT_MASK", lmc_rodt_mask.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_RODT_MASK(ddr_interface_num), lmc_rodt_mask.u);
dyn_rtt_nom_mask = 0;
for (rankx = 0; rankx < dimm_count * 4;rankx++) {
@@ -4583,12 +4590,12 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
- comp_ctl2.s.dqx_ctl = odt_config[odt_idx].odt_ena;
- comp_ctl2.s.ck_ctl = (custom_lmc_config->ck_ctl == 0) ? 4 : custom_lmc_config->ck_ctl; /* Default 4=34.3 ohm */
- comp_ctl2.s.cmd_ctl = (custom_lmc_config->cmd_ctl == 0) ? 4 : custom_lmc_config->cmd_ctl; /* Default 4=34.3 ohm */
- comp_ctl2.s.control_ctl = (custom_lmc_config->ctl_ctl == 0) ? 4 : custom_lmc_config->ctl_ctl; /* Default 4=34.3 ohm */
+ comp_ctl2.s.dqx_ctl = odt_config[odt_idx].odt_ena;
+ comp_ctl2.s.ck_ctl = (custom_lmc_config->ck_ctl == 0) ? 4 : custom_lmc_config->ck_ctl; /* Default 4=34.3 ohm */
+ comp_ctl2.s.cmd_ctl = (custom_lmc_config->cmd_ctl == 0) ? 4 : custom_lmc_config->cmd_ctl; /* Default 4=34.3 ohm */
+ comp_ctl2.s.control_ctl = (custom_lmc_config->ctl_ctl == 0) ? 4 : custom_lmc_config->ctl_ctl; /* Default 4=34.3 ohm */
- // NOTE: these are now done earlier, in Step 6.9.3
+ // NOTE: these are now done earlier, in Step 6.9.3
// comp_ctl2.s.ntune_offset = 0;
// comp_ctl2.s.ptune_offset = 0;
@@ -4607,12 +4614,12 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
comp_ctl2.s.rodt_ctl = default_rodt_ctl;
- // if DDR4, force CK_CTL to 26 ohms if it is currently 34 ohms, and DCLK speed is 1 GHz or more...
- if ((ddr_type == DDR4_DRAM) && (comp_ctl2.s.ck_ctl == ddr4_driver_34_ohm) && (ddr_hertz >= 1000000000)) {
- comp_ctl2.s.ck_ctl = ddr4_driver_26_ohm; // lowest for DDR4 is 26 ohms
- ddr_print("Forcing DDR4 COMP_CTL2[CK_CTL] to %d, %d ohms\n", comp_ctl2.s.ck_ctl,
- imp_values->drive_strength[comp_ctl2.s.ck_ctl]);
- }
+ // if DDR4, force CK_CTL to 26 ohms if it is currently 34 ohms, and DCLK speed is 1 GHz or more...
+ if ((ddr_type == DDR4_DRAM) && (comp_ctl2.s.ck_ctl == ddr4_driver_34_ohm) && (ddr_hertz >= 1000000000)) {
+ comp_ctl2.s.ck_ctl = ddr4_driver_26_ohm; // lowest for DDR4 is 26 ohms
+ ddr_print("Forcing DDR4 COMP_CTL2[CK_CTL] to %d, %d ohms\n", comp_ctl2.s.ck_ctl,
+ imp_values->drive_strength[comp_ctl2.s.ck_ctl]);
+ }
if ((s = lookup_env_parameter("ddr_ck_ctl")) != NULL) {
comp_ctl2.s.ck_ctl = strtoul(s, NULL, 0);
@@ -4623,7 +4630,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
if ((s = lookup_env_parameter("ddr_control_ctl")) != NULL) {
- comp_ctl2.s.control_ctl = strtoul(s, NULL, 0);
+ comp_ctl2.s.control_ctl = strtoul(s, NULL, 0);
}
if ((s = lookup_env_parameter("ddr_dqx_ctl")) != NULL) {
@@ -4631,13 +4638,13 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
ddr_print("%-45s : %d, %d ohms\n", "DQX_CTL ", comp_ctl2.s.dqx_ctl,
- imp_values->dqx_strength [comp_ctl2.s.dqx_ctl ]);
+ imp_values->dqx_strength [comp_ctl2.s.dqx_ctl ]);
ddr_print("%-45s : %d, %d ohms\n", "CK_CTL ", comp_ctl2.s.ck_ctl,
- imp_values->drive_strength[comp_ctl2.s.ck_ctl ]);
+ imp_values->drive_strength[comp_ctl2.s.ck_ctl ]);
ddr_print("%-45s : %d, %d ohms\n", "CMD_CTL ", comp_ctl2.s.cmd_ctl,
- imp_values->drive_strength[comp_ctl2.s.cmd_ctl ]);
+ imp_values->drive_strength[comp_ctl2.s.cmd_ctl ]);
ddr_print("%-45s : %d, %d ohms\n", "CONTROL_CTL ", comp_ctl2.s.control_ctl,
- imp_values->drive_strength[comp_ctl2.s.control_ctl]);
+ imp_values->drive_strength[comp_ctl2.s.control_ctl]);
ddr_print("Read ODT_CTL : 0x%x (%d ohms)\n",
comp_ctl2.s.rodt_ctl, imp_values->rodt_ohms[comp_ctl2.s.rodt_ctl]);
@@ -4656,7 +4663,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
node, ddr_interface_num, lmc_phy_ctl.cn81xx.c1_sel);
}
- ddr_print("PHY_CTL : 0x%016lx\n", lmc_phy_ctl.u);
+ ddr_print("PHY_CTL : 0x%016llx\n", lmc_phy_ctl.u);
DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(ddr_interface_num), lmc_phy_ctl.u);
}
@@ -4665,11 +4672,11 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
bdk_lmcx_dimm_ctl_t lmc_dimm_ctl;
for (didx = 0; didx < (unsigned)dimm_count; ++didx) {
- bdk_lmcx_dimmx_params_t lmc_dimmx_params;
- int dimm = didx;
- int rc;
+ bdk_lmcx_dimmx_params_t lmc_dimmx_params;
+ int dimm = didx;
+ int rc;
- lmc_dimmx_params.u = BDK_CSR_READ(node, BDK_LMCX_DIMMX_PARAMS(ddr_interface_num, dimm));
+ lmc_dimmx_params.u = BDK_CSR_READ(node, BDK_LMCX_DIMMX_PARAMS(ddr_interface_num, dimm));
if (ddr_type == DDR4_DRAM) {
@@ -4720,8 +4727,8 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_dimmx_params.s.rc11 = 0;
lmc_dimmx_params.s.rc12 = 0;
- lmc_dimmx_params.s.rc13 = (spd_dimm_type == 4) ? 0 : 4; /* 0=LRDIMM, 1=RDIMM */
- lmc_dimmx_params.s.rc13 |= (ddr_type == DDR4_DRAM) ? (spd_addr_mirror << 3) : 0;
+ lmc_dimmx_params.s.rc13 = (spd_dimm_type == 4) ? 0 : 4; /* 0=LRDIMM, 1=RDIMM */
+ lmc_dimmx_params.s.rc13 |= (ddr_type == DDR4_DRAM) ? (spd_addr_mirror << 3) : 0;
lmc_dimmx_params.s.rc14 = 0;
//lmc_dimmx_params.s.rc15 = 4; /* 0 nCK latency adder */
lmc_dimmx_params.s.rc15 = 0; /* 1 nCK latency adder */
@@ -4749,38 +4756,38 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_ddr4_dimm_ctl.s.ddr4_dimm0_wmask = 0x004;
lmc_ddr4_dimm_ctl.s.ddr4_dimm1_wmask = (dimm_count > 1) ? 0x004 : 0x0000;
- /*
- * Handle any overrides from envvars here...
- */
- if ((s = lookup_env_parameter("ddr_ddr4_params0")) != NULL) {
- lmc_dimmx_ddr4_params0.u = strtoul(s, NULL, 0);
- }
-
- if ((s = lookup_env_parameter("ddr_ddr4_params1")) != NULL) {
- lmc_dimmx_ddr4_params1.u = strtoul(s, NULL, 0);
- }
-
- if ((s = lookup_env_parameter("ddr_ddr4_dimm_ctl")) != NULL) {
- lmc_ddr4_dimm_ctl.u = strtoul(s, NULL, 0);
- }
-
- for (i=0; i<11; ++i) {
- uint64_t value;
- if ((s = lookup_env_parameter("ddr_ddr4_rc%1xx", i+1)) != NULL) {
- value = strtoul(s, NULL, 0);
- if (i < 8) {
- lmc_dimmx_ddr4_params0.u &= ~((uint64_t)0xff << (i*8));
- lmc_dimmx_ddr4_params0.u |= (value << (i*8));
- } else {
- lmc_dimmx_ddr4_params1.u &= ~((uint64_t)0xff << ((i-8)*8));
- lmc_dimmx_ddr4_params1.u |= (value << ((i-8)*8));
- }
- }
- }
-
- /*
- * write the final CSR values
- */
+ /*
+ * Handle any overrides from envvars here...
+ */
+ if ((s = lookup_env_parameter("ddr_ddr4_params0")) != NULL) {
+ lmc_dimmx_ddr4_params0.u = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_ddr4_params1")) != NULL) {
+ lmc_dimmx_ddr4_params1.u = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_ddr4_dimm_ctl")) != NULL) {
+ lmc_ddr4_dimm_ctl.u = strtoul(s, NULL, 0);
+ }
+
+ for (i=0; i<11; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_ddr4_rc%1xx", i+1)) != NULL) {
+ value = strtoul(s, NULL, 0);
+ if (i < 8) {
+ lmc_dimmx_ddr4_params0.u &= ~((uint64_t)0xff << (i*8));
+ lmc_dimmx_ddr4_params0.u |= (value << (i*8));
+ } else {
+ lmc_dimmx_ddr4_params1.u &= ~((uint64_t)0xff << ((i-8)*8));
+ lmc_dimmx_ddr4_params1.u |= (value << ((i-8)*8));
+ }
+ }
+ }
+
+ /*
+ * write the final CSR values
+ */
DRAM_CSR_WRITE(node, BDK_LMCX_DIMMX_DDR4_PARAMS0(ddr_interface_num, dimm), lmc_dimmx_ddr4_params0.u);
DRAM_CSR_WRITE(node, BDK_LMCX_DDR4_DIMM_CTL(ddr_interface_num), lmc_ddr4_dimm_ctl.u);
@@ -4802,71 +4809,71 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_dimmx_ddr4_params0.s.rc1x );
} else { /* if (ddr_type == DDR4_DRAM) */
- rc = read_spd(node, &dimm_config_table[didx], 69);
- lmc_dimmx_params.s.rc0 = (rc >> 0) & 0xf;
- lmc_dimmx_params.s.rc1 = (rc >> 4) & 0xf;
-
- rc = read_spd(node, &dimm_config_table[didx], 70);
- lmc_dimmx_params.s.rc2 = (rc >> 0) & 0xf;
- lmc_dimmx_params.s.rc3 = (rc >> 4) & 0xf;
-
- rc = read_spd(node, &dimm_config_table[didx], 71);
- lmc_dimmx_params.s.rc4 = (rc >> 0) & 0xf;
- lmc_dimmx_params.s.rc5 = (rc >> 4) & 0xf;
-
- rc = read_spd(node, &dimm_config_table[didx], 72);
- lmc_dimmx_params.s.rc6 = (rc >> 0) & 0xf;
- lmc_dimmx_params.s.rc7 = (rc >> 4) & 0xf;
-
- rc = read_spd(node, &dimm_config_table[didx], 73);
- lmc_dimmx_params.s.rc8 = (rc >> 0) & 0xf;
- lmc_dimmx_params.s.rc9 = (rc >> 4) & 0xf;
-
- rc = read_spd(node, &dimm_config_table[didx], 74);
- lmc_dimmx_params.s.rc10 = (rc >> 0) & 0xf;
- lmc_dimmx_params.s.rc11 = (rc >> 4) & 0xf;
-
- rc = read_spd(node, &dimm_config_table[didx], 75);
- lmc_dimmx_params.s.rc12 = (rc >> 0) & 0xf;
- lmc_dimmx_params.s.rc13 = (rc >> 4) & 0xf;
-
- rc = read_spd(node, &dimm_config_table[didx], 76);
- lmc_dimmx_params.s.rc14 = (rc >> 0) & 0xf;
- lmc_dimmx_params.s.rc15 = (rc >> 4) & 0xf;
-
-
- if ((s = lookup_env_parameter("ddr_clk_drive")) != NULL) {
- if (strcmp(s,"light") == 0) {
- lmc_dimmx_params.s.rc5 = 0x0; /* Light Drive */
- }
- if (strcmp(s,"moderate") == 0) {
- lmc_dimmx_params.s.rc5 = 0x5; /* Moderate Drive */
- }
- if (strcmp(s,"strong") == 0) {
- lmc_dimmx_params.s.rc5 = 0xA; /* Strong Drive */
- }
- }
-
- if ((s = lookup_env_parameter("ddr_cmd_drive")) != NULL) {
- if (strcmp(s,"light") == 0) {
- lmc_dimmx_params.s.rc3 = 0x0; /* Light Drive */
- }
- if (strcmp(s,"moderate") == 0) {
- lmc_dimmx_params.s.rc3 = 0x5; /* Moderate Drive */
- }
- if (strcmp(s,"strong") == 0) {
- lmc_dimmx_params.s.rc3 = 0xA; /* Strong Drive */
- }
- }
-
- if ((s = lookup_env_parameter("ddr_ctl_drive")) != NULL) {
- if (strcmp(s,"light") == 0) {
- lmc_dimmx_params.s.rc4 = 0x0; /* Light Drive */
- }
- if (strcmp(s,"moderate") == 0) {
- lmc_dimmx_params.s.rc4 = 0x5; /* Moderate Drive */
- }
- }
+ rc = read_spd(node, &dimm_config_table[didx], 69);
+ lmc_dimmx_params.s.rc0 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc1 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 70);
+ lmc_dimmx_params.s.rc2 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc3 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 71);
+ lmc_dimmx_params.s.rc4 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc5 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 72);
+ lmc_dimmx_params.s.rc6 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc7 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 73);
+ lmc_dimmx_params.s.rc8 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc9 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 74);
+ lmc_dimmx_params.s.rc10 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc11 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 75);
+ lmc_dimmx_params.s.rc12 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc13 = (rc >> 4) & 0xf;
+
+ rc = read_spd(node, &dimm_config_table[didx], 76);
+ lmc_dimmx_params.s.rc14 = (rc >> 0) & 0xf;
+ lmc_dimmx_params.s.rc15 = (rc >> 4) & 0xf;
+
+
+ if ((s = lookup_env_parameter("ddr_clk_drive")) != NULL) {
+ if (strcmp(s,"light") == 0) {
+ lmc_dimmx_params.s.rc5 = 0x0; /* Light Drive */
+ }
+ if (strcmp(s,"moderate") == 0) {
+ lmc_dimmx_params.s.rc5 = 0x5; /* Moderate Drive */
+ }
+ if (strcmp(s,"strong") == 0) {
+ lmc_dimmx_params.s.rc5 = 0xA; /* Strong Drive */
+ }
+ }
+
+ if ((s = lookup_env_parameter("ddr_cmd_drive")) != NULL) {
+ if (strcmp(s,"light") == 0) {
+ lmc_dimmx_params.s.rc3 = 0x0; /* Light Drive */
+ }
+ if (strcmp(s,"moderate") == 0) {
+ lmc_dimmx_params.s.rc3 = 0x5; /* Moderate Drive */
+ }
+ if (strcmp(s,"strong") == 0) {
+ lmc_dimmx_params.s.rc3 = 0xA; /* Strong Drive */
+ }
+ }
+
+ if ((s = lookup_env_parameter("ddr_ctl_drive")) != NULL) {
+ if (strcmp(s,"light") == 0) {
+ lmc_dimmx_params.s.rc4 = 0x0; /* Light Drive */
+ }
+ if (strcmp(s,"moderate") == 0) {
+ lmc_dimmx_params.s.rc4 = 0x5; /* Moderate Drive */
+ }
+ }
/*
@@ -4888,116 +4895,116 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if (1250 > tclk_psecs)
lmc_dimmx_params.s.rc10 = 4;
- } /* if (ddr_type == DDR4_DRAM) */
+ } /* if (ddr_type == DDR4_DRAM) */
if ((s = lookup_env_parameter("ddr_dimmx_params")) != NULL) {
lmc_dimmx_params.u = strtoul(s, NULL, 0);
}
- for (i=0; i<16; ++i) {
- uint64_t value;
- if ((s = lookup_env_parameter("ddr_rc%d", i)) != NULL) {
- value = strtoul(s, NULL, 0);
- lmc_dimmx_params.u &= ~((uint64_t)0xf << (i*4));
- lmc_dimmx_params.u |= ( value << (i*4));
- }
- }
-
- DRAM_CSR_WRITE(node, BDK_LMCX_DIMMX_PARAMS(ddr_interface_num, dimm), lmc_dimmx_params.u);
-
- ddr_print("DIMM%d Register Control Words RC15:RC0 : %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
- dimm,
- lmc_dimmx_params.s.rc15,
- lmc_dimmx_params.s.rc14,
- lmc_dimmx_params.s.rc13,
- lmc_dimmx_params.s.rc12,
- lmc_dimmx_params.s.rc11,
- lmc_dimmx_params.s.rc10,
- lmc_dimmx_params.s.rc9 ,
- lmc_dimmx_params.s.rc8 ,
- lmc_dimmx_params.s.rc7 ,
- lmc_dimmx_params.s.rc6 ,
- lmc_dimmx_params.s.rc5 ,
- lmc_dimmx_params.s.rc4 ,
- lmc_dimmx_params.s.rc3 ,
- lmc_dimmx_params.s.rc2 ,
- lmc_dimmx_params.s.rc1 ,
- lmc_dimmx_params.s.rc0 );
- } /* for didx */
-
- if (ddr_type == DDR4_DRAM) {
-
- /* LMC0_DIMM_CTL */
- lmc_dimm_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DIMM_CTL(ddr_interface_num));
- lmc_dimm_ctl.s.dimm0_wmask = 0xdf3f;
- lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0xdf3f : 0x0000;
- lmc_dimm_ctl.s.tcws = 0x4e0;
- lmc_dimm_ctl.cn88xx.parity = custom_lmc_config->parity;
-
- if ((s = lookup_env_parameter("ddr_dimm0_wmask")) != NULL) {
- lmc_dimm_ctl.s.dimm0_wmask = strtoul(s, NULL, 0);
- }
-
- if ((s = lookup_env_parameter("ddr_dimm1_wmask")) != NULL) {
- lmc_dimm_ctl.s.dimm1_wmask = strtoul(s, NULL, 0);
- }
-
- if ((s = lookup_env_parameter("ddr_dimm_ctl_parity")) != NULL) {
- lmc_dimm_ctl.cn88xx.parity = strtoul(s, NULL, 0);
- }
-
- if ((s = lookup_env_parameter("ddr_dimm_ctl_tcws")) != NULL) {
- lmc_dimm_ctl.s.tcws = strtoul(s, NULL, 0);
- }
-
- ddr_print("LMC DIMM_CTL : 0x%016lx\n", lmc_dimm_ctl.u);
- DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
-
- perform_octeon3_ddr3_sequence(node, rank_mask,
- ddr_interface_num, 0x7 ); /* Init RCW */
-
- /* Write RC0D last */
- lmc_dimm_ctl.s.dimm0_wmask = 0x2000;
- lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0x2000 : 0x0000;
- ddr_print("LMC DIMM_CTL : 0x%016lx\n", lmc_dimm_ctl.u);
- DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
-
- /* Don't write any extended registers the second time */
- DRAM_CSR_WRITE(node, BDK_LMCX_DDR4_DIMM_CTL(ddr_interface_num), 0);
-
- perform_octeon3_ddr3_sequence(node, rank_mask,
- ddr_interface_num, 0x7 ); /* Init RCW */
+ for (i=0; i<16; ++i) {
+ uint64_t value;
+ if ((s = lookup_env_parameter("ddr_rc%d", i)) != NULL) {
+ value = strtoul(s, NULL, 0);
+ lmc_dimmx_params.u &= ~((uint64_t)0xf << (i*4));
+ lmc_dimmx_params.u |= ( value << (i*4));
+ }
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMMX_PARAMS(ddr_interface_num, dimm), lmc_dimmx_params.u);
+
+ ddr_print("DIMM%d Register Control Words RC15:RC0 : %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x %x\n",
+ dimm,
+ lmc_dimmx_params.s.rc15,
+ lmc_dimmx_params.s.rc14,
+ lmc_dimmx_params.s.rc13,
+ lmc_dimmx_params.s.rc12,
+ lmc_dimmx_params.s.rc11,
+ lmc_dimmx_params.s.rc10,
+ lmc_dimmx_params.s.rc9 ,
+ lmc_dimmx_params.s.rc8 ,
+ lmc_dimmx_params.s.rc7 ,
+ lmc_dimmx_params.s.rc6 ,
+ lmc_dimmx_params.s.rc5 ,
+ lmc_dimmx_params.s.rc4 ,
+ lmc_dimmx_params.s.rc3 ,
+ lmc_dimmx_params.s.rc2 ,
+ lmc_dimmx_params.s.rc1 ,
+ lmc_dimmx_params.s.rc0 );
+ } /* for didx */
+
+ if (ddr_type == DDR4_DRAM) {
+
+ /* LMC0_DIMM_CTL */
+ lmc_dimm_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DIMM_CTL(ddr_interface_num));
+ lmc_dimm_ctl.s.dimm0_wmask = 0xdf3f;
+ lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0xdf3f : 0x0000;
+ lmc_dimm_ctl.s.tcws = 0x4e0;
+ lmc_dimm_ctl.cn88xx.parity = custom_lmc_config->parity;
+
+ if ((s = lookup_env_parameter("ddr_dimm0_wmask")) != NULL) {
+ lmc_dimm_ctl.s.dimm0_wmask = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm1_wmask")) != NULL) {
+ lmc_dimm_ctl.s.dimm1_wmask = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm_ctl_parity")) != NULL) {
+ lmc_dimm_ctl.cn88xx.parity = strtoul(s, NULL, 0);
+ }
+
+ if ((s = lookup_env_parameter("ddr_dimm_ctl_tcws")) != NULL) {
+ lmc_dimm_ctl.s.tcws = strtoul(s, NULL, 0);
+ }
+
+ ddr_print("LMC DIMM_CTL : 0x%016llx\n", lmc_dimm_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
+
+ perform_octeon3_ddr3_sequence(node, rank_mask,
+ ddr_interface_num, 0x7 ); /* Init RCW */
+
+ /* Write RC0D last */
+ lmc_dimm_ctl.s.dimm0_wmask = 0x2000;
+ lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0x2000 : 0x0000;
+ ddr_print("LMC DIMM_CTL : 0x%016llx\n", lmc_dimm_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
+
+ /* Don't write any extended registers the second time */
+ DRAM_CSR_WRITE(node, BDK_LMCX_DDR4_DIMM_CTL(ddr_interface_num), 0);
+
+ perform_octeon3_ddr3_sequence(node, rank_mask,
+ ddr_interface_num, 0x7 ); /* Init RCW */
} else {
- /* LMC0_DIMM_CTL */
- lmc_dimm_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DIMM_CTL(ddr_interface_num));
- lmc_dimm_ctl.s.dimm0_wmask = 0xffff;
- lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0xffff : 0x0000;
- lmc_dimm_ctl.s.tcws = 0x4e0;
- lmc_dimm_ctl.cn88xx.parity = custom_lmc_config->parity;
+ /* LMC0_DIMM_CTL */
+ lmc_dimm_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DIMM_CTL(ddr_interface_num));
+ lmc_dimm_ctl.s.dimm0_wmask = 0xffff;
+ lmc_dimm_ctl.s.dimm1_wmask = (dimm_count > 1) ? 0xffff : 0x0000;
+ lmc_dimm_ctl.s.tcws = 0x4e0;
+ lmc_dimm_ctl.cn88xx.parity = custom_lmc_config->parity;
- if ((s = lookup_env_parameter("ddr_dimm0_wmask")) != NULL) {
- lmc_dimm_ctl.s.dimm0_wmask = strtoul(s, NULL, 0);
- }
+ if ((s = lookup_env_parameter("ddr_dimm0_wmask")) != NULL) {
+ lmc_dimm_ctl.s.dimm0_wmask = strtoul(s, NULL, 0);
+ }
- if ((s = lookup_env_parameter("ddr_dimm1_wmask")) != NULL) {
- lmc_dimm_ctl.s.dimm1_wmask = strtoul(s, NULL, 0);
- }
+ if ((s = lookup_env_parameter("ddr_dimm1_wmask")) != NULL) {
+ lmc_dimm_ctl.s.dimm1_wmask = strtoul(s, NULL, 0);
+ }
- if ((s = lookup_env_parameter("ddr_dimm_ctl_parity")) != NULL) {
- lmc_dimm_ctl.cn88xx.parity = strtoul(s, NULL, 0);
- }
+ if ((s = lookup_env_parameter("ddr_dimm_ctl_parity")) != NULL) {
+ lmc_dimm_ctl.cn88xx.parity = strtoul(s, NULL, 0);
+ }
- if ((s = lookup_env_parameter("ddr_dimm_ctl_tcws")) != NULL) {
- lmc_dimm_ctl.s.tcws = strtoul(s, NULL, 0);
- }
+ if ((s = lookup_env_parameter("ddr_dimm_ctl_tcws")) != NULL) {
+ lmc_dimm_ctl.s.tcws = strtoul(s, NULL, 0);
+ }
- ddr_print("LMC DIMM_CTL : 0x%016lx\n", lmc_dimm_ctl.u);
- DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
+ ddr_print("LMC DIMM_CTL : 0x%016llx\n", lmc_dimm_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_DIMM_CTL(ddr_interface_num), lmc_dimm_ctl.u);
- perform_octeon3_ddr3_sequence(node, rank_mask,
- ddr_interface_num, 0x7 ); /* Init RCW */
- }
+ perform_octeon3_ddr3_sequence(node, rank_mask,
+ ddr_interface_num, 0x7 ); /* Init RCW */
+ }
} else { /* if (spd_rdimm) */
/* Disable register control writes for unbuffered */
bdk_lmcx_dimm_ctl_t lmc_dimm_ctl;
@@ -5083,7 +5090,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
read_DAC_DBI_settings(node, ddr_interface_num, /*DAC*/1, dac_settings);
if ((num_samples == 1) || dram_is_verbose(VBL_DEV)) {
display_DAC_DBI_settings(node, ddr_interface_num, /*DAC*/1, use_ecc,
- dac_settings, "Internal VREF");
+ dac_settings, (char *)"Internal VREF");
}
// for DDR4, evaluate the DAC settings and retry if any issues
@@ -5119,7 +5126,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
dac_settings[lane] = process_samples_average(&lanes[lane].bytes[0], num_samples,
ddr_interface_num, lane);
}
- display_DAC_DBI_settings(node, ddr_interface_num, /*DAC*/1, use_ecc, dac_settings, "Averaged VREF");
+ display_DAC_DBI_settings(node, ddr_interface_num, /*DAC*/1, use_ecc, dac_settings, (char *)"Averaged VREF");
// finally, write the final DAC values
for (lane = 0; lane < last_lane; lane++) {
@@ -5216,7 +5223,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
DRAM_CSR_WRITE(node, BDK_LMCX_EXT_CONFIG(ddr_interface_num), ext_config.u);
- ddr_print("%-45s : 0x%016lx\n", "EXT_CONFIG", ext_config.u);
+ ddr_print("%-45s : 0x%016llx\n", "EXT_CONFIG", ext_config.u);
}
@@ -5230,8 +5237,8 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
long enough for a few ZQCS calibrations to occur. This
should ensure that the calibration circuitry is
stabilized before read/write leveling occurs. */
- save_ref_zqcs_int = lmc_config.s.ref_zqcs_int;
- lmc_config.s.ref_zqcs_int = 1 | (32<<7); /* set smallest interval */
+ save_ref_zqcs_int = lmc_config.s.ref_zqcs_int;
+ lmc_config.s.ref_zqcs_int = 1 | (32<<7); /* set smallest interval */
DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
@@ -5245,11 +5252,11 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
* tclk_psecs * 100 * 512 * 128) / (10000*10000)
+ 10 * ((uint64_t)32 * tclk_psecs * 100 * 512 * 128) / (10000*10000);
- VB_PRT(VBL_FAE, "N%d.LMC%d: Waiting %ld usecs for ZQCS calibrations to start\n",
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Waiting %lld usecs for ZQCS calibrations to start\n",
node, ddr_interface_num, temp_delay_usecs);
bdk_wait_usec(temp_delay_usecs);
- lmc_config.s.ref_zqcs_int = save_ref_zqcs_int; /* Restore computed interval */
+ lmc_config.s.ref_zqcs_int = save_ref_zqcs_int; /* Restore computed interval */
DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(ddr_interface_num), lmc_config.u);
BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
@@ -5410,7 +5417,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
else {
wlevel_bitmask_errors = 1; /* Force software write-leveling to run */
ddr_print("N%d.LMC%d: Forcing software Write-Leveling\n", node, ddr_interface_num);
- }
+ }
default_wlevel_rtt_nom = (ddr_type == DDR3_DRAM) ? rttnom_20ohm : ddr4_rttnom_40ohm ; /* FIXME? */
@@ -5423,7 +5430,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if ((s = lookup_env_parameter_ull("ddr_hwl_wodt_mask")) != NULL) {
lmc_wodt_mask.u = strtoull(s, NULL, 0);
if (lmc_wodt_mask.u != saved_wodt_mask) { // print/store only when diff
- ddr_print("WODT_MASK : 0x%016lx\n", lmc_wodt_mask.u);
+ ddr_print("WODT_MASK : 0x%016llx\n", lmc_wodt_mask.u);
DRAM_CSR_WRITE(node, BDK_LMCX_WODT_MASK(ddr_interface_num), lmc_wodt_mask.u);
}
}
@@ -5431,18 +5438,18 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#endif /* WODT_MASK_2R_1S */
lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(ddr_interface_num));
- ecc_ena = lmc_config.s.ecc_ena;
+ ecc_ena = lmc_config.s.ecc_ena;
- if ((s = lookup_env_parameter("ddr_wlevel_roundup")) != NULL) {
- ddr_wlevel_roundup = strtoul(s, NULL, 0);
- }
- if ((s = lookup_env_parameter("ddr_wlevel_printall")) != NULL) {
- ddr_wlevel_printall = strtoul(s, NULL, 0);
- }
+ if ((s = lookup_env_parameter("ddr_wlevel_roundup")) != NULL) {
+ ddr_wlevel_roundup = strtoul(s, NULL, 0);
+ }
+ if ((s = lookup_env_parameter("ddr_wlevel_printall")) != NULL) {
+ ddr_wlevel_printall = strtoul(s, NULL, 0);
+ }
- if ((s = lookup_env_parameter("ddr_disable_hwl_validity")) != NULL) {
- disable_hwl_validity = !!strtoul(s, NULL, 0);
- }
+ if ((s = lookup_env_parameter("ddr_disable_hwl_validity")) != NULL) {
+ disable_hwl_validity = !!strtoul(s, NULL, 0);
+ }
if ((s = lookup_env_parameter("ddr_wlevel_rtt_nom")) != NULL) {
default_wlevel_rtt_nom = strtoul(s, NULL, 0);
@@ -5483,22 +5490,22 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
// Start the hardware write-leveling loop per rank
- for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
- if (!(rank_mask & (1 << rankx)))
- continue;
+ if (!(rank_mask & (1 << rankx)))
+ continue;
#if HW_WL_MAJORITY
- // array to collect counts of byte-lane values
- // assume low-order 3 bits and even, so really only 2 bit values
- int wlevel_bytes[9][4];
- memset(wlevel_bytes, 0, sizeof(wlevel_bytes));
+ // array to collect counts of byte-lane values
+ // assume low-order 3 bits and even, so really only 2 bit values
+ int wlevel_bytes[9][4];
+ memset(wlevel_bytes, 0, sizeof(wlevel_bytes));
#endif
- // restructure the looping so we can keep trying until we get the samples we want
- //for (int wloop = 0; wloop < wlevel_loops; wloop++) {
- int wloop = 0;
- int wloop_retries = 0; // retries per sample for HW-related issues with bitmasks or values
+ // restructure the looping so we can keep trying until we get the samples we want
+ //for (int wloop = 0; wloop < wlevel_loops; wloop++) {
+ int wloop = 0;
+ int wloop_retries = 0; // retries per sample for HW-related issues with bitmasks or values
int wloop_retries_total = 0;
int wloop_retries_exhausted = 0;
#define WLOOP_RETRIES_DEFAULT 5
@@ -5506,86 +5513,85 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
int wlevel_bitmask_errors_rank = 0;
int wlevel_validity_errors_rank = 0;
- while (wloop < wlevel_loops) {
+ while (wloop < wlevel_loops) {
- wlevel_ctl.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num));
+ wlevel_ctl.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num));
- wlevel_ctl.s.rtt_nom = (default_wlevel_rtt_nom > 0) ? (default_wlevel_rtt_nom - 1) : 7;
+ wlevel_ctl.s.rtt_nom = (default_wlevel_rtt_nom > 0) ? (default_wlevel_rtt_nom - 1) : 7;
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), 0); /* Clear write-level delays */
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), 0); /* Clear write-level delays */
- wlevel_bitmask_errors = 0; /* Reset error counters */
+ wlevel_bitmask_errors = 0; /* Reset error counters */
wlevel_validity_errors = 0;
- for (byte_idx=0; byte_idx<9; ++byte_idx) {
- wlevel_bitmask[byte_idx] = 0; /* Reset bitmasks */
- }
+ for (byte_idx=0; byte_idx<9; ++byte_idx) {
+ wlevel_bitmask[byte_idx] = 0; /* Reset bitmasks */
+ }
#if HWL_BY_BYTE // FIXME???
- /* Make a separate pass for each byte to reduce power. */
- for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
+ /* Make a separate pass for each byte to reduce power. */
+ for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
- if (!(ddr_interface_bytemask&(1<<byte_idx)))
- continue;
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
- wlevel_ctl.s.lanemask = (1<<byte_idx);
+ wlevel_ctl.s.lanemask = (1<<byte_idx);
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
- /* Read and write values back in order to update the
- status field. This insures that we read the updated
- values after write-leveling has completed. */
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
- BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx)));
+ /* Read and write values back in order to update the
+ status field. This insures that we read the updated
+ values after write-leveling has completed. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
+ BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx)));
- perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 6); /* write-leveling */
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 6); /* write-leveling */
- if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
- BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
- status, ==, 3, 1000000))
- {
- error_print("ERROR: Timeout waiting for WLEVEL\n");
- }
- lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+ if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
+ BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for WLEVEL\n");
+ }
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
- wlevel_bitmask[byte_idx] = octeon_read_lmcx_ddr3_wlevel_dbg(node, ddr_interface_num, byte_idx);
- if (wlevel_bitmask[byte_idx] == 0)
- ++wlevel_bitmask_errors;
- } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+ wlevel_bitmask[byte_idx] = octeon_read_lmcx_ddr3_wlevel_dbg(node, ddr_interface_num, byte_idx);
+ if (wlevel_bitmask[byte_idx] == 0)
+ ++wlevel_bitmask_errors;
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
- wlevel_ctl.s.lanemask = /*0x1ff*/ddr_interface_bytemask; // restore for RL
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
+ wlevel_ctl.s.lanemask = /*0x1ff*/ddr_interface_bytemask; // restore for RL
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
#else
// do all the byte-lanes at the same time
- wlevel_ctl.s.lanemask = /*0x1ff*/ddr_interface_bytemask; // FIXME?
-
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
-
- /* Read and write values back in order to update the
- status field. This insures that we read the updated
- values after write-leveling has completed. */
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
- BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx)));
-
- perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 6); /* write-leveling */
-
- if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
- BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
- status, ==, 3, 1000000))
- {
- error_print("ERROR: Timeout waiting for WLEVEL\n");
- }
-
- lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
-
- for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
- if (!(ddr_interface_bytemask&(1<<byte_idx)))
- continue;
- wlevel_bitmask[byte_idx] = octeon_read_lmcx_ddr3_wlevel_dbg(node, ddr_interface_num, byte_idx);
- if (wlevel_bitmask[byte_idx] == 0)
- ++wlevel_bitmask_errors;
- } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+ wlevel_ctl.s.lanemask = /*0x1ff*/ddr_interface_bytemask; // FIXME?
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_CTL(ddr_interface_num), wlevel_ctl.u);
+
+ /* Read and write values back in order to update the
+ status field. This insures that we read the updated
+ values after write-leveling has completed. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
+ BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx)));
+
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 6); /* write-leveling */
+
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for WLEVEL\n");
+ }
+
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+
+ for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ wlevel_bitmask[byte_idx] = octeon_read_lmcx_ddr3_wlevel_dbg(node, ddr_interface_num, byte_idx);
+ if (wlevel_bitmask[byte_idx] == 0)
+ ++wlevel_bitmask_errors;
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
#endif
// check validity only if no bitmask errors
@@ -5606,128 +5612,128 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
} else
wlevel_bitmask_errors_rank++;
- // before we print, if we had bitmask or validity errors, do a retry...
- if ((wlevel_bitmask_errors != 0) || (wlevel_validity_errors != 0)) {
+ // before we print, if we had bitmask or validity errors, do a retry...
+ if ((wlevel_bitmask_errors != 0) || (wlevel_validity_errors != 0)) {
// VBL must be high to show the bad bitmaps or delays here also
if (dram_is_verbose(VBL_DEV2)) {
display_WL_BM(node, ddr_interface_num, rankx, wlevel_bitmask);
display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
}
- if (wloop_retries < WLOOP_RETRIES_DEFAULT) {
- wloop_retries++;
+ if (wloop_retries < WLOOP_RETRIES_DEFAULT) {
+ wloop_retries++;
wloop_retries_total++;
- // this printout is per-retry: only when VBL is high enough (DEV2?)
- VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: H/W Write-Leveling had %s errors - retrying...\n",
- node, ddr_interface_num, rankx,
+ // this printout is per-retry: only when VBL is high enough (DEV2?)
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: H/W Write-Leveling had %s errors - retrying...\n",
+ node, ddr_interface_num, rankx,
(wlevel_bitmask_errors) ? "Bitmask" : "Validity");
- continue; // this takes us back to the top without counting a sample
- } else { // ran out of retries for this sample
+ continue; // this takes us back to the top without counting a sample
+ } else { // ran out of retries for this sample
// retries exhausted, do not print at normal VBL
- VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: H/W Write-Leveling issues: %s errors\n",
- node, ddr_interface_num, rankx,
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: H/W Write-Leveling issues: %s errors\n",
+ node, ddr_interface_num, rankx,
(wlevel_bitmask_errors) ? "Bitmask" : "Validity");
wloop_retries_exhausted++;
- }
- }
+ }
+ }
// no errors or exhausted retries, use this sample
wloop_retries = 0; //reset for next sample
- // when only 1 sample or forced, print the bitmasks first and current HW WL
- if ((wlevel_loops == 1) || ddr_wlevel_printall) {
- display_WL_BM(node, ddr_interface_num, rankx, wlevel_bitmask);
- display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
- }
-
- if (ddr_wlevel_roundup) { /* Round up odd bitmask delays */
- for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
- if (!(ddr_interface_bytemask&(1<<byte_idx)))
- continue;
- update_wlevel_rank_struct(&lmc_wlevel_rank,
- byte_idx,
- roundup_ddr3_wlevel_bitmask(wlevel_bitmask[byte_idx]));
- } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
- display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
- }
+ // when only 1 sample or forced, print the bitmasks first and current HW WL
+ if ((wlevel_loops == 1) || ddr_wlevel_printall) {
+ display_WL_BM(node, ddr_interface_num, rankx, wlevel_bitmask);
+ display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ }
+
+ if (ddr_wlevel_roundup) { /* Round up odd bitmask delays */
+ for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ update_wlevel_rank_struct(&lmc_wlevel_rank,
+ byte_idx,
+ roundup_ddr3_wlevel_bitmask(wlevel_bitmask[byte_idx]));
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ }
#if HW_WL_MAJORITY
- // OK, we have a decent sample, no bitmask or validity errors
- for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
- if (!(ddr_interface_bytemask&(1<<byte_idx)))
- continue;
- // increment count of byte-lane value
- int ix = (get_wlevel_rank_struct(&lmc_wlevel_rank, byte_idx) >> 1) & 3; // only 4 values
- wlevel_bytes[byte_idx][ix]++;
- } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+ // OK, we have a decent sample, no bitmask or validity errors
+ for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) {
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ // increment count of byte-lane value
+ int ix = (get_wlevel_rank_struct(&lmc_wlevel_rank, byte_idx) >> 1) & 3; // only 4 values
+ wlevel_bytes[byte_idx][ix]++;
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
#endif
- wloop++; // if we get here, we have taken a decent sample
+ wloop++; // if we get here, we have taken a decent sample
- } /* while (wloop < wlevel_loops) */
+ } /* while (wloop < wlevel_loops) */
#if HW_WL_MAJORITY
- // if we did sample more than once, try to pick a majority vote
- if (wlevel_loops > 1) {
- // look for the majority in each byte-lane
- for (byte_idx = 0; byte_idx < (8+ecc_ena); ++byte_idx) {
- int mx = -1, mc = 0, xc = 0, cc = 0;
- int ix, ic;
- if (!(ddr_interface_bytemask&(1<<byte_idx)))
- continue;
- for (ix = 0; ix < 4; ix++) {
- ic = wlevel_bytes[byte_idx][ix];
- // make a bitmask of the ones with a count
- if (ic > 0) {
- mc |= (1 << ix);
- cc++; // count how many had non-zero counts
- }
- // find the majority
- if (ic > xc) { // new max?
- xc = ic; // yes
- mx = ix; // set its index
- }
- }
+ // if we did sample more than once, try to pick a majority vote
+ if (wlevel_loops > 1) {
+ // look for the majority in each byte-lane
+ for (byte_idx = 0; byte_idx < (8+ecc_ena); ++byte_idx) {
+ int mx = -1, mc = 0, xc = 0, cc = 0;
+ int ix, ic;
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ for (ix = 0; ix < 4; ix++) {
+ ic = wlevel_bytes[byte_idx][ix];
+ // make a bitmask of the ones with a count
+ if (ic > 0) {
+ mc |= (1 << ix);
+ cc++; // count how many had non-zero counts
+ }
+ // find the majority
+ if (ic > xc) { // new max?
+ xc = ic; // yes
+ mx = ix; // set its index
+ }
+ }
#if SWL_TRY_HWL_ALT
- // see if there was an alternate
- int alts = (mc & ~(1 << mx)); // take out the majority choice
- if (alts != 0) {
- for (ix = 0; ix < 4; ix++) {
- if (alts & (1 << ix)) { // FIXME: could be done multiple times? bad if so
- hwl_alts[rankx].hwl_alt_mask |= (1 << byte_idx); // set the mask
- hwl_alts[rankx].hwl_alt_delay[byte_idx] = ix << 1; // record the value
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SWL_TRY_HWL_ALT: Byte %d maj %d (%d) alt %d (%d).\n",
+ // see if there was an alternate
+ int alts = (mc & ~(1 << mx)); // take out the majority choice
+ if (alts != 0) {
+ for (ix = 0; ix < 4; ix++) {
+ if (alts & (1 << ix)) { // FIXME: could be done multiple times? bad if so
+ hwl_alts[rankx].hwl_alt_mask |= (1 << byte_idx); // set the mask
+ hwl_alts[rankx].hwl_alt_delay[byte_idx] = ix << 1; // record the value
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SWL_TRY_HWL_ALT: Byte %d maj %d (%d) alt %d (%d).\n",
node, ddr_interface_num, rankx, byte_idx, mx << 1, xc,
ix << 1, wlevel_bytes[byte_idx][ix]);
- }
- }
- } else {
- debug_print("N%d.LMC%d.R%d: SWL_TRY_HWL_ALT: Byte %d maj %d alt NONE.\n",
- node, ddr_interface_num, rankx, byte_idx, mx << 1);
- }
+ }
+ }
+ } else {
+ debug_print("N%d.LMC%d.R%d: SWL_TRY_HWL_ALT: Byte %d maj %d alt NONE.\n",
+ node, ddr_interface_num, rankx, byte_idx, mx << 1);
+ }
#endif /* SWL_TRY_HWL_ALT */
- if (cc > 2) { // unlikely, but...
- // assume: counts for 3 indices are all 1
- // possiblities are: 0/2/4, 2/4/6, 0/4/6, 0/2/6
- // and the desired?: 2 , 4 , 6, 0
- // we choose the middle, assuming one of the outliers is bad
- // NOTE: this is an ugly hack at the moment; there must be a better way
- switch (mc) {
- case 0x7: mx = 1; break; // was 0/2/4, choose 2
- case 0xb: mx = 0; break; // was 0/2/6, choose 0
- case 0xd: mx = 3; break; // was 0/4/6, choose 6
- case 0xe: mx = 2; break; // was 2/4/6, choose 4
- default:
- case 0xf: mx = 1; break; // was 0/2/4/6, choose 2?
- }
- error_print("N%d.LMC%d.R%d: HW WL MAJORITY: bad byte-lane %d (0x%x), using %d.\n",
- node, ddr_interface_num, rankx, byte_idx, mc, mx << 1);
- }
- update_wlevel_rank_struct(&lmc_wlevel_rank, byte_idx, mx << 1);
- } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
-
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
- display_WL_with_final(node, ddr_interface_num, lmc_wlevel_rank, rankx);
- } /* if (wlevel_loops > 1) */
+ if (cc > 2) { // unlikely, but...
+ // assume: counts for 3 indices are all 1
+ // possiblities are: 0/2/4, 2/4/6, 0/4/6, 0/2/6
+ // and the desired?: 2 , 4 , 6, 0
+ // we choose the middle, assuming one of the outliers is bad
+ // NOTE: this is an ugly hack at the moment; there must be a better way
+ switch (mc) {
+ case 0x7: mx = 1; break; // was 0/2/4, choose 2
+ case 0xb: mx = 0; break; // was 0/2/6, choose 0
+ case 0xd: mx = 3; break; // was 0/4/6, choose 6
+ case 0xe: mx = 2; break; // was 2/4/6, choose 4
+ default:
+ case 0xf: mx = 1; break; // was 0/2/4/6, choose 2?
+ }
+ error_print("N%d.LMC%d.R%d: HW WL MAJORITY: bad byte-lane %d (0x%x), using %d.\n",
+ node, ddr_interface_num, rankx, byte_idx, mc, mx << 1);
+ }
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte_idx, mx << 1);
+ } /* for (byte_idx=0; byte_idx<(8+ecc_ena); ++byte_idx) */
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ display_WL_with_final(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ } /* if (wlevel_loops > 1) */
#endif /* HW_WL_MAJORITY */
// maybe print an error summary for the rank
if ((wlevel_bitmask_errors_rank != 0) || (wlevel_validity_errors_rank != 0)) {
@@ -5737,7 +5743,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
wloop_retries_total, wloop_retries_exhausted);
}
- } /* for (rankx = 0; rankx < dimm_count * 4;rankx++) */
+ } /* for (rankx = 0; rankx < dimm_count * 4;rankx++) */
#if WODT_MASK_2R_1S
if ((ddr_type == DDR4_DRAM) && (num_ranks == 2) && (dimm_count == 1)) {
@@ -5747,7 +5753,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_wodt_mask.u = BDK_CSR_READ(node, BDK_LMCX_WODT_MASK(ddr_interface_num));
if (lmc_wodt_mask.u != saved_wodt_mask) { // always restore what was saved if diff
lmc_wodt_mask.u = saved_wodt_mask;
- ddr_print("WODT_MASK : 0x%016lx\n", lmc_wodt_mask.u);
+ ddr_print("WODT_MASK : 0x%016llx\n", lmc_wodt_mask.u);
DRAM_CSR_WRITE(node, BDK_LMCX_WODT_MASK(ddr_interface_num), lmc_wodt_mask.u);
}
}
@@ -5759,26 +5765,26 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if (! disable_deskew_training) {
deskew_counts_t dsk_counts;
- int retry_count = 0;
+ int retry_count = 0;
- VB_PRT(VBL_FAE, "N%d.LMC%d: Check Deskew Settings before Read-Leveling.\n", node, ddr_interface_num);
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Check Deskew Settings before Read-Leveling.\n", node, ddr_interface_num);
- do {
- Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &dsk_counts, VBL_FAE);
+ do {
+ Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &dsk_counts, VBL_FAE);
- // only RAWCARD A or B will not benefit from retraining if there's only saturation
+ // only RAWCARD A or B will not benefit from retraining if there's only saturation
// or any rawcard if there is a nibble error
- if ((!spd_rawcard_AorB && dsk_counts.saturated > 0) ||
- ((dsk_counts.nibrng_errs != 0) || (dsk_counts.nibunl_errs != 0)))
- {
- retry_count++;
- VB_PRT(VBL_FAE, "N%d.LMC%d: Deskew Status indicates saturation or nibble errors - retry %d Training.\n",
- node, ddr_interface_num, retry_count);
- Perform_Read_Deskew_Training(node, rank_mask, ddr_interface_num,
+ if ((!spd_rawcard_AorB && dsk_counts.saturated > 0) ||
+ ((dsk_counts.nibrng_errs != 0) || (dsk_counts.nibunl_errs != 0)))
+ {
+ retry_count++;
+ VB_PRT(VBL_FAE, "N%d.LMC%d: Deskew Status indicates saturation or nibble errors - retry %d Training.\n",
+ node, ddr_interface_num, retry_count);
+ Perform_Read_Deskew_Training(node, rank_mask, ddr_interface_num,
spd_rawcard_AorB, 0, ddr_interface_64b);
- } else
- break;
- } while (retry_count < 5);
+ } else
+ break;
+ } while (retry_count < 5);
// print the last setting only if we had to do retries here
if (retry_count > 0)
@@ -6019,9 +6025,6 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_CTL(ddr_interface_num), rlevel_ctl.u);
- if (bdk_is_platform(BDK_PLATFORM_ASIM))
- rlevel_debug_loops = 0;
-
if ((s = lookup_env_parameter("ddr%d_rlevel_debug_loops", ddr_interface_num)) != NULL) {
rlevel_debug_loops = strtoul(s, NULL, 0);
}
@@ -6038,7 +6041,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
ddr_rlevel_compute = strtoul(s, NULL, 0);
}
- ddr_print("RLEVEL_CTL : 0x%016lx\n", rlevel_ctl.u);
+ ddr_print("RLEVEL_CTL : 0x%016llx\n", rlevel_ctl.u);
ddr_print("RLEVEL_OFFSET : %6d\n", rlevel_ctl.s.offset);
ddr_print("RLEVEL_OFFSET_EN : %6d\n", rlevel_ctl.s.offset_en);
@@ -6067,7 +6070,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#ifdef ENABLE_CUSTOM_RLEVEL_TABLE
if (custom_lmc_config->rlevel_table != NULL) {
- char part_number[21];
+ char part_number[21];
/* Check for hard-coded read-leveling settings */
get_dimm_part_number(part_number, node, &dimm_config_table[0], 0, ddr_type);
for (rankx = 0; rankx < dimm_count * 4;rankx++) {
@@ -6081,7 +6084,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
debug_print("DIMM part number:\"%s\", SPD: \"%s\"\n", custom_lmc_config->rlevel_table[i].part, part_number);
if ((strcmp(part_number, custom_lmc_config->rlevel_table[i].part) == 0)
&& (_abs(custom_lmc_config->rlevel_table[i].speed - 2*ddr_hertz/(1000*1000)) < 10 ))
- {
+ {
ddr_print("Using hard-coded read leveling for DIMM part number: \"%s\"\n", part_number);
lmc_rlevel_rank.u = custom_lmc_config->rlevel_table[i].rlevel_rank[ddr_interface_num][rankx];
DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
@@ -6097,61 +6100,61 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#endif /* ENABLE_CUSTOM_RLEVEL_TABLE */
while(rlevel_debug_loops--) {
- /* Initialize the error scoreboard */
- memset(rlevel_scoreboard, 0, sizeof(rlevel_scoreboard));
+ /* Initialize the error scoreboard */
+ memset(rlevel_scoreboard, 0, sizeof(rlevel_scoreboard));
- if ((s = lookup_env_parameter("ddr_rlevel_comp_offset")) != NULL) {
- rlevel_comp_offset = strtoul(s, NULL, 0);
- }
-
- disable_sequential_delay_check = custom_lmc_config->disable_sequential_delay_check;
-
- if ((s = lookup_env_parameter("ddr_disable_sequential_delay_check")) != NULL) {
- disable_sequential_delay_check = strtoul(s, NULL, 0);
- }
-
- maximum_adjacent_rlevel_delay_increment = custom_lmc_config->maximum_adjacent_rlevel_delay_increment;
-
- if ((s = lookup_env_parameter("ddr_maximum_adjacent_rlevel_delay_increment")) != NULL) {
- maximum_adjacent_rlevel_delay_increment = strtoul(s, NULL, 0);
- }
+ if ((s = lookup_env_parameter("ddr_rlevel_comp_offset")) != NULL) {
+ rlevel_comp_offset = strtoul(s, NULL, 0);
+ }
- lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
- saved_ddr__ptune = lmc_comp_ctl2.s.ddr__ptune;
- saved_ddr__ntune = lmc_comp_ctl2.s.ddr__ntune;
+ disable_sequential_delay_check = custom_lmc_config->disable_sequential_delay_check;
- /* Disable dynamic compensation settings */
- if (rlevel_comp_offset != 0) {
- lmc_comp_ctl2.s.ptune = saved_ddr__ptune;
- lmc_comp_ctl2.s.ntune = saved_ddr__ntune;
+ if ((s = lookup_env_parameter("ddr_disable_sequential_delay_check")) != NULL) {
+ disable_sequential_delay_check = strtoul(s, NULL, 0);
+ }
- /* Round up the ptune calculation to bias the odd cases toward ptune */
- lmc_comp_ctl2.s.ptune += divide_roundup(rlevel_comp_offset, 2);
- lmc_comp_ctl2.s.ntune -= rlevel_comp_offset/2;
+ maximum_adjacent_rlevel_delay_increment = custom_lmc_config->maximum_adjacent_rlevel_delay_increment;
- lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
- saved_int_zqcs_dis = lmc_control.s.int_zqcs_dis;
- lmc_control.s.int_zqcs_dis = 1; /* Disable ZQCS while in bypass. */
- DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+ if ((s = lookup_env_parameter("ddr_maximum_adjacent_rlevel_delay_increment")) != NULL) {
+ maximum_adjacent_rlevel_delay_increment = strtoul(s, NULL, 0);
+ }
- lmc_comp_ctl2.s.byp = 1; /* Enable bypass mode */
- DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
- BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
- lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
- ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
- lmc_comp_ctl2.s.ddr__ptune, lmc_comp_ctl2.s.ddr__ntune);
- }
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ saved_ddr__ptune = lmc_comp_ctl2.s.ddr__ptune;
+ saved_ddr__ntune = lmc_comp_ctl2.s.ddr__ntune;
+
+ /* Disable dynamic compensation settings */
+ if (rlevel_comp_offset != 0) {
+ lmc_comp_ctl2.s.ptune = saved_ddr__ptune;
+ lmc_comp_ctl2.s.ntune = saved_ddr__ntune;
+
+ /* Round up the ptune calculation to bias the odd cases toward ptune */
+ lmc_comp_ctl2.s.ptune += divide_roundup(rlevel_comp_offset, 2);
+ lmc_comp_ctl2.s.ntune -= rlevel_comp_offset/2;
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ saved_int_zqcs_dis = lmc_control.s.int_zqcs_dis;
+ lmc_control.s.int_zqcs_dis = 1; /* Disable ZQCS while in bypass. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ lmc_comp_ctl2.s.byp = 1; /* Enable bypass mode */
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
+ ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
+ lmc_comp_ctl2.s.ddr__ptune, lmc_comp_ctl2.s.ddr__ntune);
+ }
- lmc_modereg_params1.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num));
+ lmc_modereg_params1.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num));
- for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
- rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
- /* When the read ODT mask is zero the dyn_rtt_nom_mask is
- zero than RTT_NOM will not be changing during
- read-leveling. Since the value is fixed we only need
- to test it once. */
- if (dyn_rtt_nom_mask == 0) {
+ /* When the read ODT mask is zero the dyn_rtt_nom_mask is
+ zero than RTT_NOM will not be changing during
+ read-leveling. Since the value is fixed we only need
+ to test it once. */
+ if (dyn_rtt_nom_mask == 0) {
print_nom_ohms = -1; // flag not to print NOM ohms
if (rtt_idx != min_rtt_nom_idx)
continue;
@@ -6164,19 +6167,19 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
print_nom_ohms = imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_00];
}
- DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num), lmc_modereg_params1.u);
- VB_PRT(VBL_TME, "\n");
- VB_PRT(VBL_TME, "RTT_NOM %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
- imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_11],
- imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_10],
- imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_01],
- imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_00],
- lmc_modereg_params1.s.rtt_nom_11,
- lmc_modereg_params1.s.rtt_nom_10,
- lmc_modereg_params1.s.rtt_nom_01,
- lmc_modereg_params1.s.rtt_nom_00);
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num), lmc_modereg_params1.u);
+ VB_PRT(VBL_TME, "\n");
+ VB_PRT(VBL_TME, "RTT_NOM %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_11],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_10],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_01],
+ imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_00],
+ lmc_modereg_params1.s.rtt_nom_11,
+ lmc_modereg_params1.s.rtt_nom_10,
+ lmc_modereg_params1.s.rtt_nom_01,
+ lmc_modereg_params1.s.rtt_nom_00);
- perform_ddr_init_sequence(node, rank_mask, ddr_interface_num);
+ perform_ddr_init_sequence(node, rank_mask, ddr_interface_num);
// Try RANK outside RODT to rearrange the output...
for (rankx = 0; rankx < dimm_count * 4; rankx++) {
@@ -6194,7 +6197,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
#if PICK_BEST_RANK_SCORE_NOT_AVG
- rlevel_best_rank_score = DEFAULT_BEST_RANK_SCORE;
+ rlevel_best_rank_score = DEFAULT_BEST_RANK_SCORE;
#endif
rlevel_rodt_errors = 0;
lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
@@ -6205,119 +6208,116 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
VB_PRT(VBL_DEV, "Read ODT_CTL : 0x%x (%d ohms)\n",
lmc_comp_ctl2.s.rodt_ctl, imp_values->rodt_ohms[lmc_comp_ctl2.s.rodt_ctl]);
- memset(rlevel_byte, 0, sizeof(rlevel_byte));
+ memset(rlevel_byte, 0, sizeof(rlevel_byte));
- for (average_loops = 0; average_loops < rlevel_avg_loops; average_loops++) {
- rlevel_bitmask_errors = 0;
+ for (average_loops = 0; average_loops < rlevel_avg_loops; average_loops++) {
+ rlevel_bitmask_errors = 0;
- if (! (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM))) {
- /* Clear read-level delays */
- DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
+ if (! (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM))) {
+ /* Clear read-level delays */
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
/* read-leveling */
- perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1);
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1);
- if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
- BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
- status, ==, 3, 1000000))
- {
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
error_print("ERROR: Timeout waiting for RLEVEL\n");
}
- }
+ }
- lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
- { // start bitmask interpretation block
+ { // start bitmask interpretation block
int redoing_nonseq_errs = 0;
memset(rlevel_bitmask, 0, sizeof(rlevel_bitmask));
- if (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM)) {
- bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank_aside;
- bdk_lmcx_modereg_params0_t lmc_modereg_params0;
+ if (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM)) {
+ bdk_lmcx_rlevel_rankx_t lmc_rlevel_rank_aside;
+ bdk_lmcx_modereg_params0_t lmc_modereg_params0;
- /* A-side */
- lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
- lmc_modereg_params0.s.mprloc = 0; /* MPR Page 0 Location 0 */
- DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+ /* A-side */
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+ lmc_modereg_params0.s.mprloc = 0; /* MPR Page 0 Location 0 */
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
- /* Clear read-level delays */
- DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
+ /* Clear read-level delays */
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
- perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1); /* read-leveling */
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1); /* read-leveling */
- if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
- BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
- status, ==, 3, 1000000))
- {
- error_print("ERROR: Timeout waiting for RLEVEL\n");
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for RLEVEL\n");
- }
- lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ }
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
- lmc_rlevel_rank_aside.u = lmc_rlevel_rank.u;
+ lmc_rlevel_rank_aside.u = lmc_rlevel_rank.u;
- rlevel_bitmask[0].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 0);
- rlevel_bitmask[1].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 1);
- rlevel_bitmask[2].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 2);
- rlevel_bitmask[3].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 3);
- rlevel_bitmask[8].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 8);
- /* A-side complete */
+ rlevel_bitmask[0].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 0);
+ rlevel_bitmask[1].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 1);
+ rlevel_bitmask[2].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 2);
+ rlevel_bitmask[3].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 3);
+ rlevel_bitmask[8].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 8);
+ /* A-side complete */
- /* B-side */
- lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
- lmc_modereg_params0.s.mprloc = 3; /* MPR Page 0 Location 3 */
- DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+ /* B-side */
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+ lmc_modereg_params0.s.mprloc = 3; /* MPR Page 0 Location 3 */
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
- /* Clear read-level delays */
- DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
+ /* Clear read-level delays */
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), 0);
- perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1); /* read-leveling */
+ perform_octeon3_ddr3_sequence(node, 1 << rankx, ddr_interface_num, 1); /* read-leveling */
- if (!bdk_is_platform(BDK_PLATFORM_ASIM) &&
- BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
- status, ==, 3, 1000000))
- {
- error_print("ERROR: Timeout waiting for RLEVEL\n");
- }
- lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ if (BDK_CSR_WAIT_FOR_FIELD(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx),
+ status, ==, 3, 1000000))
+ {
+ error_print("ERROR: Timeout waiting for RLEVEL\n");
+ }
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
- rlevel_bitmask[4].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 4);
- rlevel_bitmask[5].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 5);
- rlevel_bitmask[6].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 6);
- rlevel_bitmask[7].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 7);
- /* B-side complete */
+ rlevel_bitmask[4].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 4);
+ rlevel_bitmask[5].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 5);
+ rlevel_bitmask[6].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 6);
+ rlevel_bitmask[7].bm = octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, 7);
+ /* B-side complete */
- update_rlevel_rank_struct(&lmc_rlevel_rank, 0, lmc_rlevel_rank_aside.cn83xx.byte0);
- update_rlevel_rank_struct(&lmc_rlevel_rank, 1, lmc_rlevel_rank_aside.cn83xx.byte1);
- update_rlevel_rank_struct(&lmc_rlevel_rank, 2, lmc_rlevel_rank_aside.cn83xx.byte2);
- update_rlevel_rank_struct(&lmc_rlevel_rank, 3, lmc_rlevel_rank_aside.cn83xx.byte3);
- update_rlevel_rank_struct(&lmc_rlevel_rank, 8, lmc_rlevel_rank_aside.cn83xx.byte8); /* ECC A-side */
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 0, lmc_rlevel_rank_aside.cn83xx.byte0);
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 1, lmc_rlevel_rank_aside.cn83xx.byte1);
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 2, lmc_rlevel_rank_aside.cn83xx.byte2);
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 3, lmc_rlevel_rank_aside.cn83xx.byte3);
+ update_rlevel_rank_struct(&lmc_rlevel_rank, 8, lmc_rlevel_rank_aside.cn83xx.byte8); /* ECC A-side */
- lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
- lmc_modereg_params0.s.mprloc = 0; /* MPR Page 0 Location 0 */
- DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
+ lmc_modereg_params0.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num));
+ lmc_modereg_params0.s.mprloc = 0; /* MPR Page 0 Location 0 */
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
- } /* if (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM)) */
+ } /* if (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM)) */
- /*
+ /*
* Evaluate the quality of the read-leveling delays from the bitmasks.
* Also save off a software computed read-leveling mask that may be
* used later to qualify the delay results from Octeon.
*/
- for (byte_idx = 0; byte_idx < (8+ecc_ena); ++byte_idx) {
+ for (byte_idx = 0; byte_idx < (8+ecc_ena); ++byte_idx) {
int bmerr;
- if (!(ddr_interface_bytemask&(1<<byte_idx)))
- continue;
- if (! (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM))) {
- rlevel_bitmask[byte_idx].bm =
- octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, byte_idx);
- }
+ if (!(ddr_interface_bytemask&(1<<byte_idx)))
+ continue;
+ if (! (rlevel_separate_ab && spd_rdimm && (ddr_type == DDR4_DRAM))) {
+ rlevel_bitmask[byte_idx].bm =
+ octeon_read_lmcx_ddr3_rlevel_dbg(node, ddr_interface_num, byte_idx);
+ }
bmerr = validate_ddr3_rlevel_bitmask(&rlevel_bitmask[byte_idx], ddr_type);
- rlevel_bitmask[byte_idx].errs = bmerr;
- rlevel_bitmask_errors += bmerr;
+ rlevel_bitmask[byte_idx].errs = bmerr;
+ rlevel_bitmask_errors += bmerr;
#if PERFECT_BITMASK_COUNTING
if ((ddr_type == DDR4_DRAM) && !bmerr) { // count only the "perfect" bitmasks
// FIXME: could optimize this a bit?
@@ -6326,51 +6326,51 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
rank_perfect_counts[rankx].total[byte_idx] += 1;
}
#endif /* PERFECT_BITMASK_COUNTING */
- }
-
- /* Set delays for unused bytes to match byte 0. */
- for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
- if (ddr_interface_bytemask & (1 << byte_idx))
- continue;
- update_rlevel_rank_struct(&lmc_rlevel_rank, byte_idx, lmc_rlevel_rank.cn83xx.byte0);
- }
-
- /* Save a copy of the byte delays in physical
- order for sequential evaluation. */
- unpack_rlevel_settings(ddr_interface_bytemask, ecc_ena, rlevel_byte, lmc_rlevel_rank);
+ }
+
+ /* Set delays for unused bytes to match byte 0. */
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ if (ddr_interface_bytemask & (1 << byte_idx))
+ continue;
+ update_rlevel_rank_struct(&lmc_rlevel_rank, byte_idx, lmc_rlevel_rank.cn83xx.byte0);
+ }
+
+ /* Save a copy of the byte delays in physical
+ order for sequential evaluation. */
+ unpack_rlevel_settings(ddr_interface_bytemask, ecc_ena, rlevel_byte, lmc_rlevel_rank);
redo_nonseq_errs:
rlevel_nonseq_errors = 0;
- if (! disable_sequential_delay_check) {
- if ((ddr_interface_bytemask & 0xff) == 0xff) {
- /* Evaluate delay sequence across the whole range of bytes for standard dimms. */
- if ((spd_dimm_type == 1) || (spd_dimm_type == 5)) { /* 1=RDIMM, 5=Mini-RDIMM */
- int register_adjacent_delay = _abs(rlevel_byte[4].delay - rlevel_byte[5].delay);
- /* Registered dimm topology routes from the center. */
- rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 3+ecc_ena,
- maximum_adjacent_rlevel_delay_increment);
- rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 5, 7+ecc_ena,
- maximum_adjacent_rlevel_delay_increment);
+ if (! disable_sequential_delay_check) {
+ if ((ddr_interface_bytemask & 0xff) == 0xff) {
+ /* Evaluate delay sequence across the whole range of bytes for standard dimms. */
+ if ((spd_dimm_type == 1) || (spd_dimm_type == 5)) { /* 1=RDIMM, 5=Mini-RDIMM */
+ int register_adjacent_delay = _abs(rlevel_byte[4].delay - rlevel_byte[5].delay);
+ /* Registered dimm topology routes from the center. */
+ rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 3+ecc_ena,
+ maximum_adjacent_rlevel_delay_increment);
+ rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 5, 7+ecc_ena,
+ maximum_adjacent_rlevel_delay_increment);
// byte 5 sqerrs never gets cleared for RDIMMs
rlevel_byte[5].sqerrs = 0;
- if (register_adjacent_delay > 1) {
- /* Assess proximity of bytes on opposite sides of register */
- rlevel_nonseq_errors += (register_adjacent_delay-1) * RLEVEL_ADJACENT_DELAY_ERROR;
+ if (register_adjacent_delay > 1) {
+ /* Assess proximity of bytes on opposite sides of register */
+ rlevel_nonseq_errors += (register_adjacent_delay-1) * RLEVEL_ADJACENT_DELAY_ERROR;
// update byte 5 error
- rlevel_byte[5].sqerrs += (register_adjacent_delay-1) * RLEVEL_ADJACENT_DELAY_ERROR;
+ rlevel_byte[5].sqerrs += (register_adjacent_delay-1) * RLEVEL_ADJACENT_DELAY_ERROR;
}
- }
- if ((spd_dimm_type == 2) || (spd_dimm_type == 6)) { /* 2=UDIMM, 6=Mini-UDIMM */
- /* Unbuffered dimm topology routes from end to end. */
- rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 7+ecc_ena,
- maximum_adjacent_rlevel_delay_increment);
- }
- } else {
- rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 3+ecc_ena,
- maximum_adjacent_rlevel_delay_increment);
- }
- } /* if (! disable_sequential_delay_check) */
+ }
+ if ((spd_dimm_type == 2) || (spd_dimm_type == 6)) { /* 2=UDIMM, 6=Mini-UDIMM */
+ /* Unbuffered dimm topology routes from end to end. */
+ rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 7+ecc_ena,
+ maximum_adjacent_rlevel_delay_increment);
+ }
+ } else {
+ rlevel_nonseq_errors += nonsequential_delays(rlevel_byte, 0, 3+ecc_ena,
+ maximum_adjacent_rlevel_delay_increment);
+ }
+ } /* if (! disable_sequential_delay_check) */
#if 0
// FIXME FIXME: disabled for now, it was too much...
@@ -6390,16 +6390,16 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
rlevel_rank_errors = rlevel_bitmask_errors + rlevel_nonseq_errors;
#endif
- // print original sample here only if we are not really averaging or picking best
+ // print original sample here only if we are not really averaging or picking best
// also do not print if we were redoing the NONSEQ score for using COMPUTED
- if (!redoing_nonseq_errs && ((rlevel_avg_loops < 2) || dram_is_verbose(VBL_DEV2))) {
+ if (!redoing_nonseq_errs && ((rlevel_avg_loops < 2) || dram_is_verbose(VBL_DEV2))) {
display_RL_BM(node, ddr_interface_num, rankx, rlevel_bitmask, ecc_ena);
display_RL_BM_scores(node, ddr_interface_num, rankx, rlevel_bitmask, ecc_ena);
display_RL_SEQ_scores(node, ddr_interface_num, rankx, rlevel_byte, ecc_ena);
- display_RL_with_score(node, ddr_interface_num, lmc_rlevel_rank, rankx, rlevel_rank_errors);
- }
+ display_RL_with_score(node, ddr_interface_num, lmc_rlevel_rank, rankx, rlevel_rank_errors);
+ }
- if (ddr_rlevel_compute) {
+ if (ddr_rlevel_compute) {
if (!redoing_nonseq_errs) {
/* Recompute the delays based on the bitmask */
for (byte_idx = 0; byte_idx < (8+ecc_ena); ++byte_idx) {
@@ -6425,77 +6425,77 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
rlevel_rank_errors);
}
}
- } /* if (ddr_rlevel_compute) */
+ } /* if (ddr_rlevel_compute) */
- } // end bitmask interpretation block
+ } // end bitmask interpretation block
#if PICK_BEST_RANK_SCORE_NOT_AVG
- // if it is a better (lower) score, then keep it
- if (rlevel_rank_errors < rlevel_best_rank_score) {
- rlevel_best_rank_score = rlevel_rank_errors;
+ // if it is a better (lower) score, then keep it
+ if (rlevel_rank_errors < rlevel_best_rank_score) {
+ rlevel_best_rank_score = rlevel_rank_errors;
- // save the new best delays and best errors
- for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
- rlevel_byte[byte_idx].best = rlevel_byte[byte_idx].delay;
- rlevel_byte[byte_idx].bestsq = rlevel_byte[byte_idx].sqerrs;
+ // save the new best delays and best errors
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ rlevel_byte[byte_idx].best = rlevel_byte[byte_idx].delay;
+ rlevel_byte[byte_idx].bestsq = rlevel_byte[byte_idx].sqerrs;
// save bitmasks and their scores as well
// xlate UNPACKED index to PACKED index to get from rlevel_bitmask
- rlevel_byte[byte_idx].bm = rlevel_bitmask[XUP(byte_idx, !!ecc_ena)].bm;
- rlevel_byte[byte_idx].bmerrs = rlevel_bitmask[XUP(byte_idx, !!ecc_ena)].errs;
- }
- }
+ rlevel_byte[byte_idx].bm = rlevel_bitmask[XUP(byte_idx, !!ecc_ena)].bm;
+ rlevel_byte[byte_idx].bmerrs = rlevel_bitmask[XUP(byte_idx, !!ecc_ena)].errs;
+ }
+ }
#else /* PICK_BEST_RANK_SCORE_NOT_AVG */
- /* Accumulate the total score across averaging loops for this setting */
- debug_print("rlevel_scoreboard[rtt_nom=%d][rodt_ctl=%d][rankx=%d].score: %d [%d]\n",
- rtt_nom, rodt_ctl, rankx, rlevel_rank_errors, average_loops);
- rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score += rlevel_rank_errors;
-
- /* Accumulate the delay totals and loop counts
- necessary to compute average delay results */
- for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
- if (rlevel_byte[byte_idx].delay != 0) { /* Don't include delay=0 in the average */
- ++rlevel_byte[byte_idx].loop_count;
- rlevel_byte[byte_idx].loop_total += rlevel_byte[byte_idx].delay;
- }
- } /* for (byte_idx = 0; byte_idx < 9; ++byte_idx) */
+ /* Accumulate the total score across averaging loops for this setting */
+ debug_print("rlevel_scoreboard[rtt_nom=%d][rodt_ctl=%d][rankx=%d].score: %d [%d]\n",
+ rtt_nom, rodt_ctl, rankx, rlevel_rank_errors, average_loops);
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score += rlevel_rank_errors;
+
+ /* Accumulate the delay totals and loop counts
+ necessary to compute average delay results */
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ if (rlevel_byte[byte_idx].delay != 0) { /* Don't include delay=0 in the average */
+ ++rlevel_byte[byte_idx].loop_count;
+ rlevel_byte[byte_idx].loop_total += rlevel_byte[byte_idx].delay;
+ }
+ } /* for (byte_idx = 0; byte_idx < 9; ++byte_idx) */
#endif /* PICK_BEST_RANK_SCORE_NOT_AVG */
- rlevel_rodt_errors += rlevel_rank_errors;
+ rlevel_rodt_errors += rlevel_rank_errors;
- } /* for (average_loops = 0; average_loops < rlevel_avg_loops; average_loops++) */
+ } /* for (average_loops = 0; average_loops < rlevel_avg_loops; average_loops++) */
#if PICK_BEST_RANK_SCORE_NOT_AVG
- /* We recorded the best score across the averaging loops */
- rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score = rlevel_best_rank_score;
+ /* We recorded the best score across the averaging loops */
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score = rlevel_best_rank_score;
- /* Restore the delays from the best fields that go with the best score */
- for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
- rlevel_byte[byte_idx].delay = rlevel_byte[byte_idx].best;
- rlevel_byte[byte_idx].sqerrs = rlevel_byte[byte_idx].bestsq;
- }
+ /* Restore the delays from the best fields that go with the best score */
+ for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
+ rlevel_byte[byte_idx].delay = rlevel_byte[byte_idx].best;
+ rlevel_byte[byte_idx].sqerrs = rlevel_byte[byte_idx].bestsq;
+ }
#else /* PICK_BEST_RANK_SCORE_NOT_AVG */
- /* Compute the average score across averaging loops */
- rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score =
- divide_nint(rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score, rlevel_avg_loops);
-
- /* Compute the average delay results */
- for (byte_idx=0; byte_idx < 9; ++byte_idx) {
- if (rlevel_byte[byte_idx].loop_count == 0)
- rlevel_byte[byte_idx].loop_count = 1;
- rlevel_byte[byte_idx].delay = divide_nint(rlevel_byte[byte_idx].loop_total,
- rlevel_byte[byte_idx].loop_count);
- }
+ /* Compute the average score across averaging loops */
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score =
+ divide_nint(rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score, rlevel_avg_loops);
+
+ /* Compute the average delay results */
+ for (byte_idx=0; byte_idx < 9; ++byte_idx) {
+ if (rlevel_byte[byte_idx].loop_count == 0)
+ rlevel_byte[byte_idx].loop_count = 1;
+ rlevel_byte[byte_idx].delay = divide_nint(rlevel_byte[byte_idx].loop_total,
+ rlevel_byte[byte_idx].loop_count);
+ }
#endif /* PICK_BEST_RANK_SCORE_NOT_AVG */
- lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
- pack_rlevel_settings(ddr_interface_bytemask, ecc_ena, rlevel_byte, &lmc_rlevel_rank);
+ pack_rlevel_settings(ddr_interface_bytemask, ecc_ena, rlevel_byte, &lmc_rlevel_rank);
- if (rlevel_avg_loops > 1) {
+ if (rlevel_avg_loops > 1) {
#if PICK_BEST_RANK_SCORE_NOT_AVG
// restore the "best" bitmasks and their scores for printing
for (byte_idx = 0; byte_idx < 9; ++byte_idx) {
@@ -6518,93 +6518,93 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
WITH_RODT_BESTSCORE);
#else /* PICK_BEST_RANK_SCORE_NOT_AVG */
- display_RL_with_average(node, ddr_interface_num, lmc_rlevel_rank, rankx,
- rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score);
+ display_RL_with_average(node, ddr_interface_num, lmc_rlevel_rank, rankx,
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score);
#endif /* PICK_BEST_RANK_SCORE_NOT_AVG */
- } /* if (rlevel_avg_loops > 1) */
+ } /* if (rlevel_avg_loops > 1) */
- rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].setting = lmc_rlevel_rank.u;
+ rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].setting = lmc_rlevel_rank.u;
} /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
} /* for (rankx = 0; rankx < dimm_count*4; rankx++) */
- } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<max_rtt_nom_idx; ++rtt_idx) */
-
-
- /* Re-enable dynamic compensation settings. */
- if (rlevel_comp_offset != 0) {
- lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
-
- lmc_comp_ctl2.s.ptune = 0;
- lmc_comp_ctl2.s.ntune = 0;
- lmc_comp_ctl2.s.byp = 0; /* Disable bypass mode */
- DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
- BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read once */
-
- lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
- ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
- lmc_comp_ctl2.s.ddr__ptune, lmc_comp_ctl2.s.ddr__ntune);
-
- lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
- lmc_control.s.int_zqcs_dis = saved_int_zqcs_dis; /* Restore original setting */
- DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
-
- }
-
-
- {
- int override_compensation = 0;
- if ((s = lookup_env_parameter("ddr__ptune")) != NULL) {
- saved_ddr__ptune = strtoul(s, NULL, 0);
- override_compensation = 1;
- }
- if ((s = lookup_env_parameter("ddr__ntune")) != NULL) {
- saved_ddr__ntune = strtoul(s, NULL, 0);
- override_compensation = 1;
- }
- if (override_compensation) {
- lmc_comp_ctl2.s.ptune = saved_ddr__ptune;
- lmc_comp_ctl2.s.ntune = saved_ddr__ntune;
-
- lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
- saved_int_zqcs_dis = lmc_control.s.int_zqcs_dis;
- lmc_control.s.int_zqcs_dis = 1; /* Disable ZQCS while in bypass. */
- DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
-
- lmc_comp_ctl2.s.byp = 1; /* Enable bypass mode */
- DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
- lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
-
- ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
- lmc_comp_ctl2.s.ptune, lmc_comp_ctl2.s.ntune);
- }
- }
- { /* Evaluation block */
- int best_rodt_score = DEFAULT_BEST_RANK_SCORE; /* Start with an arbitrarily high score */
- int auto_rodt_ctl = 0;
- int auto_rtt_nom = 0;
- int rodt_score;
- int rodt_row_skip_mask = 0;
-
- // just add specific RODT rows to the skip mask for DDR4 at this time...
- if (ddr_type == DDR4_DRAM) {
- rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_34_ohm); // skip RODT row 34 ohms for all DDR4 types
- rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_40_ohm); // skip RODT row 40 ohms for all DDR4 types
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<max_rtt_nom_idx; ++rtt_idx) */
+
+
+ /* Re-enable dynamic compensation settings. */
+ if (rlevel_comp_offset != 0) {
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+
+ lmc_comp_ctl2.s.ptune = 0;
+ lmc_comp_ctl2.s.ntune = 0;
+ lmc_comp_ctl2.s.byp = 0; /* Disable bypass mode */
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read once */
+
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
+ ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
+ lmc_comp_ctl2.s.ddr__ptune, lmc_comp_ctl2.s.ddr__ntune);
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ lmc_control.s.int_zqcs_dis = saved_int_zqcs_dis; /* Restore original setting */
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ }
+
+
+ {
+ int override_compensation = 0;
+ if ((s = lookup_env_parameter("ddr__ptune")) != NULL) {
+ saved_ddr__ptune = strtoul(s, NULL, 0);
+ override_compensation = 1;
+ }
+ if ((s = lookup_env_parameter("ddr__ntune")) != NULL) {
+ saved_ddr__ntune = strtoul(s, NULL, 0);
+ override_compensation = 1;
+ }
+ if (override_compensation) {
+ lmc_comp_ctl2.s.ptune = saved_ddr__ptune;
+ lmc_comp_ctl2.s.ntune = saved_ddr__ntune;
+
+ lmc_control.u = BDK_CSR_READ(node, BDK_LMCX_CONTROL(ddr_interface_num));
+ saved_int_zqcs_dis = lmc_control.s.int_zqcs_dis;
+ lmc_control.s.int_zqcs_dis = 1; /* Disable ZQCS while in bypass. */
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
+
+ lmc_comp_ctl2.s.byp = 1; /* Enable bypass mode */
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num)); /* Read again */
+
+ ddr_print("DDR__PTUNE/DDR__NTUNE : %d/%d\n",
+ lmc_comp_ctl2.s.ptune, lmc_comp_ctl2.s.ntune);
+ }
+ }
+ { /* Evaluation block */
+ int best_rodt_score = DEFAULT_BEST_RANK_SCORE; /* Start with an arbitrarily high score */
+ int auto_rodt_ctl = 0;
+ int auto_rtt_nom = 0;
+ int rodt_score;
+ int rodt_row_skip_mask = 0;
+
+ // just add specific RODT rows to the skip mask for DDR4 at this time...
+ if (ddr_type == DDR4_DRAM) {
+ rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_34_ohm); // skip RODT row 34 ohms for all DDR4 types
+ rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_40_ohm); // skip RODT row 40 ohms for all DDR4 types
#if ADD_48_OHM_SKIP
- rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_48_ohm); // skip RODT row 48 ohms for all DDR4 types
+ rodt_row_skip_mask |= (1 << ddr4_rodt_ctl_48_ohm); // skip RODT row 48 ohms for all DDR4 types
#endif /* ADD_48OHM_SKIP */
#if NOSKIP_40_48_OHM
- // For now, do not skip RODT row 40 or 48 ohm when ddr_hertz is above 1075 MHz
- if (ddr_hertz > 1075000000) {
- rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_40_ohm); // noskip RODT row 40 ohms
- rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_48_ohm); // noskip RODT row 48 ohms
- }
+ // For now, do not skip RODT row 40 or 48 ohm when ddr_hertz is above 1075 MHz
+ if (ddr_hertz > 1075000000) {
+ rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_40_ohm); // noskip RODT row 40 ohms
+ rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_48_ohm); // noskip RODT row 48 ohms
+ }
#endif /* NOSKIP_40_48_OHM */
#if NOSKIP_48_STACKED
- // For now, do not skip RODT row 48 ohm for 2Rx4 stacked die DIMMs
- if ((is_stacked_die) && (num_ranks == 2) && (dram_width == 4)) {
- rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_48_ohm); // noskip RODT row 48 ohms
- }
+ // For now, do not skip RODT row 48 ohm for 2Rx4 stacked die DIMMs
+ if ((is_stacked_die) && (num_ranks == 2) && (dram_width == 4)) {
+ rodt_row_skip_mask &= ~(1 << ddr4_rodt_ctl_48_ohm); // noskip RODT row 48 ohms
+ }
#endif /* NOSKIP_48_STACKED */
#if NOSKIP_FOR_MINI
// for now, leave all rows eligible when we have mini-DIMMs...
@@ -6618,65 +6618,65 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
rodt_row_skip_mask = 0;
}
#endif /* NOSKIP_FOR_2S_1R */
- }
-
- VB_PRT(VBL_DEV, "Evaluating Read-Leveling Scoreboard for AUTO settings.\n");
- for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) {
- rtt_nom = imp_values->rtt_nom_table[rtt_idx];
-
- /* When the read ODT mask is zero the dyn_rtt_nom_mask is
- zero than RTT_NOM will not be changing during
- read-leveling. Since the value is fixed we only need
- to test it once. */
- if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
- continue;
-
- for (rodt_ctl=max_rodt_ctl; rodt_ctl>=min_rodt_ctl; --rodt_ctl) {
- rodt_score = 0;
- for (rankx = 0; rankx < dimm_count * 4;rankx++) {
- if (!(rank_mask & (1 << rankx)))
- continue;
- debug_print("rlevel_scoreboard[rtt_nom=%d][rodt_ctl=%d][rankx=%d].score:%d\n",
- rtt_nom, rodt_ctl, rankx, rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score);
- rodt_score += rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score;
- }
- // FIXME: do we need to skip RODT rows here, like we do below in the by-RANK settings?
-
- /* When using automatic ODT settings use the ODT
- settings associated with the best score for
- all of the tested ODT combinations. */
-
- if ((rodt_score < best_rodt_score) || // always take lower score, OR
- ((rodt_score == best_rodt_score) && // take same score if RODT ohms are higher
- (imp_values->rodt_ohms[rodt_ctl] > imp_values->rodt_ohms[auto_rodt_ctl])))
- {
- debug_print("AUTO: new best score for rodt:%d (%3d), new score:%d, previous score:%d\n",
- rodt_ctl, imp_values->rodt_ohms[rodt_ctl], rodt_score, best_rodt_score);
- best_rodt_score = rodt_score;
- auto_rodt_ctl = rodt_ctl;
- auto_rtt_nom = rtt_nom;
- }
- } /* for (rodt_ctl=max_rodt_ctl; rodt_ctl>=min_rodt_ctl; --rodt_ctl) */
- } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
-
- lmc_modereg_params1.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num));
-
- if (ddr_rtt_nom_auto) {
- /* Store the automatically set RTT_NOM value */
- if (dyn_rtt_nom_mask & 1) lmc_modereg_params1.s.rtt_nom_00 = auto_rtt_nom;
- if (dyn_rtt_nom_mask & 2) lmc_modereg_params1.s.rtt_nom_01 = auto_rtt_nom;
- if (dyn_rtt_nom_mask & 4) lmc_modereg_params1.s.rtt_nom_10 = auto_rtt_nom;
- if (dyn_rtt_nom_mask & 8) lmc_modereg_params1.s.rtt_nom_11 = auto_rtt_nom;
- } else {
- /* restore the manual settings to the register */
- lmc_modereg_params1.s.rtt_nom_00 = default_rtt_nom[0];
- lmc_modereg_params1.s.rtt_nom_01 = default_rtt_nom[1];
- lmc_modereg_params1.s.rtt_nom_10 = default_rtt_nom[2];
- lmc_modereg_params1.s.rtt_nom_11 = default_rtt_nom[3];
- }
-
- DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num), lmc_modereg_params1.u);
- VB_PRT(VBL_DEV, "RTT_NOM %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ }
+
+ VB_PRT(VBL_DEV, "Evaluating Read-Leveling Scoreboard for AUTO settings.\n");
+ for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+
+ /* When the read ODT mask is zero the dyn_rtt_nom_mask is
+ zero than RTT_NOM will not be changing during
+ read-leveling. Since the value is fixed we only need
+ to test it once. */
+ if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
+ continue;
+
+ for (rodt_ctl=max_rodt_ctl; rodt_ctl>=min_rodt_ctl; --rodt_ctl) {
+ rodt_score = 0;
+ for (rankx = 0; rankx < dimm_count * 4;rankx++) {
+ if (!(rank_mask & (1 << rankx)))
+ continue;
+ debug_print("rlevel_scoreboard[rtt_nom=%d][rodt_ctl=%d][rankx=%d].score:%d\n",
+ rtt_nom, rodt_ctl, rankx, rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score);
+ rodt_score += rlevel_scoreboard[rtt_nom][rodt_ctl][rankx].score;
+ }
+ // FIXME: do we need to skip RODT rows here, like we do below in the by-RANK settings?
+
+ /* When using automatic ODT settings use the ODT
+ settings associated with the best score for
+ all of the tested ODT combinations. */
+
+ if ((rodt_score < best_rodt_score) || // always take lower score, OR
+ ((rodt_score == best_rodt_score) && // take same score if RODT ohms are higher
+ (imp_values->rodt_ohms[rodt_ctl] > imp_values->rodt_ohms[auto_rodt_ctl])))
+ {
+ debug_print("AUTO: new best score for rodt:%d (%3d), new score:%d, previous score:%d\n",
+ rodt_ctl, imp_values->rodt_ohms[rodt_ctl], rodt_score, best_rodt_score);
+ best_rodt_score = rodt_score;
+ auto_rodt_ctl = rodt_ctl;
+ auto_rtt_nom = rtt_nom;
+ }
+ } /* for (rodt_ctl=max_rodt_ctl; rodt_ctl>=min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
+
+ lmc_modereg_params1.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num));
+
+ if (ddr_rtt_nom_auto) {
+ /* Store the automatically set RTT_NOM value */
+ if (dyn_rtt_nom_mask & 1) lmc_modereg_params1.s.rtt_nom_00 = auto_rtt_nom;
+ if (dyn_rtt_nom_mask & 2) lmc_modereg_params1.s.rtt_nom_01 = auto_rtt_nom;
+ if (dyn_rtt_nom_mask & 4) lmc_modereg_params1.s.rtt_nom_10 = auto_rtt_nom;
+ if (dyn_rtt_nom_mask & 8) lmc_modereg_params1.s.rtt_nom_11 = auto_rtt_nom;
+ } else {
+ /* restore the manual settings to the register */
+ lmc_modereg_params1.s.rtt_nom_00 = default_rtt_nom[0];
+ lmc_modereg_params1.s.rtt_nom_01 = default_rtt_nom[1];
+ lmc_modereg_params1.s.rtt_nom_10 = default_rtt_nom[2];
+ lmc_modereg_params1.s.rtt_nom_11 = default_rtt_nom[3];
+ }
+
+ DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS1(ddr_interface_num), lmc_modereg_params1.u);
+ VB_PRT(VBL_DEV, "RTT_NOM %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_11],
imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_10],
imp_values->rtt_nom_ohms[lmc_modereg_params1.s.rtt_nom_01],
@@ -6686,7 +6686,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_modereg_params1.s.rtt_nom_01,
lmc_modereg_params1.s.rtt_nom_00);
- VB_PRT(VBL_DEV, "RTT_WR %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ VB_PRT(VBL_DEV, "RTT_WR %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 3)],
imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 2)],
imp_values->rtt_wr_ohms[EXTR_WR(lmc_modereg_params1.u, 1)],
@@ -6696,7 +6696,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
EXTR_WR(lmc_modereg_params1.u, 1),
EXTR_WR(lmc_modereg_params1.u, 0));
- VB_PRT(VBL_DEV, "DIC %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ VB_PRT(VBL_DEV, "DIC %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
imp_values->dic_ohms[lmc_modereg_params1.s.dic_11],
imp_values->dic_ohms[lmc_modereg_params1.s.dic_10],
imp_values->dic_ohms[lmc_modereg_params1.s.dic_01],
@@ -6706,167 +6706,167 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_modereg_params1.s.dic_01,
lmc_modereg_params1.s.dic_00);
- if (ddr_type == DDR4_DRAM) {
- bdk_lmcx_modereg_params2_t lmc_modereg_params2;
- /*
- * We must read the CSR, and not depend on odt_config[odt_idx].odt_mask2,
- * since we could have overridden values with envvars.
- * NOTE: this corrects the printout, since the CSR is not written with the old values...
- */
- lmc_modereg_params2.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num));
-
- VB_PRT(VBL_DEV, "RTT_PARK %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
- imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_11],
- imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_10],
- imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_01],
- imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_00],
- lmc_modereg_params2.s.rtt_park_11,
- lmc_modereg_params2.s.rtt_park_10,
- lmc_modereg_params2.s.rtt_park_01,
- lmc_modereg_params2.s.rtt_park_00);
-
- VB_PRT(VBL_DEV, "%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_RANGE",
- lmc_modereg_params2.s.vref_range_11,
- lmc_modereg_params2.s.vref_range_10,
- lmc_modereg_params2.s.vref_range_01,
- lmc_modereg_params2.s.vref_range_00);
-
- VB_PRT(VBL_DEV, "%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_VALUE",
- lmc_modereg_params2.s.vref_value_11,
- lmc_modereg_params2.s.vref_value_10,
- lmc_modereg_params2.s.vref_value_01,
- lmc_modereg_params2.s.vref_value_00);
- }
-
- lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
- if (ddr_rodt_ctl_auto)
- lmc_comp_ctl2.s.rodt_ctl = auto_rodt_ctl;
- else
- lmc_comp_ctl2.s.rodt_ctl = default_rodt_ctl; // back to the original setting
- DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
- lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
- VB_PRT(VBL_DEV, "Read ODT_CTL : 0x%x (%d ohms)\n",
- lmc_comp_ctl2.s.rodt_ctl, imp_values->rodt_ohms[lmc_comp_ctl2.s.rodt_ctl]);
-
- ////////////////// this is the start of the RANK MAJOR LOOP
-
- for (rankx = 0; rankx < dimm_count * 4; rankx++) {
- int best_rank_score = DEFAULT_BEST_RANK_SCORE; /* Start with an arbitrarily high score */
- int best_rank_rtt_nom = 0;
- //int best_rank_nom_ohms = 0;
- int best_rank_ctl = 0;
- int best_rank_ohms = 0;
- int best_rankx = 0;
-
- if (!(rank_mask & (1 << rankx)))
- continue;
+ if (ddr_type == DDR4_DRAM) {
+ bdk_lmcx_modereg_params2_t lmc_modereg_params2;
+ /*
+ * We must read the CSR, and not depend on odt_config[odt_idx].odt_mask2,
+ * since we could have overridden values with envvars.
+ * NOTE: this corrects the printout, since the CSR is not written with the old values...
+ */
+ lmc_modereg_params2.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num));
+
+ VB_PRT(VBL_DEV, "RTT_PARK %3d, %3d, %3d, %3d ohms : %x,%x,%x,%x\n",
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_11],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_10],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_01],
+ imp_values->rtt_nom_ohms[lmc_modereg_params2.s.rtt_park_00],
+ lmc_modereg_params2.s.rtt_park_11,
+ lmc_modereg_params2.s.rtt_park_10,
+ lmc_modereg_params2.s.rtt_park_01,
+ lmc_modereg_params2.s.rtt_park_00);
+
+ VB_PRT(VBL_DEV, "%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_RANGE",
+ lmc_modereg_params2.s.vref_range_11,
+ lmc_modereg_params2.s.vref_range_10,
+ lmc_modereg_params2.s.vref_range_01,
+ lmc_modereg_params2.s.vref_range_00);
+
+ VB_PRT(VBL_DEV, "%-45s : 0x%x,0x%x,0x%x,0x%x\n", "VREF_VALUE",
+ lmc_modereg_params2.s.vref_value_11,
+ lmc_modereg_params2.s.vref_value_10,
+ lmc_modereg_params2.s.vref_value_01,
+ lmc_modereg_params2.s.vref_value_00);
+ }
+
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ if (ddr_rodt_ctl_auto)
+ lmc_comp_ctl2.s.rodt_ctl = auto_rodt_ctl;
+ else
+ lmc_comp_ctl2.s.rodt_ctl = default_rodt_ctl; // back to the original setting
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(ddr_interface_num), lmc_comp_ctl2.u);
+ lmc_comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(ddr_interface_num));
+ VB_PRT(VBL_DEV, "Read ODT_CTL : 0x%x (%d ohms)\n",
+ lmc_comp_ctl2.s.rodt_ctl, imp_values->rodt_ohms[lmc_comp_ctl2.s.rodt_ctl]);
+
+ ////////////////// this is the start of the RANK MAJOR LOOP
+
+ for (rankx = 0; rankx < dimm_count * 4; rankx++) {
+ int best_rank_score = DEFAULT_BEST_RANK_SCORE; /* Start with an arbitrarily high score */
+ int best_rank_rtt_nom = 0;
+ //int best_rank_nom_ohms = 0;
+ int best_rank_ctl = 0;
+ int best_rank_ohms = 0;
+ int best_rankx = 0;
+
+ if (!(rank_mask & (1 << rankx)))
+ continue;
/* Use the delays associated with the best score for each individual rank */
VB_PRT(VBL_TME, "Evaluating Read-Leveling Scoreboard for Rank %d settings.\n", rankx);
- // some of the rank-related loops below need to operate only on the ranks of a single DIMM,
- // so create a mask for their use here
- int dimm_rank_mask;
- if (num_ranks == 4)
- dimm_rank_mask = rank_mask; // should be 1111
- else {
- dimm_rank_mask = rank_mask & 3; // should be 01 or 11
- if (rankx >= 2)
- dimm_rank_mask <<= 2; // doing a rank on the second DIMM, should be 0100 or 1100
- }
- debug_print("DIMM rank mask: 0x%x, rank mask: 0x%x, rankx: %d\n", dimm_rank_mask, rank_mask, rankx);
-
- ////////////////// this is the start of the BEST ROW SCORE LOOP
-
- for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
- //int rtt_nom_ohms;
- rtt_nom = imp_values->rtt_nom_table[rtt_idx];
- //rtt_nom_ohms = imp_values->rtt_nom_ohms[rtt_nom];
-
- /* When the read ODT mask is zero the dyn_rtt_nom_mask is
- zero than RTT_NOM will not be changing during
- read-leveling. Since the value is fixed we only need
- to test it once. */
- if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
- continue;
-
- debug_print("N%d.LMC%d.R%d: starting RTT_NOM %d (%d)\n",
- node, ddr_interface_num, rankx, rtt_nom, rtt_nom_ohms);
-
- for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
- int next_ohms = imp_values->rodt_ohms[rodt_ctl];
-
- // skip RODT rows in mask, but *NOT* rows with too high a score;
- // we will not use the skipped ones for printing or evaluating, but
- // we need to allow all the non-skipped ones to be candidates for "best"
- if (((1 << rodt_ctl) & rodt_row_skip_mask) != 0) {
- debug_print("N%d.LMC%d.R%d: SKIPPING rodt:%d (%d) with rank_score:%d\n",
- node, ddr_interface_num, rankx, rodt_ctl, next_ohms, next_score);
- continue;
- }
- for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // this is ROFFIX-0528
- if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
- continue;
-
- int next_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
-
- if (next_score > best_rank_score) // always skip a higher score
- continue;
- if (next_score == best_rank_score) { // if scores are equal
- if (next_ohms < best_rank_ohms) // always skip lower ohms
- continue;
- if (next_ohms == best_rank_ohms) { // if same ohms
- if (orankx != rankx) // always skip the other rank(s)
- continue;
- }
- // else next_ohms are greater, always choose it
- }
- // else next_score is less than current best, so always choose it
- VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: new best score: rank %d, rodt %d(%3d), new best %d, previous best %d(%d)\n",
+ // some of the rank-related loops below need to operate only on the ranks of a single DIMM,
+ // so create a mask for their use here
+ int dimm_rank_mask;
+ if (num_ranks == 4)
+ dimm_rank_mask = rank_mask; // should be 1111
+ else {
+ dimm_rank_mask = rank_mask & 3; // should be 01 or 11
+ if (rankx >= 2)
+ dimm_rank_mask <<= 2; // doing a rank on the second DIMM, should be 0100 or 1100
+ }
+ debug_print("DIMM rank mask: 0x%x, rank mask: 0x%x, rankx: %d\n", dimm_rank_mask, rank_mask, rankx);
+
+ ////////////////// this is the start of the BEST ROW SCORE LOOP
+
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ //int rtt_nom_ohms;
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ //rtt_nom_ohms = imp_values->rtt_nom_ohms[rtt_nom];
+
+ /* When the read ODT mask is zero the dyn_rtt_nom_mask is
+ zero than RTT_NOM will not be changing during
+ read-leveling. Since the value is fixed we only need
+ to test it once. */
+ if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
+ continue;
+
+ debug_print("N%d.LMC%d.R%d: starting RTT_NOM %d (%d)\n",
+ node, ddr_interface_num, rankx, rtt_nom, rtt_nom_ohms);
+
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+ int next_ohms = imp_values->rodt_ohms[rodt_ctl];
+
+ // skip RODT rows in mask, but *NOT* rows with too high a score;
+ // we will not use the skipped ones for printing or evaluating, but
+ // we need to allow all the non-skipped ones to be candidates for "best"
+ if (((1 << rodt_ctl) & rodt_row_skip_mask) != 0) {
+ debug_print("N%d.LMC%d.R%d: SKIPPING rodt:%d (%d) with rank_score:%d\n",
+ node, ddr_interface_num, rankx, rodt_ctl, next_ohms, next_score);
+ continue;
+ }
+ for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // this is ROFFIX-0528
+ if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
+ continue;
+
+ int next_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
+
+ if (next_score > best_rank_score) // always skip a higher score
+ continue;
+ if (next_score == best_rank_score) { // if scores are equal
+ if (next_ohms < best_rank_ohms) // always skip lower ohms
+ continue;
+ if (next_ohms == best_rank_ohms) { // if same ohms
+ if (orankx != rankx) // always skip the other rank(s)
+ continue;
+ }
+ // else next_ohms are greater, always choose it
+ }
+ // else next_score is less than current best, so always choose it
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: new best score: rank %d, rodt %d(%3d), new best %d, previous best %d(%d)\n",
node, ddr_interface_num, rankx, orankx, rodt_ctl, next_ohms, next_score,
best_rank_score, best_rank_ohms);
- best_rank_score = next_score;
- best_rank_rtt_nom = rtt_nom;
- //best_rank_nom_ohms = rtt_nom_ohms;
- best_rank_ctl = rodt_ctl;
- best_rank_ohms = next_ohms;
- best_rankx = orankx;
- lmc_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
+ best_rank_score = next_score;
+ best_rank_rtt_nom = rtt_nom;
+ //best_rank_nom_ohms = rtt_nom_ohms;
+ best_rank_ctl = rodt_ctl;
+ best_rank_ohms = next_ohms;
+ best_rankx = orankx;
+ lmc_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
- } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
- } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
- } /* for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) */
-
- ////////////////// this is the end of the BEST ROW SCORE LOOP
-
- // DANGER, Will Robinson!! Abort now if we did not find a best score at all...
- if (best_rank_score == DEFAULT_BEST_RANK_SCORE) {
- error_print("WARNING: no best rank score found for N%d.LMC%d.R%d - resetting node...\n",
- node, ddr_interface_num, rankx);
- bdk_wait_usec(500000);
- bdk_reset_chip(node);
- }
+ } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) */
+
+ ////////////////// this is the end of the BEST ROW SCORE LOOP
+
+ // DANGER, Will Robinson!! Abort now if we did not find a best score at all...
+ if (best_rank_score == DEFAULT_BEST_RANK_SCORE) {
+ error_print("WARNING: no best rank score found for N%d.LMC%d.R%d - resetting node...\n",
+ node, ddr_interface_num, rankx);
+ bdk_wait_usec(500000);
+ bdk_reset_chip(node);
+ }
// FIXME: relative now, but still arbitrary...
// halve the range if 2 DIMMs unless they are single rank...
- int MAX_RANK_SCORE = best_rank_score;
+ int MAX_RANK_SCORE = best_rank_score;
MAX_RANK_SCORE += (MAX_RANK_SCORE_LIMIT / ((num_ranks > 1) ? dimm_count : 1));
- if (!ecc_ena){
- lmc_rlevel_rank.cn83xx.byte8 = lmc_rlevel_rank.cn83xx.byte0; /* ECC is not used */
- }
+ if (!ecc_ena){
+ lmc_rlevel_rank.cn83xx.byte8 = lmc_rlevel_rank.cn83xx.byte0; /* ECC is not used */
+ }
- // at the end, write the best row settings to the current rank
- DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
- lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ // at the end, write the best row settings to the current rank
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
- bdk_lmcx_rlevel_rankx_t saved_rlevel_rank;
- saved_rlevel_rank.u = lmc_rlevel_rank.u;
+ bdk_lmcx_rlevel_rankx_t saved_rlevel_rank;
+ saved_rlevel_rank.u = lmc_rlevel_rank.u;
- ////////////////// this is the start of the PRINT LOOP
+ ////////////////// this is the start of the PRINT LOOP
- // for pass==0, print current rank, pass==1 print other rank(s)
- // this is done because we want to show each ranks RODT values together, not interlaced
+ // for pass==0, print current rank, pass==1 print other rank(s)
+ // this is done because we want to show each ranks RODT values together, not interlaced
#if COUNT_RL_CANDIDATES
// keep separates for ranks - pass=0 target rank, pass=1 other rank on DIMM
int mask_skipped[2] = {0,0};
@@ -6874,17 +6874,17 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
int selected_rows[2] = {0,0};
int zero_scores[2] = {0,0};
#endif /* COUNT_RL_CANDIDATES */
- for (int pass = 0; pass < 2; pass++ ) {
- for (int orankx = 0; orankx < dimm_count * 4; orankx++) {
- if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
- continue;
+ for (int pass = 0; pass < 2; pass++ ) {
+ for (int orankx = 0; orankx < dimm_count * 4; orankx++) {
+ if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
+ continue;
- if (((pass == 0) && (orankx != rankx)) || ((pass != 0) && (orankx == rankx)))
- continue;
+ if (((pass == 0) && (orankx != rankx)) || ((pass != 0) && (orankx == rankx)))
+ continue;
- for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
- rtt_nom = imp_values->rtt_nom_table[rtt_idx];
- if (dyn_rtt_nom_mask == 0) {
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ if (dyn_rtt_nom_mask == 0) {
print_nom_ohms = -1;
if (rtt_idx != min_rtt_nom_idx)
continue;
@@ -6892,16 +6892,16 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
print_nom_ohms = imp_values->rtt_nom_ohms[rtt_nom];
}
- // cycle through all the RODT values...
- for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
- bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
- int temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
- temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
+ // cycle through all the RODT values...
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
+ int temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
+ temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
- // skip RODT rows in mask, or rows with too high a score;
- // we will not use them for printing or evaluating...
+ // skip RODT rows in mask, or rows with too high a score;
+ // we will not use them for printing or evaluating...
#if COUNT_RL_CANDIDATES
- int skip_row;
+ int skip_row;
if ((1 << rodt_ctl) & rodt_row_skip_mask) {
skip_row = WITH_RODT_SKIPPING;
++mask_skipped[pass];
@@ -6914,10 +6914,10 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if (temp_score == 0)
++zero_scores[pass];
}
-
+
#else /* COUNT_RL_CANDIDATES */
- int skip_row = (((1 << rodt_ctl) & rodt_row_skip_mask) || (temp_score > MAX_RANK_SCORE))
- ? WITH_RODT_SKIPPING: WITH_RODT_BLANK;
+ int skip_row = (((1 << rodt_ctl) & rodt_row_skip_mask) || (temp_score > MAX_RANK_SCORE))
+ ? WITH_RODT_SKIPPING: WITH_RODT_BLANK;
#endif /* COUNT_RL_CANDIDATES */
// identify and print the BEST ROW when it comes up
@@ -6929,16 +6929,16 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
skip_row = WITH_RODT_BESTROW;
}
- display_RL_with_RODT(node, ddr_interface_num,
- temp_rlevel_rank, orankx, temp_score,
- print_nom_ohms,
- imp_values->rodt_ohms[rodt_ctl],
- skip_row);
+ display_RL_with_RODT(node, ddr_interface_num,
+ temp_rlevel_rank, orankx, temp_score,
+ print_nom_ohms,
+ imp_values->rodt_ohms[rodt_ctl],
+ skip_row);
- } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
- } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
- } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) { */
- } /* for (int pass = 0; pass < 2; pass++ ) */
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
+ } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) { */
+ } /* for (int pass = 0; pass < 2; pass++ ) */
#if COUNT_RL_CANDIDATES
VB_PRT(VBL_TME, "N%d.LMC%d.R%d: RLROWS: selected %d+%d, zero_scores %d+%d, mask_skipped %d+%d, score_skipped %d+%d\n",
node, ddr_interface_num, rankx,
@@ -6948,247 +6948,247 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
score_skipped[0], score_skipped[1]);
#endif /* COUNT_RL_CANDIDATES */
- ////////////////// this is the end of the PRINT LOOP
-
- // now evaluate which bytes need adjusting
- uint64_t byte_msk = 0x3f; // 6-bit fields
- uint64_t best_byte, new_byte, temp_byte, orig_best_byte;
-
- uint64_t rank_best_bytes[9]; // collect the new byte values; first init with current best for neighbor use
- for (int byte_idx = 0, byte_sh = 0; byte_idx < 8+ecc_ena; byte_idx++, byte_sh += 6) {
- rank_best_bytes[byte_idx] = (lmc_rlevel_rank.u >> byte_sh) & byte_msk;
- }
-
- ////////////////// this is the start of the BEST BYTE LOOP
-
- for (int byte_idx = 0, byte_sh = 0; byte_idx < 8+ecc_ena; byte_idx++, byte_sh += 6) {
- best_byte = orig_best_byte = rank_best_bytes[byte_idx];
-
- ////////////////// this is the start of the BEST BYTE AVERAGING LOOP
-
- // validate the initial "best" byte by looking at the average of the unskipped byte-column entries
- // we want to do this before we go further, so we can try to start with a better initial value
- // this is the so-called "BESTBUY" patch set
- int sum = 0, count = 0;
-
- for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
- rtt_nom = imp_values->rtt_nom_table[rtt_idx];
- if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
- continue;
-
- for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
- bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
- int temp_score;
- for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // average over all the ranks
- if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
- continue;
- temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
- // skip RODT rows in mask, or rows with too high a score;
- // we will not use them for printing or evaluating...
-
- if (!((1 << rodt_ctl) & rodt_row_skip_mask) &&
- (temp_score <= MAX_RANK_SCORE))
- {
- temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
- temp_byte = (temp_rlevel_rank.u >> byte_sh) & byte_msk;
- sum += temp_byte;
- count++;
- }
- } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
- } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
- } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
-
- ////////////////// this is the end of the BEST BYTE AVERAGING LOOP
-
-
- uint64_t avg_byte = divide_nint(sum, count); // FIXME: validate count and sum??
- int avg_diff = (int)best_byte - (int)avg_byte;
- new_byte = best_byte;
- if (avg_diff != 0) {
- // bump best up/dn by 1, not necessarily all the way to avg
- new_byte = best_byte + ((avg_diff > 0) ? -1: 1);
- }
-
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: START: Byte %d: best %d is different by %d from average %d, using %d.\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)best_byte, avg_diff, (int)avg_byte, (int)new_byte);
- best_byte = new_byte;
-
- // At this point best_byte is either:
- // 1. the original byte-column value from the best scoring RODT row, OR
- // 2. that value bumped toward the average of all the byte-column values
- //
- // best_byte will not change from here on...
-
- ////////////////// this is the start of the BEST BYTE COUNTING LOOP
-
- // NOTE: we do this next loop separately from above, because we count relative to "best_byte"
- // which may have been modified by the above averaging operation...
+ ////////////////// this is the end of the PRINT LOOP
+
+ // now evaluate which bytes need adjusting
+ uint64_t byte_msk = 0x3f; // 6-bit fields
+ uint64_t best_byte, new_byte, temp_byte, orig_best_byte;
+
+ uint64_t rank_best_bytes[9]; // collect the new byte values; first init with current best for neighbor use
+ for (int byte_idx = 0, byte_sh = 0; byte_idx < 8+ecc_ena; byte_idx++, byte_sh += 6) {
+ rank_best_bytes[byte_idx] = (lmc_rlevel_rank.u >> byte_sh) & byte_msk;
+ }
+
+ ////////////////// this is the start of the BEST BYTE LOOP
+
+ for (int byte_idx = 0, byte_sh = 0; byte_idx < 8+ecc_ena; byte_idx++, byte_sh += 6) {
+ best_byte = orig_best_byte = rank_best_bytes[byte_idx];
+
+ ////////////////// this is the start of the BEST BYTE AVERAGING LOOP
+
+ // validate the initial "best" byte by looking at the average of the unskipped byte-column entries
+ // we want to do this before we go further, so we can try to start with a better initial value
+ // this is the so-called "BESTBUY" patch set
+ int sum = 0, count = 0;
+
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
+ continue;
+
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
+ int temp_score;
+ for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // average over all the ranks
+ if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
+ continue;
+ temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
+ // skip RODT rows in mask, or rows with too high a score;
+ // we will not use them for printing or evaluating...
+
+ if (!((1 << rodt_ctl) & rodt_row_skip_mask) &&
+ (temp_score <= MAX_RANK_SCORE))
+ {
+ temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
+ temp_byte = (temp_rlevel_rank.u >> byte_sh) & byte_msk;
+ sum += temp_byte;
+ count++;
+ }
+ } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
+
+ ////////////////// this is the end of the BEST BYTE AVERAGING LOOP
+
+
+ uint64_t avg_byte = divide_nint(sum, count); // FIXME: validate count and sum??
+ int avg_diff = (int)best_byte - (int)avg_byte;
+ new_byte = best_byte;
+ if (avg_diff != 0) {
+ // bump best up/dn by 1, not necessarily all the way to avg
+ new_byte = best_byte + ((avg_diff > 0) ? -1: 1);
+ }
+
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: START: Byte %d: best %d is different by %d from average %d, using %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)best_byte, avg_diff, (int)avg_byte, (int)new_byte);
+ best_byte = new_byte;
+
+ // At this point best_byte is either:
+ // 1. the original byte-column value from the best scoring RODT row, OR
+ // 2. that value bumped toward the average of all the byte-column values
+ //
+ // best_byte will not change from here on...
+
+ ////////////////// this is the start of the BEST BYTE COUNTING LOOP
+
+ // NOTE: we do this next loop separately from above, because we count relative to "best_byte"
+ // which may have been modified by the above averaging operation...
//
// Also, the above only moves toward the average by +- 1, so that we will always have a count
// of at least 1 for the original best byte, even if all the others are further away and not counted;
// this ensures we will go back to the original if no others are counted...
// FIXME: this could cause issue if the range of values for a byte-lane are too disparate...
- int count_less = 0, count_same = 0, count_more = 0;
+ int count_less = 0, count_same = 0, count_more = 0;
#if FAILSAFE_CHECK
uint64_t count_byte = new_byte; // save the value we will count around
#endif /* FAILSAFE_CHECK */
#if RANK_MAJORITY
- int rank_less = 0, rank_same = 0, rank_more = 0;
+ int rank_less = 0, rank_same = 0, rank_more = 0;
#endif /* RANK_MAJORITY */
- for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
- rtt_nom = imp_values->rtt_nom_table[rtt_idx];
- if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
- continue;
-
- for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
- bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
- int temp_score;
- for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // count over all the ranks
- if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
- continue;
- temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
- // skip RODT rows in mask, or rows with too high a score;
- // we will not use them for printing or evaluating...
- if (((1 << rodt_ctl) & rodt_row_skip_mask) ||
- (temp_score > MAX_RANK_SCORE))
- {
- continue;
- }
- temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
- temp_byte = (temp_rlevel_rank.u >> byte_sh) & byte_msk;
-
- if (temp_byte == 0) // do not count it if illegal
- continue;
- else if (temp_byte == best_byte)
- count_same++;
- else if (temp_byte == best_byte - 1)
- count_less++;
- else if (temp_byte == best_byte + 1)
- count_more++;
- // else do not count anything more than 1 away from the best
+ for (rtt_idx = min_rtt_nom_idx; rtt_idx <= max_rtt_nom_idx; ++rtt_idx) {
+ rtt_nom = imp_values->rtt_nom_table[rtt_idx];
+ if ((dyn_rtt_nom_mask == 0) && (rtt_idx != min_rtt_nom_idx))
+ continue;
+
+ for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) {
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank;
+ int temp_score;
+ for (int orankx = 0; orankx < dimm_count * 4; orankx++) { // count over all the ranks
+ if (!(dimm_rank_mask & (1 << orankx))) // stay on the same DIMM
+ continue;
+ temp_score = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].score;
+ // skip RODT rows in mask, or rows with too high a score;
+ // we will not use them for printing or evaluating...
+ if (((1 << rodt_ctl) & rodt_row_skip_mask) ||
+ (temp_score > MAX_RANK_SCORE))
+ {
+ continue;
+ }
+ temp_rlevel_rank.u = rlevel_scoreboard[rtt_nom][rodt_ctl][orankx].setting;
+ temp_byte = (temp_rlevel_rank.u >> byte_sh) & byte_msk;
+
+ if (temp_byte == 0) // do not count it if illegal
+ continue;
+ else if (temp_byte == best_byte)
+ count_same++;
+ else if (temp_byte == best_byte - 1)
+ count_less++;
+ else if (temp_byte == best_byte + 1)
+ count_more++;
+ // else do not count anything more than 1 away from the best
#if RANK_MAJORITY
- // FIXME? count is relative to best_byte; should it be rank-based?
- if (orankx != rankx) // rank counts only on main rank
- continue;
- else if (temp_byte == best_byte)
- rank_same++;
- else if (temp_byte == best_byte - 1)
- rank_less++;
- else if (temp_byte == best_byte + 1)
- rank_more++;
+ // FIXME? count is relative to best_byte; should it be rank-based?
+ if (orankx != rankx) // rank counts only on main rank
+ continue;
+ else if (temp_byte == best_byte)
+ rank_same++;
+ else if (temp_byte == best_byte - 1)
+ rank_less++;
+ else if (temp_byte == best_byte + 1)
+ rank_more++;
#endif /* RANK_MAJORITY */
- } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
- } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
- } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
+ } /* for (int orankx = 0; orankx < dimm_count * 4; orankx++) */
+ } /* for (rodt_ctl = max_rodt_ctl; rodt_ctl >= min_rodt_ctl; --rodt_ctl) */
+ } /* for (rtt_idx=min_rtt_nom_idx; rtt_idx<=max_rtt_nom_idx; ++rtt_idx) */
#if RANK_MAJORITY
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: COUNT: Byte %d: orig %d now %d, more %d same %d less %d (%d/%d/%d)\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)orig_best_byte, (int)best_byte,
- count_more, count_same, count_less,
- rank_more, rank_same, rank_less);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: COUNT: Byte %d: orig %d now %d, more %d same %d less %d (%d/%d/%d)\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)orig_best_byte, (int)best_byte,
+ count_more, count_same, count_less,
+ rank_more, rank_same, rank_less);
#else /* RANK_MAJORITY */
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: COUNT: Byte %d: orig %d now %d, more %d same %d less %d\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)orig_best_byte, (int)best_byte,
- count_more, count_same, count_less);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: COUNT: Byte %d: orig %d now %d, more %d same %d less %d\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)orig_best_byte, (int)best_byte,
+ count_more, count_same, count_less);
#endif /* RANK_MAJORITY */
- ////////////////// this is the end of the BEST BYTE COUNTING LOOP
-
- // choose the new byte value
- // we need to check that there is no gap greater than 2 between adjacent bytes
- // (adjacency depends on DIMM type)
- // use the neighbor value to help decide
- // initially, the rank_best_bytes[] will contain values from the chosen lowest score rank
- new_byte = 0;
-
- // neighbor is index-1 unless we are index 0 or index 8 (ECC)
- int neighbor = (byte_idx == 8) ? 3 : ((byte_idx == 0) ? 1 : byte_idx - 1);
- uint64_t neigh_byte = rank_best_bytes[neighbor];
-
-
- // can go up or down or stay the same, so look at a numeric average to help
- new_byte = divide_nint(((count_more * (best_byte + 1)) +
- (count_same * (best_byte + 0)) +
- (count_less * (best_byte - 1))),
- max(1, (count_more + count_same + count_less)));
-
- // use neighbor to help choose with average
- if ((byte_idx > 0) && (_abs(neigh_byte - new_byte) > 2)) // but not for byte 0
- {
- uint64_t avg_pick = new_byte;
- if ((new_byte - best_byte) != 0)
- new_byte = best_byte; // back to best, average did not get better
- else // avg was the same, still too far, now move it towards the neighbor
- new_byte += (neigh_byte > new_byte) ? 1 : -1;
-
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: neighbor %d too different %d from average %d, picking %d.\n",
- node, ddr_interface_num, rankx,
- byte_idx, neighbor, (int)neigh_byte, (int)avg_pick, (int)new_byte);
- }
+ ////////////////// this is the end of the BEST BYTE COUNTING LOOP
+
+ // choose the new byte value
+ // we need to check that there is no gap greater than 2 between adjacent bytes
+ // (adjacency depends on DIMM type)
+ // use the neighbor value to help decide
+ // initially, the rank_best_bytes[] will contain values from the chosen lowest score rank
+ new_byte = 0;
+
+ // neighbor is index-1 unless we are index 0 or index 8 (ECC)
+ int neighbor = (byte_idx == 8) ? 3 : ((byte_idx == 0) ? 1 : byte_idx - 1);
+ uint64_t neigh_byte = rank_best_bytes[neighbor];
+
+
+ // can go up or down or stay the same, so look at a numeric average to help
+ new_byte = divide_nint(((count_more * (best_byte + 1)) +
+ (count_same * (best_byte + 0)) +
+ (count_less * (best_byte - 1))),
+ max(1, (count_more + count_same + count_less)));
+
+ // use neighbor to help choose with average
+ if ((byte_idx > 0) && (_abs(neigh_byte - new_byte) > 2)) // but not for byte 0
+ {
+ uint64_t avg_pick = new_byte;
+ if ((new_byte - best_byte) != 0)
+ new_byte = best_byte; // back to best, average did not get better
+ else // avg was the same, still too far, now move it towards the neighbor
+ new_byte += (neigh_byte > new_byte) ? 1 : -1;
+
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: neighbor %d too different %d from average %d, picking %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, neighbor, (int)neigh_byte, (int)avg_pick, (int)new_byte);
+ }
#if MAJORITY_OVER_AVG
- // NOTE:
- // For now, we let the neighbor processing above trump the new simple majority processing here.
- // This is mostly because we have seen no smoking gun for a neighbor bad choice (yet?).
- // Also note that we will ALWAYS be using byte 0 majority, because of the if clause above.
- else {
- // majority is dependent on the counts, which are relative to best_byte, so start there
- uint64_t maj_byte = best_byte;
- if ((count_more > count_same) && (count_more > count_less)) {
- maj_byte++;
- } else if ((count_less > count_same) && (count_less > count_more)) {
- maj_byte--;
- }
- if (maj_byte != new_byte) {
- // print only when majority choice is different from average
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: MAJORTY: Byte %d: picking majority of %d over average %d.\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)maj_byte, (int)new_byte);
- new_byte = maj_byte;
- } else {
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: picking average of %d.\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)new_byte);
- }
+ // NOTE:
+ // For now, we let the neighbor processing above trump the new simple majority processing here.
+ // This is mostly because we have seen no smoking gun for a neighbor bad choice (yet?).
+ // Also note that we will ALWAYS be using byte 0 majority, because of the if clause above.
+ else {
+ // majority is dependent on the counts, which are relative to best_byte, so start there
+ uint64_t maj_byte = best_byte;
+ if ((count_more > count_same) && (count_more > count_less)) {
+ maj_byte++;
+ } else if ((count_less > count_same) && (count_less > count_more)) {
+ maj_byte--;
+ }
+ if (maj_byte != new_byte) {
+ // print only when majority choice is different from average
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: MAJORTY: Byte %d: picking majority of %d over average %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)maj_byte, (int)new_byte);
+ new_byte = maj_byte;
+ } else {
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: picking average of %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)new_byte);
+ }
#if RANK_MAJORITY
- // rank majority is dependent on the rank counts, which are relative to best_byte,
- // so start there, and adjust according to the rank counts majority
- uint64_t rank_maj = best_byte;
- if ((rank_more > rank_same) && (rank_more > rank_less)) {
- rank_maj++;
- } else if ((rank_less > rank_same) && (rank_less > rank_more)) {
- rank_maj--;
- }
- int rank_sum = rank_more + rank_same + rank_less;
-
- // now, let rank majority possibly rule over the current new_byte however we got it
- if (rank_maj != new_byte) { // only if different
- // Here is where we decide whether to completely apply RANK_MAJORITY or not
- // FIXME: For the moment, we do it ONLY when running 2-slot configs
- // FIXME? or when rank_sum is big enough?
- if ((dimm_count > 1) || (rank_sum > 2)) {
- // print only when rank majority choice is selected
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: RANKMAJ: Byte %d: picking %d over %d.\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)rank_maj, (int)new_byte);
- new_byte = rank_maj;
- } else { // FIXME: print some info when we could have chosen RANKMAJ but did not
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: RANKMAJ: Byte %d: NOT using %d over %d (best=%d,sum=%d).\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)rank_maj, (int)new_byte,
- (int)best_byte, rank_sum);
- }
- }
+ // rank majority is dependent on the rank counts, which are relative to best_byte,
+ // so start there, and adjust according to the rank counts majority
+ uint64_t rank_maj = best_byte;
+ if ((rank_more > rank_same) && (rank_more > rank_less)) {
+ rank_maj++;
+ } else if ((rank_less > rank_same) && (rank_less > rank_more)) {
+ rank_maj--;
+ }
+ int rank_sum = rank_more + rank_same + rank_less;
+
+ // now, let rank majority possibly rule over the current new_byte however we got it
+ if (rank_maj != new_byte) { // only if different
+ // Here is where we decide whether to completely apply RANK_MAJORITY or not
+ // FIXME: For the moment, we do it ONLY when running 2-slot configs
+ // FIXME? or when rank_sum is big enough?
+ if ((dimm_count > 1) || (rank_sum > 2)) {
+ // print only when rank majority choice is selected
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: RANKMAJ: Byte %d: picking %d over %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)rank_maj, (int)new_byte);
+ new_byte = rank_maj;
+ } else { // FIXME: print some info when we could have chosen RANKMAJ but did not
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: RANKMAJ: Byte %d: NOT using %d over %d (best=%d,sum=%d).\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)rank_maj, (int)new_byte,
+ (int)best_byte, rank_sum);
+ }
+ }
#endif /* RANK_MAJORITY */
- }
+ }
#else
- else {
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: picking average of %d.\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)new_byte);
- }
+ else {
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: AVERAGE: Byte %d: picking average of %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)new_byte);
+ }
#endif
#if FAILSAFE_CHECK
// one last check:
@@ -7196,9 +7196,9 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
// FIXME: go back to original best byte from the best row
if ((new_byte == count_byte) && (count_same == 0)) {
new_byte = orig_best_byte;
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: FAILSAF: Byte %d: going back to original %d.\n",
- node, ddr_interface_num, rankx,
- byte_idx, (int)new_byte);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: FAILSAF: Byte %d: going back to original %d.\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, (int)new_byte);
}
#endif /* FAILSAFE_CHECK */
#if PERFECT_BITMASK_COUNTING
@@ -7206,26 +7206,28 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
// Remember, we only counted for DDR4, so zero means none or DDR3, and we bypass this...
if (rank_perfect_counts[rankx].total[byte_idx] > 0) {
// FIXME: should be more error checking, look for ties, etc...
- int i, delay_count, delay_value, delay_max;
+ /* FIXME(dhendrix): i shadows another local variable, changed to _i in this block */
+// int i, delay_count, delay_value, delay_max;
+ int _i, delay_count, delay_value, delay_max;
uint32_t ties;
delay_value = -1;
delay_max = 0;
ties = 0;
- for (i = 0; i < 32; i++) {
- delay_count = rank_perfect_counts[rankx].count[byte_idx][i];
+ for (_i = 0; _i < 32; _i++) {
+ delay_count = rank_perfect_counts[rankx].count[byte_idx][_i];
if (delay_count > 0) { // only look closer if there are any,,,
if (delay_count > delay_max) {
delay_max = delay_count;
- delay_value = i;
+ delay_value = _i;
ties = 0; // reset ties to none
} else if (delay_count == delay_max) {
if (ties == 0)
ties = 1UL << delay_value; // put in original value
- ties |= 1UL << i; // add new value
+ ties |= 1UL << _i; // add new value
}
}
- } /* for (i = 0; i < 32; i++) */
+ } /* for (_i = 0; _i < 32; _i++) */
if (delay_value >= 0) {
if (ties != 0) {
@@ -7266,69 +7268,69 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
} /* if (rank_perfect_counts[rankx].total[byte_idx] > 0) */
#endif /* PERFECT_BITMASK_COUNTING */
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SUMMARY: Byte %d: %s: orig %d now %d, more %d same %d less %d, using %d\n",
- node, ddr_interface_num, rankx,
- byte_idx, "AVG", (int)orig_best_byte,
- (int)best_byte, count_more, count_same, count_less, (int)new_byte);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SUMMARY: Byte %d: %s: orig %d now %d, more %d same %d less %d, using %d\n",
+ node, ddr_interface_num, rankx,
+ byte_idx, "AVG", (int)orig_best_byte,
+ (int)best_byte, count_more, count_same, count_less, (int)new_byte);
- // update the byte with the new value (NOTE: orig value in the CSR may not be current "best")
- lmc_rlevel_rank.u &= ~(byte_msk << byte_sh);
- lmc_rlevel_rank.u |= (new_byte << byte_sh);
+ // update the byte with the new value (NOTE: orig value in the CSR may not be current "best")
+ lmc_rlevel_rank.u &= ~(byte_msk << byte_sh);
+ lmc_rlevel_rank.u |= (new_byte << byte_sh);
- rank_best_bytes[byte_idx] = new_byte; // save new best for neighbor use
+ rank_best_bytes[byte_idx] = new_byte; // save new best for neighbor use
- } /* for (byte_idx = 0; byte_idx < 8+ecc_ena; byte_idx++) */
+ } /* for (byte_idx = 0; byte_idx < 8+ecc_ena; byte_idx++) */
- ////////////////// this is the end of the BEST BYTE LOOP
+ ////////////////// this is the end of the BEST BYTE LOOP
- if (saved_rlevel_rank.u != lmc_rlevel_rank.u) {
- DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
- lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
- debug_print("Adjusting Read-Leveling per-RANK settings.\n");
- } else {
- debug_print("Not Adjusting Read-Leveling per-RANK settings.\n");
- }
- display_RL_with_final(node, ddr_interface_num, lmc_rlevel_rank, rankx);
+ if (saved_rlevel_rank.u != lmc_rlevel_rank.u) {
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ debug_print("Adjusting Read-Leveling per-RANK settings.\n");
+ } else {
+ debug_print("Not Adjusting Read-Leveling per-RANK settings.\n");
+ }
+ display_RL_with_final(node, ddr_interface_num, lmc_rlevel_rank, rankx);
#if RLEXTRAS_PATCH
#define RLEVEL_RANKX_EXTRAS_INCR 4
- if ((rank_mask & 0x0F) != 0x0F) { // if there are unused entries to be filled
- bdk_lmcx_rlevel_rankx_t temp_rlevel_rank = lmc_rlevel_rank; // copy the current rank
- int byte, delay;
- if (rankx < 3) {
- debug_print("N%d.LMC%d.R%d: checking for RLEVEL_RANK unused entries.\n",
- node, ddr_interface_num, rankx);
- for (byte = 0; byte < 9; byte++) { // modify the copy in prep for writing to empty slot(s)
- delay = get_rlevel_rank_struct(&temp_rlevel_rank, byte) + RLEVEL_RANKX_EXTRAS_INCR;
- if (delay > (int)RLEVEL_BYTE_MSK) delay = RLEVEL_BYTE_MSK;
- update_rlevel_rank_struct(&temp_rlevel_rank, byte, delay);
- }
- if (rankx == 0) { // if rank 0, write rank 1 and rank 2 here if empty
- if (!(rank_mask & (1<<1))) { // check that rank 1 is empty
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
- node, ddr_interface_num, rankx, 1);
- DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 1), temp_rlevel_rank.u);
- }
- if (!(rank_mask & (1<<2))) { // check that rank 2 is empty
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
- node, ddr_interface_num, rankx, 2);
- DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 2), temp_rlevel_rank.u);
- }
- }
- // if ranks 0, 1 or 2, write rank 3 here if empty
- if (!(rank_mask & (1<<3))) { // check that rank 3 is empty
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
- node, ddr_interface_num, rankx, 3);
- DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 3), temp_rlevel_rank.u);
- }
- }
- }
+ if ((rank_mask & 0x0F) != 0x0F) { // if there are unused entries to be filled
+ bdk_lmcx_rlevel_rankx_t temp_rlevel_rank = lmc_rlevel_rank; // copy the current rank
+ int byte, delay;
+ if (rankx < 3) {
+ debug_print("N%d.LMC%d.R%d: checking for RLEVEL_RANK unused entries.\n",
+ node, ddr_interface_num, rankx);
+ for (byte = 0; byte < 9; byte++) { // modify the copy in prep for writing to empty slot(s)
+ delay = get_rlevel_rank_struct(&temp_rlevel_rank, byte) + RLEVEL_RANKX_EXTRAS_INCR;
+ if (delay > (int)RLEVEL_BYTE_MSK) delay = RLEVEL_BYTE_MSK;
+ update_rlevel_rank_struct(&temp_rlevel_rank, byte, delay);
+ }
+ if (rankx == 0) { // if rank 0, write rank 1 and rank 2 here if empty
+ if (!(rank_mask & (1<<1))) { // check that rank 1 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 1);
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 1), temp_rlevel_rank.u);
+ }
+ if (!(rank_mask & (1<<2))) { // check that rank 2 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 2);
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 2), temp_rlevel_rank.u);
+ }
+ }
+ // if ranks 0, 1 or 2, write rank 3 here if empty
+ if (!(rank_mask & (1<<3))) { // check that rank 3 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing RLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 3);
+ DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, 3), temp_rlevel_rank.u);
+ }
+ }
+ }
#endif /* RLEXTRAS_PATCH */
- } /* for (rankx = 0; rankx < dimm_count * 4; rankx++) */
+ } /* for (rankx = 0; rankx < dimm_count * 4; rankx++) */
- ////////////////// this is the end of the RANK MAJOR LOOP
+ ////////////////// this is the end of the RANK MAJOR LOOP
- } /* Evaluation block */
+ } /* Evaluation block */
} /* while(rlevel_debug_loops--) */
lmc_control.s.ddr2t = save_ddr2t;
@@ -7362,21 +7364,6 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_rlevel_rank.u = value;
}
-
- if (bdk_is_platform(BDK_PLATFORM_ASIM)) {
- parameter_set |= 1;
-
- lmc_rlevel_rank.cn83xx.byte8 = 3;
- lmc_rlevel_rank.cn83xx.byte7 = 3;
- lmc_rlevel_rank.cn83xx.byte6 = 3;
- lmc_rlevel_rank.cn83xx.byte5 = 3;
- lmc_rlevel_rank.cn83xx.byte4 = 3;
- lmc_rlevel_rank.cn83xx.byte3 = 3;
- lmc_rlevel_rank.cn83xx.byte2 = 3;
- lmc_rlevel_rank.cn83xx.byte1 = 3;
- lmc_rlevel_rank.cn83xx.byte0 = 3;
- }
-
if (parameter_set) {
DRAM_CSR_WRITE(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx), lmc_rlevel_rank.u);
display_RL(node, ddr_interface_num, lmc_rlevel_rank, rankx);
@@ -7402,12 +7389,12 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_modereg_params0.s.al = 2; /* CL-2 */
lmc_control.s.pocas = 1;
- ddr_print("MODEREG_PARAMS0 : 0x%016lx\n", lmc_modereg_params0.u);
+ ddr_print("MODEREG_PARAMS0 : 0x%016llx\n", lmc_modereg_params0.u);
DRAM_CSR_WRITE(node, BDK_LMCX_MODEREG_PARAMS0(ddr_interface_num), lmc_modereg_params0.u);
- ddr_print("TIMING_PARAMS1 : 0x%016lx\n", lmc_timing_params1.u);
+ ddr_print("TIMING_PARAMS1 : 0x%016llx\n", lmc_timing_params1.u);
DRAM_CSR_WRITE(node, BDK_LMCX_TIMING_PARAMS1(ddr_interface_num), lmc_timing_params1.u);
- ddr_print("LMC_CONTROL : 0x%016lx\n", lmc_control.u);
+ ddr_print("LMC_CONTROL : 0x%016llx\n", lmc_control.u);
DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
for (rankx = 0; rankx < dimm_count * 4; rankx++) {
@@ -7422,8 +7409,8 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
// this is here just for output, to allow check of the Deskew settings one last time...
if (! disable_deskew_training) {
deskew_counts_t dsk_counts;
- VB_PRT(VBL_TME, "N%d.LMC%d: Check Deskew Settings before software Write-Leveling.\n",
- node, ddr_interface_num);
+ VB_PRT(VBL_TME, "N%d.LMC%d: Check Deskew Settings before software Write-Leveling.\n",
+ node, ddr_interface_num);
Validate_Read_Deskew_Training(node, rank_mask, ddr_interface_num, &dsk_counts, VBL_TME); // TME for FINAL
}
@@ -7480,7 +7467,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#endif
int sw_wlevel_enable = 1; /* FIX... Should be customizable. */
int interfaces;
- int measured_vref_flag;
+ int measured_vref_flag;
typedef enum {
WL_ESTIMATED = 0, /* HW/SW wleveling failed. Results
estimated. */
@@ -7532,8 +7519,8 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
} /* for (rankx = 0; rankx < dimm_count * 4; rankx++) */
#endif
- /* Get the measured_vref setting from the config, check for an override... */
- /* NOTE: measured_vref=1 (ON) means force use of MEASURED Vref... */
+ /* Get the measured_vref setting from the config, check for an override... */
+ /* NOTE: measured_vref=1 (ON) means force use of MEASURED Vref... */
// NOTE: measured VREF can only be done for DDR4
if (ddr_type == DDR4_DRAM) {
measured_vref_flag = custom_lmc_config->measured_vref;
@@ -7560,7 +7547,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
** ranks 0 and 2, but only 2 ranks are active. */
active_rank = 0;
- interfaces = __builtin_popcount(ddr_interface_mask);
+ interfaces = bdk_pop(ddr_interface_mask);
#define VREF_RANGE1_LIMIT 0x33 // range1 is valid for 0x00 - 0x32
#define VREF_RANGE2_LIMIT 0x18 // range2 is valid for 0x00 - 0x17
@@ -7572,14 +7559,14 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
for (rankx = 0; rankx < dimm_count * 4; rankx++) {
uint64_t rank_addr;
int vref_value, final_vref_value, final_vref_range = 0;
- int start_vref_value = 0, computed_final_vref_value = -1;
+ int start_vref_value = 0, computed_final_vref_value = -1;
char best_vref_values_count, vref_values_count;
char best_vref_values_start, vref_values_start;
int bytes_failed;
sw_wl_status_t byte_test_status[9];
sw_wl_status_t sw_wl_rank_status = WL_HARDWARE;
- int sw_wl_failed = 0;
+ int sw_wl_failed = 0;
int sw_wlevel_hw = sw_wlevel_hw_default;
if (!sw_wlevel_enable)
@@ -7592,14 +7579,14 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
node, ddr_interface_num, rankx,
(sw_wlevel_hw) ? "with H/W assist" : "with S/W algorithm");
- if ((ddr_type == DDR4_DRAM) && (num_ranks != 4)) {
- // always compute when we can...
- computed_final_vref_value = compute_vref_value(node, ddr_interface_num, rankx,
- dimm_count, num_ranks, imp_values,
- is_stacked_die);
- if (!measured_vref_flag) // but only use it if allowed
- start_vref_value = VREF_FINAL; // skip all the measured Vref processing, just the final setting
- }
+ if ((ddr_type == DDR4_DRAM) && (num_ranks != 4)) {
+ // always compute when we can...
+ computed_final_vref_value = compute_vref_value(node, ddr_interface_num, rankx,
+ dimm_count, num_ranks, imp_values,
+ is_stacked_die);
+ if (!measured_vref_flag) // but only use it if allowed
+ start_vref_value = VREF_FINAL; // skip all the measured Vref processing, just the final setting
+ }
/* Save off the h/w wl results */
lmc_wlevel_rank_hw_results.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
@@ -7619,36 +7606,36 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
} else {
vrange = 0; vvalue = vref_value - VREF_RANGE2_LIMIT;
}
- set_vref(node, ddr_interface_num, rankx,
+ set_vref(node, ddr_interface_num, rankx,
vrange, vvalue);
} else { /* if (vref_value < VREF_FINAL) */
/* Print the final Vref value first. */
- /* Always print the computed first if its valid */
- if (computed_final_vref_value >= 0) {
- ddr_print("N%d.LMC%d.R%d: Vref Computed Summary :"
- " %2d (0x%02x)\n",
- node, ddr_interface_num,
- rankx, computed_final_vref_value,
- computed_final_vref_value);
- }
- if (!measured_vref_flag) { // setup to use the computed
- best_vref_values_count = 1;
- final_vref_value = computed_final_vref_value;
- } else { // setup to use the measured
- if (best_vref_values_count > 0) {
- best_vref_values_count = max(best_vref_values_count, 2);
+ /* Always print the computed first if its valid */
+ if (computed_final_vref_value >= 0) {
+ ddr_print("N%d.LMC%d.R%d: Vref Computed Summary :"
+ " %2d (0x%02x)\n",
+ node, ddr_interface_num,
+ rankx, computed_final_vref_value,
+ computed_final_vref_value);
+ }
+ if (!measured_vref_flag) { // setup to use the computed
+ best_vref_values_count = 1;
+ final_vref_value = computed_final_vref_value;
+ } else { // setup to use the measured
+ if (best_vref_values_count > 0) {
+ best_vref_values_count = max(best_vref_values_count, 2);
#if 0
// NOTE: this already adjusts VREF via calculating 40% rather than 50%
- final_vref_value = best_vref_values_start + divide_roundup((best_vref_values_count-1)*4,10);
- ddr_print("N%d.LMC%d.R%d: Vref Training Summary :"
- " %2d <----- %2d (0x%02x) -----> %2d range: %2d\n",
- node, ddr_interface_num, rankx, best_vref_values_start,
- final_vref_value, final_vref_value,
- best_vref_values_start+best_vref_values_count-1,
- best_vref_values_count-1);
+ final_vref_value = best_vref_values_start + divide_roundup((best_vref_values_count-1)*4,10);
+ ddr_print("N%d.LMC%d.R%d: Vref Training Summary :"
+ " %2d <----- %2d (0x%02x) -----> %2d range: %2d\n",
+ node, ddr_interface_num, rankx, best_vref_values_start,
+ final_vref_value, final_vref_value,
+ best_vref_values_start+best_vref_values_count-1,
+ best_vref_values_count-1);
#else
- final_vref_value = best_vref_values_start + divide_nint(best_vref_values_count - 1, 2);
+ final_vref_value = best_vref_values_start + divide_nint(best_vref_values_count - 1, 2);
if (final_vref_value < VREF_RANGE2_LIMIT) {
final_vref_range = 1;
} else {
@@ -7680,36 +7667,36 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
#endif
- } else {
- /* If nothing passed use the default Vref value for this rank */
- bdk_lmcx_modereg_params2_t lmc_modereg_params2;
- lmc_modereg_params2.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num));
- final_vref_value = (lmc_modereg_params2.u >> (rankx * 10 + 3)) & 0x3f;
- final_vref_range = (lmc_modereg_params2.u >> (rankx * 10 + 9)) & 0x01;
-
- ddr_print("N%d.LMC%d.R%d: Vref Using Default :"
- " %2d <----- %2d (0x%02x) -----> %2d, range%1d\n",
- node, ddr_interface_num, rankx,
+ } else {
+ /* If nothing passed use the default Vref value for this rank */
+ bdk_lmcx_modereg_params2_t lmc_modereg_params2;
+ lmc_modereg_params2.u = BDK_CSR_READ(node, BDK_LMCX_MODEREG_PARAMS2(ddr_interface_num));
+ final_vref_value = (lmc_modereg_params2.u >> (rankx * 10 + 3)) & 0x3f;
+ final_vref_range = (lmc_modereg_params2.u >> (rankx * 10 + 9)) & 0x01;
+
+ ddr_print("N%d.LMC%d.R%d: Vref Using Default :"
+ " %2d <----- %2d (0x%02x) -----> %2d, range%1d\n",
+ node, ddr_interface_num, rankx,
final_vref_value, final_vref_value,
- final_vref_value, final_vref_value, final_vref_range+1);
- }
- }
+ final_vref_value, final_vref_value, final_vref_range+1);
+ }
+ }
- // allow override
+ // allow override
if ((s = lookup_env_parameter("ddr%d_vref_value_%1d%1d",
ddr_interface_num, !!(rankx&2), !!(rankx&1))) != NULL) {
final_vref_value = strtoul(s, NULL, 0);
}
- set_vref(node, ddr_interface_num, rankx, final_vref_range, final_vref_value);
+ set_vref(node, ddr_interface_num, rankx, final_vref_range, final_vref_value);
- } /* if (vref_value < VREF_FINAL) */
+ } /* if (vref_value < VREF_FINAL) */
} /* if (ddr_type == DDR4_DRAM) */
lmc_wlevel_rank.u = lmc_wlevel_rank_hw_results.u; /* Restore the saved value */
- for (byte = 0; byte < 9; ++byte)
- byte_test_status[byte] = WL_ESTIMATED;
+ for (byte = 0; byte < 9; ++byte)
+ byte_test_status[byte] = WL_ESTIMATED;
if (wlevel_bitmask_errors == 0) {
@@ -7718,14 +7705,14 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
// FIXME: these now put in by test_dram_byte()
//rank_addr |= (ddr_interface_num<<7); /* Map address into proper interface */
//rank_addr = bdk_numa_get_address(node, rank_addr);
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: Active Rank %d Address: 0x%lx\n",
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: Active Rank %d Address: 0x%llx\n",
node, ddr_interface_num, rankx, active_rank, rank_addr);
- { // start parallel write-leveling block for delay high-order bits
- int errors = 0;
- int byte_delay[9];
- uint64_t bytemask;
- int bytes_todo;
+ { // start parallel write-leveling block for delay high-order bits
+ int errors = 0;
+ int byte_delay[9];
+ uint64_t bytemask;
+ int bytes_todo;
if (ddr_interface_64b) {
bytes_todo = (sw_wlevel_hw) ? ddr_interface_bytemask : 0xFF;
@@ -7735,16 +7722,16 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
bytemask = 0x00000000ffffffffULL;
}
- for (byte = 0; byte < 9; ++byte) {
- if (!(bytes_todo & (1 << byte))) {
- byte_delay[byte] = 0;
- } else {
- byte_delay[byte] = get_wlevel_rank_struct(&lmc_wlevel_rank, byte);
- }
- } /* for (byte = 0; byte < 9; ++byte) */
+ for (byte = 0; byte < 9; ++byte) {
+ if (!(bytes_todo & (1 << byte))) {
+ byte_delay[byte] = 0;
+ } else {
+ byte_delay[byte] = get_wlevel_rank_struct(&lmc_wlevel_rank, byte);
+ }
+ } /* for (byte = 0; byte < 9; ++byte) */
#define WL_MIN_NO_ERRORS_COUNT 3 // FIXME? three passes without errors
- int no_errors_count = 0;
+ int no_errors_count = 0;
// Change verbosity if using measured vs computed VREF or DDR3
// measured goes many times through SWL, computed and DDR3 only once
@@ -7757,14 +7744,14 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
uint64_t start_dram_dclk, stop_dram_dclk;
uint64_t start_dram_ops, stop_dram_ops;
#endif
- do {
- // write the current set of WL delays
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
- lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+ do {
+ // write the current set of WL delays
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
- bdk_watchdog_poke();
+ bdk_watchdog_poke();
- // do the test
+ // do the test
if (sw_wlevel_hw) {
errors = run_best_hw_patterns(node, ddr_interface_num, rank_addr,
DBTRAIN_TEST, bad_bits);
@@ -7787,10 +7774,10 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#endif
}
- VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: S/W write-leveling TEST: returned 0x%x\n",
+ VB_PRT(VBL_DEV2, "N%d.LMC%d.R%d: S/W write-leveling TEST: returned 0x%x\n",
node, ddr_interface_num, rankx, errors);
- // remember, errors will not be returned for byte-lanes that have maxxed out...
+ // remember, errors will not be returned for byte-lanes that have maxxed out...
if (errors == 0) {
no_errors_count++; // bump
if (no_errors_count > 1) // bypass check/update completely
@@ -7798,39 +7785,39 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
} else
no_errors_count = 0; // reset
- // check errors by byte
- for (byte = 0; byte < 9; ++byte) {
- if (!(bytes_todo & (1 << byte)))
- continue;
-
- delay = byte_delay[byte];
- if (errors & (1 << byte)) { // yes, an error in this byte lane
- debug_print(" byte %d delay %2d Errors\n", byte, delay);
- // since this byte had an error, we move to the next delay value, unless maxxed out
- delay += 8; // incr by 8 to do only delay high-order bits
- if (delay < 32) {
- update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
- debug_print(" byte %d delay %2d New\n", byte, delay);
- byte_delay[byte] = delay;
- } else { // reached max delay, maybe really done with this byte
+ // check errors by byte
+ for (byte = 0; byte < 9; ++byte) {
+ if (!(bytes_todo & (1 << byte)))
+ continue;
+
+ delay = byte_delay[byte];
+ if (errors & (1 << byte)) { // yes, an error in this byte lane
+ debug_print(" byte %d delay %2d Errors\n", byte, delay);
+ // since this byte had an error, we move to the next delay value, unless maxxed out
+ delay += 8; // incr by 8 to do only delay high-order bits
+ if (delay < 32) {
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
+ debug_print(" byte %d delay %2d New\n", byte, delay);
+ byte_delay[byte] = delay;
+ } else { // reached max delay, maybe really done with this byte
#if SWL_TRY_HWL_ALT
- if (!measured_vref_flag && // consider an alt only for computed VREF and
+ if (!measured_vref_flag && // consider an alt only for computed VREF and
(hwl_alts[rankx].hwl_alt_mask & (1 << byte))) // if an alt exists...
{
int bad_delay = delay & 0x6; // just orig low-3 bits
- delay = hwl_alts[rankx].hwl_alt_delay[byte]; // yes, use it
- hwl_alts[rankx].hwl_alt_mask &= ~(1 << byte); // clear that flag
- update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
- byte_delay[byte] = delay;
- debug_print(" byte %d delay %2d ALTERNATE\n", byte, delay);
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SWL: Byte %d: %d FAIL, trying ALTERNATE %d\n",
+ delay = hwl_alts[rankx].hwl_alt_delay[byte]; // yes, use it
+ hwl_alts[rankx].hwl_alt_mask &= ~(1 << byte); // clear that flag
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
+ byte_delay[byte] = delay;
+ debug_print(" byte %d delay %2d ALTERNATE\n", byte, delay);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: SWL: Byte %d: %d FAIL, trying ALTERNATE %d\n",
node, ddr_interface_num, rankx, byte, bad_delay, delay);
- } else
+ } else
#endif /* SWL_TRY_HWL_ALT */
{
unsigned bits_bad;
- if (byte < 8) {
+ if (byte < 8) {
bytemask &= ~(0xffULL << (8*byte)); // test no longer, remove from byte mask
bits_bad = (unsigned)((bad_bits[0] >> (8 * byte)) & 0xffUL);
} else {
@@ -7839,18 +7826,18 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
bytes_todo &= ~(1 << byte); // remove from bytes to do
byte_test_status[byte] = WL_ESTIMATED; // make sure this is set for this case
debug_print(" byte %d delay %2d Exhausted\n", byte, delay);
- VB_PRT(vbl_local, "N%d.LMC%d.R%d: SWL: Byte %d (0x%02x): delay %d EXHAUSTED \n",
+ VB_PRT(vbl_local, "N%d.LMC%d.R%d: SWL: Byte %d (0x%02x): delay %d EXHAUSTED \n",
node, ddr_interface_num, rankx, byte, bits_bad, delay);
}
- }
- } else { // no error, stay with current delay, but keep testing it...
- debug_print(" byte %d delay %2d Passed\n", byte, delay);
- byte_test_status[byte] = WL_HARDWARE; // change status
- }
+ }
+ } else { // no error, stay with current delay, but keep testing it...
+ debug_print(" byte %d delay %2d Passed\n", byte, delay);
+ byte_test_status[byte] = WL_HARDWARE; // change status
+ }
- } /* for (byte = 0; byte < 9; ++byte) */
+ } /* for (byte = 0; byte < 9; ++byte) */
- } while (no_errors_count < WL_MIN_NO_ERRORS_COUNT);
+ } while (no_errors_count < WL_MIN_NO_ERRORS_COUNT);
#if ENABLE_SW_WLEVEL_UTILIZATION
if (! sw_wlevel_hw) {
@@ -7863,11 +7850,11 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
percent_x10 / 10, percent_x10 % 10);
}
#endif
- if (errors) {
- debug_print("End WLEV_64 while loop: vref_value %d(0x%x), errors 0x%02x\n",
- vref_value, vref_value, errors);
- }
- } // end parallel write-leveling block for delay high-order bits
+ if (errors) {
+ debug_print("End WLEV_64 while loop: vref_value %d(0x%x), errors 0x%02x\n",
+ vref_value, vref_value, errors);
+ }
+ } // end parallel write-leveling block for delay high-order bits
if (sw_wlevel_hw) { // if we used HW-assist, we did the ECC byte when approp.
VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: HW-assist SWL - no ECC estimate!!!\n",
@@ -7882,13 +7869,13 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if ((save_byte8 != lmc_wlevel_rank.s.byte3) &&
(save_byte8 != lmc_wlevel_rank.s.byte4))
- {
- // try to calculate a workable delay using SW bytes 3 and 4 and HW byte 8
+ {
+ // try to calculate a workable delay using SW bytes 3 and 4 and HW byte 8
int test_byte8 = save_byte8;
int test_byte8_error;
int byte8_error = 0x1f;
int adder;
- int avg_bytes = divide_nint(lmc_wlevel_rank.s.byte3+lmc_wlevel_rank.s.byte4, 2);
+ int avg_bytes = divide_nint(lmc_wlevel_rank.s.byte3+lmc_wlevel_rank.s.byte4, 2);
for (adder = 0; adder<= 32; adder+=8) {
test_byte8_error = _abs((adder+save_byte8) - avg_bytes);
if (test_byte8_error < byte8_error) {
@@ -7898,8 +7885,8 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
#if SW_WL_CHECK_PATCH
- // only do the check if we are not using measured VREF
- if (!measured_vref_flag) {
+ // only do the check if we are not using measured VREF
+ if (!measured_vref_flag) {
test_byte8 &= ~1; /* Use only even settings, rounding down... */
// do validity check on the calculated ECC delay value
@@ -7941,7 +7928,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
byte_test_status[8] = WL_SOFTWARE; /* Estimated delay */
}
} else {
- byte_test_status[8] = WL_HARDWARE; /* H/W delay value */
+ byte_test_status[8] = WL_HARDWARE; /* H/W delay value */
lmc_wlevel_rank.s.byte8 = lmc_wlevel_rank.s.byte0; /* ECC is not used */
}
} else { /* if ((ddr_interface_bytemask & 0xff) == 0xff) */
@@ -7970,7 +7957,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
/* Vref training loop is only used for DDR4 */
if (ddr_type != DDR4_DRAM)
- break;
+ break;
if (bytes_failed == 0) {
if (vref_values_count == 0) {
@@ -7981,148 +7968,148 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
best_vref_values_count = vref_values_count;
best_vref_values_start = vref_values_start;
debug_print("N%d.LMC%d.R%d: Vref Training (%2d) : 0x%02x <----- ???? -----> 0x%02x\n",
- node, ddr_interface_num,
+ node, ddr_interface_num,
rankx, vref_value, best_vref_values_start,
best_vref_values_start+best_vref_values_count-1);
}
} else {
vref_values_count = 0;
- debug_print("N%d.LMC%d.R%d: Vref Training (%2d) : failed\n",
- node, ddr_interface_num,
- rankx, vref_value);
+ debug_print("N%d.LMC%d.R%d: Vref Training (%2d) : failed\n",
+ node, ddr_interface_num,
+ rankx, vref_value);
}
} /* for (vref_value=0; vref_value<VREF_LIMIT; ++vref_value) */
- /* Determine address of DRAM to test for pass 2 and final test of software write leveling. */
+ /* Determine address of DRAM to test for pass 2 and final test of software write leveling. */
rank_addr = active_rank * (1ull << (pbank_lsb - bunk_enable + (interfaces/2)));
- rank_addr |= (ddr_interface_num<<7); /* Map address into proper interface */
- rank_addr = bdk_numa_get_address(node, rank_addr);
- debug_print("N%d.LMC%d.R%d: Active Rank %d Address: 0x%lx\n",
- node, ddr_interface_num, rankx, active_rank, rank_addr);
+ rank_addr |= (ddr_interface_num<<7); /* Map address into proper interface */
+ rank_addr = bdk_numa_get_address(node, rank_addr);
+ debug_print("N%d.LMC%d.R%d: Active Rank %d Address: 0x%lx\n",
+ node, ddr_interface_num, rankx, active_rank, rank_addr);
- int errors;
+ int errors;
if (bytes_failed) {
#if !DISABLE_SW_WL_PASS_2
- ddr_print("N%d.LMC%d.R%d: Starting SW Write-leveling pass 2\n",
- node, ddr_interface_num, rankx);
+ ddr_print("N%d.LMC%d.R%d: Starting SW Write-leveling pass 2\n",
+ node, ddr_interface_num, rankx);
sw_wl_rank_status = WL_SOFTWARE;
/* If previous s/w fixups failed then retry using s/w write-leveling. */
if (wlevel_bitmask_errors == 0) {
- /* h/w succeeded but previous s/w fixups failed. So retry s/w. */
+ /* h/w succeeded but previous s/w fixups failed. So retry s/w. */
debug_print("N%d.LMC%d.R%d: Retrying software Write-Leveling.\n",
- node, ddr_interface_num, rankx);
+ node, ddr_interface_num, rankx);
}
- { // start parallel write-leveling block for delay low-order bits
- int byte_delay[8];
- int byte_passed[8];
- uint64_t bytemask;
- uint64_t bitmask;
+ { // start parallel write-leveling block for delay low-order bits
+ int byte_delay[8];
+ int byte_passed[8];
+ uint64_t bytemask;
+ uint64_t bitmask;
int wl_offset;
- int bytes_todo;
+ int bytes_todo;
- for (byte = 0; byte < 8; ++byte) {
- byte_passed[byte] = 0;
- }
+ for (byte = 0; byte < 8; ++byte) {
+ byte_passed[byte] = 0;
+ }
- bytes_todo = ddr_interface_bytemask;
+ bytes_todo = ddr_interface_bytemask;
for (wl_offset = sw_wlevel_offset; wl_offset >= 0; --wl_offset) {
- debug_print("Starting wl_offset for-loop: %d\n", wl_offset);
+ debug_print("Starting wl_offset for-loop: %d\n", wl_offset);
- bytemask = 0;
+ bytemask = 0;
- for (byte = 0; byte < 8; ++byte) {
- byte_delay[byte] = 0;
- if (!(bytes_todo & (1 << byte))) // this does not contain fully passed bytes
- continue;
+ for (byte = 0; byte < 8; ++byte) {
+ byte_delay[byte] = 0;
+ if (!(bytes_todo & (1 << byte))) // this does not contain fully passed bytes
+ continue;
- byte_passed[byte] = 0; // reset across passes if not fully passed
- update_wlevel_rank_struct(&lmc_wlevel_rank, byte, 0); // all delays start at 0
- bitmask = ((!ddr_interface_64b) && (byte == 4)) ? 0x0f: 0xff;
- bytemask |= bitmask << (8*byte); // set the bytes bits in the bytemask
- } /* for (byte = 0; byte < 8; ++byte) */
+ byte_passed[byte] = 0; // reset across passes if not fully passed
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte, 0); // all delays start at 0
+ bitmask = ((!ddr_interface_64b) && (byte == 4)) ? 0x0f: 0xff;
+ bytemask |= bitmask << (8*byte); // set the bytes bits in the bytemask
+ } /* for (byte = 0; byte < 8; ++byte) */
- while (bytemask != 0) { // start a pass if there is any byte lane to test
+ while (bytemask != 0) { // start a pass if there is any byte lane to test
- debug_print("Starting bytemask while-loop: 0x%lx\n", bytemask);
+ debug_print("Starting bytemask while-loop: 0x%lx\n", bytemask);
- // write this set of WL delays
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
- lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+ // write this set of WL delays
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
- bdk_watchdog_poke();
+ bdk_watchdog_poke();
- // do the test
+ // do the test
if (sw_wlevel_hw)
errors = run_best_hw_patterns(node, ddr_interface_num, rank_addr,
DBTRAIN_TEST, NULL);
else
errors = test_dram_byte(node, ddr_interface_num, rank_addr, bytemask, NULL);
- debug_print("SWL pass 2: test_dram_byte returned 0x%x\n", errors);
-
- // check errors by byte
- for (byte = 0; byte < 8; ++byte) {
- if (!(bytes_todo & (1 << byte)))
- continue;
-
- delay = byte_delay[byte];
- if (errors & (1 << byte)) { // yes, an error
- debug_print(" byte %d delay %2d Errors\n", byte, delay);
- byte_passed[byte] = 0;
- } else { // no error
- byte_passed[byte] += 1;
- if (byte_passed[byte] == (1 + wl_offset)) { /* Look for consecutive working settings */
- debug_print(" byte %d delay %2d FULLY Passed\n", byte, delay);
- if (wl_offset == 1) {
- byte_test_status[byte] = WL_SOFTWARE;
- } else if (wl_offset == 0) {
- byte_test_status[byte] = WL_SOFTWARE1;
- }
- bytemask &= ~(0xffULL << (8*byte)); // test no longer, remove from byte mask this pass
- bytes_todo &= ~(1 << byte); // remove completely from concern
- continue; // on to the next byte, bypass delay updating!!
- } else {
- debug_print(" byte %d delay %2d Passed\n", byte, delay);
- }
- }
- // error or no, here we move to the next delay value for this byte, unless done all delays
- // only a byte that has "fully passed" will bypass around this,
- delay += 2;
- if (delay < 32) {
- update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
- debug_print(" byte %d delay %2d New\n", byte, delay);
- byte_delay[byte] = delay;
- } else {
- // reached max delay, done with this byte
- debug_print(" byte %d delay %2d Exhausted\n", byte, delay);
- bytemask &= ~(0xffULL << (8*byte)); // test no longer, remove from byte mask this pass
- }
- } /* for (byte = 0; byte < 8; ++byte) */
- debug_print("End of for-loop: bytemask 0x%lx\n", bytemask);
-
- } /* while (bytemask != 0) */
- } /* for (wl_offset = sw_wlevel_offset; wl_offset >= 0; --wl_offset) */
-
- for (byte = 0; byte < 8; ++byte) {
- // any bytes left in bytes_todo did not pass
- if (bytes_todo & (1 << byte)) {
- /* Last resort. Use Rlevel settings to estimate
- Wlevel if software write-leveling fails */
- debug_print("Using RLEVEL as WLEVEL estimate for byte %d\n", byte);
- lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
- rlevel_to_wlevel(&lmc_rlevel_rank, &lmc_wlevel_rank, byte);
- }
- } /* for (byte = 0; byte < 8; ++byte) */
-
- sw_wl_failed = (bytes_todo != 0);
-
- } // end parallel write-leveling block for delay low-order bits
+ debug_print("SWL pass 2: test_dram_byte returned 0x%x\n", errors);
+
+ // check errors by byte
+ for (byte = 0; byte < 8; ++byte) {
+ if (!(bytes_todo & (1 << byte)))
+ continue;
+
+ delay = byte_delay[byte];
+ if (errors & (1 << byte)) { // yes, an error
+ debug_print(" byte %d delay %2d Errors\n", byte, delay);
+ byte_passed[byte] = 0;
+ } else { // no error
+ byte_passed[byte] += 1;
+ if (byte_passed[byte] == (1 + wl_offset)) { /* Look for consecutive working settings */
+ debug_print(" byte %d delay %2d FULLY Passed\n", byte, delay);
+ if (wl_offset == 1) {
+ byte_test_status[byte] = WL_SOFTWARE;
+ } else if (wl_offset == 0) {
+ byte_test_status[byte] = WL_SOFTWARE1;
+ }
+ bytemask &= ~(0xffULL << (8*byte)); // test no longer, remove from byte mask this pass
+ bytes_todo &= ~(1 << byte); // remove completely from concern
+ continue; // on to the next byte, bypass delay updating!!
+ } else {
+ debug_print(" byte %d delay %2d Passed\n", byte, delay);
+ }
+ }
+ // error or no, here we move to the next delay value for this byte, unless done all delays
+ // only a byte that has "fully passed" will bypass around this,
+ delay += 2;
+ if (delay < 32) {
+ update_wlevel_rank_struct(&lmc_wlevel_rank, byte, delay);
+ debug_print(" byte %d delay %2d New\n", byte, delay);
+ byte_delay[byte] = delay;
+ } else {
+ // reached max delay, done with this byte
+ debug_print(" byte %d delay %2d Exhausted\n", byte, delay);
+ bytemask &= ~(0xffULL << (8*byte)); // test no longer, remove from byte mask this pass
+ }
+ } /* for (byte = 0; byte < 8; ++byte) */
+ debug_print("End of for-loop: bytemask 0x%lx\n", bytemask);
+
+ } /* while (bytemask != 0) */
+ } /* for (wl_offset = sw_wlevel_offset; wl_offset >= 0; --wl_offset) */
+
+ for (byte = 0; byte < 8; ++byte) {
+ // any bytes left in bytes_todo did not pass
+ if (bytes_todo & (1 << byte)) {
+ /* Last resort. Use Rlevel settings to estimate
+ Wlevel if software write-leveling fails */
+ debug_print("Using RLEVEL as WLEVEL estimate for byte %d\n", byte);
+ lmc_rlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_RLEVEL_RANKX(ddr_interface_num, rankx));
+ rlevel_to_wlevel(&lmc_rlevel_rank, &lmc_wlevel_rank, byte);
+ }
+ } /* for (byte = 0; byte < 8; ++byte) */
+
+ sw_wl_failed = (bytes_todo != 0);
+
+ } // end parallel write-leveling block for delay low-order bits
if (use_ecc) {
/* ECC byte has to be estimated. Take the average of the two surrounding bytes. */
@@ -8141,29 +8128,29 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if ((ddr_interface_bytemask & (1 << byte)))
continue;
update_wlevel_rank_struct(&lmc_wlevel_rank, byte,
- lmc_wlevel_rank.s.byte0);
+ lmc_wlevel_rank.s.byte0);
byte_test_status[byte] = WL_SOFTWARE;
}
#else /* !DISABLE_SW_WL_PASS_2 */
- // FIXME? the big hammer, did not even try SW WL pass2, assume only chip reset will help
- ddr_print("N%d.LMC%d.R%d: S/W write-leveling pass 1 failed\n",
- node, ddr_interface_num, rankx);
- sw_wl_failed = 1;
+ // FIXME? the big hammer, did not even try SW WL pass2, assume only chip reset will help
+ ddr_print("N%d.LMC%d.R%d: S/W write-leveling pass 1 failed\n",
+ node, ddr_interface_num, rankx);
+ sw_wl_failed = 1;
#endif /* !DISABLE_SW_WL_PASS_2 */
} else { /* if (bytes_failed) */
- // SW WL pass 1 was OK, write the settings
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
- lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
+ // SW WL pass 1 was OK, write the settings
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
+ lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
#if SW_WL_CHECK_PATCH
- // do validity check on the delay values by running the test 1 more time...
- // FIXME: we really need to check the ECC byte setting here as well,
- // so we need to enable ECC for this test!!!
- // if there are any errors, claim SW WL failure
- {
- uint64_t datamask = (ddr_interface_64b) ? 0xffffffffffffffffULL : 0x00000000ffffffffULL;
+ // do validity check on the delay values by running the test 1 more time...
+ // FIXME: we really need to check the ECC byte setting here as well,
+ // so we need to enable ECC for this test!!!
+ // if there are any errors, claim SW WL failure
+ {
+ uint64_t datamask = (ddr_interface_64b) ? 0xffffffffffffffffULL : 0x00000000ffffffffULL;
// do the test
if (sw_wlevel_hw) {
@@ -8177,18 +8164,18 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#endif
}
- if (errors) {
- ddr_print("N%d.LMC%d.R%d: Wlevel Rank Final Test errors 0x%x\n",
- node, ddr_interface_num, rankx, errors);
- sw_wl_failed = 1;
- }
- }
+ if (errors) {
+ ddr_print("N%d.LMC%d.R%d: Wlevel Rank Final Test errors 0x%x\n",
+ node, ddr_interface_num, rankx, errors);
+ sw_wl_failed = 1;
+ }
+ }
#endif /* SW_WL_CHECK_PATCH */
- } /* if (bytes_failed) */
+ } /* if (bytes_failed) */
- // FIXME? dump the WL settings, so we get more of a clue as to what happened where
- ddr_print("N%d.LMC%d.R%d: Wlevel Rank %#4x, 0x%016lX : %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %s\n",
+ // FIXME? dump the WL settings, so we get more of a clue as to what happened where
+ ddr_print("N%d.LMC%d.R%d: Wlevel Rank %#4x, 0x%016llX : %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %2d%3s %s\n",
node, ddr_interface_num, rankx,
lmc_wlevel_rank.s.status,
lmc_wlevel_rank.u,
@@ -8204,7 +8191,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
(sw_wl_rank_status == WL_HARDWARE) ? "" : "(s)"
);
- // finally, check for fatal conditions: either chip reset right here, or return error flag
+ // finally, check for fatal conditions: either chip reset right here, or return error flag
if (((ddr_type == DDR4_DRAM) && (best_vref_values_count == 0)) || sw_wl_failed) {
if (!ddr_disable_chip_reset) { // do chip RESET
error_print("INFO: Short memory test indicates a retry is needed on N%d.LMC%d.R%d. Resetting node...\n",
@@ -8221,7 +8208,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
active_rank++;
} /* for (rankx = 0; rankx < dimm_count * 4; rankx++) */
- // Finalize the write-leveling settings
+ // Finalize the write-leveling settings
for (rankx = 0; rankx < dimm_count * 4;rankx++) {
uint64_t value;
int parameter_set = 0;
@@ -8230,20 +8217,6 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
- if (bdk_is_platform(BDK_PLATFORM_ASIM)) {
- parameter_set |= 1;
-
- lmc_wlevel_rank.s.byte8 = 0;
- lmc_wlevel_rank.s.byte7 = 0;
- lmc_wlevel_rank.s.byte6 = 0;
- lmc_wlevel_rank.s.byte5 = 0;
- lmc_wlevel_rank.s.byte4 = 0;
- lmc_wlevel_rank.s.byte3 = 0;
- lmc_wlevel_rank.s.byte2 = 0;
- lmc_wlevel_rank.s.byte1 = 0;
- lmc_wlevel_rank.s.byte0 = 0;
- }
-
for (i=0; i<9; ++i) {
if ((s = lookup_env_parameter("ddr%d_wlevel_rank%d_byte%d", ddr_interface_num, rankx, i)) != NULL) {
parameter_set |= 1;
@@ -8262,33 +8235,33 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
if (parameter_set) {
DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx), lmc_wlevel_rank.u);
lmc_wlevel_rank.u = BDK_CSR_READ(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, rankx));
- display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
+ display_WL(node, ddr_interface_num, lmc_wlevel_rank, rankx);
}
#if WLEXTRAS_PATCH
- if ((rank_mask & 0x0F) != 0x0F) { // if there are unused entries to be filled
- if (rankx < 3) {
- debug_print("N%d.LMC%d.R%d: checking for WLEVEL_RANK unused entries.\n",
- node, ddr_interface_num, rankx);
- if (rankx == 0) { // if rank 0, write ranks 1 and 2 here if empty
- if (!(rank_mask & (1<<1))) { // check that rank 1 is empty
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 1), lmc_wlevel_rank.u);
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
- node, ddr_interface_num, rankx, 1);
- }
- if (!(rank_mask & (1<<2))) { // check that rank 2 is empty
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
- node, ddr_interface_num, rankx, 2);
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 2), lmc_wlevel_rank.u);
- }
- }
- // if rank 0, 1 or 2, write rank 3 here if empty
- if (!(rank_mask & (1<<3))) { // check that rank 3 is empty
- VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
- node, ddr_interface_num, rankx, 3);
- DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 3), lmc_wlevel_rank.u);
- }
- }
- }
+ if ((rank_mask & 0x0F) != 0x0F) { // if there are unused entries to be filled
+ if (rankx < 3) {
+ debug_print("N%d.LMC%d.R%d: checking for WLEVEL_RANK unused entries.\n",
+ node, ddr_interface_num, rankx);
+ if (rankx == 0) { // if rank 0, write ranks 1 and 2 here if empty
+ if (!(rank_mask & (1<<1))) { // check that rank 1 is empty
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 1), lmc_wlevel_rank.u);
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 1);
+ }
+ if (!(rank_mask & (1<<2))) { // check that rank 2 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 2);
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 2), lmc_wlevel_rank.u);
+ }
+ }
+ // if rank 0, 1 or 2, write rank 3 here if empty
+ if (!(rank_mask & (1<<3))) { // check that rank 3 is empty
+ VB_PRT(VBL_DEV, "N%d.LMC%d.R%d: writing WLEVEL_RANK unused entry R%d.\n",
+ node, ddr_interface_num, rankx, 3);
+ DRAM_CSR_WRITE(node, BDK_LMCX_WLEVEL_RANKX(ddr_interface_num, 3), lmc_wlevel_rank.u);
+ }
+ }
+ }
#endif /* WLEXTRAS_PATCH */
} /* for (rankx = 0; rankx < dimm_count * 4;rankx++) */
@@ -8301,7 +8274,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#if USE_L2_WAYS_LIMIT
/* Restore the l2 set configuration */
- if ((s = lookup_env_parameter("limit_l2_ways")) != NULL) {
+ if ((s = lookup_env_parameter("limit_l2_ways")) != NULL) {
int ways = strtoul(s, NULL, 10);
limit_l2_ways(node, ways, 1);
} else {
@@ -8318,21 +8291,21 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
}
#endif
-#if 1 // was #ifdef CAVIUM_ONLY
+#ifdef CAVIUM_ONLY
{
- int i;
+ int _i;
int setting[9];
bdk_lmcx_dll_ctl3_t ddr_dll_ctl3;
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
- for (i=0; i<9; ++i) {
- SET_DDR_DLL_CTL3(dll90_byte_sel, ENCODE_DLL90_BYTE_SEL(i));
- DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
+ for (_i=0; _i<9; ++_i) {
+ SET_DDR_DLL_CTL3(dll90_byte_sel, ENCODE_DLL90_BYTE_SEL(_i));
+ DRAM_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), ddr_dll_ctl3.u);
BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
ddr_dll_ctl3.u = BDK_CSR_READ(node, BDK_LMCX_DLL_CTL3(ddr_interface_num));
- setting[i] = GET_DDR_DLL_CTL3(dll90_setting);
- debug_print("%d. LMC%d_DLL_CTL3[%d] = %016lx %d\n", i, ddr_interface_num,
- GET_DDR_DLL_CTL3(dll90_byte_sel), ddr_dll_ctl3.u, setting[i]);
+ setting[_i] = GET_DDR_DLL_CTL3(dll90_setting);
+ debug_print("%d. LMC%d_DLL_CTL3[%d] = %016lx %d\n", _i, ddr_interface_num,
+ GET_DDR_DLL_CTL3(dll90_byte_sel), ddr_dll_ctl3.u, setting[_i]);
}
VB_PRT(VBL_DEV, "N%d.LMC%d: %-36s : %5d %5d %5d %5d %5d %5d %5d %5d %5d\n",
@@ -8340,7 +8313,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
setting[8], setting[7], setting[6], setting[5], setting[4],
setting[3], setting[2], setting[1], setting[0]);
- //BDK_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), save_ddr_dll_ctl3.u);
+ //BDK_CSR_WRITE(node, BDK_LMCX_DLL_CTL3(ddr_interface_num), save_ddr_dll_ctl3.u);
}
#endif /* CAVIUM_ONLY */
@@ -8348,9 +8321,9 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
// FIXME: no need to do these if we are going to auto-tune... ???
process_custom_dll_offsets(node, ddr_interface_num, "ddr_dll_write_offset",
- custom_lmc_config->dll_write_offset, "ddr%d_dll_write_offset_byte%d", 1);
+ custom_lmc_config->dll_write_offset, "ddr%d_dll_write_offset_byte%d", 1);
process_custom_dll_offsets(node, ddr_interface_num, "ddr_dll_read_offset",
- custom_lmc_config->dll_read_offset, "ddr%d_dll_read_offset_byte%d", 2);
+ custom_lmc_config->dll_read_offset, "ddr%d_dll_read_offset_byte%d", 2);
// we want to train write bit-deskew here...
if (! disable_deskew_training) {
@@ -8392,10 +8365,10 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
#if ENABLE_SLOT_CTL_ACCESS
{
- bdk_lmcx_slot_ctl0_t lmc_slot_ctl0;
- bdk_lmcx_slot_ctl1_t lmc_slot_ctl1;
- bdk_lmcx_slot_ctl2_t lmc_slot_ctl2;
- bdk_lmcx_slot_ctl3_t lmc_slot_ctl3;
+ bdk_lmcx_slot_ctl0_t lmc_slot_ctl0;
+ bdk_lmcx_slot_ctl1_t lmc_slot_ctl1;
+ bdk_lmcx_slot_ctl2_t lmc_slot_ctl2;
+ bdk_lmcx_slot_ctl3_t lmc_slot_ctl3;
lmc_slot_ctl0.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL0(ddr_interface_num));
lmc_slot_ctl1.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL1(ddr_interface_num));
@@ -8407,35 +8380,35 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL2", lmc_slot_ctl2.u);
ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL3", lmc_slot_ctl3.u);
- // for now, look only for SLOT_CTL1 envvar for override of contents
- if ((s = lookup_env_parameter("ddr%d_slot_ctl1", ddr_interface_num)) != NULL) {
- int slot_ctl1_incr = strtoul(s, NULL, 0);
- // validate the value
- if ((slot_ctl1_incr < 0) || (slot_ctl1_incr > 3)) { // allow 0 for printing only
- error_print("ddr%d_slot_ctl1 illegal value (%d); must be 0-3\n",
- ddr_interface_num, slot_ctl1_incr);
- } else {
+ // for now, look only for SLOT_CTL1 envvar for override of contents
+ if ((s = lookup_env_parameter("ddr%d_slot_ctl1", ddr_interface_num)) != NULL) {
+ int slot_ctl1_incr = strtoul(s, NULL, 0);
+ // validate the value
+ if ((slot_ctl1_incr < 0) || (slot_ctl1_incr > 3)) { // allow 0 for printing only
+ error_print("ddr%d_slot_ctl1 illegal value (%d); must be 0-3\n",
+ ddr_interface_num, slot_ctl1_incr);
+ } else {
#define INCR(csr, chip, field, incr) \
- csr.chip.field = (csr.chip.field < (64 - incr)) ? (csr.chip.field + incr) : 63
-
- // only print original when we are changing it!
- if (slot_ctl1_incr)
- ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL1", lmc_slot_ctl1.u);
-
- // modify all the SLOT_CTL1 fields by the increment, for now...
- // but make sure the value will not overflow!!!
- INCR(lmc_slot_ctl1, s, r2r_xrank_init, slot_ctl1_incr);
- INCR(lmc_slot_ctl1, s, r2w_xrank_init, slot_ctl1_incr);
- INCR(lmc_slot_ctl1, s, w2r_xrank_init, slot_ctl1_incr);
- INCR(lmc_slot_ctl1, s, w2w_xrank_init, slot_ctl1_incr);
- DRAM_CSR_WRITE(node, BDK_LMCX_SLOT_CTL1(ddr_interface_num), lmc_slot_ctl1.u);
- lmc_slot_ctl1.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL1(ddr_interface_num));
-
- // always print when we are changing it!
- printf("%-45s : 0x%016lx\n", "LMC_SLOT_CTL1", lmc_slot_ctl1.u);
- }
- }
+ csr.chip.field = (csr.chip.field < (64 - incr)) ? (csr.chip.field + incr) : 63
+
+ // only print original when we are changing it!
+ if (slot_ctl1_incr)
+ ddr_print("%-45s : 0x%016lx\n", "LMC_SLOT_CTL1", lmc_slot_ctl1.u);
+
+ // modify all the SLOT_CTL1 fields by the increment, for now...
+ // but make sure the value will not overflow!!!
+ INCR(lmc_slot_ctl1, s, r2r_xrank_init, slot_ctl1_incr);
+ INCR(lmc_slot_ctl1, s, r2w_xrank_init, slot_ctl1_incr);
+ INCR(lmc_slot_ctl1, s, w2r_xrank_init, slot_ctl1_incr);
+ INCR(lmc_slot_ctl1, s, w2w_xrank_init, slot_ctl1_incr);
+ DRAM_CSR_WRITE(node, BDK_LMCX_SLOT_CTL1(ddr_interface_num), lmc_slot_ctl1.u);
+ lmc_slot_ctl1.u = BDK_CSR_READ(node, BDK_LMCX_SLOT_CTL1(ddr_interface_num));
+
+ // always print when we are changing it!
+ printf("%-45s : 0x%016lx\n", "LMC_SLOT_CTL1", lmc_slot_ctl1.u);
+ }
+ }
}
#endif /* ENABLE_SLOT_CTL_ACCESS */
{
@@ -8449,18 +8422,9 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
for (tad=0; tad<num_tads; tad++)
DRAM_CSR_WRITE(node, BDK_L2C_TADX_INT_W1C(tad), BDK_CSR_READ(node, BDK_L2C_TADX_INT_W1C(tad)));
- ddr_print("%-45s : 0x%08lx\n", "LMC_INT",
+ ddr_print("%-45s : 0x%08llx\n", "LMC_INT",
BDK_CSR_READ(node, BDK_LMCX_INT(ddr_interface_num)));
-#if 0
- // NOTE: this must be done for pass 2.x
- // must enable ECC interrupts to get ECC error info in LMCX_INT
- if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
- DRAM_CSR_WRITE(node, BDK_LMCX_INT_ENA_W1S(ddr_interface_num), -1ULL);
- BDK_CSR_INIT(lmc_int_ena_w1s, node, BDK_LMCX_INT_ENA_W1S(ddr_interface_num));
- ddr_print("%-45s : 0x%08lx\n", "LMC_INT_ENA_W1S", lmc_int_ena_w1s.u);
- }
-#endif
}
// Now we can enable scrambling if desired...
@@ -8503,7 +8467,7 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_scramble_cfg0.u = strtoull(s, NULL, 0);
lmc_control.s.scramble_ena = 1;
}
- ddr_print("%-45s : 0x%016lx\n", "LMC_SCRAMBLE_CFG0", lmc_scramble_cfg0.u);
+ ddr_print("%-45s : 0x%016llx\n", "LMC_SCRAMBLE_CFG0", lmc_scramble_cfg0.u);
DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG0(ddr_interface_num), lmc_scramble_cfg0.u);
@@ -8511,20 +8475,20 @@ int init_octeon3_ddr3_interface(bdk_node_t node,
lmc_scramble_cfg1.u = strtoull(s, NULL, 0);
lmc_control.s.scramble_ena = 1;
}
- ddr_print("%-45s : 0x%016lx\n", "LMC_SCRAMBLE_CFG1", lmc_scramble_cfg1.u);
+ ddr_print("%-45s : 0x%016llx\n", "LMC_SCRAMBLE_CFG1", lmc_scramble_cfg1.u);
DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG1(ddr_interface_num), lmc_scramble_cfg1.u);
if ((s = lookup_env_parameter_ull("ddr_scramble_cfg2")) != NULL) {
lmc_scramble_cfg2.u = strtoull(s, NULL, 0);
lmc_control.s.scramble_ena = 1;
}
- ddr_print("%-45s : 0x%016lx\n", "LMC_SCRAMBLE_CFG2", lmc_scramble_cfg2.u);
+ ddr_print("%-45s : 0x%016llx\n", "LMC_SCRAMBLE_CFG2", lmc_scramble_cfg2.u);
DRAM_CSR_WRITE(node, BDK_LMCX_SCRAMBLE_CFG2(ddr_interface_num), lmc_scramble_cfg2.u);
if ((s = lookup_env_parameter_ull("ddr_ns_ctl")) != NULL) {
lmc_ns_ctl.u = strtoull(s, NULL, 0);
}
- ddr_print("%-45s : 0x%016lx\n", "LMC_NS_CTL", lmc_ns_ctl.u);
+ ddr_print("%-45s : 0x%016llx\n", "LMC_NS_CTL", lmc_ns_ctl.u);
DRAM_CSR_WRITE(node, BDK_LMCX_NS_CTL(ddr_interface_num), lmc_ns_ctl.u);
DRAM_CSR_WRITE(node, BDK_LMCX_CONTROL(ddr_interface_num), lmc_control.u);
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h b/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h
index ba1060e5e0..2dc3df5b27 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h
+++ b/src/vendorcode/cavium/bdk/libdram/dram-init-ddr3.h
@@ -82,7 +82,7 @@ read_DAC_DBI_settings(int node, int ddr_interface_num,
int dac_or_dbi, int *settings);
extern void
display_DAC_DBI_settings(int node, int ddr_interface_num, int dac_or_dbi,
- int ecc_ena, int *settings, char *title);
+ int ecc_ena, int *settings, const char *title);
#define RODT_OHMS_COUNT 8
#define RTT_NOM_OHMS_COUNT 8
@@ -94,4 +94,5 @@ display_DAC_DBI_settings(int node, int ddr_interface_num, int dac_or_dbi,
extern uint64_t hertz_to_psecs(uint64_t hertz);
extern uint64_t psecs_to_mts(uint64_t psecs);
extern uint64_t mts_to_hertz(uint64_t mts);
-extern uint64_t pretty_psecs_to_mts(uint64_t psecs);
+//extern uint64_t pretty_psecs_to_mts(uint64_t psecs);
+extern unsigned long pretty_psecs_to_mts(uint64_t psecs); /* FIXME(dhendrix) */
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-internal.h b/src/vendorcode/cavium/bdk/libdram/dram-internal.h
index 07fdbcbf54..0b4e8fb3da 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-internal.h
+++ b/src/vendorcode/cavium/bdk/libdram/dram-internal.h
@@ -46,7 +46,9 @@
* from the libdram directory
*/
-#include "libdram.h"
+/* FIXME(dhendrix): include path */
+//#include "libdram.h"
+#include <libdram/libdram.h>
#include "lib_octeon_shared.h"
#include "dram-print.h"
#include "dram-util.h"
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-l2c.c b/src/vendorcode/cavium/bdk/libdram/dram-l2c.c
index 11112955b2..8dc57189e5 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-l2c.c
+++ b/src/vendorcode/cavium/bdk/libdram/dram-l2c.c
@@ -39,6 +39,10 @@
#include <bdk.h>
#include "dram-internal.h"
+/* FIXME(dhendrix): added */
+#include <libbdk-hal/bdk-l2c.h>
+#include <libbdk-hal/bdk-utils.h>
+
int limit_l2_ways(bdk_node_t node, int ways, int verbose)
{
int ways_max = bdk_l2c_get_num_assoc(node);
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-print.h b/src/vendorcode/cavium/bdk/libdram/dram-print.h
index 94cdf92fbf..a29ba6bc4c 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-print.h
+++ b/src/vendorcode/cavium/bdk/libdram/dram-print.h
@@ -69,6 +69,8 @@ extern dram_verbosity_t dram_verbosity;
#define is_verbosity_special(level) (((int)(dram_verbosity & 0xf0) & (level)) != 0)
#define dram_is_verbose(level) (((level) & VBL_SPECIAL) ? is_verbosity_special(level) : is_verbosity_level(level))
+/* FIXME(dhendrix): printf... */
+#if 0
#define VB_PRT(level, format, ...) \
do { \
if (dram_is_verbose(level)) \
@@ -84,3 +86,20 @@ extern dram_verbosity_t dram_verbosity;
#else
#define debug_print(format, ...) do {} while (0)
#endif
+#endif
+#include <console/console.h>
+#define VB_PRT(level, format, ...) \
+ do { \
+ if (dram_is_verbose(level)) \
+ printk(BIOS_DEBUG, format, ##__VA_ARGS__); \
+ } while (0)
+
+#define ddr_print(format, ...) VB_PRT(VBL_NORM, format, ##__VA_ARGS__)
+
+#define error_print(format, ...) printk(BIOS_ERR, format, ##__VA_ARGS__)
+
+#ifdef DEBUG_DEBUG_PRINT
+ #define debug_print(format, ...) printk(BIOS_DEBUG, format, ##__VA_ARGS__)
+#else
+ #define debug_print(format, ...) do {} while (0)
+#endif
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-spd.c b/src/vendorcode/cavium/bdk/libdram/dram-spd.c
index 3717ca1109..6498b85f2e 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-spd.c
+++ b/src/vendorcode/cavium/bdk/libdram/dram-spd.c
@@ -37,9 +37,16 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
#include <bdk.h>
-#include <ctype.h>
#include "dram-internal.h"
+#include <bdk-minimal.h>
+#include <libbdk-arch/bdk-warn.h>
+#include <libbdk-hal/bdk-config.h>
+#include <libbdk-hal/bdk-twsi.h>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
/**
* Read the entire contents of a DIMM SPD and store it in the device tree. The
* current DRAM config is also updated, so future SPD accesses used the cached
@@ -52,8 +59,12 @@
*
* @return Zero on success, negative on failure
*/
+static uint8_t spd_bufs[4 * 256]; /* FIXME(dhendrix): storage for SPD buffers, assume DDR4 */
int read_entire_spd(bdk_node_t node, dram_config_t *cfg, int lmc, int dimm)
{
+ /* FIXME(dhendrix): hack to get around using allocated mem */
+ assert(dimm < 4);
+
/* If pointer to data is provided, use it, otherwise read from SPD over twsi */
if (cfg->config[lmc].dimm_config_table[dimm].spd_ptr)
return 0;
@@ -69,13 +80,18 @@ int read_entire_spd(bdk_node_t node, dram_config_t *cfg, int lmc, int dimm)
int64_t dev_type = bdk_twsix_read_ia(node, bus, address, DDR4_SPD_KEY_BYTE_DEVICE_TYPE, 1, 1);
if (dev_type < 0)
return -1; /* No DIMM */
- int spd_size = (dev_type == 0x0c) ? 512 : 256;
-
- /* Allocate storage */
- uint32_t *spd_buf = malloc(spd_size);
- if (!spd_buf)
- return -1;
- uint32_t *ptr = spd_buf;
+ // FIXME: prudolph: Nobody needs 512 byte SPDs...
+ //int spd_size = (dev_type == 0x0c) ? 512 : 256;
+ int spd_size = 256;
+
+ /*
+ * FIXME: Assume DIMM doesn't support
+ * 'Hybrid Module Extended Function Parameters' aka only 256 Byte SPD,
+ * as the code below is broken ...
+ */
+ assert(spd_size == 256);
+ uint8_t *spd_buf = &spd_bufs[dimm * 256];
+ uint32_t *ptr = (uint32_t *)spd_buf;
for (int bank = 0; bank < (spd_size >> 8); bank++)
{
@@ -104,7 +120,9 @@ int read_entire_spd(bdk_node_t node, dram_config_t *cfg, int lmc, int dimm)
}
/* Store the SPD in the device tree */
- bdk_config_set_blob(spd_size, spd_buf, BDK_CONFIG_DDR_SPD_DATA, dimm, lmc, node);
+ /* FIXME(dhendrix): No need for this? cfg gets updated, so the caller
+ * (libdram_config()) has what it needs. */
+// bdk_config_set_blob(spd_size, spd_buf, BDK_CONFIG_DDR_SPD_DATA, dimm, lmc, node);
cfg->config[lmc].dimm_config_table[dimm].spd_ptr = (void*)spd_buf;
return 0;
@@ -350,14 +368,14 @@ static uint32_t get_dimm_checksum(bdk_node_t node, const dimm_config_t *dimm_con
static
void report_common_dimm(bdk_node_t node, const dimm_config_t *dimm_config, int dimm,
- const char **dimm_types, int ddr_type, char *volt_str,
+ const char **dimm_types, int ddr_type, const char *volt_str,
int ddr_interface_num, int num_ranks, int dram_width, int dimm_size_mb)
{
int spd_ecc;
unsigned spd_module_type;
uint32_t serial_number;
char part_number[21]; /* 20 bytes plus string terminator is big enough for either */
- char *sn_str;
+ const char *sn_str;
spd_module_type = get_dimm_module_type(node, dimm_config, ddr_type);
spd_ecc = get_dimm_ecc(node, dimm_config, ddr_type);
@@ -405,7 +423,7 @@ void report_ddr3_dimm(bdk_node_t node, const dimm_config_t *dimm_config,
int dram_width, int dimm_size_mb)
{
int spd_voltage;
- char *volt_str;
+ const char *volt_str;
spd_voltage = read_spd(node, dimm_config, DDR3_SPD_NOMINAL_VOLTAGE);
if ((spd_voltage == 0) || (spd_voltage & 3))
@@ -445,7 +463,7 @@ void report_ddr4_dimm(bdk_node_t node, const dimm_config_t *dimm_config,
int dram_width, int dimm_size_mb)
{
int spd_voltage;
- char *volt_str;
+ const char *volt_str;
spd_voltage = read_spd(node, dimm_config, DDR4_SPD_MODULE_NOMINAL_VOLTAGE);
if ((spd_voltage == 0x01) || (spd_voltage & 0x02))
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c b/src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c
index e0e9d4442c..2c6a105dae 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c
+++ b/src/vendorcode/cavium/bdk/libdram/dram-tune-ddr3.c
@@ -39,6 +39,13 @@
#include <bdk.h>
#include "dram-internal.h"
+#include <string.h>
+#include <lame_string.h> /* for strtoul */
+#include <libbdk-hal/bdk-atomic.h>
+#include <libbdk-hal/bdk-clock.h>
+#include <libbdk-hal/bdk-rng.h>
+#include <libbdk-os/bdk-init.h>
+
// if enhanced verbosity levels are defined, use them
#if defined(VB_PRT)
#define ddr_print2(format, ...) VB_PRT(VBL_FAE, format, ##__VA_ARGS__)
@@ -185,14 +192,14 @@ get_speed_bin(bdk_node_t node, int lmc)
// FIXME: is this reasonable speed "binning"?
if (mts_speed >= 1700) {
- if (mts_speed >= 2000)
- ret = 2;
- else
- ret = 1;
+ if (mts_speed >= 2000)
+ ret = 2;
+ else
+ ret = 1;
}
debug_print("N%d.LMC%d: %s: returning bin %d for MTS %d\n",
- node, lmc, __FUNCTION__, ret, mts_speed);
+ node, lmc, __FUNCTION__, ret, mts_speed);
return ret;
}
@@ -261,25 +268,25 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
// add this loop to fill memory with the test pattern first
// loops are ordered so that only entire cachelines are written
for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
- for (k = 0; k < K_MAX; k += K_INC) {
- for (j = 0; j < J_MAX; j += J_INC) {
- p1 = p + ii + k + j;
- p2 = p1 + p2offset;
- for (i = 0, ix = 0; i < I_MAX; i += I_INC, ix++) {
+ for (k = 0; k < K_MAX; k += K_INC) {
+ for (j = 0; j < J_MAX; j += J_INC) {
+ p1 = p + ii + k + j;
+ p2 = p1 + p2offset;
+ for (i = 0, ix = 0; i < I_MAX; i += I_INC, ix++) {
- v = dram_tune_test_pattern[ix];
- v1 = v; // write the same thing to both areas
+ v = dram_tune_test_pattern[ix];
+ v1 = v; // write the same thing to both areas
- __bdk_dram_write64(p1 + i, v);
- __bdk_dram_write64(p2 + i, v1);
+ __bdk_dram_write64(p1 + i, v);
+ __bdk_dram_write64(p2 + i, v1);
- }
+ }
#if ENABLE_WBIL2
- BDK_CACHE_WBI_L2(p1);
- BDK_CACHE_WBI_L2(p2);
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
#endif
- }
- }
+ }
+ }
} /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
#endif
@@ -291,12 +298,12 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
// loops are ordered so that only a single 64-bit slot is written to each cacheline at one time,
// then the cachelines are forced out; this should maximize read/write traffic
for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
- for (k = 0; k < K_MAX; k += K_INC) {
- for (i = 0; i < I_MAX; i += I_INC) {
- for (j = 0; j < J_MAX; j += J_INC) {
+ for (k = 0; k < K_MAX; k += K_INC) {
+ for (i = 0; i < I_MAX; i += I_INC) {
+ for (j = 0; j < J_MAX; j += J_INC) {
- p1 = p + ii + k + j;
- p2 = p1 + p2offset;
+ p1 = p + ii + k + j;
+ p2 = p1 + p2offset;
#if ENABLE_PREFETCH
if (j < (J_MAX - J_INC)) {
@@ -304,20 +311,20 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
BDK_PREFETCH(p2 + J_INC, BDK_CACHE_LINE_SIZE);
}
#endif
-
- v = pattern1 * (p1 + i);
- v1 = v; // write the same thing to both areas
- __bdk_dram_write64(p1 + i, v);
- __bdk_dram_write64(p2 + i, v1);
+ v = pattern1 * (p1 + i);
+ v1 = v; // write the same thing to both areas
+
+ __bdk_dram_write64(p1 + i, v);
+ __bdk_dram_write64(p2 + i, v1);
#if ENABLE_WBIL2
- BDK_CACHE_WBI_L2(p1);
- BDK_CACHE_WBI_L2(p2);
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
#endif
- }
- }
- }
+ }
+ }
+ }
} /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
BDK_DCACHE_INVALIDATE;
@@ -329,24 +336,24 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
for (int burst = 0; burst < 1/* was: dram_tune_use_bursts*/; burst++)
{
- uint64_t this_pattern = bdk_rng_get_random64();
- pattern2 ^= this_pattern;
+ uint64_t this_pattern = bdk_rng_get_random64();
+ pattern2 ^= this_pattern;
/* XOR the data with a random value, applying the change to both
* memory areas.
*/
#if ENABLE_PREFETCH
- BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
- BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
#endif
- for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
- for (k = 0; k < K_MAX; k += K_INC) {
- for (i = 0; i < I_MAX; i += I_INC) { // FIXME: rearranged, did not make much difference?
- for (j = 0; j < J_MAX; j += J_INC) {
+ for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
+ for (k = 0; k < K_MAX; k += K_INC) {
+ for (i = 0; i < I_MAX; i += I_INC) { // FIXME: rearranged, did not make much difference?
+ for (j = 0; j < J_MAX; j += J_INC) {
- p1 = p + ii + k + j;
- p2 = p1 + p2offset;
+ p1 = p + ii + k + j;
+ p2 = p1 + p2offset;
#if ENABLE_PREFETCH
if (j < (J_MAX - J_INC)) {
@@ -354,26 +361,26 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
BDK_PREFETCH(p2 + J_INC, BDK_CACHE_LINE_SIZE);
}
#endif
-
- v = __bdk_dram_read64(p1 + i) ^ this_pattern;
- v1 = __bdk_dram_read64(p2 + i) ^ this_pattern;
+
+ v = __bdk_dram_read64(p1 + i) ^ this_pattern;
+ v1 = __bdk_dram_read64(p2 + i) ^ this_pattern;
#if ENABLE_WBIL2
- BDK_CACHE_INV_L2(p1);
- BDK_CACHE_INV_L2(p2);
+ BDK_CACHE_INV_L2(p1);
+ BDK_CACHE_INV_L2(p2);
#endif
- __bdk_dram_write64(p1 + i, v);
- __bdk_dram_write64(p2 + i, v1);
+ __bdk_dram_write64(p1 + i, v);
+ __bdk_dram_write64(p2 + i, v1);
#if ENABLE_WBIL2
- BDK_CACHE_WBI_L2(p1);
- BDK_CACHE_WBI_L2(p2);
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
#endif
- }
- }
- }
- } /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
+ }
+ }
+ }
+ } /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
BDK_DCACHE_INVALIDATE;
@@ -381,8 +388,8 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
node, lmc);
#if ENABLE_PREFETCH
- BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
- BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
+ BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
#endif
/* Look for differences in the areas. If there is a mismatch, reset
@@ -390,18 +397,18 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
* means that on all subsequent passes the pair of locations remain
* out of sync giving spurious errors.
*/
- // FIXME: change the loop order so that an entire cache line is compared at one time
- // FIXME: this is so that a read error that occurs *anywhere* on the cacheline will be caught,
- // FIXME: rather than comparing only 1 cacheline slot at a time, where an error on a different
- // FIXME: slot will be missed that time around
- // Does the above make sense?
+ // FIXME: change the loop order so that an entire cache line is compared at one time
+ // FIXME: this is so that a read error that occurs *anywhere* on the cacheline will be caught,
+ // FIXME: rather than comparing only 1 cacheline slot at a time, where an error on a different
+ // FIXME: slot will be missed that time around
+ // Does the above make sense?
- for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
- for (k = 0; k < K_MAX; k += K_INC) {
- for (j = 0; j < J_MAX; j += J_INC) {
+ for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
+ for (k = 0; k < K_MAX; k += K_INC) {
+ for (j = 0; j < J_MAX; j += J_INC) {
- p1 = p + ii + k + j;
- p2 = p1 + p2offset;
+ p1 = p + ii + k + j;
+ p2 = p1 + p2offset;
#if ENABLE_PREFETCH
if (j < (J_MAX - J_INC)) {
@@ -409,15 +416,15 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
BDK_PREFETCH(p2 + J_INC, BDK_CACHE_LINE_SIZE);
}
#endif
-
- // process entire cachelines in the innermost loop
- for (i = 0; i < I_MAX; i += I_INC) {
- v = ((p1 + i) * pattern1) ^ pattern2; // FIXME: this should predict what we find...???
- d1 = __bdk_dram_read64(p1 + i);
- d2 = __bdk_dram_read64(p2 + i);
+ // process entire cachelines in the innermost loop
+ for (i = 0; i < I_MAX; i += I_INC) {
+
+ v = ((p1 + i) * pattern1) ^ pattern2; // FIXME: this should predict what we find...???
+ d1 = __bdk_dram_read64(p1 + i);
+ d2 = __bdk_dram_read64(p2 + i);
- xor = ((d1 ^ v) | (d2 ^ v)) & datamask; // union of error bits only in active byte lanes
+ xor = ((d1 ^ v) | (d2 ^ v)) & datamask; // union of error bits only in active byte lanes
if (!xor)
continue;
@@ -426,32 +433,32 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
bad_bits[0] |= xor;
//bad_bits[1] |= ~mpr_data1 & 0xffUL; // cannot do ECC here
- int bybit = 1;
- uint64_t bymsk = 0xffULL; // start in byte lane 0
- while (xor != 0) {
- debug_print("ERROR(%03d): [0x%016lX] [0x%016lX] expected 0x%016lX d1 %016lX d2 %016lX\n",
- burst, p1, p2, v, d1, d2);
- if (xor & bymsk) { // error(s) in this lane
- errors |= bybit; // set the byte error bit
- xor &= ~bymsk; // clear byte lane in error bits
- datamask &= ~bymsk; // clear the byte lane in the mask
+ int bybit = 1;
+ uint64_t bymsk = 0xffULL; // start in byte lane 0
+ while (xor != 0) {
+ debug_print("ERROR(%03d): [0x%016lX] [0x%016lX] expected 0x%016lX d1 %016lX d2 %016lX\n",
+ burst, p1, p2, v, d1, d2);
+ if (xor & bymsk) { // error(s) in this lane
+ errors |= bybit; // set the byte error bit
+ xor &= ~bymsk; // clear byte lane in error bits
+ datamask &= ~bymsk; // clear the byte lane in the mask
#if EXIT_WHEN_ALL_LANES_HAVE_ERRORS
- if (datamask == 0) { // nothing left to do
- return errors; // completely done when errors found in all byte lanes in datamask
- }
+ if (datamask == 0) { // nothing left to do
+ return errors; // completely done when errors found in all byte lanes in datamask
+ }
#endif /* EXIT_WHEN_ALL_LANES_HAVE_ERRORS */
- }
- bymsk <<= 8; // move mask into next byte lane
- bybit <<= 1; // move bit into next byte position
- }
- }
+ }
+ bymsk <<= 8; // move mask into next byte lane
+ bybit <<= 1; // move bit into next byte position
+ }
+ }
#if ENABLE_WBIL2
- BDK_CACHE_WBI_L2(p1);
- BDK_CACHE_WBI_L2(p2);
+ BDK_CACHE_WBI_L2(p1);
+ BDK_CACHE_WBI_L2(p2);
#endif
- }
- }
- } /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
+ }
+ }
+ } /* for (ii = 0; ii < (1ULL << 31); ii += (1ULL << 29)) */
debug_print("N%d.LMC%d: dram_tuning_mem_xor: done TEST loop\n",
node, lmc);
@@ -476,278 +483,6 @@ int dram_tuning_mem_xor(bdk_node_t node, int lmc, uint64_t p, uint64_t bitmask,
#define EXTRACT(v, lsb, width) (((v) >> (lsb)) & ((1ull << (width)) - 1))
#define LMCNO(address, xbits) (EXTRACT(address, 7, xbits) ^ EXTRACT(address, 20, xbits) ^ EXTRACT(address, 12, xbits))
-static int dram_tuning_mem_xor2(uint64_t p, uint64_t bitmask, int xbits)
-{
- uint64_t p1, p2, d1, d2;
- uint64_t v, vpred;
- uint64_t p2offset = dram_tune_rank_offset; // FIXME?
- uint64_t datamask;
- uint64_t xor;
- uint64_t ii;
- uint64_t pattern1 = bdk_rng_get_random64();
- uint64_t pattern2 = 0;
- int errors = 0;
- int errs_by_lmc[4] = { 0,0,0,0 };
- int lmc;
- uint64_t vbase, vincr;
-
- // Byte lanes may be clear in the mask to indicate no testing on that lane.
- datamask = bitmask;
-
- /* Add offset to both test regions to not clobber boot stuff
- * when running from L2 for NAND boot.
- */
- p += AREA_BASE_OFFSET; // make sure base is out of the way of boot
-
- // move the multiplies outside the loop
- vbase = p * pattern1;
- vincr = 8 * pattern1;
-
-#define II_INC (1ULL << 3)
-#define II_MAX (1ULL << 22) // stop where the core ID bits start
-
- // walk the memory areas by 8-byte words
- v = vbase;
- for (ii = 0; ii < II_MAX; ii += II_INC) {
-
- p1 = p + ii;
- p2 = p1 + p2offset;
-
- __bdk_dram_write64(p1, v);
- __bdk_dram_write64(p2, v);
-
- v += vincr;
- }
-
- __bdk_dram_flush_to_mem_range(p , p + II_MAX);
- __bdk_dram_flush_to_mem_range(p + p2offset, p + p2offset + II_MAX);
- BDK_DCACHE_INVALIDATE;
-
- /* Make a series of passes over the memory areas. */
-
- for (int burst = 0; burst < dram_tune_use_bursts; burst++)
- {
- uint64_t this_pattern = bdk_rng_get_random64();
- pattern2 ^= this_pattern;
-
- /* XOR the data with a random value, applying the change to both
- * memory areas.
- */
-#if 0
- BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
- BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
-#endif
- for (ii = 0; ii < II_MAX; ii += II_INC) { // FIXME? extend the range of memory tested!!
-
- p1 = p + ii;
- p2 = p1 + p2offset;
-
- d1 = __bdk_dram_read64(p1) ^ this_pattern;
- d2 = __bdk_dram_read64(p2) ^ this_pattern;
-
- __bdk_dram_write64(p1, d1);
- __bdk_dram_write64(p2, d2);
-
- }
- __bdk_dram_flush_to_mem_range(p , p + II_MAX);
- __bdk_dram_flush_to_mem_range(p + p2offset, p + p2offset + II_MAX);
- BDK_DCACHE_INVALIDATE;
-
- /* Look for differences in the areas. If there is a mismatch, reset
- * both memory locations with the same pattern. Failing to do so
- * means that on all subsequent passes the pair of locations remain
- * out of sync giving spurious errors.
- */
-#if 0
- BDK_PREFETCH(p , BDK_CACHE_LINE_SIZE);
- BDK_PREFETCH(p + p2offset, BDK_CACHE_LINE_SIZE);
-#endif
- vpred = vbase;
- for (ii = 0; ii < II_MAX; ii += II_INC) {
-
- p1 = p + ii;
- p2 = p1 + p2offset;
-
- v = vpred ^ pattern2; // this should predict what we find...
- d1 = __bdk_dram_read64(p1);
- d2 = __bdk_dram_read64(p2);
- vpred += vincr;
-
- xor = ((d1 ^ v) | (d2 ^ v)) & datamask; // union of error bits only in active byte lanes
- if (!xor) // no errors
- continue;
-
- lmc = LMCNO(p1, xbits); // FIXME: LMC should be SAME for p1 and p2!!!
- if (lmc != (int)LMCNO(p2, xbits)) {
- printf("ERROR: LMCs for addresses [0x%016lX] (%lld) and [0x%016lX] (%lld) differ!!!\n",
- p1, LMCNO(p1, xbits), p2, LMCNO(p2, xbits));
- }
- int bybit = 1;
- uint64_t bymsk = 0xffULL; // start in byte lane 0
- while (xor != 0) {
- debug_print("ERROR(%03d): [0x%016lX] [0x%016lX] expected 0x%016lX d1 %016lX d2 %016lX\n",
- burst, p1, p2, v, d1, d2);
- if (xor & bymsk) { // error(s) in this lane
- errs_by_lmc[lmc] |= bybit; // set the byte error bit in the LMCs errors
- errors |= bybit; // set the byte error bit
- xor &= ~bymsk; // clear byte lane in error bits
- //datamask &= ~bymsk; // clear the byte lane in the mask
- }
- bymsk <<= 8; // move mask into next byte lane
- bybit <<= 1; // move bit into next byte position
- } /* while (xor != 0) */
- } /* for (ii = 0; ii < II_MAX; ii += II_INC) */
- } /* for (int burst = 0; burst < dram_tune_use_bursts; burst++) */
-
- // update the global LMC error states
- for (lmc = 0; lmc < 4; lmc++) {
- if (errs_by_lmc[lmc]) {
- bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_lmc_errs[lmc], errs_by_lmc[lmc]);
- }
- }
-
- return errors;
-}
-
-#if 0
-static int dram_tuning_mem_rows(uint64_t p, uint64_t bitmask)
-{
- uint64_t p1, p2, d1, d2;
- uint64_t v, v1;
- uint64_t p2offset = dram_tune_rank_offset; // FIXME?
- uint64_t datamask;
- uint64_t xor;
- int i, j, k, ii;
- int errors = 0;
- int index;
- uint64_t pattern1 = 0; // FIXME: maybe this could be from a table?
- uint64_t pattern2;
-
- // Byte lanes may be clear in the mask to indicate no testing on that lane.
- datamask = bitmask;
-
- /* Add offset to both test regions to not clobber boot stuff
- * when running from L2 for NAND boot.
- */
- p += 0x10000000; // FIXME? was: 0x4000000; // make sure base is out of the way of cores for tuning
-
- pattern2 = pattern1;
- for (k = 0; k < (1 << 20); k += (1 << 14)) {
- for (j = 0; j < (1 << 12); j += (1 << 9)) {
- for (i = 0; i < (1 << 7); i += 8) {
- index = i + j + k;
- p1 = p + index;
- p2 = p1 + p2offset;
-
- v = pattern2;
- v1 = v; // write the same thing to same slot in both cachelines
- pattern2 = ~pattern2; // flip bits for next slots
-
- __bdk_dram_write64(p1, v);
- __bdk_dram_write64(p2, v1);
- }
-#if 1
- BDK_CACHE_WBI_L2(p1);
- BDK_CACHE_WBI_L2(p2);
-#endif
- }
- }
-
-#if 0
- __bdk_dram_flush_to_mem_range(p, p + (1ULL << 20)); // max_addr is start + where k stops...
- __bdk_dram_flush_to_mem_range(p + p2offset, p + p2offset + (1ULL << 20)); // max_addr is start + where k stops...
-#endif
- BDK_DCACHE_INVALIDATE;
-
- /* Make a series of passes over the memory areas. */
-
- for (int burst = 0; burst < dram_tune_use_bursts; burst++)
- {
- /* just read and flip the bits applying the change to both
- * memory areas.
- */
- for (k = 0; k < (1 << 20); k += (1 << 14)) {
- for (j = 0; j < (1 << 12); j += (1 << 9)) {
- for (i = 0; i < (1 << 7); i += 8) {
- index = i + j + k;
- p1 = p + index;
- p2 = p1 + p2offset;
-
- v = ~__bdk_dram_read64(p1);
- v1 = ~__bdk_dram_read64(p2);
-
- __bdk_dram_write64(p1, v);
- __bdk_dram_write64(p2, v1);
- }
-#if 1
- BDK_CACHE_WBI_L2(p1);
- BDK_CACHE_WBI_L2(p2);
-#endif
- }
- }
-
-#if 0
- __bdk_dram_flush_to_mem_range(p, p + (1ULL << 20)); // max_addr is start + where k stops...
- __bdk_dram_flush_to_mem_range(p + p2offset, p + p2offset + (1ULL << 20)); // max_addr is start + where k stops...
-#endif
- BDK_DCACHE_INVALIDATE;
-
- /* Look for differences in the areas. If there is a mismatch, reset
- * both memory locations with the same pattern. Failing to do so
- * means that on all subsequent passes the pair of locations remain
- * out of sync giving spurious errors.
- */
-
- // FIXME: change the loop order so that an entire cache line is compared at one time
- // FIXME: this is so that a read error that occurs *anywhere* on the cacheline will be caught,
- // FIXME: rather than comparing only 1 cacheline slot at a time, where an error on a different
- // FIXME: slot will be missed that time around
- // Does the above make sense?
-
- pattern2 = ~pattern1; // slots have been flipped by the above loop
-
- for (k = 0; k < (1 << 20); k += (1 << 14)) {
- for (j = 0; j < (1 << 12); j += (1 << 9)) {
- for (i = 0; i < (1 << 7); i += 8) {
- index = i + j + k;
- p1 = p + index;
- p2 = p1 + p2offset;
-
- v = pattern2; // FIXME: this should predict what we find...???
- d1 = __bdk_dram_read64(p1);
- d2 = __bdk_dram_read64(p2);
- pattern2 = ~pattern2; // flip for next slot
-
- xor = ((d1 ^ v) | (d2 ^ v)) & datamask; // union of error bits only in active byte lanes
-
- int bybit = 1;
- uint64_t bymsk = 0xffULL; // start in byte lane 0
- while (xor != 0) {
- debug_print("ERROR(%03d): [0x%016lX] [0x%016lX] expected 0x%016lX d1 %016lX d2 %016lX\n",
- burst, p1, p2, v, d1, d2);
- if (xor & bymsk) { // error(s) in this lane
- errors |= bybit; // set the byte error bit
- xor &= ~bymsk; // clear byte lane in error bits
- datamask &= ~bymsk; // clear the byte lane in the mask
-#if EXIT_WHEN_ALL_LANES_HAVE_ERRORS
- if (datamask == 0) { // nothing left to do
- return errors; // completely done when errors found in all byte lanes in datamask
- }
-#endif /* EXIT_WHEN_ALL_LANES_HAVE_ERRORS */
- }
- bymsk <<= 8; // move mask into next byte lane
- bybit <<= 1; // move bit into next byte position
- }
- }
- }
- }
- pattern1 = ~pattern1; // flip the starting pattern for the next burst
-
- } /* for (int burst = 0; burst < dram_tune_use_bursts; burst++) */
- return errors;
-}
-#endif
-
// cores to use
#define DEFAULT_USE_CORES 44 // FIXME: was (1 << CORE_BITS)
int dram_tune_use_cores = DEFAULT_USE_CORES; // max cores to use, override available
@@ -763,109 +498,6 @@ typedef struct
uint64_t byte_mask;
} test_dram_byte_info_t;
-static void dram_tuning_thread(int arg, void *arg1)
-{
- test_dram_byte_info_t *test_info = arg1;
- int core = arg;
- uint64_t errs;
- bdk_node_t node = test_info->node;
- int num_lmcs, lmc;
-#if 0
- num_lmcs = test_info->num_lmcs;
- // map core numbers into hopefully equal groups per LMC
- lmc = core % num_lmcs;
-#else
- // FIXME: this code should allow running all the cores on a single LMC...
- // if incoming num_lmcs > 0, then use as normal; if < 0 remap to a single LMC
- if (test_info->num_lmcs >= 0) {
- num_lmcs = test_info->num_lmcs;
- // map core numbers into hopefully equal groups per LMC
- lmc = core % num_lmcs;
- } else {
- num_lmcs = 1;
- // incoming num_lmcs is (desired LMC - 10)
- lmc = 10 + test_info->num_lmcs;
- }
-#endif
- uint64_t base_address = 0/* was: (lmc << 7); now done by callee */;
- uint64_t bytemask = test_info->byte_mask;
-
- /* Figure out our work memory range.
- *
- * Note: base_address above just provides the physical offset which determines
- * specific LMC portions of the address space and does not have the node bits set.
- */
- //was: base_address = bdk_numa_get_address(node, base_address); // map to node // now done by callee
- base_address |= (core << CORE_SHIFT); // FIXME: also put full core into address
- if (dram_tune_dimm_offset) { // if multi-slot in some way, choose a DIMM for the core
- base_address |= (core & (1 << (num_lmcs >> 1))) ? dram_tune_dimm_offset : 0;
- }
-
- debug_print("Node %d, core %d, Testing area 1 at 0x%011lx, area 2 at 0x%011lx\n",
- node, core, base_address + AREA_BASE_OFFSET,
- base_address + AREA_BASE_OFFSET + dram_tune_rank_offset);
-
- errs = dram_tuning_mem_xor(node, lmc, base_address, bytemask, NULL);
- //errs = dram_tuning_mem_rows(base_address, bytemask);
-
- /* Report that we're done */
- debug_print("Core %d on LMC %d node %d done with test_dram_byte with 0x%lx errs\n",
- core, lmc, node, errs);
-
- if (errs) {
- bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_threads_errs, errs);
- bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_lmc_errs[lmc], errs);
- }
-
- bdk_atomic_add64_nosync(&test_dram_byte_threads_done, 1);
-
- return;
-}
-
-static void dram_tuning_thread2(int arg, void *arg1)
-{
- test_dram_byte_info_t *test_info = arg1;
- int core = arg;
- uint64_t errs;
- bdk_node_t node = test_info->node;
- int num_lmcs = test_info->num_lmcs;
-
- uint64_t base_address = 0; //
- uint64_t bytemask = test_info->byte_mask;
-
- /* Figure out our work memory range.
- *
- * Note: base_address above just provides the physical offset which determines
- * specific portions of the address space and does not have the node bits set.
- */
- base_address = bdk_numa_get_address(node, base_address); // map to node
- base_address |= (core << CORE_SHIFT); // FIXME: also put full core into address
- if (dram_tune_dimm_offset) { // if multi-slot in some way, choose a DIMM for the core
- base_address |= (core & 1) ? dram_tune_dimm_offset : 0;
- }
-
- debug_print("Node %d, core %d, Testing area 1 at 0x%011lx, area 2 at 0x%011lx\n",
- node, core, base_address + AREA_BASE_OFFSET,
- base_address + AREA_BASE_OFFSET + dram_tune_rank_offset);
-
- errs = dram_tuning_mem_xor2(base_address, bytemask, (num_lmcs >> 1)); // 4->2, 2->1, 1->0
- //errs = dram_tuning_mem_rows(base_address, bytemask);
-
- /* Report that we're done */
- debug_print("Core %d on LMC %d node %d done with test_dram_byte with 0x%lx errs\n",
- core, lmc, node, errs);
-
- if (errs) {
- bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_threads_errs, errs);
- // FIXME: this will have been done already in the called test routine
- //bdk_atomic_fetch_and_bset64_nosync(&test_dram_byte_lmc_errs[lmc], errs);
- }
-
- bdk_atomic_add64_nosync(&test_dram_byte_threads_done, 1);
-
- return;
-}
-
static int dram_tune_use_xor2 = 1; // FIXME: do NOT default to original mem_xor (LMC-based) code
static int
@@ -874,7 +506,6 @@ run_dram_tuning_threads(bdk_node_t node, int num_lmcs, uint64_t bytemask)
test_dram_byte_info_t test_dram_byte_info;
test_dram_byte_info_t *test_info = &test_dram_byte_info;
int total_count = 0;
- __dram_tuning_thread_t thread_p = (dram_tune_use_xor2) ? dram_tuning_thread2 : dram_tuning_thread;
test_info->node = node;
test_info->num_lmcs = num_lmcs;
@@ -890,20 +521,14 @@ run_dram_tuning_threads(bdk_node_t node, int num_lmcs, uint64_t bytemask)
/* Start threads for cores on the node */
if (bdk_numa_exists(node)) {
- debug_print("Starting %d threads for test_dram_byte\n", dram_tune_use_cores);
- for (int core = 0; core < dram_tune_use_cores; core++) {
- if (bdk_thread_create(node, 0, thread_p, core, (void *)test_info, 0)) {
- bdk_error("Failed to create thread %d for test_dram_byte\n", core);
- } else {
- total_count++;
- }
- }
+ /* FIXME(dhendrix): We shouldn't hit this. */
+ die("bdk_numa_exists() is non-zero\n");
}
#if 0
/* Wait for threads to finish */
while (bdk_atomic_get64(&test_dram_byte_threads_done) < total_count)
- bdk_thread_yield();
+ bdk_thread_yield();
#else
#define TIMEOUT_SECS 5 // FIXME: long enough so a pass for a given setting will not print
/* Wait for threads to finish, with progress */
@@ -912,7 +537,7 @@ run_dram_tuning_threads(bdk_node_t node, int num_lmcs, uint64_t bytemask)
uint64_t period = bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) * TIMEOUT_SECS; // FIXME?
uint64_t timeout = bdk_clock_get_count(BDK_CLOCK_TIME) + period;
do {
- bdk_thread_yield();
+// bdk_thread_yield(); /* FIXME(dhendrix): don't yield... */
cur_count = bdk_atomic_get64(&test_dram_byte_threads_done);
cur_time = bdk_clock_get_count(BDK_CLOCK_TIME);
if (cur_time >= timeout) {
@@ -927,136 +552,10 @@ run_dram_tuning_threads(bdk_node_t node, int num_lmcs, uint64_t bytemask)
}
/* These variables count the number of ECC errors. They should only be accessed atomically */
-extern int64_t __bdk_dram_ecc_single_bit_errors[];
+/* FIXME(dhendrix): redundant declaration in original BDK sources */
+//extern int64_t __bdk_dram_ecc_single_bit_errors[];
extern int64_t __bdk_dram_ecc_double_bit_errors[];
-#if 0
-// make the tuning test callable as a standalone
-int
-bdk_run_dram_tuning_test(int node)
-{
- int num_lmcs = __bdk_dram_get_num_lmc(node);
- const char *s;
- int lmc, byte;
- int errors;
- uint64_t start_dram_dclk[4], start_dram_ops[4];
- int save_use_bursts;
-
- // check for the cores on this node, abort if not more than 1 // FIXME?
- dram_tune_max_cores = bdk_get_num_running_cores(node);
- if (dram_tune_max_cores < 2) {
- //bdk_init_cores(node, 0);
- printf("N%d: ERROR: not enough cores to run the DRAM tuning test.\n", node);
- return 0;
- }
-
- // but use only a certain number of cores, at most what is available
- if ((s = getenv("ddr_tune_use_cores")) != NULL) {
- dram_tune_use_cores = strtoul(s, NULL, 0);
- if (dram_tune_use_cores <= 0) // allow 0 or negative to mean all
- dram_tune_use_cores = dram_tune_max_cores;
- }
- if (dram_tune_use_cores > dram_tune_max_cores)
- dram_tune_use_cores = dram_tune_max_cores;
-
- // save the original bursts, so we can replace it with a better number for just testing
- save_use_bursts = dram_tune_use_bursts;
- dram_tune_use_bursts = 1500; // FIXME: hard code bursts for the test here...
-
- // allow override of the test repeats (bursts) per thread create
- if ((s = getenv("ddr_tune_use_bursts")) != NULL) {
- dram_tune_use_bursts = strtoul(s, NULL, 10);
- }
-
- // allow override of the test mem_xor algorithm
- if ((s = getenv("ddr_tune_use_xor2")) != NULL) {
- dram_tune_use_xor2 = !!strtoul(s, NULL, 10);
- }
-
- // FIXME? consult LMC0 only
- BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(0));
- if (lmcx_config.s.rank_ena) { // replace the default offset when there is more than 1 rank...
- dram_tune_rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena + (num_lmcs/2));
- ddr_print("N%d: run_dram_tuning_test: changing rank offset to 0x%lx\n", node, dram_tune_rank_offset);
- }
- if (lmcx_config.s.init_status & 0x0c) { // bit 2 or 3 set indicates 2 DIMMs
- dram_tune_dimm_offset = 1ull << (28 + lmcx_config.s.pbank_lsb + (num_lmcs/2));
- ddr_print("N%d: run_dram_tuning_test: changing dimm offset to 0x%lx\n", node, dram_tune_dimm_offset);
- }
- int ddr_interface_64b = !lmcx_config.s.mode32b;
-
- // construct the bytemask
- int bytes_todo = (ddr_interface_64b) ? 0xff : 0x0f; // FIXME: hack?
- uint64_t bytemask = 0;
- for (byte = 0; byte < 8; ++byte) {
- uint64_t bitmask;
- if (bytes_todo & (1 << byte)) {
- bitmask = ((!ddr_interface_64b) && (byte == 4)) ? 0x0f: 0xff;
- bytemask |= bitmask << (8*byte); // set the bytes bits in the bytemask
- }
- } /* for (byte = 0; byte < 8; ++byte) */
-
- // print current working values
- ddr_print("N%d: run_dram_tuning_test: max %d cores, use %d cores, use %d bursts.\n",
- node, dram_tune_max_cores, dram_tune_use_cores, dram_tune_use_bursts);
-
- // do the setup on active LMCs
- for (lmc = 0; lmc < num_lmcs; lmc++) {
- // record start cycle CSRs here for utilization measure
- start_dram_dclk[lmc] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
- start_dram_ops[lmc] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
-#if 0
- bdk_atomic_set64(&__bdk_dram_ecc_single_bit_errors[lmc], 0);
- bdk_atomic_set64(&__bdk_dram_ecc_double_bit_errors[lmc], 0);
-#else
- __bdk_dram_ecc_single_bit_errors[lmc] = 0;
- __bdk_dram_ecc_double_bit_errors[lmc] = 0;
-#endif
- } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
-
- bdk_watchdog_poke();
-
- // run the test(s)
- // only 1 call should be enough, let the bursts, etc, control the load...
- errors = run_dram_tuning_threads(node, num_lmcs, bytemask);
-
- /* Check ECC error counters after the test */
- int64_t ecc_single = 0;
- int64_t ecc_double = 0;
- int64_t ecc_single_errs[4];
- int64_t ecc_double_errs[4];
-
- // finally, print the utilizations all together, and sum the ECC errors
- for (lmc = 0; lmc < num_lmcs; lmc++) {
- uint64_t dclk_diff = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc)) - start_dram_dclk[lmc];
- uint64_t ops_diff = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc)) - start_dram_ops[lmc];
- uint64_t percent_x10 = ops_diff * 1000 / dclk_diff;
- printf("N%d.LMC%d: ops %lu, cycles %lu, used %lu.%lu%%\n",
- node, lmc, ops_diff, dclk_diff, percent_x10 / 10, percent_x10 % 10);
-
- ecc_single += (ecc_single_errs[lmc] = bdk_atomic_get64(&__bdk_dram_ecc_single_bit_errors[lmc]));
- ecc_double += (ecc_double_errs[lmc] = bdk_atomic_get64(&__bdk_dram_ecc_double_bit_errors[lmc]));
- } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
-
- /* Always print any ECC errors */
- if (ecc_single || ecc_double) {
- printf("Test \"%s\": ECC errors, %ld/%ld/%ld/%ld corrected, %ld/%ld/%ld/%ld uncorrected\n",
- "DRAM Tuning Test",
- ecc_single_errs[0], ecc_single_errs[1], ecc_single_errs[2], ecc_single_errs[3],
- ecc_double_errs[0], ecc_double_errs[1], ecc_double_errs[2], ecc_double_errs[3]);
- }
- if (errors || ecc_double || ecc_single) {
- printf("Test \"%s\": FAIL: %ld single, %ld double, %d compare errors\n",
- "DRAM Tuning Test", ecc_single, ecc_double, errors);
- }
-
- // restore bursts
- dram_tune_use_bursts = save_use_bursts;
-
- return (errors + ecc_double + ecc_single);
-}
-#endif /* 0 */
-
#define DEFAULT_SAMPLE_GRAN 3 // sample for errors every N offset values
#define MIN_BYTE_OFFSET -63
#define MAX_BYTE_OFFSET +63
@@ -1064,8 +563,8 @@ int dram_tune_use_gran = DEFAULT_SAMPLE_GRAN;
static int
auto_set_dll_offset(bdk_node_t node, int dll_offset_mode,
- int num_lmcs, int ddr_interface_64b,
- int do_tune)
+ int num_lmcs, int ddr_interface_64b,
+ int do_tune)
{
int byte_offset;
//unsigned short result[9];
@@ -1081,17 +580,17 @@ auto_set_dll_offset(bdk_node_t node, int dll_offset_mode,
uint64_t start_dram_ops[4], stop_dram_ops[4];
int errors, tot_errors;
int lmc;
- char *mode_str = (dll_offset_mode == 2) ? "Read" : "Write";
+ const char *mode_str = (dll_offset_mode == 2) ? "Read" : "Write"; /* FIXME(dhendrix): const */
int mode_is_read = (dll_offset_mode == 2);
- char *mode_blk = (dll_offset_mode == 2) ? " " : "";
+ const char *mode_blk = (dll_offset_mode == 2) ? " " : ""; /* FIXME(dhendrix): const */
int start_offset, end_offset, incr_offset;
int speed_bin = get_speed_bin(node, 0); // FIXME: just get from LMC0?
int low_risk_count = 0, needs_review_count = 0;
if (dram_tune_use_gran != DEFAULT_SAMPLE_GRAN) {
- ddr_print2("N%d: Changing sample granularity from %d to %d\n",
- node, DEFAULT_SAMPLE_GRAN, dram_tune_use_gran);
+ ddr_print2("N%d: Changing sample granularity from %d to %d\n",
+ node, DEFAULT_SAMPLE_GRAN, dram_tune_use_gran);
}
// ensure sample is taken at 0
start_offset = MIN_BYTE_OFFSET - (MIN_BYTE_OFFSET % dram_tune_use_gran);
@@ -1109,12 +608,14 @@ auto_set_dll_offset(bdk_node_t node, int dll_offset_mode,
// FIXME? consult LMC0 only
BDK_CSR_INIT(lmcx_config, node, BDK_LMCX_CONFIG(0));
if (lmcx_config.s.rank_ena) { // replace the default offset when there is more than 1 rank...
- dram_tune_rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena + (num_lmcs/2));
- ddr_print2("N%d: Tuning multiple ranks per DIMM (rank offset 0x%lx).\n", node, dram_tune_rank_offset);
+ dram_tune_rank_offset = 1ull << (28 + lmcx_config.s.pbank_lsb - lmcx_config.s.rank_ena + (num_lmcs/2));
+ /* FIXME(dhendrix): %lx --> %llx */
+ ddr_print2("N%d: Tuning multiple ranks per DIMM (rank offset 0x%llx).\n", node, dram_tune_rank_offset);
}
if (lmcx_config.s.init_status & 0x0c) { // bit 2 or 3 set indicates 2 DIMMs
- dram_tune_dimm_offset = 1ull << (28 + lmcx_config.s.pbank_lsb + (num_lmcs/2));
- ddr_print2("N%d: Tuning multiple DIMMs per channel (DIMM offset 0x%lx)\n", node, dram_tune_dimm_offset);
+ dram_tune_dimm_offset = 1ull << (28 + lmcx_config.s.pbank_lsb + (num_lmcs/2));
+ /* FIXME(dhendrix): %lx --> %llx */
+ ddr_print2("N%d: Tuning multiple DIMMs per channel (DIMM offset 0x%llx)\n", node, dram_tune_dimm_offset);
}
// FIXME? do this for LMC0 only
@@ -1125,76 +626,76 @@ auto_set_dll_offset(bdk_node_t node, int dll_offset_mode,
int bytes_todo = (ddr_interface_64b) ? 0xff : 0x0f;
uint64_t bytemask = 0;
for (byte = 0; byte < 8; ++byte) {
- if (bytes_todo & (1 << byte)) {
- bytemask |= 0xfful << (8*byte); // set the bytes bits in the bytemask
- }
+ if (bytes_todo & (1 << byte)) {
+ bytemask |= 0xfful << (8*byte); // set the bytes bits in the bytemask
+ }
} /* for (byte = 0; byte < 8; ++byte) */
// now loop through selected legal values for the DLL byte offset...
for (byte_offset = start_offset; byte_offset <= end_offset; byte_offset += incr_offset) {
- // do the setup on active LMCs
- for (lmc = 0; lmc < num_lmcs; lmc++) {
- change_dll_offset_enable(node, lmc, 0);
-
- // set all byte lanes at once
- load_dll_offset(node, lmc, dll_offset_mode, byte_offset, 10 /* All bytes at once */);
- // but then clear the ECC byte lane so it should be neutral for the test...
- load_dll_offset(node, lmc, dll_offset_mode, 0, 8);
-
- change_dll_offset_enable(node, lmc, 1);
-
- // record start cycle CSRs here for utilization measure
- start_dram_dclk[lmc] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
- start_dram_ops[lmc] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
- } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
-
- bdk_watchdog_poke();
-
- // run the test(s)
- // only 1 call should be enough, let the bursts, etc, control the load...
- tot_errors = run_dram_tuning_threads(node, num_lmcs, bytemask);
-
- for (lmc = 0; lmc < num_lmcs; lmc++) {
- // record stop cycle CSRs here for utilization measure
- stop_dram_dclk[lmc] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
- stop_dram_ops[lmc] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
-
- // accumulate...
- ops_sum[lmc] += stop_dram_ops[lmc] - start_dram_ops[lmc];
- dclk_sum[lmc] += stop_dram_dclk[lmc] - start_dram_dclk[lmc];
-
- errors = test_dram_byte_lmc_errs[lmc];
-
- // check errors by byte, but not ECC
- for (byte = 0; byte < 8; ++byte) {
- if (!(bytes_todo & (1 << byte))) // is this byte lane to be done
- continue; // no
-
- byte_delay_windows[lmc][byte] <<= 1; // always put in a zero
- if (errors & (1 << byte)) { // yes, an error in this byte lane
- byte_delay_count[lmc][byte] = 0; // stop now always
- } else { // no error in this byte lane
- if (byte_delay_count[lmc][byte] == 0) { // first success, set run start
- byte_delay_start[lmc][byte] = byte_offset;
- }
- byte_delay_count[lmc][byte] += incr_offset; // bump run length
-
- if (byte_delay_count[lmc][byte] > byte_delay_best_count[lmc][byte]) {
- byte_delay_best_count[lmc][byte] = byte_delay_count[lmc][byte];
- byte_delay_best_start[lmc][byte] = byte_delay_start[lmc][byte];
- }
- byte_delay_windows[lmc][byte] |= 1ULL; // for pass, put in a 1
- }
- } /* for (byte = 0; byte < 8; ++byte) */
-
- // only print when there are errors and verbose...
- if (errors) {
- debug_print("DLL %s Offset Test %3d: errors 0x%x\n",
- mode_str, byte_offset, errors);
- }
- } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+ // do the setup on active LMCs
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+ change_dll_offset_enable(node, lmc, 0);
+
+ // set all byte lanes at once
+ load_dll_offset(node, lmc, dll_offset_mode, byte_offset, 10 /* All bytes at once */);
+ // but then clear the ECC byte lane so it should be neutral for the test...
+ load_dll_offset(node, lmc, dll_offset_mode, 0, 8);
+
+ change_dll_offset_enable(node, lmc, 1);
+
+ // record start cycle CSRs here for utilization measure
+ start_dram_dclk[lmc] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
+ start_dram_ops[lmc] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
+
+ bdk_watchdog_poke();
+
+ // run the test(s)
+ // only 1 call should be enough, let the bursts, etc, control the load...
+ tot_errors = run_dram_tuning_threads(node, num_lmcs, bytemask);
+
+ for (lmc = 0; lmc < num_lmcs; lmc++) {
+ // record stop cycle CSRs here for utilization measure
+ stop_dram_dclk[lmc] = BDK_CSR_READ(node, BDK_LMCX_DCLK_CNT(lmc));
+ stop_dram_ops[lmc] = BDK_CSR_READ(node, BDK_LMCX_OPS_CNT(lmc));
+
+ // accumulate...
+ ops_sum[lmc] += stop_dram_ops[lmc] - start_dram_ops[lmc];
+ dclk_sum[lmc] += stop_dram_dclk[lmc] - start_dram_dclk[lmc];
+
+ errors = test_dram_byte_lmc_errs[lmc];
+
+ // check errors by byte, but not ECC
+ for (byte = 0; byte < 8; ++byte) {
+ if (!(bytes_todo & (1 << byte))) // is this byte lane to be done
+ continue; // no
+
+ byte_delay_windows[lmc][byte] <<= 1; // always put in a zero
+ if (errors & (1 << byte)) { // yes, an error in this byte lane
+ byte_delay_count[lmc][byte] = 0; // stop now always
+ } else { // no error in this byte lane
+ if (byte_delay_count[lmc][byte] == 0) { // first success, set run start
+ byte_delay_start[lmc][byte] = byte_offset;
+ }
+ byte_delay_count[lmc][byte] += incr_offset; // bump run length
+
+ if (byte_delay_count[lmc][byte] > byte_delay_best_count[lmc][byte]) {
+ byte_delay_best_count[lmc][byte] = byte_delay_count[lmc][byte];
+ byte_delay_best_start[lmc][byte] = byte_delay_start[lmc][byte];
+ }
+ byte_delay_windows[lmc][byte] |= 1ULL; // for pass, put in a 1
+ }
+ } /* for (byte = 0; byte < 8; ++byte) */
+
+ // only print when there are errors and verbose...
+ if (errors) {
+ debug_print("DLL %s Offset Test %3d: errors 0x%x\n",
+ mode_str, byte_offset, errors);
+ }
+ } /* for (lmc = 0; lmc < num_lmcs; lmc++) */
} /* for (byte_offset=-63; byte_offset<63; byte_offset += incr_offset) */
@@ -1202,153 +703,154 @@ auto_set_dll_offset(bdk_node_t node, int dll_offset_mode,
// only when margining...
if (!do_tune) {
- printf(" \n");
- printf("-------------------------------------\n");
+ printf(" \n");
+ printf("-------------------------------------\n");
#if 0
- uint32_t mts_speed = (libdram_get_freq_from_pll(node, 0) * 2) / 1000000; // FIXME: sample LMC0
- printf("N%d: Starting %s Timing Margining for %d MT/s.\n", node, mode_str, mts_speed);
+ uint32_t mts_speed = (libdram_get_freq_from_pll(node, 0) * 2) / 1000000; // FIXME: sample LMC0
+ printf("N%d: Starting %s Timing Margining for %d MT/s.\n", node, mode_str, mts_speed);
#else
- printf("N%d: Starting %s Timing Margining.\n", node, mode_str);
+ printf("N%d: Starting %s Timing Margining.\n", node, mode_str);
#endif
- printf(" \n");
+ printf(" \n");
} /* if (!do_tune) */
for (lmc = 0; lmc < num_lmcs; lmc++) {
#if 1
- // FIXME FIXME
- // FIXME: this just makes ECC always show 0
- byte_delay_best_start[lmc][8] = start_offset;
- byte_delay_best_count[lmc][8] = end_offset - start_offset + incr_offset;
+ // FIXME FIXME
+ // FIXME: this just makes ECC always show 0
+ byte_delay_best_start[lmc][8] = start_offset;
+ byte_delay_best_count[lmc][8] = end_offset - start_offset + incr_offset;
#endif
- // disable offsets while we load...
- change_dll_offset_enable(node, lmc, 0);
-
- // only when margining...
- if (!do_tune) {
- // print the heading
- printf(" \n");
- printf("N%d.LMC%d: %s Timing Margin %s : ", node, lmc, mode_str, mode_blk);
- printf(" ECC/8 ");
- for (byte = 7; byte >= 0; byte--) {
- printf(" Byte %d ", byte);
- }
- printf("\n");
- } /* if (!do_tune) */
-
- // print and load the offset values
- // print the windows bit arrays
- // only when margining...
- if (!do_tune) {
+ // disable offsets while we load...
+ change_dll_offset_enable(node, lmc, 0);
+
+ // only when margining...
+ if (!do_tune) {
+ // print the heading
+ printf(" \n");
+ printf("N%d.LMC%d: %s Timing Margin %s : ", node, lmc, mode_str, mode_blk);
+ printf(" ECC/8 ");
+ for (byte = 7; byte >= 0; byte--) {
+ printf(" Byte %d ", byte);
+ }
+ printf("\n");
+ } /* if (!do_tune) */
+
+ // print and load the offset values
+ // print the windows bit arrays
+ // only when margining...
+ if (!do_tune) {
printf("N%d.LMC%d: DLL %s Offset Amount %s : ", node, lmc, mode_str, mode_blk);
} else {
ddr_print("N%d.LMC%d: SW DLL %s Offset Amount %s : ", node, lmc, mode_str, mode_blk);
}
- for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
+ for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
- int count = byte_delay_best_count[lmc][byte];
- if (count == 0)
- count = incr_offset; // should make non-tested ECC byte come out 0
-
- byte_offset = byte_delay_best_start[lmc][byte] +
- ((count - incr_offset) / 2); // adj by incr
+ int count = byte_delay_best_count[lmc][byte];
+ if (count == 0)
+ count = incr_offset; // should make non-tested ECC byte come out 0
- if (!do_tune) { // do counting and special flag if margining
+ byte_offset = byte_delay_best_start[lmc][byte] +
+ ((count - incr_offset) / 2); // adj by incr
+
+ if (!do_tune) { // do counting and special flag if margining
int will_need_review = !is_low_risk_winlen(speed_bin, (count - incr_offset)) &&
- !is_low_risk_offset(speed_bin, byte_offset);
+ !is_low_risk_offset(speed_bin, byte_offset);
printf("%10d%c", byte_offset, (will_need_review) ? '<' :' ');
- if (will_need_review)
- needs_review_count++;
- else
- low_risk_count++;
- } else { // if just tuning, make the printout less lengthy
+ if (will_need_review)
+ needs_review_count++;
+ else
+ low_risk_count++;
+ } else { // if just tuning, make the printout less lengthy
ddr_print("%5d ", byte_offset);
}
- // FIXME? should we be able to override this?
- if (mode_is_read) // for READ offsets, always store what we found
- load_dll_offset(node, lmc, dll_offset_mode, byte_offset, byte);
- else // for WRITE offsets, always store 0
- load_dll_offset(node, lmc, dll_offset_mode, 0, byte);
+ // FIXME? should we be able to override this?
+ if (mode_is_read) // for READ offsets, always store what we found
+ load_dll_offset(node, lmc, dll_offset_mode, byte_offset, byte);
+ else // for WRITE offsets, always store 0
+ load_dll_offset(node, lmc, dll_offset_mode, 0, byte);
- }
- if (!do_tune) {
+ }
+ if (!do_tune) {
printf("\n");
} else {
ddr_print("\n");
}
- // re-enable the offsets now that we are done loading
- change_dll_offset_enable(node, lmc, 1);
-
- // only when margining...
- if (!do_tune) {
- // print the window sizes
- printf("N%d.LMC%d: DLL %s Window Length %s : ", node, lmc, mode_str, mode_blk);
- for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
- int count = byte_delay_best_count[lmc][byte];
- if (count == 0)
- count = incr_offset; // should make non-tested ECC byte come out 0
-
- // do this again since the "needs review" test is an AND...
- byte_offset = byte_delay_best_start[lmc][byte] +
- ((count - incr_offset) / 2); // adj by incr
-
- int will_need_review = !is_low_risk_winlen(speed_bin, (count - incr_offset)) &&
- !is_low_risk_offset(speed_bin, byte_offset);
-
- printf("%10d%c", count - incr_offset, (will_need_review) ? '<' :' ');
- }
- printf("\n");
-
- // print the window extents
- printf("N%d.LMC%d: DLL %s Window Bounds %s : ", node, lmc, mode_str, mode_blk);
- for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
- int start = byte_delay_best_start[lmc][byte];
- int count = byte_delay_best_count[lmc][byte];
- if (count == 0)
- count = incr_offset; // should make non-tested ECC byte come out 0
- printf(" %3d to%3d ", start,
- start + count - incr_offset);
- }
- printf("\n");
+ // re-enable the offsets now that we are done loading
+ change_dll_offset_enable(node, lmc, 1);
+
+ // only when margining...
+ if (!do_tune) {
+ // print the window sizes
+ printf("N%d.LMC%d: DLL %s Window Length %s : ", node, lmc, mode_str, mode_blk);
+ for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
+ int count = byte_delay_best_count[lmc][byte];
+ if (count == 0)
+ count = incr_offset; // should make non-tested ECC byte come out 0
+
+ // do this again since the "needs review" test is an AND...
+ byte_offset = byte_delay_best_start[lmc][byte] +
+ ((count - incr_offset) / 2); // adj by incr
+
+ int will_need_review = !is_low_risk_winlen(speed_bin, (count - incr_offset)) &&
+ !is_low_risk_offset(speed_bin, byte_offset);
+
+ printf("%10d%c", count - incr_offset, (will_need_review) ? '<' :' ');
+ }
+ printf("\n");
+
+ // print the window extents
+ printf("N%d.LMC%d: DLL %s Window Bounds %s : ", node, lmc, mode_str, mode_blk);
+ for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
+ int start = byte_delay_best_start[lmc][byte];
+ int count = byte_delay_best_count[lmc][byte];
+ if (count == 0)
+ count = incr_offset; // should make non-tested ECC byte come out 0
+ printf(" %3d to%3d ", start,
+ start + count - incr_offset);
+ }
+ printf("\n");
#if 0
- // FIXME: should have a way to force these out...
- // print the windows bit arrays
- printf("N%d.LMC%d: DLL %s Window Bitmap%s : ", node, lmc, mode_str, mode_blk);
- for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
- printf("%010lx ", byte_delay_windows[lmc][byte]);
- }
- printf("\n");
+ // FIXME: should have a way to force these out...
+ // print the windows bit arrays
+ printf("N%d.LMC%d: DLL %s Window Bitmap%s : ", node, lmc, mode_str, mode_blk);
+ for (byte = 8; byte >= 0; --byte) { // print in "normal" reverse index order
+ printf("%010lx ", byte_delay_windows[lmc][byte]);
+ }
+ printf("\n");
#endif
- } /* if (!do_tune) */
+ } /* if (!do_tune) */
} /* for (lmc = 0; lmc < num_lmcs; lmc++) */
// only when margining...
if (!do_tune) {
- // print the Summary line(s) here
- printf(" \n");
- printf("N%d: %s Timing Margining Summary : %s ", node, mode_str,
- (needs_review_count > 0) ? "Needs Review" : "Low Risk");
- if (needs_review_count > 0)
- printf("(%d)", needs_review_count);
- printf("\n");
-
- // FIXME??? want to print here: "N0: %s Offsets have been applied already"
-
- printf("-------------------------------------\n");
- printf(" \n");
+ // print the Summary line(s) here
+ printf(" \n");
+ printf("N%d: %s Timing Margining Summary : %s ", node, mode_str,
+ (needs_review_count > 0) ? "Needs Review" : "Low Risk");
+ if (needs_review_count > 0)
+ printf("(%d)", needs_review_count);
+ printf("\n");
+
+ // FIXME??? want to print here: "N0: %s Offsets have been applied already"
+
+ printf("-------------------------------------\n");
+ printf(" \n");
} /* if (!do_tune) */
// FIXME: we probably want this only when doing verbose...
// finally, print the utilizations all together
for (lmc = 0; lmc < num_lmcs; lmc++) {
- uint64_t percent_x10 = ops_sum[lmc] * 1000 / dclk_sum[lmc];
- ddr_print2("N%d.LMC%d: ops %lu, cycles %lu, used %lu.%lu%%\n",
- node, lmc, ops_sum[lmc], dclk_sum[lmc], percent_x10 / 10, percent_x10 % 10);
+ uint64_t percent_x10 = ops_sum[lmc] * 1000 / dclk_sum[lmc];
+ /* FIXME(dhendrix): %lu --> %llu */
+ ddr_print2("N%d.LMC%d: ops %llu, cycles %llu, used %llu.%llu%%\n",
+ node, lmc, ops_sum[lmc], dclk_sum[lmc], percent_x10 / 10, percent_x10 % 10);
} /* for (lmc = 0; lmc < num_lmcs; lmc++) */
// FIXME: only when verbose, or only when there are errors?
@@ -1359,7 +861,7 @@ auto_set_dll_offset(bdk_node_t node, int dll_offset_mode,
tot_errors = run_dram_tuning_threads(node, num_lmcs, bytemask);
debug_print("N%d: %s: Finished running test one last time\n", node, __FUNCTION__);
if (tot_errors)
- ddr_print2("%s Timing Final Test: errors 0x%x\n", mode_str, tot_errors);
+ ddr_print2("%s Timing Final Test: errors 0x%x\n", mode_str, tot_errors);
return (do_tune) ? tot_errors : !!(needs_review_count > 0);
}
@@ -1389,28 +891,30 @@ int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
// enable any non-running cores on this node
orig_coremask = bdk_get_running_coremask(node);
- ddr_print4("N%d: %s: Starting cores (mask was 0x%lx)\n",
- node, __FUNCTION__, orig_coremask);
- bdk_init_cores(node, ~0ULL & ~orig_coremask);
+ /* FIXME(dhendrix): %lx --> %llx */
+ ddr_print4("N%d: %s: Starting cores (mask was 0x%llx)\n",
+ node, __FUNCTION__, orig_coremask);
+ /* FIXME(dhendrix): don't call bdk_init_cores(). */
+// bdk_init_cores(node, ~0ULL & ~orig_coremask);
dram_tune_max_cores = bdk_get_num_running_cores(node);
// but use only a certain number of cores, at most what is available
if ((s = getenv("ddr_tune_use_cores")) != NULL) {
- dram_tune_use_cores = strtoul(s, NULL, 0);
- if (dram_tune_use_cores <= 0) // allow 0 or negative to mean all
- dram_tune_use_cores = dram_tune_max_cores;
+ dram_tune_use_cores = strtoul(s, NULL, 0);
+ if (dram_tune_use_cores <= 0) // allow 0 or negative to mean all
+ dram_tune_use_cores = dram_tune_max_cores;
}
if (dram_tune_use_cores > dram_tune_max_cores)
- dram_tune_use_cores = dram_tune_max_cores;
+ dram_tune_use_cores = dram_tune_max_cores;
// see if we want to do the tuning more than once per LMC...
if ((s = getenv("ddr_tune_use_loops"))) {
- loops = strtoul(s, NULL, 0);
+ loops = strtoul(s, NULL, 0);
}
// see if we want to change the granularity of the byte_offset sampling
if ((s = getenv("ddr_tune_use_gran"))) {
- dram_tune_use_gran = strtoul(s, NULL, 0);
+ dram_tune_use_gran = strtoul(s, NULL, 0);
}
// allow override of the test repeats (bursts) per thread create
@@ -1422,9 +926,9 @@ int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
// allow override of Read ODT setting just during the tuning run(s)
if ((s = getenv("ddr_tune_use_rodt")) != NULL) {
int temp = strtoul(s, NULL, 10);
- // validity check
- if (temp >= 0 && temp <= 7)
- dram_tune_use_rodt = temp;
+ // validity check
+ if (temp >= 0 && temp <= 7)
+ dram_tune_use_rodt = temp;
}
#endif
@@ -1432,13 +936,13 @@ int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
// allow override of the test pattern
// FIXME: a bit simplistic...
if ((s = getenv("ddr_tune_use_pattern")) != NULL) {
- int patno = strtoul(s, NULL, 10);
- if (patno == 2)
- dram_tune_test_pattern = test_pattern_2;
- else if (patno == 3)
- dram_tune_test_pattern = test_pattern_3;
- else // all other values use default
- dram_tune_test_pattern = test_pattern_1;
+ int patno = strtoul(s, NULL, 10);
+ if (patno == 2)
+ dram_tune_test_pattern = test_pattern_2;
+ else if (patno == 3)
+ dram_tune_test_pattern = test_pattern_3;
+ else // all other values use default
+ dram_tune_test_pattern = test_pattern_1;
}
#endif
@@ -1449,24 +953,24 @@ int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
// print current working values
ddr_print2("N%d: Tuning will use %d cores of max %d cores, and use %d repeats.\n",
- node, dram_tune_use_cores, dram_tune_max_cores,
- dram_tune_use_bursts);
+ node, dram_tune_use_cores, dram_tune_max_cores,
+ dram_tune_use_bursts);
#if USE_L2_WAYS_LIMIT
// see if L2 ways are limited
if ((s = lookup_env_parameter("limit_l2_ways")) != NULL) {
- ways = strtoul(s, NULL, 10);
- ways_print = 1;
+ ways = strtoul(s, NULL, 10);
+ ways_print = 1;
} else {
- ways = bdk_l2c_get_num_assoc(node);
+ ways = bdk_l2c_get_num_assoc(node);
}
#endif
#if 0
// if RODT is to be overridden during tuning, note change
if (dram_tune_use_rodt >= 0) {
- ddr_print("N%d: using RODT %d for tuning.\n",
- node, dram_tune_use_rodt);
+ ddr_print("N%d: using RODT %d for tuning.\n",
+ node, dram_tune_use_rodt);
}
#endif
@@ -1479,21 +983,21 @@ int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
for (lmc = 0; lmc < num_lmcs; lmc++) {
#if 0
- // if RODT change, save old and set new here...
- if (dram_tune_use_rodt >= 0) {
- comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
- save_rodt[lmc] = comp_ctl2.s.rodt_ctl;
- comp_ctl2.s.rodt_ctl = dram_tune_use_rodt;
- DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(lmc), comp_ctl2.u);
- BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
- }
+ // if RODT change, save old and set new here...
+ if (dram_tune_use_rodt >= 0) {
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
+ save_rodt[lmc] = comp_ctl2.s.rodt_ctl;
+ comp_ctl2.s.rodt_ctl = dram_tune_use_rodt;
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(lmc), comp_ctl2.u);
+ BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
+ }
#endif
- /* Disable ECC for DRAM tests */
- lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
- save_ecc_ena[lmc] = lmc_config.s.ecc_ena;
- lmc_config.s.ecc_ena = 0;
- DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
- lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ /* Disable ECC for DRAM tests */
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ save_ecc_ena[lmc] = lmc_config.s.ecc_ena;
+ lmc_config.s.ecc_ena = 0;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
} /* for (lmc = 0; lmc < num_lmcs; lmc++) */
@@ -1505,8 +1009,8 @@ int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
// testing is done on all LMCs simultaneously
// FIXME: for now, loop here to show what happens multiple times
for (loop = 0; loop < loops; loop++) {
- /* Perform DLL offset tuning */
- errs = auto_set_dll_offset(node, dll_offset_mode, num_lmcs, ddr_interface_64b, do_tune);
+ /* Perform DLL offset tuning */
+ errs = auto_set_dll_offset(node, dll_offset_mode, num_lmcs, ddr_interface_64b, do_tune);
}
#if USE_L2_WAYS_LIMIT
@@ -1518,21 +1022,21 @@ int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
debug_print("N%d: %s: starting LMCs cleanup.\n", node, __FUNCTION__);
for (lmc = 0; lmc < num_lmcs; lmc++) {
- /* Restore ECC for DRAM tests */
- lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
- lmc_config.s.ecc_ena = save_ecc_ena[lmc];
- DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
- lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ /* Restore ECC for DRAM tests */
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ lmc_config.s.ecc_ena = save_ecc_ena[lmc];
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
#if 0
- // if RODT change, restore old here...
- if (dram_tune_use_rodt >= 0) {
- comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
- comp_ctl2.s.rodt_ctl = save_rodt[lmc];
- DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(lmc), comp_ctl2.u);
- BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
- }
+ // if RODT change, restore old here...
+ if (dram_tune_use_rodt >= 0) {
+ comp_ctl2.u = BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
+ comp_ctl2.s.rodt_ctl = save_rodt[lmc];
+ DRAM_CSR_WRITE(node, BDK_LMCX_COMP_CTL2(lmc), comp_ctl2.u);
+ BDK_CSR_READ(node, BDK_LMCX_COMP_CTL2(lmc));
+ }
#endif
- // finally, see if there are any read offset overrides after tuning
+ // finally, see if there are any read offset overrides after tuning
// FIXME: provide a way to do write offsets also??
if (dll_offset_mode == 2) {
for (int by = 0; by < 9; by++) {
@@ -1551,20 +1055,24 @@ int perform_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int do_tune)
#if 0
// if RODT was overridden during tuning, note restore
if (dram_tune_use_rodt >= 0) {
- ddr_print("N%d: restoring RODT %d after tuning.\n",
- node, save_rodt[0]); // FIXME? use LMC0
+ ddr_print("N%d: restoring RODT %d after tuning.\n",
+ node, save_rodt[0]); // FIXME? use LMC0
}
#endif
// put any cores on this node, that were not running at the start, back into reset
- uint64_t reset_coremask = bdk_get_running_coremask(node) & ~orig_coremask;
+ /* FIXME(dhendrix): don't reset cores... */
+// uint64_t reset_coremask = bdk_get_running_coremask(node) & ~orig_coremask;
+ uint64_t reset_coremask = 0;
if (reset_coremask) {
- ddr_print4("N%d: %s: Stopping cores 0x%lx\n", node, __FUNCTION__,
- reset_coremask);
- bdk_reset_cores(node, reset_coremask);
+ /* FIXME(dhendrix): %lx --> %llx */
+ ddr_print4("N%d: %s: Stopping cores 0x%llx\n", node, __FUNCTION__,
+ reset_coremask);
+ bdk_reset_cores(node, reset_coremask);
} else {
- ddr_print4("N%d: %s: leaving cores set to 0x%lx\n", node, __FUNCTION__,
- orig_coremask);
+ /* FIXME(dhendrix): %lx --> %llx */
+ ddr_print4("N%d: %s: leaving cores set to 0x%llx\n", node, __FUNCTION__,
+ orig_coremask);
}
return errs;
@@ -1656,7 +1164,8 @@ setup_lfsr_pattern(bdk_node_t node, int lmc, uint64_t data)
DRAM_CSR_WRITE(node, BDK_LMCX_CHAR_CTL(lmc), char_ctl.u);
}
-int
+/* FIXME(dhendrix): made static to avoid need for prototype */
+static int
choose_best_hw_patterns(bdk_node_t node, int lmc, int mode)
{
int new_mode = mode;
@@ -1705,7 +1214,7 @@ run_best_hw_patterns(bdk_node_t node, int lmc, uint64_t phys_addr,
if (mode == DBTRAIN_LFSR) {
setup_lfsr_pattern(node, lmc, 0);
errors = test_dram_byte_hw(node, lmc, phys_addr, mode, xor_data);
- VB_PRT(VBL_DEV2, "%s: LFSR at A:0x%012lx errors 0x%x\n",
+ VB_PRT(VBL_DEV2, "%s: LFSR at A:0x%012llx errors 0x%x\n",
__FUNCTION__, phys_addr, errors);
} else {
for (pattern = 0; pattern < NUM_BYTE_PATTERNS; pattern++) {
@@ -1714,7 +1223,7 @@ run_best_hw_patterns(bdk_node_t node, int lmc, uint64_t phys_addr,
errs = test_dram_byte_hw(node, lmc, phys_addr, mode, xor_data);
- VB_PRT(VBL_DEV2, "%s: PATTERN %d at A:0x%012lx errors 0x%x\n",
+ VB_PRT(VBL_DEV2, "%s: PATTERN %d at A:0x%012llx errors 0x%x\n",
__FUNCTION__, pattern, phys_addr, errs);
errors |= errs;
@@ -1738,7 +1247,7 @@ hw_assist_test_dll_offset(bdk_node_t node, int dll_offset_mode,
int pattern;
const uint64_t *pattern_p;
int byte;
- char *mode_str = (dll_offset_mode == 2) ? "Read" : "Write";
+ const char *mode_str = (dll_offset_mode == 2) ? "Read" : "Write";
int pat_best_offset[9];
uint64_t phys_addr;
int pat_beg, pat_end;
@@ -1769,7 +1278,7 @@ hw_assist_test_dll_offset(bdk_node_t node, int dll_offset_mode,
memset(new_best_offset, 0, sizeof(new_best_offset));
for (pattern = 0; pattern < NUM_BYTE_PATTERNS; pattern++) {
- memset(pat_best_offset, 0, sizeof(pat_best_offset));
+ memset(pat_best_offset, 0, sizeof(pat_best_offset));
if (mode == DBTRAIN_TEST) {
pattern_p = byte_patterns[pattern];
@@ -1778,47 +1287,47 @@ hw_assist_test_dll_offset(bdk_node_t node, int dll_offset_mode,
setup_lfsr_pattern(node, lmc, 0);
}
- // now loop through all legal values for the DLL byte offset...
+ // now loop through all legal values for the DLL byte offset...
#define BYTE_OFFSET_INCR 3 // FIXME: make this tunable?
- tot_errors = 0;
+ tot_errors = 0;
- memset(rank_delay_count, 0, sizeof(rank_delay_count));
- memset(rank_delay_start, 0, sizeof(rank_delay_start));
- memset(rank_delay_best_count, 0, sizeof(rank_delay_best_count));
- memset(rank_delay_best_start, 0, sizeof(rank_delay_best_start));
+ memset(rank_delay_count, 0, sizeof(rank_delay_count));
+ memset(rank_delay_start, 0, sizeof(rank_delay_start));
+ memset(rank_delay_best_count, 0, sizeof(rank_delay_best_count));
+ memset(rank_delay_best_start, 0, sizeof(rank_delay_best_start));
- for (byte_offset = -63; byte_offset < 64; byte_offset += BYTE_OFFSET_INCR) {
+ for (byte_offset = -63; byte_offset < 64; byte_offset += BYTE_OFFSET_INCR) {
- // do the setup on the active LMC
- // set the bytelanes DLL offsets
- change_dll_offset_enable(node, lmc, 0);
- load_dll_offset(node, lmc, dll_offset_mode, byte_offset, bytelane); // FIXME? bytelane?
- change_dll_offset_enable(node, lmc, 1);
+ // do the setup on the active LMC
+ // set the bytelanes DLL offsets
+ change_dll_offset_enable(node, lmc, 0);
+ load_dll_offset(node, lmc, dll_offset_mode, byte_offset, bytelane); // FIXME? bytelane?
+ change_dll_offset_enable(node, lmc, 1);
- bdk_watchdog_poke();
+ bdk_watchdog_poke();
- // run the test on each rank
- // only 1 call per rank should be enough, let the bursts, loops, etc, control the load...
-
- off_errors = 0; // errors for this byte_offset, all ranks
+ // run the test on each rank
+ // only 1 call per rank should be enough, let the bursts, loops, etc, control the load...
+
+ off_errors = 0; // errors for this byte_offset, all ranks
active_ranks = 0;
- for (rankx = 0; rankx < 4; rankx++) {
+ for (rankx = 0; rankx < 4; rankx++) {
if (!(rank_mask & (1 << rankx)))
continue;
- phys_addr = hw_rank_offset * active_ranks;
- // FIXME: now done by test_dram_byte_hw()
+ phys_addr = hw_rank_offset * active_ranks;
+ // FIXME: now done by test_dram_byte_hw()
//phys_addr |= (lmc << 7);
//phys_addr = bdk_numa_get_address(node, phys_addr); // map to node
active_ranks++;
// NOTE: return is a now a bitmask of the erroring bytelanes..
- errors[rankx] = test_dram_byte_hw(node, lmc, phys_addr, mode, NULL);
+ errors[rankx] = test_dram_byte_hw(node, lmc, phys_addr, mode, NULL);
for (byte = byte_lo; byte <= byte_hi; byte++) { // do bytelane(s)
@@ -1826,7 +1335,7 @@ hw_assist_test_dll_offset(bdk_node_t node, int dll_offset_mode,
if (errors[rankx] & (1 << byte)) { // yes, an error in the byte lane in this rank
off_errors |= (1 << byte);
- ddr_print5("N%d.LMC%d.R%d: Bytelane %d DLL %s Offset Test %3d: Address 0x%012lx errors 0x%x\n",
+ ddr_print5("N%d.LMC%d.R%d: Bytelane %d DLL %s Offset Test %3d: Address 0x%012llx errors 0x%x\n",
node, lmc, rankx, bytelane, mode_str,
byte_offset, phys_addr, errors[rankx]);
@@ -1854,13 +1363,13 @@ hw_assist_test_dll_offset(bdk_node_t node, int dll_offset_mode,
}
}
} /* for (byte = byte_lo; byte <= byte_hi; byte++) */
- } /* for (rankx = 0; rankx < 4; rankx++) */
+ } /* for (rankx = 0; rankx < 4; rankx++) */
- tot_errors |= off_errors;
+ tot_errors |= off_errors;
- } /* for (byte_offset = -63; byte_offset < 64; byte_offset += BYTE_OFFSET_INCR) */
+ } /* for (byte_offset = -63; byte_offset < 64; byte_offset += BYTE_OFFSET_INCR) */
- // now choose the best byte_offsets for this pattern according to the best windows of the tested ranks
+ // now choose the best byte_offsets for this pattern according to the best windows of the tested ranks
// calculate offset by constructing an average window from the rank windows
for (byte = byte_lo; byte <= byte_hi; byte++) {
@@ -1928,7 +1437,7 @@ hw_assist_test_dll_offset(bdk_node_t node, int dll_offset_mode,
// print whether there are errors or not, but only when verbose...
tot_errors = run_test_dram_byte_threads(node, num_lmcs, bytemask);
printf("N%d.LMC%d: Bytelane %d DLL %s Offset Final Test: errors 0x%x\n",
- node, lmc, bytelane, mode_str, tot_errors);
+ node, lmc, bytelane, mode_str, tot_errors);
#endif
}
@@ -1946,7 +1455,7 @@ int perform_HW_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int bytel
// see if we want to do the tuning more than once per LMC...
if ((s = getenv("ddr_tune_ecc_loops"))) {
- loops = strtoul(s, NULL, 0);
+ loops = strtoul(s, NULL, 0);
}
// allow override of the test repeats (bursts)
@@ -1956,8 +1465,8 @@ int perform_HW_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int bytel
// print current working values
ddr_print2("N%d: H/W Tuning for bytelane %d will use %d loops, %d bursts, and %d patterns.\n",
- node, bytelane, loops, dram_tune_byte_bursts,
- NUM_BYTE_PATTERNS);
+ node, bytelane, loops, dram_tune_byte_bursts,
+ NUM_BYTE_PATTERNS);
// FIXME? get flag from LMC0 only
lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(0));
@@ -1966,42 +1475,42 @@ int perform_HW_dll_offset_tuning(bdk_node_t node, int dll_offset_mode, int bytel
for (lmc = 0; lmc < num_lmcs; lmc++) {
- ddr_print4("N%d: H/W Tuning: starting LMC%d bytelane %d tune.\n", node, lmc, bytelane);
-
- /* Enable ECC for the HW tests */
- // NOTE: we do enable ECC, but the HW tests used will not generate "visible" errors
- lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
- save_ecc_ena[lmc] = lmc_config.s.ecc_ena;
- lmc_config.s.ecc_ena = 1;
- DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
- lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
-
- // testing is done on a single LMC at a time
- // FIXME: for now, loop here to show what happens multiple times
- for (loop = 0; loop < loops; loop++) {
- /* Perform DLL offset tuning */
- //auto_set_dll_offset(node, 1 /* 1=write */, lmc, bytelane);
- hw_assist_test_dll_offset(node, 2 /* 2=read */, lmc, bytelane);
- }
-
- // perform cleanup on active LMC
- ddr_print4("N%d: H/W Tuning: finishing LMC%d bytelane %d tune.\n", node, lmc, bytelane);
-
- /* Restore ECC for DRAM tests */
- lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
- lmc_config.s.ecc_ena = save_ecc_ena[lmc];
- DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
- lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
-
- // finally, see if there are any read offset overrides after tuning
- for (int by = 0; by < 9; by++) {
- if ((s = lookup_env_parameter("ddr%d_tune_byte%d", lmc, by)) != NULL) {
- int dllro = strtoul(s, NULL, 10);
- change_dll_offset_enable(node, lmc, 0);
- load_dll_offset(node, lmc, 2 /* 2=read */, dllro, by);
- change_dll_offset_enable(node, lmc, 1);
- }
- }
+ ddr_print4("N%d: H/W Tuning: starting LMC%d bytelane %d tune.\n", node, lmc, bytelane);
+
+ /* Enable ECC for the HW tests */
+ // NOTE: we do enable ECC, but the HW tests used will not generate "visible" errors
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ save_ecc_ena[lmc] = lmc_config.s.ecc_ena;
+ lmc_config.s.ecc_ena = 1;
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+
+ // testing is done on a single LMC at a time
+ // FIXME: for now, loop here to show what happens multiple times
+ for (loop = 0; loop < loops; loop++) {
+ /* Perform DLL offset tuning */
+ //auto_set_dll_offset(node, 1 /* 1=write */, lmc, bytelane);
+ hw_assist_test_dll_offset(node, 2 /* 2=read */, lmc, bytelane);
+ }
+
+ // perform cleanup on active LMC
+ ddr_print4("N%d: H/W Tuning: finishing LMC%d bytelane %d tune.\n", node, lmc, bytelane);
+
+ /* Restore ECC for DRAM tests */
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+ lmc_config.s.ecc_ena = save_ecc_ena[lmc];
+ DRAM_CSR_WRITE(node, BDK_LMCX_CONFIG(lmc), lmc_config.u);
+ lmc_config.u = BDK_CSR_READ(node, BDK_LMCX_CONFIG(lmc));
+
+ // finally, see if there are any read offset overrides after tuning
+ for (int by = 0; by < 9; by++) {
+ if ((s = lookup_env_parameter("ddr%d_tune_byte%d", lmc, by)) != NULL) {
+ int dllro = strtoul(s, NULL, 10);
+ change_dll_offset_enable(node, lmc, 0);
+ load_dll_offset(node, lmc, 2 /* 2=read */, dllro, by);
+ change_dll_offset_enable(node, lmc, 1);
+ }
+ }
} /* for (lmc = 0; lmc < num_lmcs; lmc++) */
diff --git a/src/vendorcode/cavium/bdk/libdram/dram-util.h b/src/vendorcode/cavium/bdk/libdram/dram-util.h
index f8ab6c1552..c9a96ba5ae 100644
--- a/src/vendorcode/cavium/bdk/libdram/dram-util.h
+++ b/src/vendorcode/cavium/bdk/libdram/dram-util.h
@@ -42,6 +42,8 @@
* are not meant for users's of the libdram API.
*/
+#if 0
+/* FIXME(dhendrix): min/max are defined in stdlib.h */
/**
* Standard min(a,b) macro
*/
@@ -56,6 +58,7 @@
#define max(X, Y) \
({ typeof (X) __x = (X); typeof(Y) __y = (Y); \
(__x > __y) ? __x : __y; })
+#endif
/**
* Absolute value of an integer
diff --git a/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c b/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c
index cdc799744f..747c0e8767 100644
--- a/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c
+++ b/src/vendorcode/cavium/bdk/libdram/lib_octeon_shared.c
@@ -43,6 +43,10 @@
#include "libbdk-arch/bdk-csrs-l2c.h"
#include "dram-internal.h"
+#include "dram-env.h"
+#include <libbdk-hal/bdk-rng.h>
+#include <lame_string.h>
+
/* Define DDR_DEBUG to debug the DDR interface. This also enables the
** output necessary for review by Cavium Inc., Inc. */
/* #define DDR_DEBUG */
@@ -166,8 +170,8 @@ static int init_octeon_dram_interface(bdk_node_t node,
}
}
- error_print("N%d.LMC%d Configuration Completed: %d MB\n",
- node, ddr_interface_num, mem_size_mbytes);
+ printf("N%d.LMC%d Configuration Completed: %d MB\n",
+ node, ddr_interface_num, mem_size_mbytes);
return mem_size_mbytes;
}
@@ -503,7 +507,7 @@ int test_dram_byte_hw(bdk_node_t node, int ddr_interface_num,
errors = 0;
bdk_dram_address_extract_info(p, &node_address, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
- VB_PRT(VBL_DEV2, "test_dram_byte_hw: START at A:0x%012lx, N%d L%d D%d R%d/%d B%1x Row:%05x Col:%05x\n",
+ VB_PRT(VBL_DEV2, "test_dram_byte_hw: START at A:0x%012llx, N%d L%d D%d R%d/%d B%1x Row:%05x Col:%05x\n",
p, node_address, lmc, dimm, prank, lrank, bank, row, col);
// only check once per call, and ignore if no match...
@@ -540,7 +544,7 @@ int test_dram_byte_hw(bdk_node_t node, int ddr_interface_num,
p1 = p + k;
bdk_dram_address_extract_info(p1, &node_address, &lmc, &dimm, &prank, &lrank, &bank, &row, &col);
- VB_PRT(VBL_DEV3, "test_dram_byte_hw: NEXT interation at A:0x%012lx, N%d L%d D%d R%d/%d B%1x Row:%05x Col:%05x\n",
+ VB_PRT(VBL_DEV3, "test_dram_byte_hw: NEXT interation at A:0x%012llx, N%d L%d D%d R%d/%d B%1x Row:%05x Col:%05x\n",
p1, node_address, lmc, dimm, prank, lrank, bank, row, col);
/*
@@ -1013,7 +1017,7 @@ int initialize_ddr_clock(bdk_node_t node,
// always write LMC0 CSR, it must be active
DRAM_CSR_WRITE(node, BDK_LMCX_DDR_PLL_CTL(0), ddr_pll_ctl.u);
- ddr_print("%-45s : 0x%016lx\n", "LMC0: DDR_PLL_CTL", ddr_pll_ctl.u);
+ ddr_print("%-45s : 0x%016llx\n", "LMC0: DDR_PLL_CTL", ddr_pll_ctl.u);
// only when LMC1 is active
// NOTE: 81xx has only 1 LMC, and 83xx can operate in 1-LMC mode
@@ -1030,7 +1034,7 @@ int initialize_ddr_clock(bdk_node_t node,
// always write LMC1 CSR when it is active
DRAM_CSR_WRITE(node, BDK_LMCX_DDR_PLL_CTL(1), ddr_pll_ctl.u);
- ddr_print("%-45s : 0x%016lx\n", "LMC1: DDR_PLL_CTL", ddr_pll_ctl.u);
+ ddr_print("%-45s : 0x%016llx\n", "LMC1: DDR_PLL_CTL", ddr_pll_ctl.u);
}
/*
@@ -1107,7 +1111,7 @@ int initialize_ddr_clock(bdk_node_t node,
if (clkf > max_clkf) continue; /* PLL requires clkf to be limited */
if (_abs(error) > _abs(best_error)) continue;
- VB_PRT(VBL_TME, "clkr: %2lu, en[%d]: %2d, clkf: %4lu, pll_MHz: %4lu, ddr_hertz: %8lu, error: %8ld\n",
+ VB_PRT(VBL_TME, "clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld\n",
clkr, save_en_idx, _en[save_en_idx], clkf, pll_MHz, calculated_ddr_hertz, error);
/* Favor the highest PLL frequency. */
@@ -1143,7 +1147,7 @@ int initialize_ddr_clock(bdk_node_t node,
best_error = ddr_hertz - best_calculated_ddr_hertz;
}
- ddr_print("clkr: %2lu, en[%d]: %2d, clkf: %4lu, pll_MHz: %4lu, ddr_hertz: %8lu, error: %8ld <==\n",
+ ddr_print("clkr: %2llu, en[%d]: %2d, clkf: %4llu, pll_MHz: %4llu, ddr_hertz: %8llu, error: %8lld <==\n",
best_clkr, best_en_idx, _en[best_en_idx], best_clkf, best_pll_MHz,
best_calculated_ddr_hertz, best_error);
@@ -1177,7 +1181,7 @@ int initialize_ddr_clock(bdk_node_t node,
// make sure we preserve any settings already there
ddr_pll_ctl.u = BDK_CSR_READ(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num));
- ddr_print("LMC%d: DDR_PLL_CTL : 0x%016lx\n",
+ ddr_print("LMC%d: DDR_PLL_CTL : 0x%016llx\n",
loop_interface_num, ddr_pll_ctl.u);
ddr_pll_ctl.cn83xx.ddr_ps_en = best_en_idx;
@@ -1187,7 +1191,7 @@ int initialize_ddr_clock(bdk_node_t node,
ddr_pll_ctl.cn83xx.bwadj = new_bwadj;
DRAM_CSR_WRITE(node, BDK_LMCX_DDR_PLL_CTL(loop_interface_num), ddr_pll_ctl.u);
- ddr_print("LMC%d: DDR_PLL_CTL : 0x%016lx\n",
+ ddr_print("LMC%d: DDR_PLL_CTL : 0x%016llx\n",
loop_interface_num, ddr_pll_ctl.u);
}
}
@@ -1579,7 +1583,7 @@ int initialize_ddr_clock(bdk_node_t node,
lmc_phy_ctl.u = BDK_CSR_READ(node, BDK_LMCX_PHY_CTL(loop_interface_num));
lmc_phy_ctl.cn83xx.lv_mode = (~loop_interface_num) & 1; /* Odd LMCs = 0, Even LMCs = 1 */
- ddr_print("LMC%d: PHY_CTL : 0x%016lx\n",
+ ddr_print("LMC%d: PHY_CTL : 0x%016llx\n",
loop_interface_num, lmc_phy_ctl.u);
DRAM_CSR_WRITE(node, BDK_LMCX_PHY_CTL(loop_interface_num), lmc_phy_ctl.u);
}
@@ -1860,7 +1864,7 @@ restart_training:
// NOTE: return is a bitmask of the erroring bytelanes - we only print it
errors = test_dram_byte_hw(node, lmc, phys_addr, DBTRAIN_DBI, NULL);
- ddr_print("N%d.LMC%d: DBI switchover: TEST: rank %d, phys_addr 0x%lx, errors 0x%x.\n",
+ ddr_print("N%d.LMC%d: DBI switchover: TEST: rank %d, phys_addr 0x%llx, errors 0x%x.\n",
node, lmc, rankx, phys_addr, errors);
// NEXT - check for locking
@@ -1895,7 +1899,7 @@ restart_training:
// end of DBI switchover
///////////////////////////////////////////////////////////
-uint32_t measure_octeon_ddr_clock(bdk_node_t node,
+static uint32_t measure_octeon_ddr_clock(bdk_node_t node,
const ddr_configuration_t *ddr_configuration,
uint32_t cpu_hertz,
uint32_t ddr_hertz,
@@ -1926,17 +1930,14 @@ uint32_t measure_octeon_ddr_clock(bdk_node_t node,
core_clocks = bdk_clock_get_count(BDK_CLOCK_TIME) - core_clocks;
calc_ddr_hertz = ddr_clocks * bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / core_clocks;
- /* Asim doesn't have a DDR clock, force the measurement to be correct */
- if (bdk_is_platform(BDK_PLATFORM_ASIM))
- calc_ddr_hertz = ddr_hertz;
-
- ddr_print("LMC%d: Measured DDR clock: %lu, cpu clock: %u, ddr clocks: %lu\n",
+ ddr_print("LMC%d: Measured DDR clock: %llu, cpu clock: %u, ddr clocks: %llu\n",
ddr_interface_num, calc_ddr_hertz, cpu_hertz, ddr_clocks);
/* Check for unreasonable settings. */
if (calc_ddr_hertz == 0) {
error_print("DDR clock misconfigured. Exiting.\n");
- exit(1);
+ /* FIXME(dhendrix): We don't exit() in coreboot */
+// exit(1);
}
return calc_ddr_hertz;
}
diff --git a/src/vendorcode/cavium/bdk/libdram/libdram-config-load.c b/src/vendorcode/cavium/bdk/libdram/libdram-config-load.c
index 5173290187..76b5ddc57d 100644
--- a/src/vendorcode/cavium/bdk/libdram/libdram-config-load.c
+++ b/src/vendorcode/cavium/bdk/libdram/libdram-config-load.c
@@ -38,6 +38,14 @@
***********************license end**************************************/
#include <bdk.h>
+/* FIXME(dhendrix): added */
+#include <console/console.h> /* for die() */
+#include <string.h>
+#include <libbdk-arch/bdk-model.h>
+#include <libbdk-hal/bdk-config.h>
+#include <soc/twsi.h>
+#include <device/i2c_simple.h>
+
/**
* Load a "odt_*rank_config" structure
*
@@ -157,10 +165,12 @@ const dram_config_t *libdram_config_load(bdk_node_t node)
}
else
{
+#if 0
int spd_size;
const void *spd_data = bdk_config_get_blob(&spd_size, BDK_CONFIG_DDR_SPD_DATA, dimm, lmc, node);
if (spd_data && spd_size)
cfg->config[lmc].dimm_config_table[dimm].spd_ptr = spd_data;
+#endif
}
}
}
diff --git a/src/vendorcode/cavium/bdk/libdram/libdram.c b/src/vendorcode/cavium/bdk/libdram/libdram.c
index b19486694c..551ba24c42 100644
--- a/src/vendorcode/cavium/bdk/libdram/libdram.c
+++ b/src/vendorcode/cavium/bdk/libdram/libdram.c
@@ -36,9 +36,20 @@
* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+
#include <bdk.h>
-#include "libbdk-arch/bdk-csrs-mio_fus.h"
+#include <libbdk-arch/bdk-csrs-mio_fus.h>
+#include <libbdk-dram/bdk-dram-config.h>
+#include <libbdk-hal/bdk-config.h>
+#include <libbdk-hal/bdk-gpio.h>
+#include <libbdk-hal/bdk-l2c.h>
+#include <libbdk-hal/bdk-utils.h>
+#include <libbdk-os/bdk-init.h>
+#include <libdram/libdram-config.h>
#include "dram-internal.h"
+#include <stddef.h> /* for NULL */
+#include <lame_string.h> /* for strtol() and friends */
+
/* This global variable is accessed through dram_is_verbose() to determine
the verbosity level. Use that function instead of setting it directly */
@@ -55,35 +66,33 @@ dram_config_t __libdram_global_cfg;
static void bdk_dram_clear_mem(bdk_node_t node)
{
- if (!bdk_is_platform(BDK_PLATFORM_ASIM)) {
- uint64_t mbytes = bdk_dram_get_size_mbytes(node);
- uint64_t skip = (node == bdk_numa_master()) ? bdk_dram_get_top_of_bdk() : 0;
- uint64_t len = (mbytes << 20) - skip;
-
- BDK_TRACE(DRAM, "N%d: Clearing DRAM\n", node);
- if (skip)
- {
- /* All memory below skip may contain valid data, so we can't clear
- it. We still need to make sure all cache lines in this area are
- fully dirty so that ECC bits will be updated on store. A single
- write to the cache line isn't good enough because partial LMC
- writes may be enabled */
- ddr_print("N%d: Rewriting DRAM: start 0 length 0x%lx\n", node, skip);
- volatile uint64_t *ptr = bdk_phys_to_ptr(bdk_numa_get_address(node, 8));
- /* The above pointer got address 8 to avoid NULL pointer checking
- in bdk_phys_to_ptr(). Correct it here */
- ptr--;
- uint64_t *end = bdk_phys_to_ptr(bdk_numa_get_address(node, skip));
- while (ptr < end)
- {
- *ptr = *ptr;
- ptr++;
- }
- }
- ddr_print("N%d: Clearing DRAM: start 0x%lx length 0x%lx\n", node, skip, len);
- bdk_zero_memory(bdk_phys_to_ptr(bdk_numa_get_address(node, skip)), len);
- BDK_TRACE(DRAM, "N%d: DRAM clear complete\n", node);
- }
+ uint64_t mbytes = bdk_dram_get_size_mbytes(node);
+ uint64_t skip = (node == bdk_numa_master()) ? bdk_dram_get_top_of_bdk() : 0;
+ uint64_t len = (mbytes << 20) - skip;
+
+ BDK_TRACE(DRAM, "N%d: Clearing DRAM\n", node);
+ if (skip)
+ {
+ /* All memory below skip may contain valid data, so we can't clear
+ it. We still need to make sure all cache lines in this area are
+ fully dirty so that ECC bits will be updated on store. A single
+ write to the cache line isn't good enough because partial LMC
+ writes may be enabled */
+ ddr_print("N%d: Rewriting DRAM: start 0 length 0x%llx\n", node, skip);
+ volatile uint64_t *ptr = bdk_phys_to_ptr(bdk_numa_get_address(node, 8));
+ /* The above pointer got address 8 to avoid NULL pointer checking
+ in bdk_phys_to_ptr(). Correct it here */
+ ptr--;
+ uint64_t *end = bdk_phys_to_ptr(bdk_numa_get_address(node, skip));
+ while (ptr < end)
+ {
+ *ptr = *ptr;
+ ptr++;
+ }
+ }
+ ddr_print("N%d: Clearing DRAM: start 0x%llx length 0x%llx\n", node, skip, len);
+ bdk_zero_memory(bdk_phys_to_ptr(bdk_numa_get_address(node, skip)), len);
+ BDK_TRACE(DRAM, "N%d: DRAM clear complete\n", node);
}
static void bdk_dram_clear_ecc(bdk_node_t node)
@@ -110,7 +119,7 @@ static void bdk_dram_enable_ecc_reporting(bdk_node_t node)
if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
DRAM_CSR_WRITE(node, BDK_LMCX_INT_ENA_W1S(lmc), -1ULL);
BDK_CSR_INIT(lmc_int_ena_w1s, node, BDK_LMCX_INT_ENA_W1S(lmc));
- ddr_print("N%d.LMC%d: %-36s : 0x%08lx\n",
+ ddr_print("N%d.LMC%d: %-36s : 0x%08llx\n",
node, lmc, "LMC_INT_ENA_W1S", lmc_int_ena_w1s.u);
}
}
@@ -130,7 +139,7 @@ static void bdk_dram_disable_ecc_reporting(bdk_node_t node)
if (! CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X)) { // added 81xx and 83xx
DRAM_CSR_WRITE(node, BDK_LMCX_INT_ENA_W1C(lmc), -1ULL);
BDK_CSR_INIT(lmc_int_ena_w1c, node, BDK_LMCX_INT_ENA_W1C(lmc));
- ddr_print("N%d.LMC%d: %-36s : 0x%08lx\n",
+ ddr_print("N%d.LMC%d: %-36s : 0x%08llx\n",
node, lmc, "LMC_INT_ENA_W1C", lmc_int_ena_w1c.u);
}
}
@@ -171,14 +180,15 @@ static int bdk_libdram_tune_node(int node)
// Automatically tune the data byte DLL write offsets
// allow override of default setting
str = getenv("ddr_tune_write_offsets");
+ str = NULL;
if (str)
do_dllwo = !!strtoul(str, NULL, 0);
if (do_dllwo) {
- BDK_TRACE(DRAM, "N%d: Starting DLL Write Offset Tuning for LMCs\n", node);
- errs = perform_dll_offset_tuning(node, /* write */1, /* tune */1);
- BDK_TRACE(DRAM, "N%d: Finished DLL Write Offset Tuning for LMCs, %d errors)\n",
- node, errs);
- tot_errs += errs;
+ BDK_TRACE(DRAM, "N%d: Starting DLL Write Offset Tuning for LMCs\n", node);
+ errs = perform_dll_offset_tuning(node, /* write */1, /* tune */1);
+ BDK_TRACE(DRAM, "N%d: Finished DLL Write Offset Tuning for LMCs, %d errors)\n",
+ node, errs);
+ tot_errs += errs;
}
// disabled by default for now, does not seem to be needed much?
@@ -287,9 +297,6 @@ static int bdk_libdram_maybe_tune_node(int node)
*/
int libdram_config(int node, const dram_config_t *dram_config, int ddr_clock_override)
{
- if (bdk_is_platform(BDK_PLATFORM_ASIM))
- return bdk_dram_get_size_mbytes(node);
-
/* Boards may need to mux the TWSI connection between THUNDERX and the BMC.
This allows the BMC to monitor DIMM temeratures and health */
int gpio_select = bdk_config_get_int(BDK_CONFIG_DRAM_CONFIG_GPIO);
@@ -446,7 +453,7 @@ int libdram_tune(int node)
// the only way this entry point should be called is from a MENU item,
// so, enable any non-running cores on this node, and leave them
// running at the end...
- ddr_print("N%d: %s: Starting cores (mask was 0x%lx)\n",
+ ddr_print("N%d: %s: Starting cores (mask was 0x%llx)\n",
node, __FUNCTION__, bdk_get_running_coremask(node));
bdk_init_cores(node, ~0ULL);
@@ -600,7 +607,7 @@ int libdram_margin_read_timing(int node)
int libdram_margin(int node)
{
int ret_rt, ret_wt, ret_rv, ret_wv;
- char *risk[2] = { "Low Risk", "Needs Review" };
+ const char *risk[2] = { "Low Risk", "Needs Review" };
int l2c_is_locked = bdk_l2c_is_locked(node);
// for now, no margining on 81xx, until we can reduce the dynamic runtime size...
@@ -614,7 +621,7 @@ int libdram_margin(int node)
// the only way this entry point should be called is from a MENU item,
// so, enable any non-running cores on this node, and leave them
// running at the end...
- ddr_print("N%d: %s: Starting cores (mask was 0x%lx)\n",
+ ddr_print("N%d: %s: Starting cores (mask was 0x%llx)\n",
node, __FUNCTION__, bdk_get_running_coremask(node));
bdk_init_cores(node, ~0ULL);
@@ -712,7 +719,7 @@ uint32_t libdram_get_freq_from_pll(int node, int lmc)
#ifndef DRAM_CSR_WRITE_INLINE
void dram_csr_write(bdk_node_t node, const char *csr_name, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value)
{
- VB_PRT(VBL_CSRS, "N%d: DDR Config %s[%016lx] => %016lx\n", node, csr_name, address, value);
+ VB_PRT(VBL_CSRS, "N%d: DDR Config %s[%016llx] => %016llx\n", node, csr_name, address, value);
bdk_csr_write(node, type, busnum, size, address, value);
}
#endif
diff --git a/src/vendorcode/cavium/include/bdk/bdk-devicetree.h b/src/vendorcode/cavium/include/bdk/bdk-devicetree.h
new file mode 100644
index 0000000000..559e4b531f
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/bdk-devicetree.h
@@ -0,0 +1,20 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2017-present Facebook, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+struct bdk_devicetree_key_value {
+ const char *key;
+ const char *value;
+};
diff --git a/src/vendorcode/cavium/include/bdk/bdk-minimal.h b/src/vendorcode/cavium/include/bdk/bdk-minimal.h
new file mode 100644
index 0000000000..3ecf5a700a
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/bdk-minimal.h
@@ -0,0 +1,60 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ * Copyright 2017-present Facebook, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ *
+ * bdk-minimal.h: Subset of bdk.h used by coreboot
+ */
+
+#ifndef __SOC_CAVIUM_COMMON_BDK_MINIMAL_H__
+#define __SOC_CAVIUM_COMMON_BDK_MINIMAL_H__
+
+#include <console/console.h> /* for printk */
+#include <endian.h>
+#include <stddef.h> /* for NULL */
+#include <libbdk-hal/bdk-access.h>
+
+#define bdk_le16_to_cpu(x) le16_to_cpu(x)
+#define bdk_le32_to_cpu(x) le32_to_cpu(x)
+#define bdk_le64_to_cpu(x) le64_to_cpu(x)
+#define bdk_be16_to_cpu(x) be16_to_cpu(x)
+#define bdk_be32_to_cpu(x) be32_to_cpu(x)
+#define bdk_be64_to_cpu(x) be64_to_cpu(x)
+#define bdk_cpu_to_le16(x) cpu_to_le16(x)
+#define bdk_cpu_to_le32(x) cpu_to_le32(x)
+#define bdk_cpu_to_le64(x) cpu_to_le64(x)
+
+#define __BYTE_ORDER __BYTE_ORDER__
+/* Watch out for __BIG_ENDIAN. coreboot usually checks if it's defined at all
+ * but the Cavium BDK checks its value. */
+#define __BIG_ENDIAN 4321
+
+#define printf(format, ...) printk(BIOS_DEBUG, format, ##__VA_ARGS__)
+#define puts(str) printk(BIOS_INFO, str)
+#define fflush(x) /* output gets flushed automatically */
+
+/* careful, the ordering matters for some headers */
+#include <libbdk-arch/bdk-warn.h>
+#include <libbdk-arch/bdk-asm.h>
+#include <libbdk-arch/bdk-model.h>
+#include <libbdk-arch/bdk-numa.h>
+
+#include <libbdk-arch/bdk-require.h>
+#include <libbdk-arch/bdk-csr.h>
+
+#include <libbdk-os/bdk-thread.h>
+
+/* FIXME: experiment to see if including the universe here will solve some
+ * current build issues... */
+#include <libbdk-arch/bdk-arch.h>
+#include <libbdk-boot/bdk-boot.h>
+#include <libbdk-dram/bdk-dram.h>
+#include <libdram/libdram.h>
+
+static inline char *getenv(const char *name) { return NULL; }
+
+#endif /* !__SOC_CAVIUM_COMMON_BDK_MINIMAL_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/bdk.h b/src/vendorcode/cavium/include/bdk/bdk.h
index c3e0a518db..96b349ad11 100644
--- a/src/vendorcode/cavium/include/bdk/bdk.h
+++ b/src/vendorcode/cavium/include/bdk/bdk.h
@@ -1,80 +1,18 @@
-#ifndef __BDK_H__
-#define __BDK_H__
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2017-present Facebook, Inc.
*
- * Master include file for all BDK function.
+ * SPDX-License-Identifier: BSD-3-Clause
*
- * <hr>$Revision: 49448 $<hr>
+ * bdk.h: This is a stub for BDK compatibility. The real bdk.h is an uber-
+ * header that pulls in everything. For our purposes we'll create a minimal
+ * version that includes only the stuff we need.
*/
-#include <stdint.h>
-#include <stdbool.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <string.h>
-#include <stdio.h>
-#include "../libbdk-arch/bdk-arch.h"
-#include "../libbdk-os/bdk-os.h"
-#include "../libfatfs/ff.h"
-#include "../libfatfs/diskio.h"
-#ifndef BDK_BUILD_HOST
-#include "../libbdk-hal/bdk-hal.h"
-#include "../libbdk-boot/bdk-boot.h"
-#include "../libbdk-dram/bdk-dram.h"
-#include "../libbdk-driver/bdk-driver.h"
-#include "../libbdk-trust/bdk-trust.h"
-#include "../libdram/libdram.h"
-#include "bdk-functions.h"
-#endif
-#include "../libbdk-lua/bdk-lua.h"
-#include "../libbdk-bist/bist.h"
+#ifndef __SOC_CAVIUM_COMMON_BDK_H__
+#define __SOC_CAVIUM_COMMON_BDK_H__
-/**
- * @mainpage
- *
- * This document goes through the internal details of the BDK. Its purpose is
- * to serve as a API reference for people writing applications. Users of the
- * BDK's binary applications do not need these details.
- */
+#include "bdk-minimal.h"
-#endif
+#endif /* !__SOC_CAVIUM_COMMON_BDK_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/lame_string.h b/src/vendorcode/cavium/include/bdk/lame_string.h
new file mode 100644
index 0000000000..7ada9007b4
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/lame_string.h
@@ -0,0 +1,19 @@
+/*
+ * This file is part of the coreboot project.
+ *
+ * Copyright 2018-present Facebook, Inc.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#ifndef __LAME_STRING_H__
+#define __LAME_STRING_H__
+
+long int strtol(const char *nptr, char **endptr, int base);
+long long int strtoll(const char *nptr, char **endptr, int base);
+unsigned long int strtoul(const char *nptr, char **endptr, int base);
+unsigned long long int strtoull(const char *nptr, char **endptr, int base);
+int str_to_hex(const char *str, int64_t *val);
+int str_to_int(const char *str, int64_t *val);
+
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h
index e2434a72d8..660f3e83d6 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-arch.h
@@ -48,24 +48,24 @@
* <hr>$Revision: 49448 $<hr>
*/
+#include <arch/byteorder.h>
+
#ifndef __BYTE_ORDER
- #if !defined(__ORDER_BIG_ENDIAN__) || !defined(__ORDER_LITTLE_ENDIAN__) || !defined(__BYTE_ORDER__)
- #error Unable to determine Endian mode
- #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
- #define __BYTE_ORDER __ORDER_BIG_ENDIAN__
- #define BDK_LITTLE_ENDIAN_STRUCT __attribute__ ((scalar_storage_order("little-endian")))
- #elif __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
- #define __BYTE_ORDER __ORDER_LITTLE_ENDIAN__
- #define BDK_LITTLE_ENDIAN_STRUCT
- #else
- #error Unable to determine Endian mode
- #endif
- #define __BIG_ENDIAN __ORDER_BIG_ENDIAN__
- #define __LITTLE_ENDIAN __ORDER_LITTLE_ENDIAN__
+ #if (__LITTLE_ENDIAN)
+ #define __BYTE_ORDER __LITTLE_ENDIAN
+ #elif defined(__BIG_ENDIAN)
+ #define __BYTE_ORDER __BIG_ENDIAN
+ #endif
+#endif
+
+#ifndef __LITTLE_ENDIAN
+ #define __LITTLE_ENDIAN 1234
+#endif
+#ifndef __BIG_ENDIAN
+ #define __BIG_ENDIAN 4321
#endif
#include "bdk-require.h"
-#include "bdk-swap.h"
#ifndef BDK_BUILD_HOST
#include "bdk-asm.h"
#endif
@@ -76,10 +76,8 @@
#include "bdk-lmt.h"
#endif
#include "bdk-warn.h"
-#include "bdk-version.h"
#ifndef BDK_BUILD_HOST
#include "bdk-fuse.h"
-#include "bdk-platform.h"
#endif
#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h
index 95805ec671..8aa860faa5 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csr.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_CSR_H__
+#define __CB_BDK_CSR_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -37,6 +39,9 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+//#include <libbdk-arch/bdk-csrs-rst.h> /* FIXME: circular dependency with this header */
+#include <libbdk-hal/bdk-clock.h> /* FIXME(dhendrix): added */
+
/**
* @file
*
@@ -106,7 +111,7 @@ extern int bdk_csr_write_by_name(bdk_node_t node, const char *name, uint64_t val
extern int __bdk_csr_lookup_index(const char *name, int params[]);
extern int bdk_csr_get_name(const char *last_name, char *buffer);
struct bdk_readline_tab;
-extern struct bdk_readline_tab *__bdk_csr_get_tab_complete() BDK_WEAK;
+extern struct bdk_readline_tab *__bdk_csr_get_tab_complete(void) BDK_WEAK;
extern uint64_t bdk_sysreg_read(int node, int core, uint64_t regnum);
extern void bdk_sysreg_write(int node, int core, uint64_t regnum, uint64_t value);
@@ -125,10 +130,11 @@ extern void bdk_sysreg_write(int node, int core, uint64_t regnum, uint64_t value
*
* @return The value of the CSR
*/
+/* FIXME(dhendrix): Moved __bdk_csr_read_slow out of the function body... */
+extern uint64_t __bdk_csr_read_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address);
static inline uint64_t bdk_csr_read(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address) __attribute__ ((always_inline));
static inline uint64_t bdk_csr_read(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address)
{
- extern uint64_t __bdk_csr_read_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address);
switch (type)
{
case BDK_CSR_TYPE_DAB:
@@ -174,10 +180,11 @@ static inline uint64_t bdk_csr_read(bdk_node_t node, bdk_csr_type_t type, int bu
* @param address The address of the CSR
* @param value Value to write to the CSR
*/
+/* FIXME(dhendrix): Moved __bdk_csr_write_slow out of the function body... */
+extern void __bdk_csr_write_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value);
static inline void bdk_csr_write(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value) __attribute__ ((always_inline));
static inline void bdk_csr_write(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value)
{
- extern void __bdk_csr_write_slow(bdk_node_t node, bdk_csr_type_t type, int busnum, int size, uint64_t address, uint64_t value);
switch (type)
{
case BDK_CSR_TYPE_DAB:
@@ -264,6 +271,8 @@ static inline void bdk_csr_write(bdk_node_t node, bdk_csr_type_t type, int busnu
* 2) Check if ("type".s."field" "op" "value")
* 3) If #2 isn't true loop to #1 unless too much time has passed.
*/
+/* FIXME(dhendrix): removed bdk_thread_yield() */
+#if 0
#define BDK_CSR_WAIT_FOR_FIELD(node, csr, field, op, value, timeout_usec) \
({int result; \
do { \
@@ -285,6 +294,27 @@ static inline void bdk_csr_write(bdk_node_t node, bdk_csr_type_t type, int busnu
} \
} while (0); \
result;})
+#endif
+#define BDK_CSR_WAIT_FOR_FIELD(node, csr, field, op, value, timeout_usec) \
+ ({int result; \
+ do { \
+ uint64_t done = bdk_clock_get_count(BDK_CLOCK_TIME) + (uint64_t)timeout_usec * \
+ bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / 1000000; \
+ typedef_##csr c; \
+ uint64_t _tmp_address = csr; \
+ while (1) \
+ { \
+ c.u = bdk_csr_read(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), _tmp_address); \
+ if ((c.s.field) op (value)) { \
+ result = 0; \
+ break; \
+ } else if (bdk_clock_get_count(BDK_CLOCK_TIME) > done) { \
+ result = -1; \
+ break; \
+ } \
+ } \
+ } while (0); \
+ result;})
/**
* This macro spins on a field waiting for it to reach a value. It
@@ -299,6 +329,8 @@ static inline void bdk_csr_write(bdk_node_t node, bdk_csr_type_t type, int busnu
* change bit locations, the compiler will not catch those changes
* with this macro. Changes silently do the wrong thing at runtime.
*/
+/* FIXME(dhendrix): removed bdk_thread_yield() */
+#if 0
#define BDK_CSR_WAIT_FOR_CHIP_FIELD(node, csr, chip, field, op, value, timeout_usec) \
({int result; \
do { \
@@ -320,5 +352,27 @@ static inline void bdk_csr_write(bdk_node_t node, bdk_csr_type_t type, int busnu
} \
} while (0); \
result;})
+#endif
+#define BDK_CSR_WAIT_FOR_CHIP_FIELD(node, csr, chip, field, op, value, timeout_usec) \
+ ({int result; \
+ do { \
+ uint64_t done = bdk_clock_get_count(BDK_CLOCK_TIME) + (uint64_t)timeout_usec * \
+ bdk_clock_get_rate(bdk_numa_local(), BDK_CLOCK_TIME) / 1000000; \
+ typedef_##csr c; \
+ uint64_t _tmp_address = csr; \
+ while (1) \
+ { \
+ c.u = bdk_csr_read(node, bustype_##csr, busnum_##csr, sizeof(typedef_##csr), _tmp_address); \
+ if ((c.chip.field) op (value)) { \
+ result = 0; \
+ break; \
+ } else if (bdk_clock_get_count(BDK_CLOCK_TIME) > done) { \
+ result = -1; \
+ break; \
+ } \
+ } \
+ } while (0); \
+ result;})
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-bgx.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-bgx.h
new file mode 100644
index 0000000000..5de214ac23
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-bgx.h
@@ -0,0 +1,17565 @@
+#ifndef __BDK_CSRS_BGX_H__
+#define __BDK_CSRS_BGX_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium BGX.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration bgx_bar_e
+ *
+ * BGX Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_BGX_BAR_E_BGXX_PF_BAR0(a) (0x87e0e0000000ll + 0x1000000ll * (a))
+#define BDK_BGX_BAR_E_BGXX_PF_BAR0_SIZE 0x400000ull
+#define BDK_BGX_BAR_E_BGXX_PF_BAR4(a) (0x87e0e0400000ll + 0x1000000ll * (a))
+#define BDK_BGX_BAR_E_BGXX_PF_BAR4_SIZE 0x400000ull
+
+/**
+ * Enumeration bgx_int_vec_e
+ *
+ * BGX MSI-X Vector Enumeration
+ * Enumeration the MSI-X interrupt vectors.
+ */
+#define BDK_BGX_INT_VEC_E_CMRX_INT(a) (0 + 7 * (a))
+#define BDK_BGX_INT_VEC_E_CMR_MEM_INT (0x1c)
+#define BDK_BGX_INT_VEC_E_GMPX_GMI_RX_INT(a) (5 + 7 * (a))
+#define BDK_BGX_INT_VEC_E_GMPX_GMI_TX_INT(a) (6 + 7 * (a))
+#define BDK_BGX_INT_VEC_E_GMPX_PCS_INT(a) (4 + 7 * (a))
+#define BDK_BGX_INT_VEC_E_SMUX_RX_INT(a) (2 + 7 * (a))
+#define BDK_BGX_INT_VEC_E_SMUX_TX_INT(a) (3 + 7 * (a))
+#define BDK_BGX_INT_VEC_E_SPUX_INT(a) (1 + 7 * (a))
+#define BDK_BGX_INT_VEC_E_SPU_MEM_INT (0x1d)
+
+/**
+ * Enumeration bgx_lmac_types_e
+ *
+ * BGX LMAC Type Enumeration
+ * Enumerates the LMAC Types that BGX supports.
+ */
+#define BDK_BGX_LMAC_TYPES_E_FORTYG_R (4)
+#define BDK_BGX_LMAC_TYPES_E_QSGMII (6)
+#define BDK_BGX_LMAC_TYPES_E_RGMII (5)
+#define BDK_BGX_LMAC_TYPES_E_RXAUI (2)
+#define BDK_BGX_LMAC_TYPES_E_SGMII (0)
+#define BDK_BGX_LMAC_TYPES_E_TENG_R (3)
+#define BDK_BGX_LMAC_TYPES_E_XAUI (1)
+
+/**
+ * Enumeration bgx_opcode_e
+ *
+ * INTERNAL: BGX Error Opcode Enumeration
+ *
+ * Enumerates the error opcodes created by BGX and presented to NCSI/TNS/NIC.
+ */
+#define BDK_BGX_OPCODE_E_RE_FCS (7)
+#define BDK_BGX_OPCODE_E_RE_FCS_RCV (8)
+#define BDK_BGX_OPCODE_E_RE_JABBER (2)
+#define BDK_BGX_OPCODE_E_RE_NONE (0)
+#define BDK_BGX_OPCODE_E_RE_PARTIAL (1)
+#define BDK_BGX_OPCODE_E_RE_RX_CTL (0xb)
+#define BDK_BGX_OPCODE_E_RE_SKIP (0xc)
+#define BDK_BGX_OPCODE_E_RE_TERMINATE (9)
+
+/**
+ * Enumeration bgx_spu_br_train_cst_e
+ *
+ * BGX Training Coefficient Status Enumeration
+ * 2-bit status for each coefficient as defined in 802.3-2008, Table 72-5.
+ */
+#define BDK_BGX_SPU_BR_TRAIN_CST_E_MAXIMUM (3)
+#define BDK_BGX_SPU_BR_TRAIN_CST_E_MINIMUM (2)
+#define BDK_BGX_SPU_BR_TRAIN_CST_E_NOT_UPDATED (0)
+#define BDK_BGX_SPU_BR_TRAIN_CST_E_UPDATED (1)
+
+/**
+ * Enumeration bgx_spu_br_train_cup_e
+ *
+ * BGX Training Coefficient Enumeration
+ * 2-bit command for each coefficient as defined in 802.3-2008, Table 72-4.
+ */
+#define BDK_BGX_SPU_BR_TRAIN_CUP_E_DECREMENT (1)
+#define BDK_BGX_SPU_BR_TRAIN_CUP_E_HOLD (0)
+#define BDK_BGX_SPU_BR_TRAIN_CUP_E_INCREMENT (2)
+#define BDK_BGX_SPU_BR_TRAIN_CUP_E_RSV_CMD (3)
+
+/**
+ * Structure bgx_spu_br_lane_train_status_s
+ *
+ * BGX Lane Training Status Structure
+ * This is the group of lane status bits for a single lane in the BASE-R PMD status register
+ * (MDIO address 1.151) as defined in 802.3ba-2010, Table 45-55.
+ */
+union bdk_bgx_spu_br_lane_train_status_s
+{
+ uint32_t u;
+ struct bdk_bgx_spu_br_lane_train_status_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t training_failure : 1; /**< [ 3: 3] Link training failure. */
+ uint32_t training : 1; /**< [ 2: 2] Link training state.
+ 0 = Training in progress.
+ 1 = Training has completed. */
+ uint32_t frame_lock : 1; /**< [ 1: 1] Frame lock status. Set when training frame delineation has been detected. */
+ uint32_t rx_trained : 1; /**< [ 0: 0] Receiver trained status.
+ 0 = Receiver training.
+ 1 = Receiver trained and ready to receive data for the lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rx_trained : 1; /**< [ 0: 0] Receiver trained status.
+ 0 = Receiver training.
+ 1 = Receiver trained and ready to receive data for the lane. */
+ uint32_t frame_lock : 1; /**< [ 1: 1] Frame lock status. Set when training frame delineation has been detected. */
+ uint32_t training : 1; /**< [ 2: 2] Link training state.
+ 0 = Training in progress.
+ 1 = Training has completed. */
+ uint32_t training_failure : 1; /**< [ 3: 3] Link training failure. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgx_spu_br_lane_train_status_s_s cn; */
+};
+
+/**
+ * Structure bgx_spu_br_train_cup_s
+ *
+ * BGX Lane Training Coeffiecient Structure
+ * This is the coefficient update field of the BASE-R link training packet as defined in
+ * 802.3-2008, Table 72-4.
+ */
+union bdk_bgx_spu_br_train_cup_s
+{
+ uint32_t u;
+ struct bdk_bgx_spu_br_train_cup_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_14_31 : 18;
+ uint32_t preset : 1; /**< [ 13: 13] Preset. Set to indicate that all TX coefficients be set to a state where equalization is
+ turned off, i.e. the precursor (k = -1) and postcursor (k = +1) coefficients should be set
+ to 0 and the main
+ (k = 0) coefficient should be set to its maximum value. */
+ uint32_t init : 1; /**< [ 12: 12] Initialize. Set to indicate that the TX coefficients should be set to meet the conditions
+ defined in 802.3-2008 sub-clause 72.6.10.4.2. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t post_cup : 2; /**< [ 5: 4] Post-cursor (k = +1) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t main_cup : 2; /**< [ 3: 2] Main (k = 0) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t pre_cup : 2; /**< [ 1: 0] Pre-cursor (k = -1) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+#else /* Word 0 - Little Endian */
+ uint32_t pre_cup : 2; /**< [ 1: 0] Pre-cursor (k = -1) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t main_cup : 2; /**< [ 3: 2] Main (k = 0) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t post_cup : 2; /**< [ 5: 4] Post-cursor (k = +1) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t init : 1; /**< [ 12: 12] Initialize. Set to indicate that the TX coefficients should be set to meet the conditions
+ defined in 802.3-2008 sub-clause 72.6.10.4.2. */
+ uint32_t preset : 1; /**< [ 13: 13] Preset. Set to indicate that all TX coefficients be set to a state where equalization is
+ turned off, i.e. the precursor (k = -1) and postcursor (k = +1) coefficients should be set
+ to 0 and the main
+ (k = 0) coefficient should be set to its maximum value. */
+ uint32_t reserved_14_31 : 18;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgx_spu_br_train_cup_s_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t reserved_14_15 : 2;
+ uint32_t preset : 1; /**< [ 13: 13] Preset. Set to indicate that all TX coefficients be set to a state where equalization is
+ turned off, i.e. the precursor (k = -1) and postcursor (k = +1) coefficients should be set
+ to 0 and the main
+ (k = 0) coefficient should be set to its maximum value. */
+ uint32_t init : 1; /**< [ 12: 12] Initialize. Set to indicate that the TX coefficients should be set to meet the conditions
+ defined in 802.3-2008 sub-clause 72.6.10.4.2. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t post_cup : 2; /**< [ 5: 4] Post-cursor (k = +1) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t main_cup : 2; /**< [ 3: 2] Main (k = 0) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t pre_cup : 2; /**< [ 1: 0] Pre-cursor (k = -1) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+#else /* Word 0 - Little Endian */
+ uint32_t pre_cup : 2; /**< [ 1: 0] Pre-cursor (k = -1) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t main_cup : 2; /**< [ 3: 2] Main (k = 0) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t post_cup : 2; /**< [ 5: 4] Post-cursor (k = +1) coefficient update. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CUP_E. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t init : 1; /**< [ 12: 12] Initialize. Set to indicate that the TX coefficients should be set to meet the conditions
+ defined in 802.3-2008 sub-clause 72.6.10.4.2. */
+ uint32_t preset : 1; /**< [ 13: 13] Preset. Set to indicate that all TX coefficients be set to a state where equalization is
+ turned off, i.e. the precursor (k = -1) and postcursor (k = +1) coefficients should be set
+ to 0 and the main
+ (k = 0) coefficient should be set to its maximum value. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn;
+};
+
+/**
+ * Structure bgx_spu_br_train_rep_s
+ *
+ * BGX Training Report Structure
+ * This is the status report field of the BASE-R link training packet as defined in 802.3-2008,
+ * Table 72-5.
+ */
+union bdk_bgx_spu_br_train_rep_s
+{
+ uint32_t u;
+ struct bdk_bgx_spu_br_train_rep_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t rx_ready : 1; /**< [ 15: 15] Receiver ready. Set to indicate that the local receiver has determined that training is
+ complete and is prepared to receive data. */
+ uint32_t reserved_6_14 : 9;
+ uint32_t post_cst : 2; /**< [ 5: 4] Post-cursor (k = +1) coefficient status. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CST_E. */
+ uint32_t main_cst : 2; /**< [ 3: 2] Main (k = 0) coefficient status. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CST_E. */
+ uint32_t pre_cst : 2; /**< [ 1: 0] Pre-cursor (k = -1) coefficient status. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CST_E. */
+#else /* Word 0 - Little Endian */
+ uint32_t pre_cst : 2; /**< [ 1: 0] Pre-cursor (k = -1) coefficient status. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CST_E. */
+ uint32_t main_cst : 2; /**< [ 3: 2] Main (k = 0) coefficient status. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CST_E. */
+ uint32_t post_cst : 2; /**< [ 5: 4] Post-cursor (k = +1) coefficient status. Valid when PRESET = INIT = 0. Enumerated by
+ BGX_SPU_BR_TRAIN_CST_E. */
+ uint32_t reserved_6_14 : 9;
+ uint32_t rx_ready : 1; /**< [ 15: 15] Receiver ready. Set to indicate that the local receiver has determined that training is
+ complete and is prepared to receive data. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgx_spu_br_train_rep_s_s cn; */
+};
+
+/**
+ * Structure bgx_spu_sds_cu_s
+ *
+ * INTERNAL: BGX Training Coeffiecient Structure
+ *
+ * This structure is similar to BGX_SPU_BR_TRAIN_CUP_S format, but with reserved fields removed
+ * and [RCVR_READY] field added.
+ */
+union bdk_bgx_spu_sds_cu_s
+{
+ uint32_t u;
+ struct bdk_bgx_spu_sds_cu_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t rcvr_ready : 1; /**< [ 8: 8] See BGX_SPU_BR_TRAIN_REP_S[RX_READY]. */
+ uint32_t preset : 1; /**< [ 7: 7] See BGX_SPU_BR_TRAIN_CUP_S[PRESET]. */
+ uint32_t initialize : 1; /**< [ 6: 6] See BGX_SPU_BR_TRAIN_CUP_S[INIT]. */
+ uint32_t post_cu : 2; /**< [ 5: 4] See BGX_SPU_BR_TRAIN_CUP_S[POST_CUP]. */
+ uint32_t main_cu : 2; /**< [ 3: 2] See BGX_SPU_BR_TRAIN_CUP_S[MAIN_CUP]. */
+ uint32_t pre_cu : 2; /**< [ 1: 0] See BGX_SPU_BR_TRAIN_CUP_S[PRE_CUP]. */
+#else /* Word 0 - Little Endian */
+ uint32_t pre_cu : 2; /**< [ 1: 0] See BGX_SPU_BR_TRAIN_CUP_S[PRE_CUP]. */
+ uint32_t main_cu : 2; /**< [ 3: 2] See BGX_SPU_BR_TRAIN_CUP_S[MAIN_CUP]. */
+ uint32_t post_cu : 2; /**< [ 5: 4] See BGX_SPU_BR_TRAIN_CUP_S[POST_CUP]. */
+ uint32_t initialize : 1; /**< [ 6: 6] See BGX_SPU_BR_TRAIN_CUP_S[INIT]. */
+ uint32_t preset : 1; /**< [ 7: 7] See BGX_SPU_BR_TRAIN_CUP_S[PRESET]. */
+ uint32_t rcvr_ready : 1; /**< [ 8: 8] See BGX_SPU_BR_TRAIN_REP_S[RX_READY]. */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgx_spu_sds_cu_s_s cn; */
+};
+
+/**
+ * Structure bgx_spu_sds_skew_status_s
+ *
+ * BGX Skew Status Structure
+ * Provides receive skew information detected for a physical SerDes lane when it is assigned to a
+ * multilane LMAC/LPCS. Contents are valid when RX deskew is done for the associated LMAC/LPCS.
+ */
+union bdk_bgx_spu_sds_skew_status_s
+{
+ uint32_t u;
+ struct bdk_bgx_spu_sds_skew_status_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_25_31 : 7;
+ uint32_t lane_skew : 5; /**< [ 24: 20] Lane skew. The SerDes lane's receive skew/delay in number of code-groups (BASE-X) or
+ blocks (40GBASE-R) relative to the earliest (least delayed) lane of the LMAC/LPCS. */
+ uint32_t reserved_18_19 : 2;
+ uint32_t am_lane_id : 2; /**< [ 17: 16] Alignment Marker ID. Valid for 40GBASE-R only. This is the PCS lane number of the
+ alignment marker received on the SerDes lane. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t am_timestamp : 12; /**< [ 11: 0] Alignment marker PTP timestamp. Valid for 40GBASE-R only. Contains the lower 12 bits of
+ the PTP timestamp of the alignment marker received on the SerDes lane during align/skew
+ detection. */
+#else /* Word 0 - Little Endian */
+ uint32_t am_timestamp : 12; /**< [ 11: 0] Alignment marker PTP timestamp. Valid for 40GBASE-R only. Contains the lower 12 bits of
+ the PTP timestamp of the alignment marker received on the SerDes lane during align/skew
+ detection. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t am_lane_id : 2; /**< [ 17: 16] Alignment Marker ID. Valid for 40GBASE-R only. This is the PCS lane number of the
+ alignment marker received on the SerDes lane. */
+ uint32_t reserved_18_19 : 2;
+ uint32_t lane_skew : 5; /**< [ 24: 20] Lane skew. The SerDes lane's receive skew/delay in number of code-groups (BASE-X) or
+ blocks (40GBASE-R) relative to the earliest (least delayed) lane of the LMAC/LPCS. */
+ uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgx_spu_sds_skew_status_s_s cn; */
+};
+
+/**
+ * Structure bgx_spu_sds_sr_s
+ *
+ * INTERNAL: BGX Lane Training Coefficient Structure
+ *
+ * Similar to BGX_SPU_BR_TRAIN_REP_S format, but with reserved and RX_READY fields removed.
+ */
+union bdk_bgx_spu_sds_sr_s
+{
+ uint32_t u;
+ struct bdk_bgx_spu_sds_sr_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_6_31 : 26;
+ uint32_t post_status : 2; /**< [ 5: 4] See BGX_SPU_BR_TRAIN_REP_S[POST_CST]. */
+ uint32_t main_status : 2; /**< [ 3: 2] See BGX_SPU_BR_TRAIN_REP_S[MAIN_CST]. */
+ uint32_t pre_status : 2; /**< [ 1: 0] See BGX_SPU_BR_TRAIN_REP_S[PRE_CST]. */
+#else /* Word 0 - Little Endian */
+ uint32_t pre_status : 2; /**< [ 1: 0] See BGX_SPU_BR_TRAIN_REP_S[PRE_CST]. */
+ uint32_t main_status : 2; /**< [ 3: 2] See BGX_SPU_BR_TRAIN_REP_S[MAIN_CST]. */
+ uint32_t post_status : 2; /**< [ 5: 4] See BGX_SPU_BR_TRAIN_REP_S[POST_CST]. */
+ uint32_t reserved_6_31 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgx_spu_sds_sr_s_s cn; */
+};
+
+/**
+ * Register (RSL) bgx#_cmr#_config
+ *
+ * BGX CMR Configuration Registers
+ * Logical MAC/PCS configuration registers; one per LMAC. The maximum number of LMACs (and
+ * maximum LMAC ID) that can be enabled by these registers is limited by
+ * BGX()_CMR_RX_LMACS[LMACS] and BGX()_CMR_TX_LMACS[LMACS]. When multiple LMACs are
+ * enabled, they must be configured with the same [LMAC_TYPE] value.
+ *
+ * Internal:
+ * \<pre\>
+ * Typical configurations:
+ * ---------------------------------------------------------------------------
+ * Configuration LMACS Register [ENABLE] [LMAC_TYPE]
+ * ---------------------------------------------------------------------------
+ * 1x40GBASE-R4 1 BGXn_CMR0_CONFIG 1 4
+ * BGXn_CMR1_CONFIG 0 --
+ * BGXn_CMR2_CONFIG 0 --
+ * BGXn_CMR3_CONFIG 0 --
+ * ---------------------------------------------------------------------------
+ * 4x10GBASE-R 4 BGXn_CMR0_CONFIG 1 3
+ * BGXn_CMR1_CONFIG 1 3
+ * BGXn_CMR2_CONFIG 1 3
+ * BGXn_CMR3_CONFIG 1 3
+ * ---------------------------------------------------------------------------
+ * 2xRXAUI 2 BGXn_CMR0_CONFIG 1 2
+ * BGXn_CMR1_CONFIG 1 2
+ * BGXn_CMR2_CONFIG 0 --
+ * BGXn_CMR3_CONFIG 0 --
+ * ---------------------------------------------------------------------------
+ * 1x10GBASE-X/XAUI/DXAUI 1 BGXn_CMR0_CONFIG 1 1
+ * BGXn_CMR1_CONFIG 0 --
+ * BGXn_CMR2_CONFIG 0 --
+ * BGXn_CMR3_CONFIG 0 --
+ * ---------------------------------------------------------------------------
+ * 4xSGMII/1000BASE-X 4 BGXn_CMR0_CONFIG 1 0
+ * BGXn_CMR1_CONFIG 1 0
+ * BGXn_CMR2_CONFIG 1 0
+ * BGXn_CMR3_CONFIG 1 0
+ * ---------------------------------------------------------------------------
+ * \</pre\>
+ */
+union bdk_bgxx_cmrx_config
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_config_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t p2x_select : 1; /**< [ 17: 17](R/W) Selects interior side P2X interface over which the LMAC will communicate:
+ \<pre\>
+ [P2X_SELECT] Name Connected block
+ -------------------------------------------
+ 0 P2X0 NIC
+ 1 P2X1 PKO
+ \</pre\> */
+ uint64_t x2p_select : 1; /**< [ 16: 16](R/W) Selects interior side X2P interface over which the LMAC will communicate:
+ \<pre\>
+ [X2P_SELECT] Name Connected block
+ -------------------------------------------
+ 0 X2P0 NIC
+ 1 X2P1 PKI
+ \</pre\> */
+ uint64_t enable : 1; /**< [ 15: 15](R/W) Logical MAC/PCS enable. This is the master enable for the LMAC. When clear, all the
+ dedicated BGX context state for the LMAC (state machines, FIFOs, counters, etc.) is reset,
+ and LMAC access to shared BGX resources (data path, SerDes lanes) is disabled.
+
+ When set, LMAC operation is enabled, including link bring-up, synchronization, and
+ transmit/receive of idles and fault sequences. Note that configuration registers for an
+ LMAC are not reset when this bit is clear, allowing software to program them before
+ setting this bit to enable the LMAC. This bit together with [LMAC_TYPE] is also used to
+ enable the clocking to the GMP and/or blocks of the Super path (SMU and SPU). CMR clocking
+ is enabled when any of the paths are enabled. */
+ uint64_t data_pkt_rx_en : 1; /**< [ 14: 14](R/W) Data packet receive enable. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 1, the reception of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 0, the MAC
+ layer
+ drops received data and flow-control packets. */
+ uint64_t data_pkt_tx_en : 1; /**< [ 13: 13](R/W) Data packet transmit enable. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 1, the transmission
+ of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 0, the MAC
+ layer
+ suppresses the transmission of new data and packets for the LMAC. */
+ uint64_t int_beat_gen : 1; /**< [ 12: 12](R/W) Internal beat generation. This bit is used for debug/test purposes and should be clear
+ during normal operation. When set, the LMAC's PCS layer ignores RXVALID and
+ TXREADY/TXCREDIT from the associated SerDes lanes, internally generates fake (idle)
+ RXVALID and TXCREDIT pulses, and suppresses transmission to the SerDes. */
+ uint64_t mix_en : 1; /**< [ 11: 11](R/W) Must be 0. */
+ uint64_t lmac_type : 3; /**< [ 10: 8](R/W) Logical MAC/PCS/prt type:
+
+ \<pre\>
+ LMAC_TYPE Name Description NUM_PCS_LANES
+ ----------------------------------------------------------
+ 0x0 SGMII SGMII/1000BASE-X 1
+ 0x1 XAUI 10GBASE-X/XAUI or DXAUI 4
+ 0x2 RXAUI Reduced XAUI 2
+ 0x3 10G_R 10GBASE-R 1
+ 0x4 40G_R 40GBASE-R 4
+ 0x5 -- Reserved -
+ 0x6 QSGMII QSGMII 1
+ Other -- Reserved -
+ \</pre\>
+
+ NUM_PCS_LANES specifies the number of PCS lanes that are valid for
+ each type. Each valid PCS lane is mapped to a physical SerDes lane
+ based on the programming of [LANE_TO_SDS].
+
+ This field must be programmed to its final value before [ENABLE] is set, and must not
+ be changed when [ENABLE] = 1. */
+ uint64_t lane_to_sds : 8; /**< [ 7: 0](R/W) PCS lane-to-SerDes mapping.
+ This is an array of 2-bit values that map each logical PCS lane to a
+ physical SerDes lane, as follows:
+
+ \<pre\>
+ Bits Description Reset value
+ ------------------------------------------
+ \<7:6\> PCS Lane 3 SerDes ID 0x3
+ \<5:4\> PCS Lane 2 SerDes ID 0x2
+ \<3:2\> PCS Lane 1 SerDes ID 0x1
+ \<1:0\> PCS Lane 0 SerDes ID 0x0
+ \</pre\>
+
+ PCS lanes 0 through NUM_PCS_LANES-1 are valid, where NUM_PCS_LANES is a function of the
+ logical MAC/PCS type (see [LMAC_TYPE]). For example, when [LMAC_TYPE] = SGMII,
+ then NUM_PCS_LANES = 1, PCS lane 0 is valid and the associated physical SerDes lanes
+ are selected by bits \<1:0\>.
+
+ For 40GBASE-R ([LMAC_TYPE] = 40G_R), all four PCS lanes are valid, and the PCS lane IDs
+ determine the block distribution order and associated alignment markers on the transmit
+ side. This is not necessarily the order in which PCS lanes receive data because 802.3
+ allows multilane BASE-R receive lanes to be reordered. When a lane (called service
+ interface in 802.3ba-2010) has achieved alignment marker lock on the receive side (i.e.
+ the associated BGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), then the actual
+ detected RX PCS lane number is recorded in the corresponding
+ BGX()_SPU()_BR_LANE_MAP[LNx_MAPPING].
+
+ For QSGMII, [LANE_TO_SDS]\<1:0\> for LMAC 0 selects the physical SerDes lane shared by four
+ LMACs, and [LANE_TO_SDS]\<1:0\> must be unique for each of the four LMACs.
+
+ This field must be programmed to its final value before [ENABLE] is set, and
+ must not be changed when [ENABLE] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_to_sds : 8; /**< [ 7: 0](R/W) PCS lane-to-SerDes mapping.
+ This is an array of 2-bit values that map each logical PCS lane to a
+ physical SerDes lane, as follows:
+
+ \<pre\>
+ Bits Description Reset value
+ ------------------------------------------
+ \<7:6\> PCS Lane 3 SerDes ID 0x3
+ \<5:4\> PCS Lane 2 SerDes ID 0x2
+ \<3:2\> PCS Lane 1 SerDes ID 0x1
+ \<1:0\> PCS Lane 0 SerDes ID 0x0
+ \</pre\>
+
+ PCS lanes 0 through NUM_PCS_LANES-1 are valid, where NUM_PCS_LANES is a function of the
+ logical MAC/PCS type (see [LMAC_TYPE]). For example, when [LMAC_TYPE] = SGMII,
+ then NUM_PCS_LANES = 1, PCS lane 0 is valid and the associated physical SerDes lanes
+ are selected by bits \<1:0\>.
+
+ For 40GBASE-R ([LMAC_TYPE] = 40G_R), all four PCS lanes are valid, and the PCS lane IDs
+ determine the block distribution order and associated alignment markers on the transmit
+ side. This is not necessarily the order in which PCS lanes receive data because 802.3
+ allows multilane BASE-R receive lanes to be reordered. When a lane (called service
+ interface in 802.3ba-2010) has achieved alignment marker lock on the receive side (i.e.
+ the associated BGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), then the actual
+ detected RX PCS lane number is recorded in the corresponding
+ BGX()_SPU()_BR_LANE_MAP[LNx_MAPPING].
+
+ For QSGMII, [LANE_TO_SDS]\<1:0\> for LMAC 0 selects the physical SerDes lane shared by four
+ LMACs, and [LANE_TO_SDS]\<1:0\> must be unique for each of the four LMACs.
+
+ This field must be programmed to its final value before [ENABLE] is set, and
+ must not be changed when [ENABLE] = 1. */
+ uint64_t lmac_type : 3; /**< [ 10: 8](R/W) Logical MAC/PCS/prt type:
+
+ \<pre\>
+ LMAC_TYPE Name Description NUM_PCS_LANES
+ ----------------------------------------------------------
+ 0x0 SGMII SGMII/1000BASE-X 1
+ 0x1 XAUI 10GBASE-X/XAUI or DXAUI 4
+ 0x2 RXAUI Reduced XAUI 2
+ 0x3 10G_R 10GBASE-R 1
+ 0x4 40G_R 40GBASE-R 4
+ 0x5 -- Reserved -
+ 0x6 QSGMII QSGMII 1
+ Other -- Reserved -
+ \</pre\>
+
+ NUM_PCS_LANES specifies the number of PCS lanes that are valid for
+ each type. Each valid PCS lane is mapped to a physical SerDes lane
+ based on the programming of [LANE_TO_SDS].
+
+ This field must be programmed to its final value before [ENABLE] is set, and must not
+ be changed when [ENABLE] = 1. */
+ uint64_t mix_en : 1; /**< [ 11: 11](R/W) Must be 0. */
+ uint64_t int_beat_gen : 1; /**< [ 12: 12](R/W) Internal beat generation. This bit is used for debug/test purposes and should be clear
+ during normal operation. When set, the LMAC's PCS layer ignores RXVALID and
+ TXREADY/TXCREDIT from the associated SerDes lanes, internally generates fake (idle)
+ RXVALID and TXCREDIT pulses, and suppresses transmission to the SerDes. */
+ uint64_t data_pkt_tx_en : 1; /**< [ 13: 13](R/W) Data packet transmit enable. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 1, the transmission
+ of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 0, the MAC
+ layer
+ suppresses the transmission of new data and packets for the LMAC. */
+ uint64_t data_pkt_rx_en : 1; /**< [ 14: 14](R/W) Data packet receive enable. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 1, the reception of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 0, the MAC
+ layer
+ drops received data and flow-control packets. */
+ uint64_t enable : 1; /**< [ 15: 15](R/W) Logical MAC/PCS enable. This is the master enable for the LMAC. When clear, all the
+ dedicated BGX context state for the LMAC (state machines, FIFOs, counters, etc.) is reset,
+ and LMAC access to shared BGX resources (data path, SerDes lanes) is disabled.
+
+ When set, LMAC operation is enabled, including link bring-up, synchronization, and
+ transmit/receive of idles and fault sequences. Note that configuration registers for an
+ LMAC are not reset when this bit is clear, allowing software to program them before
+ setting this bit to enable the LMAC. This bit together with [LMAC_TYPE] is also used to
+ enable the clocking to the GMP and/or blocks of the Super path (SMU and SPU). CMR clocking
+ is enabled when any of the paths are enabled. */
+ uint64_t x2p_select : 1; /**< [ 16: 16](R/W) Selects interior side X2P interface over which the LMAC will communicate:
+ \<pre\>
+ [X2P_SELECT] Name Connected block
+ -------------------------------------------
+ 0 X2P0 NIC
+ 1 X2P1 PKI
+ \</pre\> */
+ uint64_t p2x_select : 1; /**< [ 17: 17](R/W) Selects interior side P2X interface over which the LMAC will communicate:
+ \<pre\>
+ [P2X_SELECT] Name Connected block
+ -------------------------------------------
+ 0 P2X0 NIC
+ 1 P2X1 PKO
+ \</pre\> */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmrx_config_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t p2x_select : 1; /**< [ 17: 17](R/W) Selects interior side P2X interface over which the LMAC will communicate:
+ \<pre\>
+ [P2X_SELECT] Name Connected block
+ -------------------------------------------
+ 0 P2X0 NIC
+ 1 P2X1 Reserved
+ \</pre\> */
+ uint64_t x2p_select : 1; /**< [ 16: 16](R/W) Selects interior side X2P interface over which the LMAC will communicate:
+ \<pre\>
+ [X2P_SELECT] Name Connected block
+ -------------------------------------------
+ 0 X2P0 NIC
+ 1 X2P1 Reserved
+ \</pre\> */
+ uint64_t enable : 1; /**< [ 15: 15](R/W) Logical MAC/PCS enable. This is the master enable for the LMAC. When clear, all the
+ dedicated BGX context state for the LMAC (state machines, FIFOs, counters, etc.) is reset,
+ and LMAC access to shared BGX resources (data path, SerDes lanes) is disabled.
+
+ When set, LMAC operation is enabled, including link bring-up, synchronization, and
+ transmit/receive of idles and fault sequences. Note that configuration registers for an
+ LMAC are not reset when this bit is clear, allowing software to program them before
+ setting this bit to enable the LMAC. This bit together with [LMAC_TYPE] is also used to
+ enable the clocking to the GMP and/or blocks of the Super path (SMU and SPU). CMR clocking
+ is enabled when any of the paths are enabled. */
+ uint64_t data_pkt_rx_en : 1; /**< [ 14: 14](R/W) Data packet receive enable. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 1, the reception of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 0, the MAC
+ layer
+ drops received data and flow-control packets. */
+ uint64_t data_pkt_tx_en : 1; /**< [ 13: 13](R/W) Data packet transmit enable. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 1, the transmission
+ of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 0, the MAC
+ layer
+ suppresses the transmission of new data and packets for the LMAC. */
+ uint64_t int_beat_gen : 1; /**< [ 12: 12](R/W) Internal beat generation. This bit is used for debug/test purposes and should be clear
+ during normal operation. When set, the LMAC's PCS layer ignores RXVALID and
+ TXREADY/TXCREDIT from the associated SerDes lanes, internally generates fake (idle)
+ RXVALID and TXCREDIT pulses, and suppresses transmission to the SerDes. */
+ uint64_t mix_en : 1; /**< [ 11: 11](R/W) Must be 0. */
+ uint64_t lmac_type : 3; /**< [ 10: 8](R/W) Logical MAC/PCS/prt type:
+
+ \<pre\>
+ LMAC_TYPE Name Description NUM_PCS_LANES
+ ----------------------------------------------------------
+ 0x0 SGMII SGMII/1000BASE-X 1
+ 0x1 XAUI 10GBASE-X/XAUI or DXAUI 4
+ 0x2 RXAUI Reduced XAUI 2
+ 0x3 10G_R 10GBASE-R 1
+ 0x4 40G_R 40GBASE-R 4
+ 0x5 -- Reserved -
+ 0x6 QSGMII QSGMII 1
+ Other -- Reserved -
+ \</pre\>
+
+ NUM_PCS_LANES specifies the number of PCS lanes that are valid for
+ each type. Each valid PCS lane is mapped to a physical SerDes lane
+ based on the programming of [LANE_TO_SDS].
+
+ This field must be programmed to its final value before [ENABLE] is set, and must not
+ be changed when [ENABLE] = 1. */
+ uint64_t lane_to_sds : 8; /**< [ 7: 0](R/W) PCS lane-to-SerDes mapping.
+ This is an array of 2-bit values that map each logical PCS lane to a
+ physical SerDes lane, as follows:
+
+ \<pre\>
+ Bits Description Reset value
+ ------------------------------------------
+ \<7:6\> PCS Lane 3 SerDes ID 0x3
+ \<5:4\> PCS Lane 2 SerDes ID 0x2
+ \<3:2\> PCS Lane 1 SerDes ID 0x1
+ \<1:0\> PCS Lane 0 SerDes ID 0x0
+ \</pre\>
+
+ PCS lanes 0 through NUM_PCS_LANES-1 are valid, where NUM_PCS_LANES is a function of the
+ logical MAC/PCS type (see [LMAC_TYPE]). For example, when [LMAC_TYPE] = SGMII,
+ then NUM_PCS_LANES = 1, PCS lane 0 is valid and the associated physical SerDes lanes
+ are selected by bits \<1:0\>.
+
+ For 40GBASE-R ([LMAC_TYPE] = 40G_R), all four PCS lanes are valid, and the PCS lane IDs
+ determine the block distribution order and associated alignment markers on the transmit
+ side. This is not necessarily the order in which PCS lanes receive data because 802.3
+ allows multilane BASE-R receive lanes to be reordered. When a lane (called service
+ interface in 802.3ba-2010) has achieved alignment marker lock on the receive side (i.e.
+ the associated BGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), then the actual
+ detected RX PCS lane number is recorded in the corresponding
+ BGX()_SPU()_BR_LANE_MAP[LNx_MAPPING].
+
+ For QSGMII, [LANE_TO_SDS]\<1:0\> for LMAC 0 selects the physical SerDes lane shared by four
+ LMACs, and [LANE_TO_SDS]\<1:0\> must be unique for each of the four LMACs.
+
+ This field must be programmed to its final value before [ENABLE] is set, and
+ must not be changed when [ENABLE] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_to_sds : 8; /**< [ 7: 0](R/W) PCS lane-to-SerDes mapping.
+ This is an array of 2-bit values that map each logical PCS lane to a
+ physical SerDes lane, as follows:
+
+ \<pre\>
+ Bits Description Reset value
+ ------------------------------------------
+ \<7:6\> PCS Lane 3 SerDes ID 0x3
+ \<5:4\> PCS Lane 2 SerDes ID 0x2
+ \<3:2\> PCS Lane 1 SerDes ID 0x1
+ \<1:0\> PCS Lane 0 SerDes ID 0x0
+ \</pre\>
+
+ PCS lanes 0 through NUM_PCS_LANES-1 are valid, where NUM_PCS_LANES is a function of the
+ logical MAC/PCS type (see [LMAC_TYPE]). For example, when [LMAC_TYPE] = SGMII,
+ then NUM_PCS_LANES = 1, PCS lane 0 is valid and the associated physical SerDes lanes
+ are selected by bits \<1:0\>.
+
+ For 40GBASE-R ([LMAC_TYPE] = 40G_R), all four PCS lanes are valid, and the PCS lane IDs
+ determine the block distribution order and associated alignment markers on the transmit
+ side. This is not necessarily the order in which PCS lanes receive data because 802.3
+ allows multilane BASE-R receive lanes to be reordered. When a lane (called service
+ interface in 802.3ba-2010) has achieved alignment marker lock on the receive side (i.e.
+ the associated BGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), then the actual
+ detected RX PCS lane number is recorded in the corresponding
+ BGX()_SPU()_BR_LANE_MAP[LNx_MAPPING].
+
+ For QSGMII, [LANE_TO_SDS]\<1:0\> for LMAC 0 selects the physical SerDes lane shared by four
+ LMACs, and [LANE_TO_SDS]\<1:0\> must be unique for each of the four LMACs.
+
+ This field must be programmed to its final value before [ENABLE] is set, and
+ must not be changed when [ENABLE] = 1. */
+ uint64_t lmac_type : 3; /**< [ 10: 8](R/W) Logical MAC/PCS/prt type:
+
+ \<pre\>
+ LMAC_TYPE Name Description NUM_PCS_LANES
+ ----------------------------------------------------------
+ 0x0 SGMII SGMII/1000BASE-X 1
+ 0x1 XAUI 10GBASE-X/XAUI or DXAUI 4
+ 0x2 RXAUI Reduced XAUI 2
+ 0x3 10G_R 10GBASE-R 1
+ 0x4 40G_R 40GBASE-R 4
+ 0x5 -- Reserved -
+ 0x6 QSGMII QSGMII 1
+ Other -- Reserved -
+ \</pre\>
+
+ NUM_PCS_LANES specifies the number of PCS lanes that are valid for
+ each type. Each valid PCS lane is mapped to a physical SerDes lane
+ based on the programming of [LANE_TO_SDS].
+
+ This field must be programmed to its final value before [ENABLE] is set, and must not
+ be changed when [ENABLE] = 1. */
+ uint64_t mix_en : 1; /**< [ 11: 11](R/W) Must be 0. */
+ uint64_t int_beat_gen : 1; /**< [ 12: 12](R/W) Internal beat generation. This bit is used for debug/test purposes and should be clear
+ during normal operation. When set, the LMAC's PCS layer ignores RXVALID and
+ TXREADY/TXCREDIT from the associated SerDes lanes, internally generates fake (idle)
+ RXVALID and TXCREDIT pulses, and suppresses transmission to the SerDes. */
+ uint64_t data_pkt_tx_en : 1; /**< [ 13: 13](R/W) Data packet transmit enable. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 1, the transmission
+ of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 0, the MAC
+ layer
+ suppresses the transmission of new data and packets for the LMAC. */
+ uint64_t data_pkt_rx_en : 1; /**< [ 14: 14](R/W) Data packet receive enable. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 1, the reception of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 0, the MAC
+ layer
+ drops received data and flow-control packets. */
+ uint64_t enable : 1; /**< [ 15: 15](R/W) Logical MAC/PCS enable. This is the master enable for the LMAC. When clear, all the
+ dedicated BGX context state for the LMAC (state machines, FIFOs, counters, etc.) is reset,
+ and LMAC access to shared BGX resources (data path, SerDes lanes) is disabled.
+
+ When set, LMAC operation is enabled, including link bring-up, synchronization, and
+ transmit/receive of idles and fault sequences. Note that configuration registers for an
+ LMAC are not reset when this bit is clear, allowing software to program them before
+ setting this bit to enable the LMAC. This bit together with [LMAC_TYPE] is also used to
+ enable the clocking to the GMP and/or blocks of the Super path (SMU and SPU). CMR clocking
+ is enabled when any of the paths are enabled. */
+ uint64_t x2p_select : 1; /**< [ 16: 16](R/W) Selects interior side X2P interface over which the LMAC will communicate:
+ \<pre\>
+ [X2P_SELECT] Name Connected block
+ -------------------------------------------
+ 0 X2P0 NIC
+ 1 X2P1 Reserved
+ \</pre\> */
+ uint64_t p2x_select : 1; /**< [ 17: 17](R/W) Selects interior side P2X interface over which the LMAC will communicate:
+ \<pre\>
+ [P2X_SELECT] Name Connected block
+ -------------------------------------------
+ 0 P2X0 NIC
+ 1 P2X1 Reserved
+ \</pre\> */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_cmrx_config_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t enable : 1; /**< [ 15: 15](R/W) Logical MAC/PCS enable. This is the master enable for the LMAC. When clear, all the
+ dedicated BGX context state for the LMAC (state machines, FIFOs, counters, etc.) is reset,
+ and LMAC access to shared BGX resources (SMU/SPU data path, SerDes lanes) is disabled.
+
+ When set, LMAC operation is enabled, including link bring-up, synchronization, and
+ transmit/receive of idles and fault sequences. Note that configuration registers for an
+ LMAC are not reset when this bit is clear, allowing software to program them before
+ setting this bit to enable the LMAC. This bit together with [LMAC_TYPE] is also used to
+ enable the clocking to the GMP and/or blocks of the Super path (SMU and SPU). CMR clocking
+ is enabled when any of the paths are enabled. */
+ uint64_t data_pkt_rx_en : 1; /**< [ 14: 14](R/W) Data packet receive enable. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 1, the reception of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 0, the MAC
+ layer
+ drops received data and flow-control packets. */
+ uint64_t data_pkt_tx_en : 1; /**< [ 13: 13](R/W) Data packet transmit enable. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 1, the transmission
+ of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 0, the MAC
+ layer
+ suppresses the transmission of new data and packets for the LMAC. */
+ uint64_t int_beat_gen : 1; /**< [ 12: 12](R/W) Internal beat generation. This bit is used for debug/test purposes and should be clear
+ during normal operation. When set, the LMAC's PCS layer ignores RXVALID and
+ TXREADY/TXCREDIT from the associated SerDes lanes, internally generates fake (idle)
+ RXVALID and TXCREDIT pulses, and suppresses transmission to the SerDes. */
+ uint64_t mix_en : 1; /**< [ 11: 11](R/W) Must be 0. */
+ uint64_t lmac_type : 3; /**< [ 10: 8](R/W) Logical MAC/PCS/prt type:
+
+ \<pre\>
+ LMAC_TYPE Name Description NUM_PCS_LANES
+ ----------------------------------------------------------
+ 0x0 SGMII SGMII/1000BASE-X 1
+ 0x1 XAUI 10GBASE-X/XAUI or DXAUI 4
+ 0x2 RXAUI Reduced XAUI 2
+ 0x3 10G_R 10GBASE-R 1
+ 0x4 40G_R 40GBASE-R 4
+ Other -- Reserved -
+ \</pre\>
+
+ NUM_PCS_LANES specifies the number of PCS lanes that are valid for
+ each type. Each valid PCS lane is mapped to a physical SerDes lane
+ based on the programming of [LANE_TO_SDS].
+
+ This field must be programmed to its final value before [ENABLE] is set, and must not
+ be changed when [ENABLE] = 1. */
+ uint64_t lane_to_sds : 8; /**< [ 7: 0](R/W) PCS lane-to-SerDes mapping.
+ This is an array of 2-bit values that map each logical PCS lane to a
+ physical SerDes lane, as follows:
+
+ \<pre\>
+ Bits Description Reset value
+ ------------------------------------------
+ \<7:6\> PCS Lane 3 SerDes ID 0x3
+ \<5:4\> PCS Lane 2 SerDes ID 0x2
+ \<3:2\> PCS Lane 1 SerDes ID 0x1
+ \<1:0\> PCS Lane 0 SerDes ID 0x0
+ \</pre\>
+
+ PCS lanes 0 through NUM_PCS_LANES-1 are valid, where NUM_PCS_LANES is a function of the
+ logical MAC/PCS type (see [LMAC_TYPE]). For example, when [LMAC_TYPE] = SGMII,
+ then NUM_PCS_LANES = 1, PCS lane 0 is valid and the associated physical SerDes lanes
+ are selected by bits \<1:0\>.
+
+ For 40GBASE-R ([LMAC_TYPE] = 40G_R), all four PCS lanes are valid, and the PCS lane IDs
+ determine the block distribution order and associated alignment markers on the transmit
+ side. This is not necessarily the order in which PCS lanes receive data because 802.3
+ allows multilane BASE-R receive lanes to be reordered. When a lane (called service
+ interface in 802.3ba-2010) has achieved alignment marker lock on the receive side (i.e.
+ the associated BGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), then the actual
+ detected RX PCS lane number is recorded in the corresponding
+ BGX()_SPU()_BR_LANE_MAP[LNx_MAPPING].
+
+ This field must be programmed to its final value before [ENABLE] is set, and must not
+ be changed when [ENABLE] = 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_to_sds : 8; /**< [ 7: 0](R/W) PCS lane-to-SerDes mapping.
+ This is an array of 2-bit values that map each logical PCS lane to a
+ physical SerDes lane, as follows:
+
+ \<pre\>
+ Bits Description Reset value
+ ------------------------------------------
+ \<7:6\> PCS Lane 3 SerDes ID 0x3
+ \<5:4\> PCS Lane 2 SerDes ID 0x2
+ \<3:2\> PCS Lane 1 SerDes ID 0x1
+ \<1:0\> PCS Lane 0 SerDes ID 0x0
+ \</pre\>
+
+ PCS lanes 0 through NUM_PCS_LANES-1 are valid, where NUM_PCS_LANES is a function of the
+ logical MAC/PCS type (see [LMAC_TYPE]). For example, when [LMAC_TYPE] = SGMII,
+ then NUM_PCS_LANES = 1, PCS lane 0 is valid and the associated physical SerDes lanes
+ are selected by bits \<1:0\>.
+
+ For 40GBASE-R ([LMAC_TYPE] = 40G_R), all four PCS lanes are valid, and the PCS lane IDs
+ determine the block distribution order and associated alignment markers on the transmit
+ side. This is not necessarily the order in which PCS lanes receive data because 802.3
+ allows multilane BASE-R receive lanes to be reordered. When a lane (called service
+ interface in 802.3ba-2010) has achieved alignment marker lock on the receive side (i.e.
+ the associated BGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), then the actual
+ detected RX PCS lane number is recorded in the corresponding
+ BGX()_SPU()_BR_LANE_MAP[LNx_MAPPING].
+
+ This field must be programmed to its final value before [ENABLE] is set, and must not
+ be changed when [ENABLE] = 1. */
+ uint64_t lmac_type : 3; /**< [ 10: 8](R/W) Logical MAC/PCS/prt type:
+
+ \<pre\>
+ LMAC_TYPE Name Description NUM_PCS_LANES
+ ----------------------------------------------------------
+ 0x0 SGMII SGMII/1000BASE-X 1
+ 0x1 XAUI 10GBASE-X/XAUI or DXAUI 4
+ 0x2 RXAUI Reduced XAUI 2
+ 0x3 10G_R 10GBASE-R 1
+ 0x4 40G_R 40GBASE-R 4
+ Other -- Reserved -
+ \</pre\>
+
+ NUM_PCS_LANES specifies the number of PCS lanes that are valid for
+ each type. Each valid PCS lane is mapped to a physical SerDes lane
+ based on the programming of [LANE_TO_SDS].
+
+ This field must be programmed to its final value before [ENABLE] is set, and must not
+ be changed when [ENABLE] = 1. */
+ uint64_t mix_en : 1; /**< [ 11: 11](R/W) Must be 0. */
+ uint64_t int_beat_gen : 1; /**< [ 12: 12](R/W) Internal beat generation. This bit is used for debug/test purposes and should be clear
+ during normal operation. When set, the LMAC's PCS layer ignores RXVALID and
+ TXREADY/TXCREDIT from the associated SerDes lanes, internally generates fake (idle)
+ RXVALID and TXCREDIT pulses, and suppresses transmission to the SerDes. */
+ uint64_t data_pkt_tx_en : 1; /**< [ 13: 13](R/W) Data packet transmit enable. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 1, the transmission
+ of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_TX_EN] = 0, the MAC
+ layer
+ suppresses the transmission of new data and packets for the LMAC. */
+ uint64_t data_pkt_rx_en : 1; /**< [ 14: 14](R/W) Data packet receive enable. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 1, the reception of
+ data
+ packets is enabled in the MAC layer. When [ENABLE] = 1 and [DATA_PKT_RX_EN] = 0, the MAC
+ layer
+ drops received data and flow-control packets. */
+ uint64_t enable : 1; /**< [ 15: 15](R/W) Logical MAC/PCS enable. This is the master enable for the LMAC. When clear, all the
+ dedicated BGX context state for the LMAC (state machines, FIFOs, counters, etc.) is reset,
+ and LMAC access to shared BGX resources (SMU/SPU data path, SerDes lanes) is disabled.
+
+ When set, LMAC operation is enabled, including link bring-up, synchronization, and
+ transmit/receive of idles and fault sequences. Note that configuration registers for an
+ LMAC are not reset when this bit is clear, allowing software to program them before
+ setting this bit to enable the LMAC. This bit together with [LMAC_TYPE] is also used to
+ enable the clocking to the GMP and/or blocks of the Super path (SMU and SPU). CMR clocking
+ is enabled when any of the paths are enabled. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_cmrx_config_s cn83xx; */
+};
+typedef union bdk_bgxx_cmrx_config bdk_bgxx_cmrx_config_t;
+
+/* Compute the physical CSR address of BGX(a)_CMR(b)_CONFIG for the chip
+   model detected at runtime. Out-of-range (a,b) indices are reported via
+   __bdk_csr_fatal() (assumed not to return -- TODO confirm the noreturn
+   attribute on __bdk_csr_fatal in bdk-csr.h). */
+static inline uint64_t BDK_BGXX_CMRX_CONFIG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_CONFIG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000000ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000000ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000000ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_CONFIG", 2, a, b, 0, 0);
+}
+
+/* Per-register boilerplate consumed by the generic BDK_CSR_* macro layer. */
+#define typedef_BDK_BGXX_CMRX_CONFIG(a,b) bdk_bgxx_cmrx_config_t
+#define bustype_BDK_BGXX_CMRX_CONFIG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_CONFIG(a,b) "BGXX_CMRX_CONFIG"
+#define device_bar_BDK_BGXX_CMRX_CONFIG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_CONFIG(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_CONFIG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_int
+ *
+ * BGX CMR Interrupt Register
+ */
+union bdk_bgxx_cmrx_int
+{
+    /* Access to the entire register as a single 64-bit value. */
+    uint64_t u;
+    /* Superset field layout common to all supported chip models; the
+       big/little-endian arms below are bit-for-bit mirrors of each other. */
+    struct bdk_bgxx_cmrx_int_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) TX channel out-of-range from NIC interface.
+                                                                 Reported on this LMAC for ids in the range of LMAC_ID+4, LMAC_ID+8 and LMAC_ID+12.
+                                                                 Reported regardless of LMAC enable or P2X_SELECT association for this LMAC. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reserved. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) RX overflow. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) RX PAUSE packet was dropped due to full RXB FIFO or during partner reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) RX PAUSE packet was dropped due to full RXB FIFO or during partner reset. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) RX overflow. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reserved. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) TX channel out-of-range from NIC interface.
+                                                                 Reported on this LMAC for ids in the range of LMAC_ID+4, LMAC_ID+8 and LMAC_ID+12.
+                                                                 Reported regardless of LMAC enable or P2X_SELECT association for this LMAC. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* CN81XX-specific layout. */
+    struct bdk_bgxx_cmrx_int_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) TX channel out-of-range from NIC interface. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reserved. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) RX overflow. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) RX PAUSE packet was dropped due to full RXB FIFO or during partner reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) RX PAUSE packet was dropped due to full RXB FIFO or during partner reset. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) RX overflow. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reserved. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) TX channel out-of-range from NIC interface. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* CN88XX-specific layout: no NIC_NXC bit; bit 3 is reserved. */
+    struct bdk_bgxx_cmrx_int_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_3_63        : 61;
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) TX channel out-of-range from TNS/NIC interface. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) RX overflow. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) RX PAUSE packet was dropped due to full RXB FIFO or during partner reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) RX PAUSE packet was dropped due to full RXB FIFO or during partner reset. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) RX overflow. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) TX channel out-of-range from TNS/NIC interface. */
+        uint64_t reserved_3_63        : 61;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* CN83XX-specific layout: PKO_NXC is functional (not reserved). */
+    struct bdk_bgxx_cmrx_int_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) TX channel out-of-range from NIC interface.
+                                                                 Reported on this LMAC for ids in the range of LMAC_ID+4, LMAC_ID+8 and LMAC_ID+12.
+                                                                 Reported regardless of LMAC enable or P2X_SELECT association for this LMAC. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) TX channel out-of-range from PKO interface.
+                                                                 Reported on this LMAC for ids in the range of LMAC_ID+4, LMAC_ID+8 and LMAC_ID+12.
+                                                                 Reported regardless of LMAC enable or P2X_SELECT association for this LMAC. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) RX overflow. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) RX PAUSE packet was dropped due to full RXB FIFO or during partner reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) RX PAUSE packet was dropped due to full RXB FIFO or during partner reset. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) RX overflow. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) TX channel out-of-range from PKO interface.
+                                                                 Reported on this LMAC for ids in the range of LMAC_ID+4, LMAC_ID+8 and LMAC_ID+12.
+                                                                 Reported regardless of LMAC enable or P2X_SELECT association for this LMAC. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) TX channel out-of-range from NIC interface.
+                                                                 Reported on this LMAC for ids in the range of LMAC_ID+4, LMAC_ID+8 and LMAC_ID+12.
+                                                                 Reported regardless of LMAC enable or P2X_SELECT association for this LMAC. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_cmrx_int bdk_bgxx_cmrx_int_t;
+
+/* Compute the physical CSR address of BGX(a)_CMR(b)_INT for the chip model
+   detected at runtime. Out-of-range (a,b) indices are reported via
+   __bdk_csr_fatal() (assumed not to return -- TODO confirm the noreturn
+   attribute on __bdk_csr_fatal in bdk-csr.h). */
+static inline uint64_t BDK_BGXX_CMRX_INT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_INT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000040ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000040ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000040ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_INT", 2, a, b, 0, 0);
+}
+
+/* Per-register boilerplate consumed by the generic BDK_CSR_* macro layer. */
+#define typedef_BDK_BGXX_CMRX_INT(a,b) bdk_bgxx_cmrx_int_t
+#define bustype_BDK_BGXX_CMRX_INT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_INT(a,b) "BGXX_CMRX_INT"
+#define device_bar_BDK_BGXX_CMRX_INT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_INT(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_INT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_int_ena_w1c
+ *
+ * BGX CMR Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_bgxx_cmrx_int_ena_w1c
+{
+    /* Access to the entire register as a single 64-bit value. */
+    uint64_t u;
+    /* Superset field layout common to all supported chip models; the
+       big/little-endian arms below are bit-for-bit mirrors of each other. */
+    struct bdk_bgxx_cmrx_int_ena_w1c_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* CN81XX-specific layout. */
+    struct bdk_bgxx_cmrx_int_ena_w1c_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* CN88XX-specific layout: no NIC_NXC bit; bit 3 is reserved. */
+    struct bdk_bgxx_cmrx_int_ena_w1c_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_3_63        : 61;
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t reserved_3_63        : 61;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* CN83XX-specific layout: four BGX instances (0..3). */
+    struct bdk_bgxx_cmrx_int_ena_w1c_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[PAUSE_DRP]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_cmrx_int_ena_w1c bdk_bgxx_cmrx_int_ena_w1c_t;
+
+/* Compute the physical CSR address of BGX(a)_CMR(b)_INT_ENA_W1C for the chip
+   model detected at runtime. Out-of-range (a,b) indices are reported via
+   __bdk_csr_fatal() (assumed not to return -- TODO confirm the noreturn
+   attribute on __bdk_csr_fatal in bdk-csr.h). */
+static inline uint64_t BDK_BGXX_CMRX_INT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_INT_ENA_W1C(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000050ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000050ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000050ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_INT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+/* Per-register boilerplate consumed by the generic BDK_CSR_* macro layer. */
+#define typedef_BDK_BGXX_CMRX_INT_ENA_W1C(a,b) bdk_bgxx_cmrx_int_ena_w1c_t
+#define bustype_BDK_BGXX_CMRX_INT_ENA_W1C(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_INT_ENA_W1C(a,b) "BGXX_CMRX_INT_ENA_W1C"
+#define device_bar_BDK_BGXX_CMRX_INT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_INT_ENA_W1C(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_INT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_int_ena_w1s
+ *
+ * BGX CMR Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_bgxx_cmrx_int_ena_w1s
+{
+    /* Access to the entire register as a single 64-bit value. */
+    uint64_t u;
+    /* Superset field layout common to all supported chip models; the
+       big/little-endian arms below are bit-for-bit mirrors of each other. */
+    struct bdk_bgxx_cmrx_int_ena_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* CN81XX-specific layout. */
+    struct bdk_bgxx_cmrx_int_ena_w1s_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* CN88XX-specific layout: no NIC_NXC bit; bit 3 is reserved. */
+    struct bdk_bgxx_cmrx_int_ena_w1s_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_3_63        : 61;
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t reserved_3_63        : 61;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* CN83XX-specific layout: four BGX instances (0..3). */
+    struct bdk_bgxx_cmrx_int_ena_w1s_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63        : 60;
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t pause_drp            : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[PAUSE_DRP]. */
+        uint64_t overflw              : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[OVERFLW]. */
+        uint64_t pko_nxc              : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[PKO_NXC]. */
+        uint64_t nic_nxc              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+        uint64_t reserved_4_63        : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_cmrx_int_ena_w1s bdk_bgxx_cmrx_int_ena_w1s_t;
+
+/* Physical address of BGX(a)_CMR(b)_INT_ENA_W1S.
+   BGX blocks are spaced 0x1000000 bytes apart and LMACs 0x100000 bytes
+   apart.  The "& 0x1"/"& 0x3" masks are redundant with the preceding
+   range checks and only bound the arithmetic.  Indexes outside the
+   running model's range fall through to __bdk_csr_fatal(), which is
+   presumed not to return (the function has no trailing return) --
+   confirm against its declaration. */
+static inline uint64_t BDK_BGXX_CMRX_INT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_INT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000058ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_INT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata following the BDK typedef_/bustype_/basename_/
+   device_bar_/busnum_/arguments_ naming convention. */
+#define typedef_BDK_BGXX_CMRX_INT_ENA_W1S(a,b) bdk_bgxx_cmrx_int_ena_w1s_t
+#define bustype_BDK_BGXX_CMRX_INT_ENA_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_INT_ENA_W1S(a,b) "BGXX_CMRX_INT_ENA_W1S"
+#define device_bar_BDK_BGXX_CMRX_INT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_INT_ENA_W1S(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_INT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_int_w1s
+ *
+ * BGX CMR Interrupt Set Register
+ * This register sets interrupt bits.
+ * Write-1-to-set view of BGX()_CMR()_INT (fields are R/W1S/H).
+ * Per-model layouts below; note the CN88XX variant has no NIC_NXC bit.
+ */
+union bdk_bgxx_cmrx_int_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t nic_nxc : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+ uint64_t pko_nxc : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+ uint64_t overflw : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+ uint64_t pause_drp : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pause_drp : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+ uint64_t overflw : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+ uint64_t pko_nxc : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+ uint64_t nic_nxc : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmrx_int_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t nic_nxc : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[NIC_NXC]. */
+ uint64_t pko_nxc : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+ uint64_t overflw : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+ uint64_t pause_drp : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pause_drp : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+ uint64_t overflw : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+ uint64_t pko_nxc : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+ uint64_t nic_nxc : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[NIC_NXC]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_cmrx_int_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t pko_nxc : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+ uint64_t overflw : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+ uint64_t pause_drp : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pause_drp : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PAUSE_DRP]. */
+ uint64_t overflw : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[OVERFLW]. */
+ uint64_t pko_nxc : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR(0..3)_INT[PKO_NXC]. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_bgxx_cmrx_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t nic_nxc : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+ uint64_t pko_nxc : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[PKO_NXC]. */
+ uint64_t overflw : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[OVERFLW]. */
+ uint64_t pause_drp : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[PAUSE_DRP]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pause_drp : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[PAUSE_DRP]. */
+ uint64_t overflw : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[OVERFLW]. */
+ uint64_t pko_nxc : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[PKO_NXC]. */
+ uint64_t nic_nxc : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_CMR(0..3)_INT[NIC_NXC]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmrx_int_w1s bdk_bgxx_cmrx_int_w1s_t;
+
+/* Physical address of BGX(a)_CMR(b)_INT_W1S (0x1000000-byte stride per
+   BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_INT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_INT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000048ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000048ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000048ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_INT_W1S", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_INT_W1S(a,b) bdk_bgxx_cmrx_int_w1s_t
+#define bustype_BDK_BGXX_CMRX_INT_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_INT_W1S(a,b) "BGXX_CMRX_INT_W1S"
+#define device_bar_BDK_BGXX_CMRX_INT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_INT_W1S(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_INT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_prt_cbfc_ctl
+ *
+ * BGX CMR LMAC PFC Control Registers
+ * See XOFF definition listed under BGX()_SMU()_CBFC_CTL.
+ */
+union bdk_bgxx_cmrx_prt_cbfc_ctl
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_prt_cbfc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t phys_bp : 16; /**< [ 31: 16](R/W) When the hardware is backpressuring any LMACs. (from either DFC or PFC packets or
+ BGX()_CMR()_TX_OVR_BP[TX_CHAN_BP]) and all channels indicated by [PHYS_BP] are
+ backpressured,
+ simulate physical backpressure by deferring all packets on the transmitter.
+ If LMAC_TYPE != SGMII/QSGMII, BGX()_SMU()_CBFC_CTL[RX_EN] or
+ BGX()_SMU()_HG2_CONTROL[HG2RX_EN]
+ additionally need to be set. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t phys_bp : 16; /**< [ 31: 16](R/W) When the hardware is backpressuring any LMACs. (from either DFC or PFC packets or
+ BGX()_CMR()_TX_OVR_BP[TX_CHAN_BP]) and all channels indicated by [PHYS_BP] are
+ backpressured,
+ simulate physical backpressure by deferring all packets on the transmitter.
+ If LMAC_TYPE != SGMII/QSGMII, BGX()_SMU()_CBFC_CTL[RX_EN] or
+ BGX()_SMU()_HG2_CONTROL[HG2RX_EN]
+ additionally need to be set. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_prt_cbfc_ctl_s cn81xx; */
+ struct bdk_bgxx_cmrx_prt_cbfc_ctl_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t phys_bp : 16; /**< [ 31: 16](R/W) If LMAC_TYPE != SGMII, and BGX()_SMU()_CBFC_CTL[RX_EN] or
+ BGX()_SMU()_HG2_CONTROL[HG2RX_EN] is set and the hardware is backpressuring any LMACs.
+ (from either PFC packets or BGX()_CMR()_TX_OVR_BP[TX_CHAN_BP]) and all
+ channels indicated by [PHYS_BP] are backpressured, simulate physical backpressure
+ by deferring all packets on the transmitter. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t phys_bp : 16; /**< [ 31: 16](R/W) If LMAC_TYPE != SGMII, and BGX()_SMU()_CBFC_CTL[RX_EN] or
+ BGX()_SMU()_HG2_CONTROL[HG2RX_EN] is set and the hardware is backpressuring any LMACs.
+ (from either PFC packets or BGX()_CMR()_TX_OVR_BP[TX_CHAN_BP]) and all
+ channels indicated by [PHYS_BP] are backpressured, simulate physical backpressure
+ by deferring all packets on the transmitter. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_bgxx_cmrx_prt_cbfc_ctl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t phys_bp : 16; /**< [ 31: 16](R/W) When the hardware is backpressuring any LMACs. (from either DFC or PFC packets or
+ BGX()_CMR()_TX_OVR_BP[TX_CHAN_BP]) and all channels indicated by [PHYS_BP] are
+ backpressured,
+ simulate physical backpressure by deferring all packets on the transmitter.
+ (i.e. signal to the mac an assertion of physical backpressure).
+ If LMAC_TYPE != SGMII/QSGMII, BGX()_SMU()_CBFC_CTL[RX_EN] or
+ BGX()_SMU()_HG2_CONTROL[HG2RX_EN]
+ additionally need to be set. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t phys_bp : 16; /**< [ 31: 16](R/W) When the hardware is backpressuring any LMACs. (from either DFC or PFC packets or
+ BGX()_CMR()_TX_OVR_BP[TX_CHAN_BP]) and all channels indicated by [PHYS_BP] are
+ backpressured,
+ simulate physical backpressure by deferring all packets on the transmitter.
+ (i.e. signal to the mac an assertion of physical backpressure).
+ If LMAC_TYPE != SGMII/QSGMII, BGX()_SMU()_CBFC_CTL[RX_EN] or
+ BGX()_SMU()_HG2_CONTROL[HG2RX_EN]
+ additionally need to be set. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmrx_prt_cbfc_ctl bdk_bgxx_cmrx_prt_cbfc_ctl_t;
+
+/* Physical address of BGX(a)_CMR(b)_PRT_CBFC_CTL (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_PRT_CBFC_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_PRT_CBFC_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000508ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000508ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000508ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_PRT_CBFC_CTL", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_PRT_CBFC_CTL(a,b) bdk_bgxx_cmrx_prt_cbfc_ctl_t
+#define bustype_BDK_BGXX_CMRX_PRT_CBFC_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_PRT_CBFC_CTL(a,b) "BGXX_CMRX_PRT_CBFC_CTL"
+#define device_bar_BDK_BGXX_CMRX_PRT_CBFC_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_PRT_CBFC_CTL(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_PRT_CBFC_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_bp_drop
+ *
+ * BGX Receive Backpressure Drop Register
+ * Layout is identical on all supported models (single "cn" variant).
+ */
+union bdk_bgxx_cmrx_rx_bp_drop
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_bp_drop_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t mark : 7; /**< [ 6: 0](R/W) Number of eight-byte cycles to reserve in the RX FIFO. When the number of free
+ entries in the RX FIFO is less than or equal to [MARK], incoming packet data is
+ dropped. [MARK] additionally indicates the number of entries to reserve in the RX FIFO for
+ closing partially received packets. [MARK] should typically be programmed to its reset
+ value; failure to program correctly can lead to system instability. */
+#else /* Word 0 - Little Endian */
+ uint64_t mark : 7; /**< [ 6: 0](R/W) Number of eight-byte cycles to reserve in the RX FIFO. When the number of free
+ entries in the RX FIFO is less than or equal to [MARK], incoming packet data is
+ dropped. [MARK] additionally indicates the number of entries to reserve in the RX FIFO for
+ closing partially received packets. [MARK] should typically be programmed to its reset
+ value; failure to program correctly can lead to system instability. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_bp_drop_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_bp_drop bdk_bgxx_cmrx_rx_bp_drop_t;
+
+/* Physical address of BGX(a)_CMR(b)_RX_BP_DROP (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_RX_BP_DROP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_BP_DROP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000c8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000c8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000c8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_BP_DROP", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_RX_BP_DROP(a,b) bdk_bgxx_cmrx_rx_bp_drop_t
+#define bustype_BDK_BGXX_CMRX_RX_BP_DROP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_BP_DROP(a,b) "BGXX_CMRX_RX_BP_DROP"
+#define device_bar_BDK_BGXX_CMRX_RX_BP_DROP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_BP_DROP(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_BP_DROP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_bp_off
+ *
+ * BGX Receive Backpressure Off Register
+ * Layout is identical on all supported models (single "cn" variant).
+ */
+union bdk_bgxx_cmrx_rx_bp_off
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_bp_off_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t mark : 7; /**< [ 6: 0](R/W) Low watermark (number of eight-byte cycles to deassert backpressure). Level is also used
+ to exit the overflow dropping state. */
+#else /* Word 0 - Little Endian */
+ uint64_t mark : 7; /**< [ 6: 0](R/W) Low watermark (number of eight-byte cycles to deassert backpressure). Level is also used
+ to exit the overflow dropping state. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_bp_off_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_bp_off bdk_bgxx_cmrx_rx_bp_off_t;
+
+/* Physical address of BGX(a)_CMR(b)_RX_BP_OFF (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_RX_BP_OFF(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_BP_OFF(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000d8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000d8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000d8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_BP_OFF", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_RX_BP_OFF(a,b) bdk_bgxx_cmrx_rx_bp_off_t
+#define bustype_BDK_BGXX_CMRX_RX_BP_OFF(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_BP_OFF(a,b) "BGXX_CMRX_RX_BP_OFF"
+#define device_bar_BDK_BGXX_CMRX_RX_BP_OFF(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_BP_OFF(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_BP_OFF(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_bp_on
+ *
+ * BGX Receive Backpressure On Register
+ * Layout is identical on all supported models (single "cn" variant).
+ */
+union bdk_bgxx_cmrx_rx_bp_on
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_bp_on_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t mark : 12; /**< [ 11: 0](R/W) High watermark. Buffer depth in multiple of 16-bytes, at which BGX will
+ assert backpressure for each individual LMAC. Must satisfy:
+
+ BGX()_CMR()_RX_BP_OFF[MARK] \<= BGX()_CMR()_RX_BP_ON[MARK] \<
+ (FIFO_SIZE - BGX()_CMR()_RX_BP_DROP[MARK]).
+
+ A value of 0x0 immediately asserts backpressure.
+
+ The recommended value is 1/4th the size of the per-LMAC RX FIFO_SIZE as
+ determined by BGX()_CMR_RX_LMACS[LMACS]. For example in SGMII mode with
+ four LMACs of type SGMII, where BGX()_CMR_RX_LMACS[LMACS]=0x4, there is
+ 16 KB of buffering. The recommended 1/4th size of that 16 KB is 4 KB, which
+ in units of 16 bytes gives [MARK] = 0x100 (the reset value). */
+#else /* Word 0 - Little Endian */
+ uint64_t mark : 12; /**< [ 11: 0](R/W) High watermark. Buffer depth in multiple of 16-bytes, at which BGX will
+ assert backpressure for each individual LMAC. Must satisfy:
+
+ BGX()_CMR()_RX_BP_OFF[MARK] \<= BGX()_CMR()_RX_BP_ON[MARK] \<
+ (FIFO_SIZE - BGX()_CMR()_RX_BP_DROP[MARK]).
+
+ A value of 0x0 immediately asserts backpressure.
+
+ The recommended value is 1/4th the size of the per-LMAC RX FIFO_SIZE as
+ determined by BGX()_CMR_RX_LMACS[LMACS]. For example in SGMII mode with
+ four LMACs of type SGMII, where BGX()_CMR_RX_LMACS[LMACS]=0x4, there is
+ 16 KB of buffering. The recommended 1/4th size of that 16 KB is 4 KB, which
+ in units of 16 bytes gives [MARK] = 0x100 (the reset value). */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_bp_on_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_bp_on bdk_bgxx_cmrx_rx_bp_on_t;
+
+/* Physical address of BGX(a)_CMR(b)_RX_BP_ON (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_RX_BP_ON(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_BP_ON(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000d0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000d0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000d0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_BP_ON", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_RX_BP_ON(a,b) bdk_bgxx_cmrx_rx_bp_on_t
+#define bustype_BDK_BGXX_CMRX_RX_BP_ON(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_BP_ON(a,b) "BGXX_CMRX_RX_BP_ON"
+#define device_bar_BDK_BGXX_CMRX_RX_BP_ON(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_BP_ON(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_BP_ON(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_bp_status
+ *
+ * BGX CMR Receive Backpressure Status Registers
+ * Read-only (RO/H); layout is identical on all supported models.
+ */
+union bdk_bgxx_cmrx_rx_bp_status
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_bp_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t bp : 1; /**< [ 0: 0](RO/H) Per-LMAC backpressure status.
+ 0 = LMAC is not backpressured.
+ 1 = LMAC is backpressured. */
+#else /* Word 0 - Little Endian */
+ uint64_t bp : 1; /**< [ 0: 0](RO/H) Per-LMAC backpressure status.
+ 0 = LMAC is not backpressured.
+ 1 = LMAC is backpressured. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_bp_status_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_bp_status bdk_bgxx_cmrx_rx_bp_status_t;
+
+/* Physical address of BGX(a)_CMR(b)_RX_BP_STATUS (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_RX_BP_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_BP_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000f0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000f0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000f0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_BP_STATUS", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_RX_BP_STATUS(a,b) bdk_bgxx_cmrx_rx_bp_status_t
+#define bustype_BDK_BGXX_CMRX_RX_BP_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_BP_STATUS(a,b) "BGXX_CMRX_RX_BP_STATUS"
+#define device_bar_BDK_BGXX_CMRX_RX_BP_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_BP_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_BP_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_dmac_ctl
+ *
+ * BGX CMR Receive DMAC Address-Control Register
+ * Internal:
+ * "* ALGORITHM
+ * Here is some pseudo code that represents the address filter behavior.
+ * dmac_addr_filter(uint8 prt, uint48 dmac) {
+ * for (lmac=0, lmac\<4, lmac++) {
+ * if (is_bcst(dmac)) // broadcast accept
+ * return (BGX()_CMR({lmac})_RX_DMAC_CTL[BCST_ACCEPT] ? ACCEPT : REJECT);
+ * if (is_mcst(dmac) && BGX()_CMR({lmac})_RX_DMAC_CTL[MCST_MODE] == 0) // multicast reject
+ * return REJECT;
+ * if (is_mcst(dmac) && BGX()_CMR({lmac})_RX_DMAC_CTL[MCST_MODE] == 1) // multicast accept
+ * return ACCEPT;
+ * else // DMAC CAM filter
+ * cam_hit = 0;
+ * for (i=0; i\<32; i++) {
+ * cam = BGX()_CMR_RX_DMAC({i})_CAM;
+ * if (cam[EN] && cam[ID] == {lmac} && cam[ADR] == dmac) {
+ * cam_hit = 1;
+ * break;
+ * }
+ * }
+ * if (cam_hit) {
+ * return (BGX()_CMR({lmac})_RX_DMAC_CTL[CAM_ACCEPT] ? ACCEPT : REJECT);
+ * else
+ * return (BGX()_CMR({lmac})_RX_DMAC_CTL[CAM_ACCEPT] ? REJECT : ACCEPT);
+ * }
+ * }"
+ */
+union bdk_bgxx_cmrx_rx_dmac_ctl
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_dmac_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t cam_accept : 1; /**< [ 3: 3](R/W) Allow or deny DMAC address filter.
+ 0 = Reject the packet on DMAC CAM address match.
+ 1 = Accept the packet on DMAC CAM address match. */
+ uint64_t mcst_mode : 2; /**< [ 2: 1](R/W) Multicast mode.
+ 0x0 = Force reject all multicast packets.
+ 0x1 = Force accept all multicast packets.
+ 0x2 = Use the address filter CAM.
+ 0x3 = Reserved. */
+ uint64_t bcst_accept : 1; /**< [ 0: 0](R/W) Allow or deny broadcast packets.
+ 0 = Reject all broadcast packets.
+ 1 = Accept all broadcast packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t bcst_accept : 1; /**< [ 0: 0](R/W) Allow or deny broadcast packets.
+ 0 = Reject all broadcast packets.
+ 1 = Accept all broadcast packets. */
+ uint64_t mcst_mode : 2; /**< [ 2: 1](R/W) Multicast mode.
+ 0x0 = Force reject all multicast packets.
+ 0x1 = Force accept all multicast packets.
+ 0x2 = Use the address filter CAM.
+ 0x3 = Reserved. */
+ uint64_t cam_accept : 1; /**< [ 3: 3](R/W) Allow or deny DMAC address filter.
+ 0 = Reject the packet on DMAC CAM address match.
+ 1 = Accept the packet on DMAC CAM address match. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_dmac_ctl_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_dmac_ctl bdk_bgxx_cmrx_rx_dmac_ctl_t;
+
+/* Physical address of BGX(a)_CMR(b)_RX_DMAC_CTL (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_RX_DMAC_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_DMAC_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000e8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000e8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000e8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_DMAC_CTL", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_RX_DMAC_CTL(a,b) bdk_bgxx_cmrx_rx_dmac_ctl_t
+#define bustype_BDK_BGXX_CMRX_RX_DMAC_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_DMAC_CTL(a,b) "BGXX_CMRX_RX_DMAC_CTL"
+#define device_bar_BDK_BGXX_CMRX_RX_DMAC_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_DMAC_CTL(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_DMAC_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_fifo_len
+ *
+ * BGX CMR Receive FIFO Length Registers
+ * Note: the CN88XX variant lacks the BUSY bit (bit 13) present on
+ * CN81XX/CN83XX.
+ */
+union bdk_bgxx_cmrx_rx_fifo_len
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_fifo_len_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t busy : 1; /**< [ 13: 13](RO/H) Indicates if GMP and CMR are busy processing a packet. Used when bringing an LMAC
+ down since in low bandwidth cases, as the FIFO length may often appear to be 0x0. */
+ uint64_t fifo_len : 13; /**< [ 12: 0](RO/H) Per-LMAC FIFO length. Useful for determining if FIFO is empty when bringing an LMAC down. */
+#else /* Word 0 - Little Endian */
+ uint64_t fifo_len : 13; /**< [ 12: 0](RO/H) Per-LMAC FIFO length. Useful for determining if FIFO is empty when bringing an LMAC down. */
+ uint64_t busy : 1; /**< [ 13: 13](RO/H) Indicates if GMP and CMR are busy processing a packet. Used when bringing an LMAC
+ down since in low bandwidth cases, as the FIFO length may often appear to be 0x0. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_fifo_len_s cn81xx; */
+ struct bdk_bgxx_cmrx_rx_fifo_len_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t fifo_len : 13; /**< [ 12: 0](RO/H) Per-LMAC FIFO length. Useful for determining if FIFO is empty when bringing an LMAC down. */
+#else /* Word 0 - Little Endian */
+ uint64_t fifo_len : 13; /**< [ 12: 0](RO/H) Per-LMAC FIFO length. Useful for determining if FIFO is empty when bringing an LMAC down. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_cmrx_rx_fifo_len_s cn83xx; */
+};
+typedef union bdk_bgxx_cmrx_rx_fifo_len bdk_bgxx_cmrx_rx_fifo_len_t;
+
+/* Physical address of BGX(a)_CMR(b)_RX_FIFO_LEN (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_RX_FIFO_LEN(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_FIFO_LEN(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000108ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000108ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000108ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_FIFO_LEN", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_RX_FIFO_LEN(a,b) bdk_bgxx_cmrx_rx_fifo_len_t
+#define bustype_BDK_BGXX_CMRX_RX_FIFO_LEN(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_FIFO_LEN(a,b) "BGXX_CMRX_RX_FIFO_LEN"
+#define device_bar_BDK_BGXX_CMRX_RX_FIFO_LEN(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_FIFO_LEN(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_FIFO_LEN(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_id_map
+ *
+ * BGX CMR Receive ID Map Register
+ * These registers set the RX LMAC ID mapping for X2P/NIC.
+ * Layout is identical on all supported models (single "cn" variant).
+ */
+union bdk_bgxx_cmrx_rx_id_map
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_id_map_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t rid : 7; /**< [ 14: 8](R/W) Reserved.
+ Internal:
+ Defeatured. Reassembly ID for Octeon PKI; not used in CN8XXX.
+ Reassembly ID map for this LMAC. A shared pool of 96 reassembly IDs (RIDs) exists for all
+ MACs.
+
+ The RID for this LMAC must be constrained such that it does not overlap with any other MAC
+ in the system. Its reset value has been chosen such that this condition is satisfied:
+
+ _ RID reset value = 4*(BGX_ID + 1) + LMAC_ID
+
+ Changes to RID must only occur when the LMAC is quiescent (i.e. the LMAC receive interface
+ is down and the RX FIFO is empty). */
+ uint64_t unused : 2; /**< [ 7: 6](RAZ) Reserved. */
+ uint64_t pknd : 6; /**< [ 5: 0](R/W) Port kind for this LMAC. */
+#else /* Word 0 - Little Endian */
+ uint64_t pknd : 6; /**< [ 5: 0](R/W) Port kind for this LMAC. */
+ uint64_t unused : 2; /**< [ 7: 6](RAZ) Reserved. */
+ uint64_t rid : 7; /**< [ 14: 8](R/W) Reserved.
+ Internal:
+ Defeatured. Reassembly ID for Octeon PKI; not used in CN8XXX.
+ Reassembly ID map for this LMAC. A shared pool of 96 reassembly IDs (RIDs) exists for all
+ MACs.
+
+ The RID for this LMAC must be constrained such that it does not overlap with any other MAC
+ in the system. Its reset value has been chosen such that this condition is satisfied:
+
+ _ RID reset value = 4*(BGX_ID + 1) + LMAC_ID
+
+ Changes to RID must only occur when the LMAC is quiescent (i.e. the LMAC receive interface
+ is down and the RX FIFO is empty). */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_id_map_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_id_map bdk_bgxx_cmrx_rx_id_map_t;
+
+/* Physical address of BGX(a)_CMR(b)_RX_ID_MAP (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_RX_ID_MAP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_ID_MAP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000060ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000060ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000060ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_ID_MAP", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_RX_ID_MAP(a,b) bdk_bgxx_cmrx_rx_id_map_t
+#define bustype_BDK_BGXX_CMRX_RX_ID_MAP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_ID_MAP(a,b) "BGXX_CMRX_RX_ID_MAP"
+#define device_bar_BDK_BGXX_CMRX_RX_ID_MAP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_ID_MAP(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_ID_MAP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_logl_xoff
+ *
+ * BGX CMR Receive Logical XOFF Registers
+ * W1S companion of BGX()_CMR()_RX_LOGL_XON; both alias the same
+ * physical state (see the XOFF field description).
+ */
+union bdk_bgxx_cmrx_rx_logl_xoff
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_logl_xoff_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t xoff : 16; /**< [ 15: 0](R/W1S/H) Together with BGX()_CMR()_RX_LOGL_XON, defines type of channel backpressure to
+ apply to the MAC. In the case of SMU, Do not write when HiGig2 is
+ enabled. Writing 1 sets the same physical register as that which is cleared by
+ BGX()_CMR()_RX_LOGL_XON[XON]. An XOFF value of 1 will cause a backpressure on
+ the MAC. */
+#else /* Word 0 - Little Endian */
+ uint64_t xoff : 16; /**< [ 15: 0](R/W1S/H) Together with BGX()_CMR()_RX_LOGL_XON, defines type of channel backpressure to
+ apply to the MAC. In the case of SMU, Do not write when HiGig2 is
+ enabled. Writing 1 sets the same physical register as that which is cleared by
+ BGX()_CMR()_RX_LOGL_XON[XON]. An XOFF value of 1 will cause a backpressure on
+ the MAC. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_logl_xoff_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_logl_xoff bdk_bgxx_cmrx_rx_logl_xoff_t;
+
+/* Physical address of BGX(a)_CMR(b)_RX_LOGL_XOFF (0x1000000-byte stride
+   per BGX block, 0x100000 per LMAC); invalid indexes hit __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_CMRX_RX_LOGL_XOFF(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_LOGL_XOFF(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000f8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000f8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000f8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_LOGL_XOFF", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata (BDK accessor-macro naming convention). */
+#define typedef_BDK_BGXX_CMRX_RX_LOGL_XOFF(a,b) bdk_bgxx_cmrx_rx_logl_xoff_t
+#define bustype_BDK_BGXX_CMRX_RX_LOGL_XOFF(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_LOGL_XOFF(a,b) "BGXX_CMRX_RX_LOGL_XOFF"
+#define device_bar_BDK_BGXX_CMRX_RX_LOGL_XOFF(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_LOGL_XOFF(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_LOGL_XOFF(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_logl_xon
+ *
+ * BGX CMR Receive Logical XON Registers
+ */
+union bdk_bgxx_cmrx_rx_logl_xon
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_logl_xon_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t xon : 16; /**< [ 15: 0](R/W1C/H) Together with BGX()_CMR()_RX_LOGL_XOFF, defines type of channel backpressure to
+ apply. Do not write when HiGig2 is enabled. Writing 1 clears the same physical register as
+ that which is set by XOFF. An XON value of 1 means only NIC channel BP can cause a
+ backpressure on the MAC. */
+#else /* Word 0 - Little Endian */
+ uint64_t xon : 16; /**< [ 15: 0](R/W1C/H) Together with BGX()_CMR()_RX_LOGL_XOFF, defines type of channel backpressure to
+ apply. Do not write when HiGig2 is enabled. Writing 1 clears the same physical register as
+ that which is set by XOFF. An XON value of 1 means only NIC channel BP can cause a
+ backpressure on the MAC. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_logl_xon_s cn81xx; */
+ /* CN88XX variant: identical layout; field description says "SMU" where the
+ generic struct says "the MAC". */
+ struct bdk_bgxx_cmrx_rx_logl_xon_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t xon : 16; /**< [ 15: 0](R/W1C/H) Together with BGX()_CMR()_RX_LOGL_XOFF, defines type of channel backpressure to
+ apply. Do not write when HiGig2 is enabled. Writing 1 clears the same physical register as
+ that which is set by XOFF. An XON value of 1 means only NIC channel BP can cause a
+ backpressure on SMU. */
+#else /* Word 0 - Little Endian */
+ uint64_t xon : 16; /**< [ 15: 0](R/W1C/H) Together with BGX()_CMR()_RX_LOGL_XOFF, defines type of channel backpressure to
+ apply. Do not write when HiGig2 is enabled. Writing 1 clears the same physical register as
+ that which is set by XOFF. An XON value of 1 means only NIC channel BP can cause a
+ backpressure on SMU. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_cmrx_rx_logl_xon_s cn83xx; */
+};
+typedef union bdk_bgxx_cmrx_rx_logl_xon bdk_bgxx_cmrx_rx_logl_xon_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_LOGL_XON(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_LOGL_XON(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000100ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000100ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000100ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_LOGL_XON", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_LOGL_XON(a,b) bdk_bgxx_cmrx_rx_logl_xon_t
+#define bustype_BDK_BGXX_CMRX_RX_LOGL_XON(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_LOGL_XON(a,b) "BGXX_CMRX_RX_LOGL_XON"
+#define device_bar_BDK_BGXX_CMRX_RX_LOGL_XON(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_LOGL_XON(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_LOGL_XON(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_pause_drop_time
+ *
+ * BGX CMR Receive Pause Drop-Time Register
+ */
+union bdk_bgxx_cmrx_rx_pause_drop_time
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_pause_drop_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pause_time : 16; /**< [ 15: 0](R/W1C/H) Time extracted from the dropped PAUSE packet dropped due to RXB FIFO full or during partner reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t pause_time : 16; /**< [ 15: 0](R/W1C/H) Time extracted from the dropped PAUSE packet dropped due to RXB FIFO full or during partner reset. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_pause_drop_time_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_pause_drop_time bdk_bgxx_cmrx_rx_pause_drop_time_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_PAUSE_DROP_TIME(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_PAUSE_DROP_TIME(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000068ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000068ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000068ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_PAUSE_DROP_TIME", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_PAUSE_DROP_TIME(a,b) bdk_bgxx_cmrx_rx_pause_drop_time_t
+#define bustype_BDK_BGXX_CMRX_RX_PAUSE_DROP_TIME(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_PAUSE_DROP_TIME(a,b) "BGXX_CMRX_RX_PAUSE_DROP_TIME"
+#define device_bar_BDK_BGXX_CMRX_RX_PAUSE_DROP_TIME(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_PAUSE_DROP_TIME(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_PAUSE_DROP_TIME(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat0
+ *
+ * BGX Receive Status Register 0
+ * These registers provide a count of received packets that meet the following conditions:
+ * * are not recognized as PAUSE packets.
+ * * are not dropped due DMAC filtering.
+ * * are not dropped due FIFO full status.
+ * * do not have any other OPCODE (FCS, Length, etc).
+ */
+union bdk_bgxx_cmrx_rx_stat0
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of received packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of received packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat0_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat0 bdk_bgxx_cmrx_rx_stat0_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000070ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000070ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000070ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT0", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT0(a,b) bdk_bgxx_cmrx_rx_stat0_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT0(a,b) "BGXX_CMRX_RX_STAT0"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT0(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat1
+ *
+ * BGX Receive Status Register 1
+ * These registers provide a count of octets of received packets.
+ */
+union bdk_bgxx_cmrx_rx_stat1
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of received packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of received packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat1_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat1 bdk_bgxx_cmrx_rx_stat1_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000078ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000078ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000078ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT1", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT1(a,b) bdk_bgxx_cmrx_rx_stat1_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT1(a,b) "BGXX_CMRX_RX_STAT1"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT1(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat2
+ *
+ * BGX Receive Status Register 2
+ * These registers provide a count of all packets received that were recognized as flow-control
+ * or PAUSE packets. PAUSE packets with any kind of error are counted in
+ * BGX()_CMR()_RX_STAT8 (error stats register). Pause packets can be optionally dropped
+ * or forwarded based on BGX()_SMU()_RX_FRM_CTL[CTL_DRP]. This count increments
+ * regardless of whether the packet is dropped. PAUSE packets are never counted in
+ * BGX()_CMR()_RX_STAT0.
+ */
+union bdk_bgxx_cmrx_rx_stat2
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of received PAUSE packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of received PAUSE packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat2_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat2 bdk_bgxx_cmrx_rx_stat2_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000080ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000080ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000080ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT2", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT2(a,b) bdk_bgxx_cmrx_rx_stat2_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT2(a,b) "BGXX_CMRX_RX_STAT2"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT2(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat3
+ *
+ * BGX Receive Status Register 3
+ * These registers provide a count of octets of received PAUSE and control packets.
+ */
+union bdk_bgxx_cmrx_rx_stat3
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of received PAUSE packets. [CNT] will wrap and is cleared if LMAC is disabled
+ with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of received PAUSE packets. [CNT] will wrap and is cleared if LMAC is disabled
+ with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat3_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat3 bdk_bgxx_cmrx_rx_stat3_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT3(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT3(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000088ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000088ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000088ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT3", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT3(a,b) bdk_bgxx_cmrx_rx_stat3_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT3(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT3(a,b) "BGXX_CMRX_RX_STAT3"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT3(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT3(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT3(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat4
+ *
+ * BGX Receive Status Register 4
+ * These registers provide a count of all packets received that were dropped by the DMAC filter.
+ * Packets that match the DMAC are dropped and counted here regardless of whether they were ERR
+ * packets, but does not include those reported in BGX()_CMR()_RX_STAT6. These packets
+ * are never counted in BGX()_CMR()_RX_STAT0. Eight-byte packets as the result of
+ * truncation or other means are not dropped by CNXXXX and will never appear in this count.
+ */
+union bdk_bgxx_cmrx_rx_stat4
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of filtered DMAC packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of filtered DMAC packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat4_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat4 bdk_bgxx_cmrx_rx_stat4_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT4(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT4(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000090ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000090ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000090ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT4", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT4(a,b) bdk_bgxx_cmrx_rx_stat4_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT4(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT4(a,b) "BGXX_CMRX_RX_STAT4"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT4(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT4(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT4(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat5
+ *
+ * BGX Receive Status Register 5
+ * These registers provide a count of octets of filtered DMAC packets.
+ */
+union bdk_bgxx_cmrx_rx_stat5
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of filtered DMAC packets. [CNT] will wrap and is cleared if LMAC is disabled
+ with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of filtered DMAC packets. [CNT] will wrap and is cleared if LMAC is disabled
+ with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat5_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat5 bdk_bgxx_cmrx_rx_stat5_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT5(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT5(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000098ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000098ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000098ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT5", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT5(a,b) bdk_bgxx_cmrx_rx_stat5_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT5(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT5(a,b) "BGXX_CMRX_RX_STAT5"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT5(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT5(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT5(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat6
+ *
+ * BGX Receive Status Register 6
+ * These registers provide a count of all packets received that were dropped due to a full
+ * receive FIFO. They do not count any packet that is truncated at the point of overflow and sent
+ * on to the NIC. These registers count all entire packets dropped by the FIFO for a given LMAC
+ * regardless of DMAC or PAUSE type.
+ */
+union bdk_bgxx_cmrx_rx_stat6
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of dropped packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of dropped packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat6_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat6 bdk_bgxx_cmrx_rx_stat6_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT6(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT6(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000a0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000a0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000a0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT6", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT6(a,b) bdk_bgxx_cmrx_rx_stat6_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT6(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT6(a,b) "BGXX_CMRX_RX_STAT6"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT6(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT6(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT6(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat7
+ *
+ * BGX Receive Status Register 7
+ * These registers provide a count of octets of received packets that were dropped due to a full
+ * receive FIFO.
+ */
+union bdk_bgxx_cmrx_rx_stat7
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of dropped packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of dropped packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat7_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat7 bdk_bgxx_cmrx_rx_stat7_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT7(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT7(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000a8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000a8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000a8ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT7", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT7(a,b) bdk_bgxx_cmrx_rx_stat7_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT7(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT7(a,b) "BGXX_CMRX_RX_STAT7"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT7(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT7(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT7(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_stat8
+ *
+ * BGX Receive Status Register 8
+ * These registers provide a count of all packets received with some error that were not dropped
+ * either due to the DMAC filter or lack of room in the receive FIFO.
+ * This does not include packets which were counted in
+ * BGX()_CMR()_RX_STAT2, BGX()_CMR()_RX_STAT4 nor
+ * BGX()_CMR()_RX_STAT6.
+ *
+ * Which statistics are updated on control packet errors and drops are shown below:
+ *
+ * \<pre\>
+ * if dropped {
+ *   if !errored STAT8
+ *   if overflow STAT6
+ *   else if dmac drop STAT4
+ *   else if filter drop STAT2
+ * } else {
+ *   if errored STAT2
+ *   else STAT8
+ * }
+ * \</pre\>
+ */
+union bdk_bgxx_cmrx_rx_stat8
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_stat8_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of error packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of error packets. [CNT] will wrap and is cleared if LMAC is disabled with
+ BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_stat8_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_stat8 bdk_bgxx_cmrx_rx_stat8_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT8(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_STAT8(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000b0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000b0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000b0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_STAT8", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_STAT8(a,b) bdk_bgxx_cmrx_rx_stat8_t
+#define bustype_BDK_BGXX_CMRX_RX_STAT8(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_STAT8(a,b) "BGXX_CMRX_RX_STAT8"
+#define device_bar_BDK_BGXX_CMRX_RX_STAT8(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_STAT8(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_STAT8(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_rx_weight
+ *
+ * BGX CMR Receive-Weight Register
+ */
+union bdk_bgxx_cmrx_rx_weight
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_rx_weight_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t weight : 4; /**< [ 3: 0](R/W) For the weighted round robin algorithm in CMR RXB, weight to assign for this LMAC relative
+ to other LMAC weights. Defaults to round-robin (non-weighted minimum setting of 0x1). A
+ setting of 0x0 effectively takes the LMAC out of eligibility. */
+#else /* Word 0 - Little Endian */
+ uint64_t weight : 4; /**< [ 3: 0](R/W) For the weighted round robin algorithm in CMR RXB, weight to assign for this LMAC relative
+ to other LMAC weights. Defaults to round-robin (non-weighted minimum setting of 0x1). A
+ setting of 0x0 effectively takes the LMAC out of eligibility. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_rx_weight_s cn; */
+};
+typedef union bdk_bgxx_cmrx_rx_weight bdk_bgxx_cmrx_rx_weight_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_RX_WEIGHT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_RX_WEIGHT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000e0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e00000e0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e00000e0ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_RX_WEIGHT", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_RX_WEIGHT(a,b) bdk_bgxx_cmrx_rx_weight_t
+#define bustype_BDK_BGXX_CMRX_RX_WEIGHT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_RX_WEIGHT(a,b) "BGXX_CMRX_RX_WEIGHT"
+#define device_bar_BDK_BGXX_CMRX_RX_WEIGHT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_RX_WEIGHT(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_RX_WEIGHT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_channel
+ *
+ * BGX CMR Transmit-Channels Registers
+ */
+union bdk_bgxx_cmrx_tx_channel
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_channel_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t msk : 16; /**< [ 31: 16](R/W) Backpressure channel mask. BGX can completely ignore the channel backpressure for channel
+ specified by this field. Any channel in which MSK\<n\> is set never sends backpressure
+ information to NIC. */
+ uint64_t dis : 16; /**< [ 15: 0](R/W) Credit return backpressure disable. BGX stops returning channel credits for any
+ channel that is backpressured. These bits can be used to override that. If
+ [DIS]\<n\> is set, channel credits may flow back regardless of the backpressure
+ for that channel. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 16; /**< [ 15: 0](R/W) Credit return backpressure disable. BGX stops returning channel credits for any
+ channel that is backpressured. These bits can be used to override that. If
+ [DIS]\<n\> is set, channel credits may flow back regardless of the backpressure
+ for that channel. */
+ uint64_t msk : 16; /**< [ 31: 16](R/W) Backpressure channel mask. BGX can completely ignore the channel backpressure for channel
+ specified by this field. Any channel in which MSK\<n\> is set never sends backpressure
+ information to NIC. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_channel_s cn81xx; */
+ /* CN88XX variant: identical layout; [MSK] description says "TNS/NIC"
+ instead of "NIC". */
+ struct bdk_bgxx_cmrx_tx_channel_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t msk : 16; /**< [ 31: 16](R/W) Backpressure channel mask. BGX can completely ignore the channel backpressure for channel
+ specified by this field. Any channel in which MSK\<n\> is set never sends backpressure
+ information to TNS/NIC. */
+ uint64_t dis : 16; /**< [ 15: 0](R/W) Credit return backpressure disable. BGX stops returning channel credits for any
+ channel that is backpressured. These bits can be used to override that. If
+ [DIS]\<n\> is set, channel credits may flow back regardless of the backpressure
+ for that channel. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 16; /**< [ 15: 0](R/W) Credit return backpressure disable. BGX stops returning channel credits for any
+ channel that is backpressured. These bits can be used to override that. If
+ [DIS]\<n\> is set, channel credits may flow back regardless of the backpressure
+ for that channel. */
+ uint64_t msk : 16; /**< [ 31: 16](R/W) Backpressure channel mask. BGX can completely ignore the channel backpressure for channel
+ specified by this field. Any channel in which MSK\<n\> is set never sends backpressure
+ information to TNS/NIC. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* CN83XX variant: identical layout; [MSK] description says "PKO"
+ instead of "NIC". */
+ struct bdk_bgxx_cmrx_tx_channel_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t msk : 16; /**< [ 31: 16](R/W) Backpressure channel mask. BGX can completely ignore the channel backpressure for channel
+ specified by this field. Any channel in which MSK\<n\> is set never sends backpressure
+ information to PKO. */
+ uint64_t dis : 16; /**< [ 15: 0](R/W) Credit return backpressure disable. BGX stops returning channel credits for any
+ channel that is backpressured. These bits can be used to override that. If
+ [DIS]\<n\> is set, channel credits may flow back regardless of the backpressure
+ for that channel. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 16; /**< [ 15: 0](R/W) Credit return backpressure disable. BGX stops returning channel credits for any
+ channel that is backpressured. These bits can be used to override that. If
+ [DIS]\<n\> is set, channel credits may flow back regardless of the backpressure
+ for that channel. */
+ uint64_t msk : 16; /**< [ 31: 16](R/W) Backpressure channel mask. BGX can completely ignore the channel backpressure for channel
+ specified by this field. Any channel in which MSK\<n\> is set never sends backpressure
+ information to PKO. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmrx_tx_channel bdk_bgxx_cmrx_tx_channel_t;
+
+/* Model-gated address helper: invalid (a,b) reach __bdk_csr_fatal()
+ (assumed noreturn, since the function would otherwise fall off the end). */
+static inline uint64_t BDK_BGXX_CMRX_TX_CHANNEL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_CHANNEL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000500ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000500ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000500ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_CHANNEL", 2, a, b, 0, 0);
+}
+
+/* Per-CSR metadata macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_BGXX_CMRX_TX_CHANNEL(a,b) bdk_bgxx_cmrx_tx_channel_t
+#define bustype_BDK_BGXX_CMRX_TX_CHANNEL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_CHANNEL(a,b) "BGXX_CMRX_TX_CHANNEL"
+#define device_bar_BDK_BGXX_CMRX_TX_CHANNEL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_CHANNEL(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_CHANNEL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_fifo_len
+ *
+ * BGX CMR Transmit FIFO Length Registers
+ */
+union bdk_bgxx_cmrx_tx_fifo_len
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_fifo_len_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t lmac_idle : 1; /**< [ 14: 14](RO/H) Idle signal to identify when all credits and other pipeline buffers are also cleared out
+ and LMAC can be considered IDLE in the BGX CMR TX. */
+ uint64_t fifo_len : 14; /**< [ 13: 0](RO/H) Per-LMAC TXB main FIFO length. Useful for determining if main FIFO is empty when bringing
+ an LMAC down. */
+#else /* Word 0 - Little Endian */
+ uint64_t fifo_len : 14; /**< [ 13: 0](RO/H) Per-LMAC TXB main FIFO length. Useful for determining if main FIFO is empty when bringing
+ an LMAC down. */
+ uint64_t lmac_idle : 1; /**< [ 14: 14](RO/H) Idle signal to identify when all credits and other pipeline buffers are also cleared out
+ and LMAC can be considered IDLE in the BGX CMR TX. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_fifo_len_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_fifo_len bdk_bgxx_cmrx_tx_fifo_len_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_FIFO_LEN(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_FIFO_LEN(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000518ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000518ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000518ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_FIFO_LEN", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_FIFO_LEN(a,b) bdk_bgxx_cmrx_tx_fifo_len_t
+#define bustype_BDK_BGXX_CMRX_TX_FIFO_LEN(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_FIFO_LEN(a,b) "BGXX_CMRX_TX_FIFO_LEN"
+#define device_bar_BDK_BGXX_CMRX_TX_FIFO_LEN(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_FIFO_LEN(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_FIFO_LEN(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_hg2_status
+ *
+ * BGX CMR Transmit HiGig2 Status Registers
+ */
+union bdk_bgxx_cmrx_tx_hg2_status
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_hg2_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t xof : 16; /**< [ 31: 16](RO/H) 16-bit XOF back pressure vector from HiGig2 message packet or from PFC packets. Non-
+ zero only when logical back pressure is active. All bits are 0 when [LGTIM2GO] = 0x0. */
+ uint64_t lgtim2go : 16; /**< [ 15: 0](RO/H) Logical packet flow back pressure time remaining. Initial value set from XOF time field of
+ HiGig2 message packet received or a function of the enabled and current timers for
+ PFC packets. Nonzero only when logical back pressure is active. */
+#else /* Word 0 - Little Endian */
+ uint64_t lgtim2go : 16; /**< [ 15: 0](RO/H) Logical packet flow back pressure time remaining. Initial value set from XOF time field of
+ HiGig2 message packet received or a function of the enabled and current timers for
+ PFC packets. Nonzero only when logical back pressure is active. */
+ uint64_t xof : 16; /**< [ 31: 16](RO/H) 16-bit XOF back pressure vector from HiGig2 message packet or from PFC packets. Non-
+ zero only when logical back pressure is active. All bits are 0 when [LGTIM2GO] = 0x0. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_hg2_status_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_hg2_status bdk_bgxx_cmrx_tx_hg2_status_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_HG2_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_HG2_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000510ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000510ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000510ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_HG2_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_HG2_STATUS(a,b) bdk_bgxx_cmrx_tx_hg2_status_t
+#define bustype_BDK_BGXX_CMRX_TX_HG2_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_HG2_STATUS(a,b) "BGXX_CMRX_TX_HG2_STATUS"
+#define device_bar_BDK_BGXX_CMRX_TX_HG2_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_HG2_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_HG2_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_ovr_bp
+ *
+ * BGX CMR Transmit-Channels Backpressure Override Registers
+ */
+union bdk_bgxx_cmrx_tx_ovr_bp
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_ovr_bp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t tx_chan_bp : 16; /**< [ 15: 0](R/W) Per-channel backpressure status sent to NIC. Also see BGX()_PRT_CBFC_CTL for details on
+ impact to physical backpressure.
+ 0 = Channel is available.
+ 1 = Channel is backpressured. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_chan_bp : 16; /**< [ 15: 0](R/W) Per-channel backpressure status sent to NIC. Also see BGX()_PRT_CBFC_CTL for details on
+ impact to physical backpressure.
+ 0 = Channel is available.
+ 1 = Channel is backpressured. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_ovr_bp_s cn81xx; */
+ struct bdk_bgxx_cmrx_tx_ovr_bp_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t tx_chan_bp : 16; /**< [ 15: 0](R/W) Per-channel backpressure status sent to TNS/NIC.
+ 0 = Channel is available.
+ 1 = Channel is backpressured. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_chan_bp : 16; /**< [ 15: 0](R/W) Per-channel backpressure status sent to TNS/NIC.
+ 0 = Channel is available.
+ 1 = Channel is backpressured. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_bgxx_cmrx_tx_ovr_bp_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t tx_chan_bp : 16; /**< [ 15: 0](R/W) Per-channel backpressure status sent to PKO/NIC. Also see BGX()_PRT_CBFC_CTL for
+ details on impact to physical backpressure.
+ 0 = Channel is available.
+ 1 = Channel is backpressured. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_chan_bp : 16; /**< [ 15: 0](R/W) Per-channel backpressure status sent to PKO/NIC. Also see BGX()_PRT_CBFC_CTL for
+ details on impact to physical backpressure.
+ 0 = Channel is available.
+ 1 = Channel is backpressured. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmrx_tx_ovr_bp bdk_bgxx_cmrx_tx_ovr_bp_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_OVR_BP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_OVR_BP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000520ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000520ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000520ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_OVR_BP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_OVR_BP(a,b) bdk_bgxx_cmrx_tx_ovr_bp_t
+#define bustype_BDK_BGXX_CMRX_TX_OVR_BP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_OVR_BP(a,b) "BGXX_CMRX_TX_OVR_BP"
+#define device_bar_BDK_BGXX_CMRX_TX_OVR_BP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_OVR_BP(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_OVR_BP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat0
+ *
+ * BGX CMR Transmit Statistics Registers 0
+ */
+union bdk_bgxx_cmrx_tx_stat0
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_stat0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t xscol : 48; /**< [ 47: 0](R/W/H) Number of packets dropped (never successfully sent) due to excessive collision. Defined by
+ BGX()_GMP_GMI_TX_COL_ATTEMPT[LIMIT]. Half-duplex mode only and does not account for late
+ collisions.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t xscol : 48; /**< [ 47: 0](R/W/H) Number of packets dropped (never successfully sent) due to excessive collision. Defined by
+ BGX()_GMP_GMI_TX_COL_ATTEMPT[LIMIT]. Half-duplex mode only and does not account for late
+ collisions.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_stat0_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat0 bdk_bgxx_cmrx_tx_stat0_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000600ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000600ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000600ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_STAT0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_STAT0(a,b) bdk_bgxx_cmrx_tx_stat0_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT0(a,b) "BGXX_CMRX_TX_STAT0"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT0(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat1
+ *
+ * BGX CMR Transmit Statistics Registers 1
+ */
+union bdk_bgxx_cmrx_tx_stat1
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_stat1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t xsdef : 48; /**< [ 47: 0](R/W/H) A count of the number of times any frame was deferred for an excessive period of time.
+ See maxDeferTime in the IEEE 802.3 specification. Half-duplex mode only and not updated
+ for late collisions.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t xsdef : 48; /**< [ 47: 0](R/W/H) A count of the number of times any frame was deferred for an excessive period of time.
+ See maxDeferTime in the IEEE 802.3 specification. Half-duplex mode only and not updated
+ for late collisions.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_stat1_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat1 bdk_bgxx_cmrx_tx_stat1_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000608ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000608ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000608ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_STAT1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_STAT1(a,b) bdk_bgxx_cmrx_tx_stat1_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT1(a,b) "BGXX_CMRX_TX_STAT1"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT1(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat10
+ *
+ * BGX CMR Transmit Statistics Registers 10
+ */
+union bdk_bgxx_cmrx_tx_stat10
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_stat10_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t hist4 : 48; /**< [ 47: 0](R/W/H) Number of packets sent with an octet count between 256-511. Packet length is the sum of
+ all data transmitted on the wire for the given packet including packet data, pad bytes,
+ FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t hist4 : 48; /**< [ 47: 0](R/W/H) Number of packets sent with an octet count between 256-511. Packet length is the sum of
+ all data transmitted on the wire for the given packet including packet data, pad bytes,
+ FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_stat10_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat10 bdk_bgxx_cmrx_tx_stat10_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT10(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT10(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000650ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000650ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000650ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_STAT10", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_STAT10(a,b) bdk_bgxx_cmrx_tx_stat10_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT10(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT10(a,b) "BGXX_CMRX_TX_STAT10"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT10(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT10(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT10(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat11
+ *
+ * BGX CMR Transmit Statistics Registers 11
+ */
+union bdk_bgxx_cmrx_tx_stat11
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_stat11_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t hist5 : 48; /**< [ 47: 0](R/W/H) Number of packets sent with an octet count between 512-1023. Packet length is the sum of
+ all data transmitted on the wire for the given packet including packet data, pad bytes,
+ FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t hist5 : 48; /**< [ 47: 0](R/W/H) Number of packets sent with an octet count between 512-1023. Packet length is the sum of
+ all data transmitted on the wire for the given packet including packet data, pad bytes,
+ FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_stat11_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat11 bdk_bgxx_cmrx_tx_stat11_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT11(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT11(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000658ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000658ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000658ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_STAT11", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_STAT11(a,b) bdk_bgxx_cmrx_tx_stat11_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT11(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT11(a,b) "BGXX_CMRX_TX_STAT11"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT11(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT11(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT11(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat12
+ *
+ * BGX CMR Transmit Statistics Registers 12
+ */
+union bdk_bgxx_cmrx_tx_stat12
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_stat12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t hist6 : 48; /**< [ 47: 0](R/W/H) Number of packets sent with an octet count between 1024-1518. Packet length is the sum of
+ all data transmitted on the wire for the given packet including packet data, pad bytes,
+ FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t hist6 : 48; /**< [ 47: 0](R/W/H) Number of packets sent with an octet count between 1024-1518. Packet length is the sum of
+ all data transmitted on the wire for the given packet including packet data, pad bytes,
+ FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_stat12_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat12 bdk_bgxx_cmrx_tx_stat12_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT12(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT12(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000660ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000660ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000660ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_STAT12", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_STAT12(a,b) bdk_bgxx_cmrx_tx_stat12_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT12(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT12(a,b) "BGXX_CMRX_TX_STAT12"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT12(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT12(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT12(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat13
+ *
+ * BGX CMR Transmit Statistics Registers 13
+ */
+union bdk_bgxx_cmrx_tx_stat13
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_stat13_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t hist7 : 48; /**< [ 47: 0](R/W/H) Number of packets sent with an octet count \> 1518. Packet length is the sum of all data
+ transmitted on the wire for the given packet including packet data, pad bytes, FCS bytes,
+ and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t hist7 : 48; /**< [ 47: 0](R/W/H) Number of packets sent with an octet count \> 1518. Packet length is the sum of all data
+ transmitted on the wire for the given packet including packet data, pad bytes, FCS bytes,
+ and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_stat13_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat13 bdk_bgxx_cmrx_tx_stat13_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT13(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT13(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000668ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000668ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000668ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_STAT13", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_STAT13(a,b) bdk_bgxx_cmrx_tx_stat13_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT13(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT13(a,b) "BGXX_CMRX_TX_STAT13"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT13(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT13(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT13(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat14
+ *
+ * BGX CMR Transmit Statistics Registers 14
+ */
+union bdk_bgxx_cmrx_tx_stat14
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_stat14_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t bcst : 48; /**< [ 47: 0](R/W/H) Number of packets sent to broadcast DMAC, excluding PAUSE or PFC control packets generated
+ by BGX. Does not include MCST packets.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap.
+
+ Note that BGX determines if the packet is MCST or BCST from the DMAC of the packet. BGX
+ assumes that the DMAC lies in the first six bytes of the packet as per the 802.3 frame
+ definition. If the system requires additional data before the L2 header, the MCST and BCST
+ counters may not reflect reality and should be ignored by software. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t bcst : 48; /**< [ 47: 0](R/W/H) Number of packets sent to broadcast DMAC, excluding PAUSE or PFC control packets generated
+ by BGX. Does not include MCST packets.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap.
+
+ Note that BGX determines if the packet is MCST or BCST from the DMAC of the packet. BGX
+ assumes that the DMAC lies in the first six bytes of the packet as per the 802.3 frame
+ definition. If the system requires additional data before the L2 header, the MCST and BCST
+ counters may not reflect reality and should be ignored by software. Cleared if LMAC is
+ disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_stat14_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat14 bdk_bgxx_cmrx_tx_stat14_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT14(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT14(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000670ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000670ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000670ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_STAT14", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_STAT14(a,b) bdk_bgxx_cmrx_tx_stat14_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT14(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT14(a,b) "BGXX_CMRX_TX_STAT14"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT14(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT14(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT14(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat15
+ *
+ * BGX CMR Transmit Statistics Registers 15
+ */
+union bdk_bgxx_cmrx_tx_stat15
+{
+ uint64_t u;
+ struct bdk_bgxx_cmrx_tx_stat15_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t mcst : 48; /**< [ 47: 0](R/W/H) Number of packets sent to multicast DMAC, excluding PAUSE or PFC control packets generated
+ by BGX. Does not include BCST packets.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap.
+
+ Note that BGX determines if the packet is MCST or BCST from the DMAC of the packet. BGX
+ assumes that the DMAC lies in the first six bytes of the packet as per the 802.3 frame
+ definition. If the system requires additional data before the L2 header, then the MCST and
+ BCST counters may not reflect reality and should be ignored by software. Cleared if LMAC
+ is disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+ uint64_t mcst : 48; /**< [ 47: 0](R/W/H) Number of packets sent to multicast DMAC, excluding PAUSE or PFC control packets generated
+ by BGX. Does not include BCST packets.
+
+ Not cleared on read; cleared on a write with 0x0. Counters will wrap.
+
+ Note that BGX determines if the packet is MCST or BCST from the DMAC of the packet. BGX
+ assumes that the DMAC lies in the first six bytes of the packet as per the 802.3 frame
+ definition. If the system requires additional data before the L2 header, then the MCST and
+ BCST counters may not reflect reality and should be ignored by software. Cleared if LMAC
+ is disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmrx_tx_stat15_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat15 bdk_bgxx_cmrx_tx_stat15_t;
+
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT15(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT15(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000678ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0000678ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0000678ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_CMRX_TX_STAT15", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMRX_TX_STAT15(a,b) bdk_bgxx_cmrx_tx_stat15_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT15(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT15(a,b) "BGXX_CMRX_TX_STAT15"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT15(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT15(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT15(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat16
+ *
+ * BGX CMR Transmit Statistics Registers 16
+ */
+union bdk_bgxx_cmrx_tx_stat16
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat16_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t undflw                : 48; /**< [ 47:  0](R/W/H) Number of underflow packets.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 48; /**< [ 47:  0](R/W/H) Number of underflow packets.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat16_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat16 bdk_bgxx_cmrx_tx_stat16_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT16 for the chip model
+ * detected at runtime; __bdk_csr_fatal() (presumably noreturn -- verify in
+ * bdk-csr.h) is invoked when (a,b) is out of range for every supported model. */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT16(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT16(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000680ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000680ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000680ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT16", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata (C type, bus type, name, BAR, bus number,
+ * argument list) keyed by the register's macro name. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT16(a,b) bdk_bgxx_cmrx_tx_stat16_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT16(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT16(a,b) "BGXX_CMRX_TX_STAT16"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT16(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT16(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT16(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat17
+ *
+ * BGX CMR Transmit Statistics Registers 17
+ */
+union bdk_bgxx_cmrx_tx_stat17
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat17_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t ctl                   : 48; /**< [ 47:  0](R/W/H) Number of PAUSE or PFC control packets generated by BGX. It does not include control
+                                                                 packets forwarded or generated by the cores. Does not track the number of generated HG2
+                                                                 messages.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t ctl                   : 48; /**< [ 47:  0](R/W/H) Number of PAUSE or PFC control packets generated by BGX. It does not include control
+                                                                 packets forwarded or generated by the cores. Does not track the number of generated HG2
+                                                                 messages.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat17_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat17 bdk_bgxx_cmrx_tx_stat17_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT17 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT17(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT17(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000688ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000688ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000688ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT17", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT17(a,b) bdk_bgxx_cmrx_tx_stat17_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT17(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT17(a,b) "BGXX_CMRX_TX_STAT17"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT17(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT17(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT17(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat2
+ *
+ * BGX CMR Transmit Statistics Registers 2
+ */
+union bdk_bgxx_cmrx_tx_stat2
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t mcol                  : 48; /**< [ 47:  0](R/W/H) Number of packets sent with multiple collisions. Must be less than
+                                                                 BGX()_GMP_GMI_TX_COL_ATTEMPT[LIMIT]. Half-duplex mode only and not updated
+                                                                 for late collisions.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t mcol                  : 48; /**< [ 47:  0](R/W/H) Number of packets sent with multiple collisions. Must be less than
+                                                                 BGX()_GMP_GMI_TX_COL_ATTEMPT[LIMIT]. Half-duplex mode only and not updated
+                                                                 for late collisions.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat2_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat2 bdk_bgxx_cmrx_tx_stat2_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT2 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000610ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000610ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000610ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT2", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT2(a,b) bdk_bgxx_cmrx_tx_stat2_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT2(a,b) "BGXX_CMRX_TX_STAT2"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT2(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat3
+ *
+ * BGX CMR Transmit Statistics Registers 3
+ */
+union bdk_bgxx_cmrx_tx_stat3
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t scol                  : 48; /**< [ 47:  0](R/W/H) Number of packets sent with a single collision. Half-duplex mode only and not updated
+                                                                 for late collisions.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t scol                  : 48; /**< [ 47:  0](R/W/H) Number of packets sent with a single collision. Half-duplex mode only and not updated
+                                                                 for late collisions.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat3_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat3 bdk_bgxx_cmrx_tx_stat3_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT3 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT3(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT3(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000618ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000618ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000618ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT3", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT3(a,b) bdk_bgxx_cmrx_tx_stat3_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT3(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT3(a,b) "BGXX_CMRX_TX_STAT3"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT3(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT3(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT3(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat4
+ *
+ * BGX CMR Transmit Statistics Registers 4
+ */
+union bdk_bgxx_cmrx_tx_stat4
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat4_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t octs                  : 48; /**< [ 47:  0](R/W/H) Number of total octets sent on the interface, excluding PAUSE or PFC control packets
+                                                                 generated by BGX. Does not count octets from frames that were truncated due to collisions
+                                                                 in half-duplex mode.
+                                                                 Octet counts are the sum of all data transmitted on the wire including packet data, pad
+                                                                 bytes, FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND
+                                                                 cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t octs                  : 48; /**< [ 47:  0](R/W/H) Number of total octets sent on the interface, excluding PAUSE or PFC control packets
+                                                                 generated by BGX. Does not count octets from frames that were truncated due to collisions
+                                                                 in half-duplex mode.
+                                                                 Octet counts are the sum of all data transmitted on the wire including packet data, pad
+                                                                 bytes, FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND
+                                                                 cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat4_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat4 bdk_bgxx_cmrx_tx_stat4_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT4 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT4(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT4(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000620ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000620ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000620ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT4", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT4(a,b) bdk_bgxx_cmrx_tx_stat4_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT4(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT4(a,b) "BGXX_CMRX_TX_STAT4"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT4(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT4(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT4(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat5
+ *
+ * BGX CMR Transmit Statistics Registers 5
+ */
+union bdk_bgxx_cmrx_tx_stat5
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat5_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t pkts                  : 48; /**< [ 47:  0](R/W/H) Number of total frames sent on the interface, excluding PAUSE or PFC control packets
+                                                                 generated by BGX. Does not count octets from frames that were truncated due to collisions
+                                                                 in half-duplex mode.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t pkts                  : 48; /**< [ 47:  0](R/W/H) Number of total frames sent on the interface, excluding PAUSE or PFC control packets
+                                                                 generated by BGX. Does not count octets from frames that were truncated due to collisions
+                                                                 in half-duplex mode.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat5_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat5 bdk_bgxx_cmrx_tx_stat5_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT5 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT5(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT5(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000628ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000628ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000628ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT5", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT5(a,b) bdk_bgxx_cmrx_tx_stat5_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT5(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT5(a,b) "BGXX_CMRX_TX_STAT5"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT5(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT5(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT5(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat6
+ *
+ * BGX CMR Transmit Statistics Registers 6
+ */
+union bdk_bgxx_cmrx_tx_stat6
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat6_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t hist0                 : 48; /**< [ 47:  0](R/W/H) Number of packets sent with an octet count \< 64, excluding PAUSE or PFC control packets
+                                                                 generated by BGX. Packet length is the sum of all data transmitted on the wire for the
+                                                                 given packet including packet data, pad bytes, FCS bytes, and JAM bytes. The octet counts
+                                                                 do not include PREAMBLE byte or EXTEND cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t hist0                 : 48; /**< [ 47:  0](R/W/H) Number of packets sent with an octet count \< 64, excluding PAUSE or PFC control packets
+                                                                 generated by BGX. Packet length is the sum of all data transmitted on the wire for the
+                                                                 given packet including packet data, pad bytes, FCS bytes, and JAM bytes. The octet counts
+                                                                 do not include PREAMBLE byte or EXTEND cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat6_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat6 bdk_bgxx_cmrx_tx_stat6_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT6 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT6(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT6(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000630ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000630ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000630ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT6", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT6(a,b) bdk_bgxx_cmrx_tx_stat6_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT6(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT6(a,b) "BGXX_CMRX_TX_STAT6"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT6(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT6(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT6(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat7
+ *
+ * BGX CMR Transmit Statistics Registers 7
+ */
+union bdk_bgxx_cmrx_tx_stat7
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat7_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t hist1                 : 48; /**< [ 47:  0](R/W/H) Number of packets sent with an octet count of 64, excluding PAUSE or PFC control packets
+                                                                 generated by BGX. Packet length is the sum of all data transmitted on the wire for the
+                                                                 given packet including packet data, pad bytes, FCS bytes, and JAM bytes. The octet counts
+                                                                 do not include PREAMBLE byte or EXTEND cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t hist1                 : 48; /**< [ 47:  0](R/W/H) Number of packets sent with an octet count of 64, excluding PAUSE or PFC control packets
+                                                                 generated by BGX. Packet length is the sum of all data transmitted on the wire for the
+                                                                 given packet including packet data, pad bytes, FCS bytes, and JAM bytes. The octet counts
+                                                                 do not include PREAMBLE byte or EXTEND cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat7_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat7 bdk_bgxx_cmrx_tx_stat7_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT7 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT7(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT7(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000638ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000638ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000638ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT7", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT7(a,b) bdk_bgxx_cmrx_tx_stat7_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT7(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT7(a,b) "BGXX_CMRX_TX_STAT7"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT7(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT7(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT7(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat8
+ *
+ * BGX CMR Transmit Statistics Registers 8
+ */
+union bdk_bgxx_cmrx_tx_stat8
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat8_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t hist2                 : 48; /**< [ 47:  0](R/W/H) Number of packets sent with an octet count between 65-127. Packet length is the sum of all
+                                                                 data transmitted on the wire for the given packet including packet data, pad bytes, FCS
+                                                                 bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t hist2                 : 48; /**< [ 47:  0](R/W/H) Number of packets sent with an octet count between 65-127. Packet length is the sum of all
+                                                                 data transmitted on the wire for the given packet including packet data, pad bytes, FCS
+                                                                 bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat8_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat8 bdk_bgxx_cmrx_tx_stat8_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT8 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT8(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT8(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000640ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000640ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000640ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT8", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT8(a,b) bdk_bgxx_cmrx_tx_stat8_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT8(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT8(a,b) "BGXX_CMRX_TX_STAT8"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT8(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT8(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT8(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr#_tx_stat9
+ *
+ * BGX CMR Transmit Statistics Registers 9
+ */
+union bdk_bgxx_cmrx_tx_stat9
+{
+    uint64_t u;
+    struct bdk_bgxx_cmrx_tx_stat9_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t hist3                 : 48; /**< [ 47:  0](R/W/H) Number of packets sent with an octet count between 128-255. Packet length is the sum of
+                                                                 all data transmitted on the wire for the given packet including packet data, pad bytes,
+                                                                 FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+#else /* Word 0 - Little Endian */
+        uint64_t hist3                 : 48; /**< [ 47:  0](R/W/H) Number of packets sent with an octet count between 128-255. Packet length is the sum of
+                                                                 all data transmitted on the wire for the given packet including packet data, pad bytes,
+                                                                 FCS bytes, and JAM bytes. The octet counts do not include PREAMBLE byte or EXTEND cycles.
+
+                                                                 Not cleared on read; cleared on a write with 0x0. Counters will wrap. Cleared if LMAC is
+                                                                 disabled with BGX()_CMR()_CONFIG[ENABLE]=0. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmrx_tx_stat9_s cn; */
+};
+typedef union bdk_bgxx_cmrx_tx_stat9 bdk_bgxx_cmrx_tx_stat9_t;
+
+/* Return the CSR address of BGX(a)_CMR(b)_TX_STAT9 for the chip model
+ * detected at runtime; __bdk_csr_fatal() is invoked on out-of-range (a,b). */
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT9(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMRX_TX_STAT9(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000648ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0000648ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0000648ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_CMRX_TX_STAT9", 2, a, b, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMRX_TX_STAT9(a,b) bdk_bgxx_cmrx_tx_stat9_t
+#define bustype_BDK_BGXX_CMRX_TX_STAT9(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMRX_TX_STAT9(a,b) "BGXX_CMRX_TX_STAT9"
+#define device_bar_BDK_BGXX_CMRX_TX_STAT9(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMRX_TX_STAT9(a,b) (a)
+#define arguments_BDK_BGXX_CMRX_TX_STAT9(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_bad
+ *
+ * BGX CMR Bad Registers
+ */
+union bdk_bgxx_cmr_bad
+{
+    uint64_t u;
+    struct bdk_bgxx_cmr_bad_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t rxb_nxl               : 1;  /**< [  0:  0](R/W1C/H) Receive side LMAC ID \> BGX()_CMR_RX_LMACS. */
+#else /* Word 0 - Little Endian */
+        uint64_t rxb_nxl               : 1;  /**< [  0:  0](R/W1C/H) Receive side LMAC ID \> BGX()_CMR_RX_LMACS. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_cmr_bad_s cn; */
+};
+typedef union bdk_bgxx_cmr_bad bdk_bgxx_cmr_bad_t;
+
+/* Return the CSR address of BGX(a)_CMR_BAD for the chip model detected at
+ * runtime; __bdk_csr_fatal() is invoked when (a) is out of range for every
+ * supported model. */
+static inline uint64_t BDK_BGXX_CMR_BAD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_BAD(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0e0001020ll + 0x1000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x87e0e0001020ll + 0x1000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0e0001020ll + 0x1000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("BGXX_CMR_BAD", 1, a, 0, 0, 0);
+}
+
+/* Per-register lookup metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_BGXX_CMR_BAD(a) bdk_bgxx_cmr_bad_t
+#define bustype_BDK_BGXX_CMR_BAD(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_BAD(a) "BGXX_CMR_BAD"
+#define device_bar_BDK_BGXX_CMR_BAD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_BAD(a) (a)
+#define arguments_BDK_BGXX_CMR_BAD(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_bist_status
+ *
+ * BGX Built-in Self-Test Registers
+ */
+union bdk_bgxx_cmr_bist_status
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t status : 27; /**< [ 26: 0](RO/H) '"BIST results. Hardware sets a bit to 1 for memory that fails; 0 indicates pass or never
+ run.'
+
+ Internal:
+ "\<0\> = bgx#.rxb.infif_gmp.
+ \<1\> = bgx#.rxb.infif_smu.
+ \<2\> = bgx#.rxb.fif_bnk00.
+ \<3\> = bgx#.rxb.fif_bnk01.
+ \<4\> = bgx#.rxb.fif_bnk10.
+ \<5\> = bgx#.rxb.fif_bnk11.
+ \<6\> = bgx#.rxb.pki_skd_fif.
+ \<7\> = bgx#.rxb.nic_skd_fif.
+ \<8\> = bgx#.rxb_mix0_fif.
+ \<9\> = bgx#.rxb_mix1_fif.
+ \<10\> = 0.
+ \<11\> = bgx#.txb_fif_bnk0.
+ \<12\> = bgx#.txb_fif_bnk1.
+ \<13\> = bgx#.txb_skd_m0_pko_fif.
+ \<14\> = bgx#.txb_skd_m1_pko_fif.
+ \<15\> = bgx#.txb_skd_m2_pko_fif.
+ \<16\> = bgx#.txb_skd_m3_pko_fif.
+ \<17\> = bgx#.txb_skd_m0_nic_fif.
+ \<18\> = bgx#.txb_skd_m1_nic_fif.
+ \<19\> = bgx#.txb_skd_m2_nic_fif.
+ \<20\> = bgx#.txb_skd_m3_nic_fif.
+ \<21\> = bgx#.txb_mix0_fif.
+ \<22\> = bgx#.txb_mix1_fif.
+ \<23\> = bgx#.txb_ncsi_fif.
+ \<24\> = 0." */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 27; /**< [ 26: 0](RO/H) '"BIST results. Hardware sets a bit to 1 for memory that fails; 0 indicates pass or never
+ run.'
+
+ Internal:
+ "\<0\> = bgx#.rxb.infif_gmp.
+ \<1\> = bgx#.rxb.infif_smu.
+ \<2\> = bgx#.rxb.fif_bnk00.
+ \<3\> = bgx#.rxb.fif_bnk01.
+ \<4\> = bgx#.rxb.fif_bnk10.
+ \<5\> = bgx#.rxb.fif_bnk11.
+ \<6\> = bgx#.rxb.pki_skd_fif.
+ \<7\> = bgx#.rxb.nic_skd_fif.
+ \<8\> = bgx#.rxb_mix0_fif.
+ \<9\> = bgx#.rxb_mix1_fif.
+ \<10\> = 0.
+ \<11\> = bgx#.txb_fif_bnk0.
+ \<12\> = bgx#.txb_fif_bnk1.
+ \<13\> = bgx#.txb_skd_m0_pko_fif.
+ \<14\> = bgx#.txb_skd_m1_pko_fif.
+ \<15\> = bgx#.txb_skd_m2_pko_fif.
+ \<16\> = bgx#.txb_skd_m3_pko_fif.
+ \<17\> = bgx#.txb_skd_m0_nic_fif.
+ \<18\> = bgx#.txb_skd_m1_nic_fif.
+ \<19\> = bgx#.txb_skd_m2_nic_fif.
+ \<20\> = bgx#.txb_skd_m3_nic_fif.
+ \<21\> = bgx#.txb_mix0_fif.
+ \<22\> = bgx#.txb_mix1_fif.
+ \<23\> = bgx#.txb_ncsi_fif.
+ \<24\> = 0." */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_bist_status_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t status : 25; /**< [ 24: 0](RO/H) '"BIST results. Hardware sets a bit to 1 for memory that fails; 0 indicates pass or never
+ run.'
+
+ Internal:
+ "\<0\> = bgx#.rxb.infif_gmp.
+ \<1\> = bgx#.rxb.infif_smu.
+ \<2\> = bgx#.rxb.fif_bnk00.
+ \<3\> = bgx#.rxb.fif_bnk01.
+ \<4\> = bgx#.rxb.fif_bnk10.
+ \<5\> = bgx#.rxb.fif_bnk11.
+ \<6\> = bgx#.rxb.pki_skd_fif.
+ \<7\> = bgx#.rxb.nic_skd_fif.
+ \<8\> = bgx#.rxb_mix0_fif.
+ \<9\> = bgx#.rxb_mix1_fif.
+ \<10\> = 0.
+ \<11\> = bgx#.txb_fif_bnk0.
+ \<12\> = bgx#.txb_fif_bnk1.
+ \<13\> = bgx#.txb_skd_m0_pko_fif.
+ \<14\> = bgx#.txb_skd_m1_pko_fif.
+ \<15\> = bgx#.txb_skd_m2_pko_fif.
+ \<16\> = bgx#.txb_skd_m3_pko_fif.
+ \<17\> = bgx#.txb_skd_m0_nic_fif.
+ \<18\> = bgx#.txb_skd_m1_nic_fif.
+ \<19\> = bgx#.txb_skd_m2_nic_fif.
+ \<20\> = bgx#.txb_skd_m3_nic_fif.
+ \<21\> = bgx#.txb_mix0_fif.
+ \<22\> = bgx#.txb_mix1_fif.
+ \<23\> = bgx#.txb_ncsi_fif.
+ \<24\> = 0." */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 25; /**< [ 24: 0](RO/H) '"BIST results. Hardware sets a bit to 1 for memory that fails; 0 indicates pass or never
+ run.'
+
+ Internal:
+ "\<0\> = bgx#.rxb.infif_gmp.
+ \<1\> = bgx#.rxb.infif_smu.
+ \<2\> = bgx#.rxb.fif_bnk00.
+ \<3\> = bgx#.rxb.fif_bnk01.
+ \<4\> = bgx#.rxb.fif_bnk10.
+ \<5\> = bgx#.rxb.fif_bnk11.
+ \<6\> = bgx#.rxb.pki_skd_fif.
+ \<7\> = bgx#.rxb.nic_skd_fif.
+ \<8\> = bgx#.rxb_mix0_fif.
+ \<9\> = bgx#.rxb_mix1_fif.
+ \<10\> = 0.
+ \<11\> = bgx#.txb_fif_bnk0.
+ \<12\> = bgx#.txb_fif_bnk1.
+ \<13\> = bgx#.txb_skd_m0_pko_fif.
+ \<14\> = bgx#.txb_skd_m1_pko_fif.
+ \<15\> = bgx#.txb_skd_m2_pko_fif.
+ \<16\> = bgx#.txb_skd_m3_pko_fif.
+ \<17\> = bgx#.txb_skd_m0_nic_fif.
+ \<18\> = bgx#.txb_skd_m1_nic_fif.
+ \<19\> = bgx#.txb_skd_m2_nic_fif.
+ \<20\> = bgx#.txb_skd_m3_nic_fif.
+ \<21\> = bgx#.txb_mix0_fif.
+ \<22\> = bgx#.txb_mix1_fif.
+ \<23\> = bgx#.txb_ncsi_fif.
+ \<24\> = 0." */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_cmr_bist_status_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t status : 25; /**< [ 24: 0](RO/H) '"BIST results. Hardware sets a bit to 1 for memory that fails; 0 indicates pass or never
+ run.'
+
+ Internal:
+ "\<0\> = bgx#.rxb.infif_gmp.
+ \<1\> = bgx#.rxb.infif_smu.
+ \<2\> = bgx#.rxb.fif_bnk00.
+ \<3\> = bgx#.rxb.fif_bnk01.
+ \<4\> = bgx#.rxb.fif_bnk10.
+ \<5\> = bgx#.rxb.fif_bnk11.
+ \<6\> = bgx#.rxb.skd_fif.
+ \<7\> = bgx#.rxb_mix0_fif.
+ \<8\> = bgx#.rxb_mix1_fif.
+ \<9\> = 0.
+ \<10\> = bgx#.txb_fif_bnk0.
+ \<11\> = bgx#.txb_fif_bnk1.
+ \<12\> = bgx#.txb_skd_m0_fif.
+ \<13\> = bgx#.txb_skd_m1_fif.
+ \<14\> = bgx#.txb_skd_m2_fif.
+ \<15\> = bgx#.txb_skd_m3_fif.
+ \<16\> = bgx#.txb_mix0_fif.
+ \<17\> = bgx#.txb_mix1_fif.
+ \<18\> = bgx#.txb_ncsi_fif.
+ \<24:19\> = 0x0." */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 25; /**< [ 24: 0](RO/H) '"BIST results. Hardware sets a bit to 1 for memory that fails; 0 indicates pass or never
+ run.'
+
+ Internal:
+ "\<0\> = bgx#.rxb.infif_gmp.
+ \<1\> = bgx#.rxb.infif_smu.
+ \<2\> = bgx#.rxb.fif_bnk00.
+ \<3\> = bgx#.rxb.fif_bnk01.
+ \<4\> = bgx#.rxb.fif_bnk10.
+ \<5\> = bgx#.rxb.fif_bnk11.
+ \<6\> = bgx#.rxb.skd_fif.
+ \<7\> = bgx#.rxb_mix0_fif.
+ \<8\> = bgx#.rxb_mix1_fif.
+ \<9\> = 0.
+ \<10\> = bgx#.txb_fif_bnk0.
+ \<11\> = bgx#.txb_fif_bnk1.
+ \<12\> = bgx#.txb_skd_m0_fif.
+ \<13\> = bgx#.txb_skd_m1_fif.
+ \<14\> = bgx#.txb_skd_m2_fif.
+ \<15\> = bgx#.txb_skd_m3_fif.
+ \<16\> = bgx#.txb_mix0_fif.
+ \<17\> = bgx#.txb_mix1_fif.
+ \<18\> = bgx#.txb_ncsi_fif.
+ \<24:19\> = 0x0." */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_bgxx_cmr_bist_status_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_27_63 : 37;
+ uint64_t status : 27; /**< [ 26: 0](RO/H) '"BIST results. Hardware sets a bit to 1 for memory that fails; 0 indicates pass or never
+ run.'
+
+ Internal:
+ "\<0\> = bgx#.rxb.infif_gmp.
+ \<1\> = bgx#.rxb.infif_smu.
+ \<2\> = bgx#.rxb.fif_bnk00.
+ \<3\> = bgx#.rxb.fif_bnk01.
+ \<4\> = bgx#.rxb.fif_bnk10.
+ \<5\> = bgx#.rxb.fif_bnk11.
+ \<6\> = bgx#.rxb.pki_skd_fif.
+ \<7\> = bgx#.rxb.nic_skd_fif.
+ \<8\> = bgx#.rxb_mix0_fif.
+ \<9\> = bgx#.rxb_mix1_fif.
+ \<10\> = 0.
+ \<11\> = bgx#.txb_fif_mem0.
+ \<12\> = bgx#.txb_fif_mem1.
+ \<13\> = bgx#.txb_fif_mem2.
+ \<14\> = bgx#.txb_fif_mem3.
+ \<15\> = bgx#.txb_skd_m0_pko_fif.
+ \<16\> = bgx#.txb_skd_m1_pko_fif.
+ \<17\> = bgx#.txb_skd_m2_pko_fif.
+ \<18\> = bgx#.txb_skd_m3_pko_fif.
+ \<19\> = bgx#.txb_skd_m0_nic_fif.
+ \<20\> = bgx#.txb_skd_m1_nic_fif.
+ \<21\> = bgx#.txb_skd_m2_nic_fif.
+ \<22\> = bgx#.txb_skd_m3_nic_fif.
+ \<23\> = bgx#.txb_mix0_fif.
+ \<24\> = bgx#.txb_mix1_fif.
+ \<25\> = bgx#.txb_ncsi_fif.
+ \<26\> = 0." */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 27; /**< [ 26: 0](RO/H) '"BIST results. Hardware sets a bit to 1 for memory that fails; 0 indicates pass or never
+ run.'
+
+ Internal:
+ "\<0\> = bgx#.rxb.infif_gmp.
+ \<1\> = bgx#.rxb.infif_smu.
+ \<2\> = bgx#.rxb.fif_bnk00.
+ \<3\> = bgx#.rxb.fif_bnk01.
+ \<4\> = bgx#.rxb.fif_bnk10.
+ \<5\> = bgx#.rxb.fif_bnk11.
+ \<6\> = bgx#.rxb.pki_skd_fif.
+ \<7\> = bgx#.rxb.nic_skd_fif.
+ \<8\> = bgx#.rxb_mix0_fif.
+ \<9\> = bgx#.rxb_mix1_fif.
+ \<10\> = 0.
+ \<11\> = bgx#.txb_fif_mem0.
+ \<12\> = bgx#.txb_fif_mem1.
+ \<13\> = bgx#.txb_fif_mem2.
+ \<14\> = bgx#.txb_fif_mem3.
+ \<15\> = bgx#.txb_skd_m0_pko_fif.
+ \<16\> = bgx#.txb_skd_m1_pko_fif.
+ \<17\> = bgx#.txb_skd_m2_pko_fif.
+ \<18\> = bgx#.txb_skd_m3_pko_fif.
+ \<19\> = bgx#.txb_skd_m0_nic_fif.
+ \<20\> = bgx#.txb_skd_m1_nic_fif.
+ \<21\> = bgx#.txb_skd_m2_nic_fif.
+ \<22\> = bgx#.txb_skd_m3_nic_fif.
+ \<23\> = bgx#.txb_mix0_fif.
+ \<24\> = bgx#.txb_mix1_fif.
+ \<25\> = bgx#.txb_ncsi_fif.
+ \<26\> = 0." */
+ uint64_t reserved_27_63 : 37;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmr_bist_status bdk_bgxx_cmr_bist_status_t;
+
+/* Compute the physical CSR address of BGX(a)_CMR_BIST_STATUS for the chip
+ * model we are running on. Dies via __bdk_csr_fatal() when the BGX index is
+ * out of range for every supported model.
+ * NOTE(review): the two branches sharing mask 0x1 were merged; this is
+ * equivalent to the generator's output assuming the CAVIUM_IS_MODEL()
+ * predicates are mutually exclusive per die -- confirm against bdk-model.c. */
+static inline uint64_t BDK_BGXX_CMR_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_BIST_STATUS(unsigned long a)
+{
+    /* CN81XX and CN88XX expose two BGX units; CN83XX exposes four. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a <= 1))
+        return 0x87e0e0000460ll + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return 0x87e0e0000460ll + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_CMR_BIST_STATUS", 1, a, 0, 0, 0); /* invalid index/model */
+}
+
+/* Metadata macros for BGX(a)_CMR_BIST_STATUS -- presumably consumed by the
+ * generic BDK_CSR_* register-access helpers; verify against bdk-csr.h. */
+#define typedef_BDK_BGXX_CMR_BIST_STATUS(a) bdk_bgxx_cmr_bist_status_t
+#define bustype_BDK_BGXX_CMR_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_BIST_STATUS(a) "BGXX_CMR_BIST_STATUS"
+#define device_bar_BDK_BGXX_CMR_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_BIST_STATUS(a) (a)
+#define arguments_BDK_BGXX_CMR_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_chan_msk_and
+ *
+ * BGX CMR Backpressure Channel Mask AND Registers
+ * (AND semantics: per the field text below, backpressure asserts only when
+ * every channel selected by a non-zero per-LMAC mask is backpressured.)
+ */
+union bdk_bgxx_cmr_chan_msk_and
+{
+ uint64_t u; /* Whole-register access. */
+ struct bdk_bgxx_cmr_chan_msk_and_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t msk_and : 64; /**< [ 63: 0](R/W) Assert physical backpressure when the backpressure channel vector combined with [MSK_AND]
+ indicates backpressure as follows:
+ _ phys_bp_msk_and = [MSK_AND]\<x:y\> != 0 && (chan_vector\<x:y\> & [MSK_AND]\<x:y\>) ==
+ [MSK_AND]\<x:y\>
+ _ phys_bp = phys_bp_msk_or || phys_bp_msk_and
+
+ x/y are as follows:
+ _ LMAC 0: \<x:y\> = \<15:0\>.
+ _ LMAC 1: \<x:y\> = \<31:16\>.
+ _ LMAC 2: \<x:y\> = \<47:32\>.
+ _ LMAC 3: \<x:y\> = \<63:48\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t msk_and : 64; /**< [ 63: 0](R/W) Assert physical backpressure when the backpressure channel vector combined with [MSK_AND]
+ indicates backpressure as follows:
+ _ phys_bp_msk_and = [MSK_AND]\<x:y\> != 0 && (chan_vector\<x:y\> & [MSK_AND]\<x:y\>) ==
+ [MSK_AND]\<x:y\>
+ _ phys_bp = phys_bp_msk_or || phys_bp_msk_and
+
+ x/y are as follows:
+ _ LMAC 0: \<x:y\> = \<15:0\>.
+ _ LMAC 1: \<x:y\> = \<31:16\>.
+ _ LMAC 2: \<x:y\> = \<47:32\>.
+ _ LMAC 3: \<x:y\> = \<63:48\>. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_chan_msk_and_s cn; */ /* generic layout only; no chip-specific variant */
+};
+typedef union bdk_bgxx_cmr_chan_msk_and bdk_bgxx_cmr_chan_msk_and_t;
+
+/* Physical CSR address of BGX(a)_CMR_CHAN_MSK_AND; fatals on an index that
+ * is invalid for the running chip model.
+ * NOTE(review): the two branches sharing mask 0x1 were merged; equivalent to
+ * the generated form assuming CAVIUM_IS_MODEL() predicates are mutually
+ * exclusive per die -- confirm against bdk-model.c. */
+static inline uint64_t BDK_BGXX_CMR_CHAN_MSK_AND(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_CHAN_MSK_AND(unsigned long a)
+{
+    /* Two BGX units on CN81XX/CN88XX, four on CN83XX. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a <= 1))
+        return 0x87e0e0000450ll + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return 0x87e0e0000450ll + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_CMR_CHAN_MSK_AND", 1, a, 0, 0, 0); /* invalid index/model */
+}
+
+/* Metadata macros for BGX(a)_CMR_CHAN_MSK_AND -- presumably consumed by the
+ * generic BDK_CSR_* register-access helpers; verify against bdk-csr.h. */
+#define typedef_BDK_BGXX_CMR_CHAN_MSK_AND(a) bdk_bgxx_cmr_chan_msk_and_t
+#define bustype_BDK_BGXX_CMR_CHAN_MSK_AND(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_CHAN_MSK_AND(a) "BGXX_CMR_CHAN_MSK_AND"
+#define device_bar_BDK_BGXX_CMR_CHAN_MSK_AND(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_CHAN_MSK_AND(a) (a)
+#define arguments_BDK_BGXX_CMR_CHAN_MSK_AND(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_chan_msk_or
+ *
+ * BGX Backpressure Channel Mask OR Registers
+ * (OR semantics: per the field text below, backpressure asserts when any
+ * channel selected by the per-LMAC mask is backpressured.)
+ */
+union bdk_bgxx_cmr_chan_msk_or
+{
+ uint64_t u; /* Whole-register access. */
+ struct bdk_bgxx_cmr_chan_msk_or_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t msk_or : 64; /**< [ 63: 0](R/W) Assert physical backpressure when the backpressure channel vector combined with [MSK_OR]
+ indicates backpressure as follows:
+
+ _ phys_bp_msk_or = (chan_vector\<x:y\> & [MSK_OR]\<x:y\>) != 0
+ _ phys_bp = phys_bp_msk_or || phys_bp_msk_and
+
+ x/y are as follows:
+ _ LMAC 0: \<x:y\> = \<15:0\>.
+ _ LMAC 1: \<x:y\> = \<31:16\>.
+ _ LMAC 2: \<x:y\> = \<47:32\>.
+ _ LMAC 3: \<x:y\> = \<63:48\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t msk_or : 64; /**< [ 63: 0](R/W) Assert physical backpressure when the backpressure channel vector combined with [MSK_OR]
+ indicates backpressure as follows:
+
+ _ phys_bp_msk_or = (chan_vector\<x:y\> & [MSK_OR]\<x:y\>) != 0
+ _ phys_bp = phys_bp_msk_or || phys_bp_msk_and
+
+ x/y are as follows:
+ _ LMAC 0: \<x:y\> = \<15:0\>.
+ _ LMAC 1: \<x:y\> = \<31:16\>.
+ _ LMAC 2: \<x:y\> = \<47:32\>.
+ _ LMAC 3: \<x:y\> = \<63:48\>. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_chan_msk_or_s cn; */ /* generic layout only; no chip-specific variant */
+};
+typedef union bdk_bgxx_cmr_chan_msk_or bdk_bgxx_cmr_chan_msk_or_t;
+
+/* Physical CSR address of BGX(a)_CMR_CHAN_MSK_OR; fatals on an index that
+ * is invalid for the running chip model.
+ * NOTE(review): the two branches sharing mask 0x1 were merged; equivalent to
+ * the generated form assuming CAVIUM_IS_MODEL() predicates are mutually
+ * exclusive per die -- confirm against bdk-model.c. */
+static inline uint64_t BDK_BGXX_CMR_CHAN_MSK_OR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_CHAN_MSK_OR(unsigned long a)
+{
+    /* Two BGX units on CN81XX/CN88XX, four on CN83XX. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a <= 1))
+        return 0x87e0e0000458ll + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return 0x87e0e0000458ll + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_CMR_CHAN_MSK_OR", 1, a, 0, 0, 0); /* invalid index/model */
+}
+
+/* Metadata macros for BGX(a)_CMR_CHAN_MSK_OR -- presumably consumed by the
+ * generic BDK_CSR_* register-access helpers; verify against bdk-csr.h. */
+#define typedef_BDK_BGXX_CMR_CHAN_MSK_OR(a) bdk_bgxx_cmr_chan_msk_or_t
+#define bustype_BDK_BGXX_CMR_CHAN_MSK_OR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_CHAN_MSK_OR(a) "BGXX_CMR_CHAN_MSK_OR"
+#define device_bar_BDK_BGXX_CMR_CHAN_MSK_OR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_CHAN_MSK_OR(a) (a)
+#define arguments_BDK_BGXX_CMR_CHAN_MSK_OR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_eco
+ *
+ * INTERNAL: BGX ECO Registers
+ * (Spare bits reserved for silicon ECO fix-ups; not for normal software use.)
+ */
+union bdk_bgxx_cmr_eco
+{
+ uint64_t u; /* Whole-register access. */
+ struct bdk_bgxx_cmr_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t eco_ro : 32; /**< [ 63: 32](RO) Internal:
+ Reserved for ECO usage. */
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) Internal:
+ Reserved for ECO usage. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) Internal:
+ Reserved for ECO usage. */
+ uint64_t eco_ro : 32; /**< [ 63: 32](RO) Internal:
+ Reserved for ECO usage. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_eco_s cn; */ /* generic layout only; no chip-specific variant */
+};
+typedef union bdk_bgxx_cmr_eco bdk_bgxx_cmr_eco_t;
+
+/* Physical CSR address of BGX(a)_CMR_ECO; fatals on an index that is
+ * invalid for the running chip model. Note this register only exists on
+ * CN88XX pass 2.x (CAVIUM_CN88XX_PASS2_X), unlike its siblings.
+ * NOTE(review): the two branches sharing mask 0x1 were merged; equivalent to
+ * the generated form assuming CAVIUM_IS_MODEL() predicates are mutually
+ * exclusive per die -- confirm against bdk-model.c. */
+static inline uint64_t BDK_BGXX_CMR_ECO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_ECO(unsigned long a)
+{
+    /* Two BGX units on CN81XX and CN88XX pass 2.x, four on CN83XX. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X)) && (a <= 1))
+        return 0x87e0e0001028ll + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return 0x87e0e0001028ll + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_CMR_ECO", 1, a, 0, 0, 0); /* invalid index/model */
+}
+
+/* Metadata macros for BGX(a)_CMR_ECO -- presumably consumed by the generic
+ * BDK_CSR_* register-access helpers; verify against bdk-csr.h. */
+#define typedef_BDK_BGXX_CMR_ECO(a) bdk_bgxx_cmr_eco_t
+#define bustype_BDK_BGXX_CMR_ECO(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_ECO(a) "BGXX_CMR_ECO"
+#define device_bar_BDK_BGXX_CMR_ECO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_ECO(a) (a)
+#define arguments_BDK_BGXX_CMR_ECO(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_global_config
+ *
+ * BGX CMR Global Configuration Register
+ * These registers configure the global CMR, PCS, and MAC.
+ */
+union bdk_bgxx_cmr_global_config
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_global_config_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t cmr_x2p1_reset : 1; /**< [ 11: 11](R/W) If the master block connected to X2P interface 1 is reset, software also needs
+ to reset the X2P interface in the BGX by setting this bit. It resets the X2P
+ interface state in the BGX (skid FIFO and pending requests to the master block)
+ and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB FIFOs it will
+ also impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP]
+ bit first before setting this bit.
+
+ Clearing this does not reset the X2P interface nor NCSI interface. After the
+ master block comes out of reset, software should clear this bit. */
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Interface reset for the CMR NCSI block.
+ Upon power up the CMR NCSI is in reset and the companion CNXXXX NCSI block will be
+ commanded by the
+ external BMC to enable one of the CNXXXX BGX NCSI interfaces for passing network traffic.
+ Only one NCSI interface can be enabled in CNXXXX. The BMC/NCSI will then proceed to
+ configure
+ the rest of the BGX csr for pass through traffic.
+
+ When set, will reset the CMR NCSI interface effectively disabling it at a traffic boundary
+ should traffic be flowing. This bit will not reset the main RXB fifos. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) NCSI drop.
+ 1 = Cleanly drop traffic going into the NCSI block of BGX. Must set asserted
+ with with [CMR_X2P_RESET]=1 (in the same write operation) to avoid partial packets
+ to the NCSI interface while performing a X2P partner reset.
+ 0 = Allow traffic to flow through the NCSI block. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Logical MAC ID that carries NCSI traffic for both RX and TX side of CMR. On the RX side
+ is
+ also the LMAC_ID that is eligible for steering. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](R/W) A setting of 0 means the BGX will operate in non-interleaved mode where there is 1 packet
+ from a given lmac in flight on the X2P interface to TNS/NIC. A setting of 1 means the BGX
+ will operate in interleaved mode where each valid consecutive cycle on the X2P interface
+ may contain words from different lmacs. In other words there will be multiple packets in
+ flight from different lmacs at the same time. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the NIC block is reset, software also needs to reset the X2P interface in the
+ BGX by
+ setting this bit to 1. It resets the X2P interface state in the BGX (skid FIFO and pending
+ requests to NIC) and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface.
+
+ Setting this bit to 0 does not reset the X2P interface.
+ After NIC comes out of reset, software should clear this bit. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the NIC block is reset, software also needs to reset the X2P interface in the
+ BGX by
+ setting this bit to 1. It resets the X2P interface state in the BGX (skid FIFO and pending
+ requests to NIC) and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface.
+
+ Setting this bit to 0 does not reset the X2P interface.
+ After NIC comes out of reset, software should clear this bit. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](R/W) A setting of 0 means the BGX will operate in non-interleaved mode where there is 1 packet
+ from a given lmac in flight on the X2P interface to TNS/NIC. A setting of 1 means the BGX
+ will operate in interleaved mode where each valid consecutive cycle on the X2P interface
+ may contain words from different lmacs. In other words there will be multiple packets in
+ flight from different lmacs at the same time. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Logical MAC ID that carries NCSI traffic for both RX and TX side of CMR. On the RX side
+ is
+ also the LMAC_ID that is eligible for steering. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) NCSI drop.
+ 1 = Cleanly drop traffic going into the NCSI block of BGX. Must set asserted
+ with with [CMR_X2P_RESET]=1 (in the same write operation) to avoid partial packets
+ to the NCSI interface while performing a X2P partner reset.
+ 0 = Allow traffic to flow through the NCSI block. */
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Interface reset for the CMR NCSI block.
+ Upon power up the CMR NCSI is in reset and the companion CNXXXX NCSI block will be
+ commanded by the
+ external BMC to enable one of the CNXXXX BGX NCSI interfaces for passing network traffic.
+ Only one NCSI interface can be enabled in CNXXXX. The BMC/NCSI will then proceed to
+ configure
+ the rest of the BGX csr for pass through traffic.
+
+ When set, will reset the CMR NCSI interface effectively disabling it at a traffic boundary
+ should traffic be flowing. This bit will not reset the main RXB fifos. */
+ uint64_t cmr_x2p1_reset : 1; /**< [ 11: 11](R/W) If the master block connected to X2P interface 1 is reset, software also needs
+ to reset the X2P interface in the BGX by setting this bit. It resets the X2P
+ interface state in the BGX (skid FIFO and pending requests to the master block)
+ and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB FIFOs it will
+ also impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP]
+ bit first before setting this bit.
+
+ Clearing this does not reset the X2P interface nor NCSI interface. After the
+ master block comes out of reset, software should clear this bit. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_global_config_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Interface reset for the CMR NCSI block.
+ Upon power up the CMR NCSI is in reset and the companion CNXXXX NCSI block will be
+ commanded by the
+ external BMC to enable one of the CNXXXX BGX NCSI interfaces for passing network traffic.
+ Only one NCSI interface can be enabled in CNXXXX. The BMC/NCSI will then proceed to
+ configure
+ the rest of the BGX csr for pass through traffic.
+
+ When set, will reset the CMR NCSI interface effectively disabling it at a traffic boundary
+ should traffic be flowing. This bit will not reset the main RXB fifos. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) NCSI drop.
+ 1 = Cleanly drop traffic going into the NCSI block of BGX. Must set asserted
+ with with [CMR_X2P_RESET]=1 (in the same write operation) to avoid partial packets
+ to the NCSI interface while performing a X2P partner reset.
+ 0 = Allow traffic to flow through the NCSI block. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Logical MAC ID that carries NCSI traffic for both RX and TX side of CMR. On the RX side
+ is
+ also the LMAC_ID that is eligible for steering. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](R/W) A setting of 0 means the BGX will operate in non-interleaved mode where there is 1 packet
+ from a given lmac in flight on the X2P interface to TNS/NIC. A setting of 1 means the BGX
+ will operate in interleaved mode where each valid consecutive cycle on the X2P interface
+ may contain words from different lmacs. In other words there will be multiple packets in
+ flight from different lmacs at the same time. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the NIC or TNS block is reset, software also needs to reset the X2P interface in the
+ BGX by
+ setting this bit to 1. It resets the X2P interface state in the BGX (skid FIFO and pending
+ requests to NIC) and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB fifos it will also
+ impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP] bit first before
+ setting this bit.
+
+ Setting this bit to 0 does not reset the X2P interface nor NCSI interface.
+ After NIC/TNS comes out of reset, software should clear this bit. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the NIC or TNS block is reset, software also needs to reset the X2P interface in the
+ BGX by
+ setting this bit to 1. It resets the X2P interface state in the BGX (skid FIFO and pending
+ requests to NIC) and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB fifos it will also
+ impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP] bit first before
+ setting this bit.
+
+ Setting this bit to 0 does not reset the X2P interface nor NCSI interface.
+ After NIC/TNS comes out of reset, software should clear this bit. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](R/W) A setting of 0 means the BGX will operate in non-interleaved mode where there is 1 packet
+ from a given lmac in flight on the X2P interface to TNS/NIC. A setting of 1 means the BGX
+ will operate in interleaved mode where each valid consecutive cycle on the X2P interface
+ may contain words from different lmacs. In other words there will be multiple packets in
+ flight from different lmacs at the same time. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Logical MAC ID that carries NCSI traffic for both RX and TX side of CMR. On the RX side
+ is
+ also the LMAC_ID that is eligible for steering. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) NCSI drop.
+ 1 = Cleanly drop traffic going into the NCSI block of BGX. Must set asserted
+ with with [CMR_X2P_RESET]=1 (in the same write operation) to avoid partial packets
+ to the NCSI interface while performing a X2P partner reset.
+ 0 = Allow traffic to flow through the NCSI block. */
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Interface reset for the CMR NCSI block.
+ Upon power up the CMR NCSI is in reset and the companion CNXXXX NCSI block will be
+ commanded by the
+ external BMC to enable one of the CNXXXX BGX NCSI interfaces for passing network traffic.
+ Only one NCSI interface can be enabled in CNXXXX. The BMC/NCSI will then proceed to
+ configure
+ the rest of the BGX csr for pass through traffic.
+
+ When set, will reset the CMR NCSI interface effectively disabling it at a traffic boundary
+ should traffic be flowing. This bit will not reset the main RXB fifos. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_bgxx_cmr_global_config_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Reserved. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) Reserved. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Reserved. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](RAZ) Reserved. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the NIC block is reset, software also needs to reset the X2P interface in the
+ BGX by
+ setting this bit to 1. It resets the X2P interface state in the BGX (skid FIFO and pending
+ requests to NIC) and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface.
+
+ Setting this bit to 0 does not reset the X2P interface.
+ After NIC comes out of reset, software should clear this bit. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the NIC block is reset, software also needs to reset the X2P interface in the
+ BGX by
+ setting this bit to 1. It resets the X2P interface state in the BGX (skid FIFO and pending
+ requests to NIC) and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface.
+
+ Setting this bit to 0 does not reset the X2P interface.
+ After NIC comes out of reset, software should clear this bit. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](RAZ) Reserved. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Reserved. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) Reserved. */
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Reserved. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_cmr_global_config_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t cmr_x2p1_reset : 1; /**< [ 11: 11](R/W) If the master block connected to X2P interface 1 is reset, software also needs
+ to reset the X2P interface in the BGX by setting this bit. It resets the X2P
+ interface state in the BGX (skid FIFO and pending requests to the master block)
+ and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB FIFOs it will
+ also impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP]
+ bit first before setting this bit.
+
+ Clearing this does not reset the X2P interface nor NCSI interface. After the
+ master block comes out of reset, software should clear this bit. */
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Interface reset for the CMR NCSI block.
+ Upon power up the CMR NCSI is in reset and the companion CNXXXX NCSI block will be
+ commanded by the
+ external BMC to enable one of the CNXXXX BGX NCSI interfaces for passing network traffic.
+ Only one NCSI interface can be enabled in CNXXXX. The BMC/NCSI will then proceed to
+ configure
+ the rest of the BGX csr for pass through traffic.
+
+ When set, will reset the CMR NCSI interface effectively disabling it at a traffic boundary
+ should traffic be flowing. This bit will not reset the main RXB fifos. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) NCSI drop.
+ 1 = Cleanly drop traffic going into the NCSI block of BGX. Must set asserted
+ with with [CMR_X2P_RESET]=1 (in the same write operation) to avoid partial packets
+ to the NCSI interface while performing a X2P partner reset.
+ 0 = Allow traffic to flow through the NCSI block. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Logical MAC ID that carries NCSI traffic for both RX and TX side of CMR. On the RX side
+ is
+ also the LMAC_ID that is eligible for steering. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](RAZ) Reserved. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the master block connected to X2P interface 0 is reset, software also needs
+ to reset the X2P interface in the BGX by setting this bit. It resets the X2P
+ interface state in the BGX (skid FIFO and pending requests to the master block)
+ and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB FIFOs it will
+ also impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP]
+ bit first before setting this bit.
+
+ Clearing this does not reset the X2P interface nor NCSI interface. After the
+ master block comes out of reset, software should clear this bit. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the master block connected to X2P interface 0 is reset, software also needs
+ to reset the X2P interface in the BGX by setting this bit. It resets the X2P
+ interface state in the BGX (skid FIFO and pending requests to the master block)
+ and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB FIFOs it will
+ also impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP]
+ bit first before setting this bit.
+
+ Clearing this does not reset the X2P interface nor NCSI interface. After the
+ master block comes out of reset, software should clear this bit. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](RAZ) Reserved. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Logical MAC ID that carries NCSI traffic for both RX and TX side of CMR. On the RX side
+ is
+ also the LMAC_ID that is eligible for steering. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) NCSI drop.
+ 1 = Cleanly drop traffic going into the NCSI block of BGX. Must set asserted
+ with with [CMR_X2P_RESET]=1 (in the same write operation) to avoid partial packets
+ to the NCSI interface while performing a X2P partner reset.
+ 0 = Allow traffic to flow through the NCSI block. */
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Interface reset for the CMR NCSI block.
+ Upon power up the CMR NCSI is in reset and the companion CNXXXX NCSI block will be
+ commanded by the
+ external BMC to enable one of the CNXXXX BGX NCSI interfaces for passing network traffic.
+ Only one NCSI interface can be enabled in CNXXXX. The BMC/NCSI will then proceed to
+ configure
+ the rest of the BGX csr for pass through traffic.
+
+ When set, will reset the CMR NCSI interface effectively disabling it at a traffic boundary
+ should traffic be flowing. This bit will not reset the main RXB fifos. */
+ uint64_t cmr_x2p1_reset : 1; /**< [ 11: 11](R/W) If the master block connected to X2P interface 1 is reset, software also needs
+ to reset the X2P interface in the BGX by setting this bit. It resets the X2P
+ interface state in the BGX (skid FIFO and pending requests to the master block)
+ and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB FIFOs it will
+ also impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP]
+ bit first before setting this bit.
+
+ Clearing this does not reset the X2P interface nor NCSI interface. After the
+ master block comes out of reset, software should clear this bit. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_bgxx_cmr_global_config_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Interface reset for the CMR NCSI block.
+ Upon power up the CMR NCSI is in reset and the companion CNXXXX NCSI block will be
+ commanded by the
+ external BMC to enable one of the CNXXXX BGX NCSI interfaces for passing network traffic.
+ Only one NCSI interface can be enabled in CNXXXX. The BMC/NCSI will then proceed to
+ configure
+ the rest of the BGX csr for pass through traffic.
+
+ When set, will reset the CMR NCSI interface effectively disabling it at a traffic boundary
+ should traffic be flowing. This bit will not reset the main RXB fifos. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) NCSI drop.
+ 1 = Cleanly drop traffic going into the NCSI block of BGX. Must set asserted
+ with [CMR_X2P_RESET]=1 (in the same write operation) to avoid partial packets
+ to the NCSI interface while performing a X2P partner reset.
+ 0 = Allow traffic to flow through the NCSI block. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Logical MAC ID that carries NCSI traffic for both RX and TX side of CMR. On the RX side
+ is
+ also the LMAC_ID that is eligible for steering. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](RAZ) Reserved. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the NIC or TNS block is reset, software also needs to reset the X2P interface in the
+ BGX by
+ setting this bit to 1. It resets the X2P interface state in the BGX (skid FIFO and pending
+ requests to NIC) and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB fifos it will also
+ impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP] bit first before
+ setting this bit.
+
+ Setting this bit to 0 does not reset the X2P interface nor NCSI interface.
+ After NIC/TNS comes out of reset, software should clear this bit. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t pmux_sds_sel : 1; /**< [ 0: 0](R/W) SerDes/GSER output select. Must be 0. */
+ uint64_t bgx_clk_enable : 1; /**< [ 1: 1](R/W) The global clock enable for BGX. Setting this bit overrides clock enables set by
+ BGX()_CMR()_CONFIG[ENABLE] and BGX()_CMR()_CONFIG[LMAC_TYPE], essentially
+ turning on clocks for the entire BGX. Setting this bit to 0 results in not overriding
+ clock enables set by BGX()_CMR()_CONFIG[ENABLE] and
+ BGX()_CMR()_CONFIG[LMAC_TYPE]. */
+ uint64_t cmr_x2p_reset : 1; /**< [ 2: 2](R/W) If the NIC or TNS block is reset, software also needs to reset the X2P interface in the
+ BGX by
+ setting this bit to 1. It resets the X2P interface state in the BGX (skid FIFO and pending
+ requests to NIC) and prevents the RXB FIFOs for all LMACs from pushing data to the
+ interface. Because the X2P and NCSI interfaces share the main RXB fifos it will also
+ impact the NCSI interface therefore it is required to set [CMR_NCSI_DROP] bit first before
+ setting this bit.
+
+ Setting this bit to 0 does not reset the X2P interface nor NCSI interface.
+ After NIC/TNS comes out of reset, software should clear this bit. */
+ uint64_t cmr_mix0_reset : 1; /**< [ 3: 3](R/W) Must be 0. */
+ uint64_t cmr_mix1_reset : 1; /**< [ 4: 4](R/W) Must be 0. */
+ uint64_t interleave_mode : 1; /**< [ 5: 5](RAZ) Reserved. */
+ uint64_t fcs_strip : 1; /**< [ 6: 6](R/W) A setting of 1 means the BGX strip the FCS bytes of every packet. For packets less than 4
+ bytes, the packet will be removed.
+ A setting of 0 means the BGX will not modify or remove the FCS bytes. */
+ uint64_t ncsi_lmac_id : 2; /**< [ 8: 7](R/W) Logical MAC ID that carries NCSI traffic for both RX and TX side of CMR. On the RX side
+ is
+ also the LMAC_ID that is eligible for steering. */
+ uint64_t cmr_ncsi_drop : 1; /**< [ 9: 9](R/W) NCSI drop.
+ 1 = Cleanly drop traffic going into the NCSI block of BGX. Must set asserted
+ with [CMR_X2P_RESET]=1 (in the same write operation) to avoid partial packets
+ to the NCSI interface while performing a X2P partner reset.
+ 0 = Allow traffic to flow through the NCSI block. */
+ uint64_t cmr_ncsi_reset : 1; /**< [ 10: 10](R/W) Interface reset for the CMR NCSI block.
+ Upon power up the CMR NCSI is in reset and the companion CNXXXX NCSI block will be
+ commanded by the
+ external BMC to enable one of the CNXXXX BGX NCSI interfaces for passing network traffic.
+ Only one NCSI interface can be enabled in CNXXXX. The BMC/NCSI will then proceed to
+ configure
+ the rest of the BGX csr for pass through traffic.
+
+ When set, will reset the CMR NCSI interface effectively disabling it at a traffic boundary
+ should traffic be flowing. This bit will not reset the main RXB fifos. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_bgxx_cmr_global_config bdk_bgxx_cmr_global_config_t;
+
+/* Return the physical CSR address of BGX(a)_CMR_GLOBAL_CONFIG for the chip
+   model detected at runtime.  All supported models place the register at
+   base 0x87e0e0000008 with a 0x1000000 stride per BGX instance; only the
+   number of instances differs (2 on CN81XX/CN88XX, 4 on CN83XX), which is
+   why the range check and the index mask differ per model.  An out-of-range
+   instance index falls through to __bdk_csr_fatal(), which reports the bad
+   argument (presumably marked noreturn -- TODO confirm, otherwise control
+   reaching the end of this non-void function would be undefined behavior). */
+static inline uint64_t BDK_BGXX_CMR_GLOBAL_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_GLOBAL_CONFIG(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0e0000008ll + 0x1000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x87e0e0000008ll + 0x1000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0e0000008ll + 0x1000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("BGXX_CMR_GLOBAL_CONFIG", 1, a, 0, 0, 0);
+}
+
+/* Generated accessor plumbing consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_CMR_GLOBAL_CONFIG(a) bdk_bgxx_cmr_global_config_t
+#define bustype_BDK_BGXX_CMR_GLOBAL_CONFIG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_GLOBAL_CONFIG(a) "BGXX_CMR_GLOBAL_CONFIG"
+#define device_bar_BDK_BGXX_CMR_GLOBAL_CONFIG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_GLOBAL_CONFIG(a) (a)
+#define arguments_BDK_BGXX_CMR_GLOBAL_CONFIG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_mem_ctrl
+ *
+ * BGX CMR Memory Control Register
+ */
+union bdk_bgxx_cmr_mem_ctrl
+{
+    uint64_t u;
+    /* NOTE(review): the per-chip variants below describe the same 64-bit
+       register word; they differ only in which TXB main-FIFO memories exist
+       (bank0/bank1 at [15:20] on CN81XX/CN88XX vs. mem0..mem3 at
+       [15:20]/[36:41] on CN83XX).  Field order and widths are
+       hardware-defined -- do not reorder or repack. */
+    struct bdk_bgxx_cmr_mem_ctrl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_42_63 : 22;
+        uint64_t txb_fif_m3_syn : 2; /**< [ 41: 40](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem3. */
+        uint64_t txb_fif_m3_cdis : 1; /**< [ 39: 39](R/W) ECC-correction disable for the TXB main mem3. */
+        uint64_t txb_fif_m2_syn : 2; /**< [ 38: 37](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem2. */
+        uint64_t txb_fif_m2_cdis : 1; /**< [ 36: 36](R/W) ECC-correction disable for the TXB main mem2. */
+        uint64_t txb_ncsi_synd : 2; /**< [ 35: 34](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_ncsi_cor_dis : 1; /**< [ 33: 33](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m3_synd : 2; /**< [ 32: 31](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m3_cor_dis : 1; /**< [ 30: 30](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m2_synd : 2; /**< [ 29: 28](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m2_cor_dis : 1; /**< [ 27: 27](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m1_synd : 2; /**< [ 26: 25](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m1_cor_dis : 1; /**< [ 24: 24](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m0_synd : 2; /**< [ 23: 22](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO. */
+        uint64_t txb_skid_m0_cor_dis : 1; /**< [ 21: 21](R/W) ECC-correction disable for the TXB SKID FIFO. */
+        uint64_t reserved_15_20 : 6;
+        uint64_t rxb_skid_synd : 2; /**< [ 14: 13](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB SKID FIFO. */
+        uint64_t rxb_skid_cor_dis : 1; /**< [ 12: 12](R/W) ECC-correction disable for the RXB SKID FIFO. */
+        uint64_t rxb_fif_bk1_syn1 : 2; /**< [ 11: 10](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_cdis1 : 1; /**< [  9:  9](R/W) ECC-correction disable for the RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_syn0 : 2; /**< [  8:  7](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_cdis0 : 1; /**< [  6:  6](R/W) ECC-correction disable for the RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk0_syn1 : 2; /**< [  5:  4](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_cdis1 : 1; /**< [  3:  3](R/W) ECC-correction disable for the RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_syn0 : 2; /**< [  2:  1](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_cdis0 : 1; /**< [  0:  0](R/W) ECC-correction disable for the RXB main bank0 srf0. */
+#else /* Word 0 - Little Endian */
+        uint64_t rxb_fif_bk0_cdis0 : 1; /**< [  0:  0](R/W) ECC-correction disable for the RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_syn0 : 2; /**< [  2:  1](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_cdis1 : 1; /**< [  3:  3](R/W) ECC-correction disable for the RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_syn1 : 2; /**< [  5:  4](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk1_cdis0 : 1; /**< [  6:  6](R/W) ECC-correction disable for the RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_syn0 : 2; /**< [  8:  7](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_cdis1 : 1; /**< [  9:  9](R/W) ECC-correction disable for the RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_syn1 : 2; /**< [ 11: 10](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf1. */
+        uint64_t rxb_skid_cor_dis : 1; /**< [ 12: 12](R/W) ECC-correction disable for the RXB SKID FIFO. */
+        uint64_t rxb_skid_synd : 2; /**< [ 14: 13](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB SKID FIFO. */
+        uint64_t reserved_15_20 : 6;
+        uint64_t txb_skid_m0_cor_dis : 1; /**< [ 21: 21](R/W) ECC-correction disable for the TXB SKID FIFO. */
+        uint64_t txb_skid_m0_synd : 2; /**< [ 23: 22](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO. */
+        uint64_t txb_skid_m1_cor_dis : 1; /**< [ 24: 24](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m1_synd : 2; /**< [ 26: 25](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m2_cor_dis : 1; /**< [ 27: 27](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m2_synd : 2; /**< [ 29: 28](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m3_cor_dis : 1; /**< [ 30: 30](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m3_synd : 2; /**< [ 32: 31](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_ncsi_cor_dis : 1; /**< [ 33: 33](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_ncsi_synd : 2; /**< [ 35: 34](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_fif_m2_cdis : 1; /**< [ 36: 36](R/W) ECC-correction disable for the TXB main mem2. */
+        uint64_t txb_fif_m2_syn : 2; /**< [ 38: 37](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem2. */
+        uint64_t txb_fif_m3_cdis : 1; /**< [ 39: 39](R/W) ECC-correction disable for the TXB main mem3. */
+        uint64_t txb_fif_m3_syn : 2; /**< [ 41: 40](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem3. */
+        uint64_t reserved_42_63 : 22;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_bgxx_cmr_mem_ctrl_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t txb_ncsi_synd : 2; /**< [ 35: 34](R/W) Reserved. */
+        uint64_t txb_ncsi_cor_dis : 1; /**< [ 33: 33](R/W) Reserved. */
+        uint64_t txb_skid_m3_synd : 2; /**< [ 32: 31](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m3_cor_dis : 1; /**< [ 30: 30](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m2_synd : 2; /**< [ 29: 28](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m2_cor_dis : 1; /**< [ 27: 27](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m1_synd : 2; /**< [ 26: 25](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m1_cor_dis : 1; /**< [ 24: 24](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m0_synd : 2; /**< [ 23: 22](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO. */
+        uint64_t txb_skid_m0_cor_dis : 1; /**< [ 21: 21](R/W) ECC-correction disable for the TXB SKID FIFO. */
+        uint64_t txb_fif_bk1_syn : 2; /**< [ 20: 19](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main bank1. */
+        uint64_t txb_fif_bk1_cdis : 1; /**< [ 18: 18](R/W) ECC-correction disable for the TXB main bank1. */
+        uint64_t txb_fif_bk0_syn : 2; /**< [ 17: 16](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main bank0. */
+        uint64_t txb_fif_bk0_cdis : 1; /**< [ 15: 15](R/W) ECC-correction disable for the TXB main bank0. */
+        uint64_t rxb_skid_synd : 2; /**< [ 14: 13](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB SKID FIFO. */
+        uint64_t rxb_skid_cor_dis : 1; /**< [ 12: 12](R/W) ECC-correction disable for the RXB SKID FIFO. */
+        uint64_t rxb_fif_bk1_syn1 : 2; /**< [ 11: 10](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_cdis1 : 1; /**< [  9:  9](R/W) ECC-correction disable for the RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_syn0 : 2; /**< [  8:  7](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_cdis0 : 1; /**< [  6:  6](R/W) ECC-correction disable for the RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk0_syn1 : 2; /**< [  5:  4](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_cdis1 : 1; /**< [  3:  3](R/W) ECC-correction disable for the RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_syn0 : 2; /**< [  2:  1](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_cdis0 : 1; /**< [  0:  0](R/W) ECC-correction disable for the RXB main bank0 srf0. */
+#else /* Word 0 - Little Endian */
+        uint64_t rxb_fif_bk0_cdis0 : 1; /**< [  0:  0](R/W) ECC-correction disable for the RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_syn0 : 2; /**< [  2:  1](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_cdis1 : 1; /**< [  3:  3](R/W) ECC-correction disable for the RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_syn1 : 2; /**< [  5:  4](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk1_cdis0 : 1; /**< [  6:  6](R/W) ECC-correction disable for the RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_syn0 : 2; /**< [  8:  7](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_cdis1 : 1; /**< [  9:  9](R/W) ECC-correction disable for the RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_syn1 : 2; /**< [ 11: 10](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf1. */
+        uint64_t rxb_skid_cor_dis : 1; /**< [ 12: 12](R/W) ECC-correction disable for the RXB SKID FIFO. */
+        uint64_t rxb_skid_synd : 2; /**< [ 14: 13](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB SKID FIFO. */
+        uint64_t txb_fif_bk0_cdis : 1; /**< [ 15: 15](R/W) ECC-correction disable for the TXB main bank0. */
+        uint64_t txb_fif_bk0_syn : 2; /**< [ 17: 16](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main bank0. */
+        uint64_t txb_fif_bk1_cdis : 1; /**< [ 18: 18](R/W) ECC-correction disable for the TXB main bank1. */
+        uint64_t txb_fif_bk1_syn : 2; /**< [ 20: 19](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main bank1. */
+        uint64_t txb_skid_m0_cor_dis : 1; /**< [ 21: 21](R/W) ECC-correction disable for the TXB SKID FIFO. */
+        uint64_t txb_skid_m0_synd : 2; /**< [ 23: 22](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO. */
+        uint64_t txb_skid_m1_cor_dis : 1; /**< [ 24: 24](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m1_synd : 2; /**< [ 26: 25](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m2_cor_dis : 1; /**< [ 27: 27](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m2_synd : 2; /**< [ 29: 28](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m3_cor_dis : 1; /**< [ 30: 30](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m3_synd : 2; /**< [ 32: 31](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_ncsi_cor_dis : 1; /**< [ 33: 33](R/W) Reserved. */
+        uint64_t txb_ncsi_synd : 2; /**< [ 35: 34](R/W) Reserved. */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } cn81xx;
+    struct bdk_bgxx_cmr_mem_ctrl_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t txb_ncsi_synd : 2; /**< [ 35: 34](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_ncsi_cor_dis : 1; /**< [ 33: 33](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m3_synd : 2; /**< [ 32: 31](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m3_cor_dis : 1; /**< [ 30: 30](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m2_synd : 2; /**< [ 29: 28](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m2_cor_dis : 1; /**< [ 27: 27](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m1_synd : 2; /**< [ 26: 25](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m1_cor_dis : 1; /**< [ 24: 24](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m0_synd : 2; /**< [ 23: 22](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO. */
+        uint64_t txb_skid_m0_cor_dis : 1; /**< [ 21: 21](R/W) ECC-correction disable for the TXB SKID FIFO. */
+        uint64_t txb_fif_bk1_syn : 2; /**< [ 20: 19](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main bank1. */
+        uint64_t txb_fif_bk1_cdis : 1; /**< [ 18: 18](R/W) ECC-correction disable for the TXB main bank1. */
+        uint64_t txb_fif_bk0_syn : 2; /**< [ 17: 16](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main bank0. */
+        uint64_t txb_fif_bk0_cdis : 1; /**< [ 15: 15](R/W) ECC-correction disable for the TXB main bank0. */
+        uint64_t rxb_skid_synd : 2; /**< [ 14: 13](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB SKID FIFO. */
+        uint64_t rxb_skid_cor_dis : 1; /**< [ 12: 12](R/W) ECC-correction disable for the RXB SKID FIFO. */
+        uint64_t rxb_fif_bk1_syn1 : 2; /**< [ 11: 10](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_cdis1 : 1; /**< [  9:  9](R/W) ECC-correction disable for the RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_syn0 : 2; /**< [  8:  7](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_cdis0 : 1; /**< [  6:  6](R/W) ECC-correction disable for the RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk0_syn1 : 2; /**< [  5:  4](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_cdis1 : 1; /**< [  3:  3](R/W) ECC-correction disable for the RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_syn0 : 2; /**< [  2:  1](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_cdis0 : 1; /**< [  0:  0](R/W) ECC-correction disable for the RXB main bank0 srf0. */
+#else /* Word 0 - Little Endian */
+        uint64_t rxb_fif_bk0_cdis0 : 1; /**< [  0:  0](R/W) ECC-correction disable for the RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_syn0 : 2; /**< [  2:  1](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_cdis1 : 1; /**< [  3:  3](R/W) ECC-correction disable for the RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_syn1 : 2; /**< [  5:  4](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk1_cdis0 : 1; /**< [  6:  6](R/W) ECC-correction disable for the RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_syn0 : 2; /**< [  8:  7](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_cdis1 : 1; /**< [  9:  9](R/W) ECC-correction disable for the RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_syn1 : 2; /**< [ 11: 10](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf1. */
+        uint64_t rxb_skid_cor_dis : 1; /**< [ 12: 12](R/W) ECC-correction disable for the RXB SKID FIFO. */
+        uint64_t rxb_skid_synd : 2; /**< [ 14: 13](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB SKID FIFO. */
+        uint64_t txb_fif_bk0_cdis : 1; /**< [ 15: 15](R/W) ECC-correction disable for the TXB main bank0. */
+        uint64_t txb_fif_bk0_syn : 2; /**< [ 17: 16](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main bank0. */
+        uint64_t txb_fif_bk1_cdis : 1; /**< [ 18: 18](R/W) ECC-correction disable for the TXB main bank1. */
+        uint64_t txb_fif_bk1_syn : 2; /**< [ 20: 19](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main bank1. */
+        uint64_t txb_skid_m0_cor_dis : 1; /**< [ 21: 21](R/W) ECC-correction disable for the TXB SKID FIFO. */
+        uint64_t txb_skid_m0_synd : 2; /**< [ 23: 22](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO. */
+        uint64_t txb_skid_m1_cor_dis : 1; /**< [ 24: 24](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m1_synd : 2; /**< [ 26: 25](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m2_cor_dis : 1; /**< [ 27: 27](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m2_synd : 2; /**< [ 29: 28](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m3_cor_dis : 1; /**< [ 30: 30](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m3_synd : 2; /**< [ 32: 31](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_ncsi_cor_dis : 1; /**< [ 33: 33](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_ncsi_synd : 2; /**< [ 35: 34](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_bgxx_cmr_mem_ctrl_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_42_63 : 22;
+        uint64_t txb_fif_m3_syn : 2; /**< [ 41: 40](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem3. */
+        uint64_t txb_fif_m3_cdis : 1; /**< [ 39: 39](R/W) ECC-correction disable for the TXB main mem3. */
+        uint64_t txb_fif_m2_syn : 2; /**< [ 38: 37](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem2. */
+        uint64_t txb_fif_m2_cdis : 1; /**< [ 36: 36](R/W) ECC-correction disable for the TXB main mem2. */
+        uint64_t txb_ncsi_synd : 2; /**< [ 35: 34](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_ncsi_cor_dis : 1; /**< [ 33: 33](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m3_synd : 2; /**< [ 32: 31](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m3_cor_dis : 1; /**< [ 30: 30](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m2_synd : 2; /**< [ 29: 28](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m2_cor_dis : 1; /**< [ 27: 27](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m1_synd : 2; /**< [ 26: 25](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m1_cor_dis : 1; /**< [ 24: 24](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m0_synd : 2; /**< [ 23: 22](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO. */
+        uint64_t txb_skid_m0_cor_dis : 1; /**< [ 21: 21](R/W) ECC-correction disable for the TXB SKID FIFO. */
+        uint64_t txb_fif_m1_syn : 2; /**< [ 20: 19](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem1. */
+        uint64_t txb_fif_m1_cdis : 1; /**< [ 18: 18](R/W) ECC-correction disable for the TXB main mem1. */
+        uint64_t txb_fif_m0_syn : 2; /**< [ 17: 16](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem0. */
+        uint64_t txb_fif_m0_cdis : 1; /**< [ 15: 15](R/W) ECC-correction disable for the TXB main mem0. */
+        uint64_t rxb_skid_synd : 2; /**< [ 14: 13](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB SKID FIFO. */
+        uint64_t rxb_skid_cor_dis : 1; /**< [ 12: 12](R/W) ECC-correction disable for the RXB SKID FIFO. */
+        uint64_t rxb_fif_bk1_syn1 : 2; /**< [ 11: 10](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_cdis1 : 1; /**< [  9:  9](R/W) ECC-correction disable for the RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_syn0 : 2; /**< [  8:  7](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_cdis0 : 1; /**< [  6:  6](R/W) ECC-correction disable for the RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk0_syn1 : 2; /**< [  5:  4](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_cdis1 : 1; /**< [  3:  3](R/W) ECC-correction disable for the RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_syn0 : 2; /**< [  2:  1](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_cdis0 : 1; /**< [  0:  0](R/W) ECC-correction disable for the RXB main bank0 srf0. */
+#else /* Word 0 - Little Endian */
+        uint64_t rxb_fif_bk0_cdis0 : 1; /**< [  0:  0](R/W) ECC-correction disable for the RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_syn0 : 2; /**< [  2:  1](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf0. */
+        uint64_t rxb_fif_bk0_cdis1 : 1; /**< [  3:  3](R/W) ECC-correction disable for the RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk0_syn1 : 2; /**< [  5:  4](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank0 srf1. */
+        uint64_t rxb_fif_bk1_cdis0 : 1; /**< [  6:  6](R/W) ECC-correction disable for the RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_syn0 : 2; /**< [  8:  7](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf0. */
+        uint64_t rxb_fif_bk1_cdis1 : 1; /**< [  9:  9](R/W) ECC-correction disable for the RXB main bank1 srf1. */
+        uint64_t rxb_fif_bk1_syn1 : 2; /**< [ 11: 10](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB main bank1 srf1. */
+        uint64_t rxb_skid_cor_dis : 1; /**< [ 12: 12](R/W) ECC-correction disable for the RXB SKID FIFO. */
+        uint64_t rxb_skid_synd : 2; /**< [ 14: 13](R/W) Syndrome to flip and generate single-bit/double-bit error for RXB SKID FIFO. */
+        uint64_t txb_fif_m0_cdis : 1; /**< [ 15: 15](R/W) ECC-correction disable for the TXB main mem0. */
+        uint64_t txb_fif_m0_syn : 2; /**< [ 17: 16](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem0. */
+        uint64_t txb_fif_m1_cdis : 1; /**< [ 18: 18](R/W) ECC-correction disable for the TXB main mem1. */
+        uint64_t txb_fif_m1_syn : 2; /**< [ 20: 19](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem1. */
+        uint64_t txb_skid_m0_cor_dis : 1; /**< [ 21: 21](R/W) ECC-correction disable for the TXB SKID FIFO. */
+        uint64_t txb_skid_m0_synd : 2; /**< [ 23: 22](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO. */
+        uint64_t txb_skid_m1_cor_dis : 1; /**< [ 24: 24](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m1_synd : 2; /**< [ 26: 25](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m2_cor_dis : 1; /**< [ 27: 27](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m2_synd : 2; /**< [ 29: 28](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_skid_m3_cor_dis : 1; /**< [ 30: 30](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_skid_m3_synd : 2; /**< [ 32: 31](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_ncsi_cor_dis : 1; /**< [ 33: 33](R/W) ECC-correction disable for the TXB SKID FIFO */
+        uint64_t txb_ncsi_synd : 2; /**< [ 35: 34](R/W) Syndrome to flip and generate single-bit/double-bit for TXB SKID FIFO */
+        uint64_t txb_fif_m2_cdis : 1; /**< [ 36: 36](R/W) ECC-correction disable for the TXB main mem2. */
+        uint64_t txb_fif_m2_syn : 2; /**< [ 38: 37](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem2. */
+        uint64_t txb_fif_m3_cdis : 1; /**< [ 39: 39](R/W) ECC-correction disable for the TXB main mem3. */
+        uint64_t txb_fif_m3_syn : 2; /**< [ 41: 40](R/W) Syndrome to flip and generate single-bit/double-bit error for TXB main mem3. */
+        uint64_t reserved_42_63 : 22;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_cmr_mem_ctrl bdk_bgxx_cmr_mem_ctrl_t;
+
+/* Return the physical CSR address of BGX(a)_CMR_MEM_CTRL for the chip model
+   detected at runtime.  Base 0x87e0e0000030 with a 0x1000000 stride per BGX
+   instance on all supported models; only the valid instance count differs
+   (2 on CN81XX/CN88XX, 4 on CN83XX).  Out-of-range indices fall through to
+   __bdk_csr_fatal(), which reports the bad argument (presumably marked
+   noreturn -- TODO confirm). */
+static inline uint64_t BDK_BGXX_CMR_MEM_CTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_MEM_CTRL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0e0000030ll + 0x1000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x87e0e0000030ll + 0x1000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0e0000030ll + 0x1000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("BGXX_CMR_MEM_CTRL", 1, a, 0, 0, 0);
+}
+
+/* Generated accessor plumbing consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_CMR_MEM_CTRL(a) bdk_bgxx_cmr_mem_ctrl_t
+#define bustype_BDK_BGXX_CMR_MEM_CTRL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_MEM_CTRL(a) "BGXX_CMR_MEM_CTRL"
+#define device_bar_BDK_BGXX_CMR_MEM_CTRL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_MEM_CTRL(a) (a)
+#define arguments_BDK_BGXX_CMR_MEM_CTRL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_mem_int
+ *
+ * BGX CMR Memory Interrupt Register
+ */
+union bdk_bgxx_cmr_mem_int
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_mem_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1C/H) TXB Main FIFO Mem3 single-bit error. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1C/H) TXB Main FIFO Mem3 double-bit error. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1C/H) TXB Main FIFO Mem2 single-bit error. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1C/H) TXB Main FIFO Mem2 double-bit error. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t reserved_8_27 : 20;
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) RXB main FIFO bank1 srf1 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) RXB main FIFO bank1 srf1 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) RXB main FIFO bank1 srf0 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) RXB main FIFO bank1 srf0 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) RXB main FIFO bank0 srf1 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) RXB main FIFO bank0 srf1 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) RXB main FIFO bank0 srf0 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) RXB main FIFO bank0 srf0 double-bit error. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) RXB main FIFO bank0 srf0 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) RXB main FIFO bank0 srf0 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) RXB main FIFO bank0 srf1 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) RXB main FIFO bank0 srf1 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) RXB main FIFO bank1 srf0 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) RXB main FIFO bank1 srf0 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) RXB main FIFO bank1 srf1 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) RXB main FIFO bank1 srf1 single-bit error. */
+ uint64_t reserved_8_27 : 20;
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1C/H) TXB Main FIFO Mem2 double-bit error. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1C/H) TXB Main FIFO Mem2 single-bit error. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1C/H) TXB Main FIFO Mem3 double-bit error. */
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1C/H) TXB Main FIFO Mem3 single-bit error. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_mem_int_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1C/H) Reserved. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1C/H) Reserved. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1C/H) RX SMU INFIFO overflow. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1C/H) RX GMP INFIFO overflow. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1C/H) Reserved. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 15: 15](R/W1C/H) TXB Main FIFO Bank1 single-bit error. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 14: 14](R/W1C/H) TXB Main FIFO Bank1 double-bit error. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 13: 13](R/W1C/H) TXB Main FIFO Bank0 single-bit error. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 12: 12](R/W1C/H) TXB Main FIFO Bank0 double-bit error. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1C/H) RXB NIC SKID FIFO single-bit error. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1C/H) RXB NIC SKID FIFO double-bit error. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) RXB PKI SKID FIFO single-bit error. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) RXB PKI SKID FIFO double-bit error. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) RXB main FIFO bank1 srf1 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) RXB main FIFO bank1 srf1 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) RXB main FIFO bank1 srf0 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) RXB main FIFO bank1 srf0 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) RXB main FIFO bank0 srf1 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) RXB main FIFO bank0 srf1 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) RXB main FIFO bank0 srf0 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) RXB main FIFO bank0 srf0 double-bit error. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) RXB main FIFO bank0 srf0 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) RXB main FIFO bank0 srf0 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) RXB main FIFO bank0 srf1 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) RXB main FIFO bank0 srf1 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) RXB main FIFO bank1 srf0 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) RXB main FIFO bank1 srf0 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) RXB main FIFO bank1 srf1 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) RXB main FIFO bank1 srf1 single-bit error. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) RXB PKI SKID FIFO double-bit error. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) RXB PKI SKID FIFO single-bit error. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1C/H) RXB NIC SKID FIFO double-bit error. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1C/H) RXB NIC SKID FIFO single-bit error. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 12: 12](R/W1C/H) TXB Main FIFO Bank0 double-bit error. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 13: 13](R/W1C/H) TXB Main FIFO Bank0 single-bit error. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 14: 14](R/W1C/H) TXB Main FIFO Bank1 double-bit error. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 15: 15](R/W1C/H) TXB Main FIFO Bank1 single-bit error. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1C/H) Reserved. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1C/H) RX GMP INFIFO overflow. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1C/H) RX SMU INFIFO overflow. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1C/H) Reserved. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1C/H) Reserved. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1C/H) Reserved. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_cmr_mem_int_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t txb_ncsi_sbe : 1; /**< [ 25: 25](R/W1C/H) TXB SKID FIFO single-bit error */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 24: 24](R/W1C/H) TXB SKID FIFO double-bit error */
+ uint64_t txb_skid_m3_sbe : 1; /**< [ 23: 23](R/W1C/H) TXB SKID FIFO single-bit error */
+ uint64_t txb_skid_m3_dbe : 1; /**< [ 22: 22](R/W1C/H) TXB SKID FIFO double-bit error */
+ uint64_t txb_skid_m2_sbe : 1; /**< [ 21: 21](R/W1C/H) TXB SKID FIFO single-bit error */
+ uint64_t txb_skid_m2_dbe : 1; /**< [ 20: 20](R/W1C/H) TXB SKID FIFO double-bit error */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1C/H) TXB SKID FIFO single-bit error */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1C/H) TXB SKID FIFO double-bit error */
+ uint64_t smu_in_overfl : 1; /**< [ 17: 17](R/W1C/H) RX SMU INFIFO overflow. */
+ uint64_t gmp_in_overfl : 1; /**< [ 16: 16](R/W1C/H) RX GMP INFIFO overflow. */
+ uint64_t txb_skid_m0_sbe : 1; /**< [ 15: 15](R/W1C/H) TXB SKID FIFO single-bit error. */
+ uint64_t txb_skid_m0_dbe : 1; /**< [ 14: 14](R/W1C/H) TXB SKID FIFO double-bit error. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 13: 13](R/W1C/H) TXB Main FIFO Bank1 single-bit error. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 12: 12](R/W1C/H) TXB Main FIFO Bank1 double-bit error. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 11: 11](R/W1C/H) TXB Main FIFO Bank0 single-bit error. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 10: 10](R/W1C/H) TXB Main FIFO Bank0 double-bit error. */
+ uint64_t rxb_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) RXB SKID FIFO single-bit error. */
+ uint64_t rxb_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) RXB SKID FIFO double-bit error. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) RXB main FIFO bank1 srf1 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) RXB main FIFO bank1 srf1 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) RXB main FIFO bank1 srf0 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) RXB main FIFO bank1 srf0 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) RXB main FIFO bank0 srf1 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) RXB main FIFO bank0 srf1 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) RXB main FIFO bank0 srf0 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) RXB main FIFO bank0 srf0 double-bit error. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) RXB main FIFO bank0 srf0 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) RXB main FIFO bank0 srf0 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) RXB main FIFO bank0 srf1 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) RXB main FIFO bank0 srf1 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) RXB main FIFO bank1 srf0 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) RXB main FIFO bank1 srf0 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) RXB main FIFO bank1 srf1 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) RXB main FIFO bank1 srf1 single-bit error. */
+ uint64_t rxb_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) RXB SKID FIFO double-bit error. */
+ uint64_t rxb_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) RXB SKID FIFO single-bit error. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 10: 10](R/W1C/H) TXB Main FIFO Bank0 double-bit error. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 11: 11](R/W1C/H) TXB Main FIFO Bank0 single-bit error. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 12: 12](R/W1C/H) TXB Main FIFO Bank1 double-bit error. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 13: 13](R/W1C/H) TXB Main FIFO Bank1 single-bit error. */
+ uint64_t txb_skid_m0_dbe : 1; /**< [ 14: 14](R/W1C/H) TXB SKID FIFO double-bit error. */
+ uint64_t txb_skid_m0_sbe : 1; /**< [ 15: 15](R/W1C/H) TXB SKID FIFO single-bit error. */
+ uint64_t gmp_in_overfl : 1; /**< [ 16: 16](R/W1C/H) RX GMP INFIFO overflow. */
+ uint64_t smu_in_overfl : 1; /**< [ 17: 17](R/W1C/H) RX SMU INFIFO overflow. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1C/H) TXB SKID FIFO double-bit error */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1C/H) TXB SKID FIFO single-bit error */
+ uint64_t txb_skid_m2_dbe : 1; /**< [ 20: 20](R/W1C/H) TXB SKID FIFO double-bit error */
+ uint64_t txb_skid_m2_sbe : 1; /**< [ 21: 21](R/W1C/H) TXB SKID FIFO single-bit error */
+ uint64_t txb_skid_m3_dbe : 1; /**< [ 22: 22](R/W1C/H) TXB SKID FIFO double-bit error */
+ uint64_t txb_skid_m3_sbe : 1; /**< [ 23: 23](R/W1C/H) TXB SKID FIFO single-bit error */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 24: 24](R/W1C/H) TXB SKID FIFO double-bit error */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 25: 25](R/W1C/H) TXB SKID FIFO single-bit error */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_bgxx_cmr_mem_int_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1C/H) TXB Main FIFO Mem3 single-bit error. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1C/H) TXB Main FIFO Mem3 double-bit error. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1C/H) TXB Main FIFO Mem2 single-bit error. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1C/H) TXB Main FIFO Mem2 double-bit error. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1C/H) TXB SKID NCSI FIFO single-bit error */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1C/H) TXB SKID NCSI FIFO double-bit error */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1C/H) TXB SKID PKO FIFO single-bit error */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1C/H) TXB SKID PKO FIFO double-bit error */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1C/H) TXB SKID PKO FIFO single-bit error */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1C/H) TXB SKID PKO FIFO double-bit error */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1C/H) TXB SKID PKO FIFO single-bit error */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1C/H) TXB SKID PKO FIFO double-bit error */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1C/H) RX SMU INFIFO overflow. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1C/H) RX GMP INFIFO overflow. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1C/H) TXB SKID PKO FIFO single-bit error. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1C/H) TXB SKID PKO FIFO double-bit error. */
+ uint64_t txb_fif_m1_sbe : 1; /**< [ 15: 15](R/W1C/H) TXB Main FIFO Mem1 single-bit error. */
+ uint64_t txb_fif_m1_dbe : 1; /**< [ 14: 14](R/W1C/H) TXB Main FIFO Mem1 double-bit error. */
+ uint64_t txb_fif_m0_sbe : 1; /**< [ 13: 13](R/W1C/H) TXB Main FIFO Mem0 single-bit error. */
+ uint64_t txb_fif_m0_dbe : 1; /**< [ 12: 12](R/W1C/H) TXB Main FIFO Mem0 double-bit error. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1C/H) RXB NIC SKID FIFO single-bit error. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1C/H) RXB NIC SKID FIFO double-bit error. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) RXB PKI SKID FIFO single-bit error. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) RXB PKI SKID FIFO double-bit error. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) RXB main FIFO bank1 srf1 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) RXB main FIFO bank1 srf1 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) RXB main FIFO bank1 srf0 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) RXB main FIFO bank1 srf0 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) RXB main FIFO bank0 srf1 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) RXB main FIFO bank0 srf1 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) RXB main FIFO bank0 srf0 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) RXB main FIFO bank0 srf0 double-bit error. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) RXB main FIFO bank0 srf0 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) RXB main FIFO bank0 srf0 single-bit error. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) RXB main FIFO bank0 srf1 double-bit error. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) RXB main FIFO bank0 srf1 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) RXB main FIFO bank1 srf0 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) RXB main FIFO bank1 srf0 single-bit error. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) RXB main FIFO bank1 srf1 double-bit error. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) RXB main FIFO bank1 srf1 single-bit error. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) RXB PKI SKID FIFO double-bit error. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) RXB PKI SKID FIFO single-bit error. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1C/H) RXB NIC SKID FIFO double-bit error. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1C/H) RXB NIC SKID FIFO single-bit error. */
+ uint64_t txb_fif_m0_dbe : 1; /**< [ 12: 12](R/W1C/H) TXB Main FIFO Mem0 double-bit error. */
+ uint64_t txb_fif_m0_sbe : 1; /**< [ 13: 13](R/W1C/H) TXB Main FIFO Mem0 single-bit error. */
+ uint64_t txb_fif_m1_dbe : 1; /**< [ 14: 14](R/W1C/H) TXB Main FIFO Mem1 double-bit error. */
+ uint64_t txb_fif_m1_sbe : 1; /**< [ 15: 15](R/W1C/H) TXB Main FIFO Mem1 single-bit error. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1C/H) TXB SKID PKO FIFO double-bit error. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1C/H) TXB SKID PKO FIFO single-bit error. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1C/H) RX GMP INFIFO overflow. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1C/H) RX SMU INFIFO overflow. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1C/H) TXB SKID PKO FIFO double-bit error */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1C/H) TXB SKID PKO FIFO single-bit error */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1C/H) TXB SKID PKO FIFO double-bit error */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1C/H) TXB SKID PKO FIFO single-bit error */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1C/H) TXB SKID PKO FIFO double-bit error */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1C/H) TXB SKID PKO FIFO single-bit error */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1C/H) TXB SKID NCSI FIFO double-bit error */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1C/H) TXB SKID NCSI FIFO single-bit error */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) TXB SKID NIC FIFO double-bit error */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) TXB SKID NIC FIFO single-bit error */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1C/H) TXB Main FIFO Mem2 double-bit error. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1C/H) TXB Main FIFO Mem2 single-bit error. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1C/H) TXB Main FIFO Mem3 double-bit error. */
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1C/H) TXB Main FIFO Mem3 single-bit error. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmr_mem_int bdk_bgxx_cmr_mem_int_t;
+
+static inline uint64_t BDK_BGXX_CMR_MEM_INT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_MEM_INT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0000010ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0000010ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0000010ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_MEM_INT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_MEM_INT(a) bdk_bgxx_cmr_mem_int_t
+#define bustype_BDK_BGXX_CMR_MEM_INT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_MEM_INT(a) "BGXX_CMR_MEM_INT"
+#define device_bar_BDK_BGXX_CMR_MEM_INT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_MEM_INT(a) (a)
+#define arguments_BDK_BGXX_CMR_MEM_INT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_mem_int_ena_w1c
+ *
+ * BGX CMR Memory Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_bgxx_cmr_mem_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_mem_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t reserved_20_27 : 8;
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t reserved_8_17 : 10;
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t reserved_8_17 : 10;
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t reserved_20_27 : 8;
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_mem_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_cmr_mem_int_ena_w1c_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t txb_ncsi_sbe : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_sbe : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_SBE]. */
+ uint64_t txb_skid_m3_dbe : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_DBE]. */
+ uint64_t txb_skid_m2_sbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_SBE]. */
+ uint64_t txb_skid_m2_dbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_sbe : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_SBE]. */
+ uint64_t txb_skid_m0_dbe : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t rxb_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_SKID_SBE]. */
+ uint64_t rxb_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_SKID_DBE]. */
+ uint64_t rxb_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[RXB_SKID_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_skid_m0_dbe : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_DBE]. */
+ uint64_t txb_skid_m0_sbe : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m2_dbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_DBE]. */
+ uint64_t txb_skid_m2_sbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_SBE]. */
+ uint64_t txb_skid_m3_dbe : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_DBE]. */
+ uint64_t txb_skid_m3_sbe : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_bgxx_cmr_mem_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_fif_m1_sbe : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_SBE]. */
+ uint64_t txb_fif_m1_dbe : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_DBE]. */
+ uint64_t txb_fif_m0_sbe : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_SBE]. */
+ uint64_t txb_fif_m0_dbe : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t txb_fif_m0_dbe : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_DBE]. */
+ uint64_t txb_fif_m0_sbe : 1; /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_SBE]. */
+ uint64_t txb_fif_m1_dbe : 1; /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_DBE]. */
+ uint64_t txb_fif_m1_sbe : 1; /**< [ 15: 15](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1C/H) Reads or clears enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmr_mem_int_ena_w1c bdk_bgxx_cmr_mem_int_ena_w1c_t;
+
+/* Return the physical CSR address of BGX(a)_CMR_MEM_INT_ENA_W1C for the chip
+ * model detected at runtime. The valid BGX index range is model-dependent:
+ * a <= 1 on CN81XX and CN88XX, a <= 3 on CN83XX; the base address and the
+ * 0x1000000 per-BGX stride are identical across all three models.
+ * An out-of-range index (or an unrecognized model) falls through to
+ * __bdk_csr_fatal(); the missing return after that call assumes the helper
+ * is declared noreturn -- NOTE(review): verify against bdk-csr.h. */
+static inline uint64_t BDK_BGXX_CMR_MEM_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_MEM_INT_ENA_W1C(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0e0000020ll + 0x1000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x87e0e0000020ll + 0x1000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0e0000020ll + 0x1000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("BGXX_CMR_MEM_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata macros (C type, bus type, printable name, PCI BAR and
+ * bus number, argument tuple) -- presumably consumed by the generic BDK_CSR_*
+ * access macros; verify against bdk-csr.h before relying on their semantics. */
+#define typedef_BDK_BGXX_CMR_MEM_INT_ENA_W1C(a) bdk_bgxx_cmr_mem_int_ena_w1c_t
+#define bustype_BDK_BGXX_CMR_MEM_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_MEM_INT_ENA_W1C(a) "BGXX_CMR_MEM_INT_ENA_W1C"
+#define device_bar_BDK_BGXX_CMR_MEM_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_MEM_INT_ENA_W1C(a) (a)
+#define arguments_BDK_BGXX_CMR_MEM_INT_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_mem_int_ena_w1s
+ *
+ * BGX CMR Memory Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_bgxx_cmr_mem_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_mem_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t reserved_20_27 : 8;
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t reserved_8_17 : 10;
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t reserved_8_17 : 10;
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t reserved_20_27 : 8;
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_mem_int_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_cmr_mem_int_ena_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t txb_ncsi_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_SBE]. */
+ uint64_t txb_skid_m3_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_DBE]. */
+ uint64_t txb_skid_m2_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_SBE]. */
+ uint64_t txb_skid_m2_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_SBE]. */
+ uint64_t txb_skid_m0_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t rxb_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_SKID_SBE]. */
+ uint64_t rxb_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_SKID_DBE]. */
+ uint64_t rxb_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[RXB_SKID_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_skid_m0_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_DBE]. */
+ uint64_t txb_skid_m0_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m2_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_DBE]. */
+ uint64_t txb_skid_m2_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_SBE]. */
+ uint64_t txb_skid_m3_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_DBE]. */
+ uint64_t txb_skid_m3_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_bgxx_cmr_mem_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_fif_m1_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_SBE]. */
+ uint64_t txb_fif_m1_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_DBE]. */
+ uint64_t txb_fif_m0_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_SBE]. */
+ uint64_t txb_fif_m0_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t txb_fif_m0_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_DBE]. */
+ uint64_t txb_fif_m0_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_SBE]. */
+ uint64_t txb_fif_m1_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_DBE]. */
+ uint64_t txb_fif_m1_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets enable for BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmr_mem_int_ena_w1s bdk_bgxx_cmr_mem_int_ena_w1s_t;
+
+static inline uint64_t BDK_BGXX_CMR_MEM_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+/**
+ * Return the physical CSR address of BGX(a)_CMR_MEM_INT_ENA_W1S for the
+ * SoC model detected at runtime.
+ *
+ * @param a  BGX block index; the valid range is model-dependent
+ *           (CN81XX/CN88XX: 0..1, CN83XX: 0..3), enforced by the guards below.
+ * @return   Register address: base 0x87e0e0000028 plus a 0x1000000-byte
+ *           stride per BGX block.  The index is additionally masked
+ *           (& 0x1 / & 0x3) to the model's block count.
+ *
+ * NOTE(review): an out-of-range index or unrecognized model falls through to
+ * __bdk_csr_fatal() with no return statement after it — assumes that helper
+ * is declared noreturn; confirm against bdk-csr.h.
+ */
+static inline uint64_t BDK_BGXX_CMR_MEM_INT_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0000028ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0000028ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0000028ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_MEM_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata for BGX(a)_CMR_MEM_INT_ENA_W1S: C union
+   type, bus type, printable name, PCI BAR, bus number, and argument tuple.
+   NOTE(review): presumably consumed by the generic BDK_CSR_* read/write
+   helper macros — confirm against bdk-csr.h. */
+#define typedef_BDK_BGXX_CMR_MEM_INT_ENA_W1S(a) bdk_bgxx_cmr_mem_int_ena_w1s_t
+#define bustype_BDK_BGXX_CMR_MEM_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_MEM_INT_ENA_W1S(a) "BGXX_CMR_MEM_INT_ENA_W1S"
+#define device_bar_BDK_BGXX_CMR_MEM_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_MEM_INT_ENA_W1S(a) (a)
+#define arguments_BDK_BGXX_CMR_MEM_INT_ENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_mem_int_w1s
+ *
+ * BGX CMR Memory Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_bgxx_cmr_mem_int_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_mem_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t reserved_20_27 : 8;
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t reserved_8_17 : 10;
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t reserved_8_17 : 10;
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t reserved_20_27 : 8;
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_mem_int_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_cmr_mem_int_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_26_63 : 38;
+ uint64_t txb_ncsi_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_SBE]. */
+ uint64_t txb_skid_m3_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_DBE]. */
+ uint64_t txb_skid_m2_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_SBE]. */
+ uint64_t txb_skid_m2_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 17: 17](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 16: 16](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_SBE]. */
+ uint64_t txb_skid_m0_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t rxb_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_SKID_SBE]. */
+ uint64_t rxb_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_SKID_DBE]. */
+ uint64_t rxb_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[RXB_SKID_SBE]. */
+ uint64_t txb_fif_bk0_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_DBE]. */
+ uint64_t txb_fif_bk0_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK0_SBE]. */
+ uint64_t txb_fif_bk1_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_DBE]. */
+ uint64_t txb_fif_bk1_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_FIF_BK1_SBE]. */
+ uint64_t txb_skid_m0_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_DBE]. */
+ uint64_t txb_skid_m0_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M0_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 16: 16](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 17: 17](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_dbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_DBE]. */
+ uint64_t txb_skid_m1_sbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M1_SBE]. */
+ uint64_t txb_skid_m2_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_DBE]. */
+ uint64_t txb_skid_m2_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M2_SBE]. */
+ uint64_t txb_skid_m3_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_DBE]. */
+ uint64_t txb_skid_m3_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_SKID_M3_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets BGX(0..1)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t reserved_26_63 : 38;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_bgxx_cmr_mem_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_fif_m1_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_SBE]. */
+ uint64_t txb_fif_m1_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_DBE]. */
+ uint64_t txb_fif_m0_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_SBE]. */
+ uint64_t txb_fif_m0_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxb_fif_bk0_dbe0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE0]. */
+ uint64_t rxb_fif_bk0_sbe0 : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE0]. */
+ uint64_t rxb_fif_bk0_dbe1 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_DBE1]. */
+ uint64_t rxb_fif_bk0_sbe1 : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK0_SBE1]. */
+ uint64_t rxb_fif_bk1_dbe0 : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE0]. */
+ uint64_t rxb_fif_bk1_sbe0 : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE0]. */
+ uint64_t rxb_fif_bk1_dbe1 : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_DBE1]. */
+ uint64_t rxb_fif_bk1_sbe1 : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_FIF_BK1_SBE1]. */
+ uint64_t rxb_pki_skid_dbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_DBE]. */
+ uint64_t rxb_pki_skid_sbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_PKI_SKID_SBE]. */
+ uint64_t rxb_nic_skid_dbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_DBE]. */
+ uint64_t rxb_nic_skid_sbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[RXB_NIC_SKID_SBE]. */
+ uint64_t txb_fif_m0_dbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_DBE]. */
+ uint64_t txb_fif_m0_sbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M0_SBE]. */
+ uint64_t txb_fif_m1_dbe : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_DBE]. */
+ uint64_t txb_fif_m1_sbe : 1; /**< [ 15: 15](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M1_SBE]. */
+ uint64_t txb_skid_m0_pko_dbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_DBE]. */
+ uint64_t txb_skid_m0_pko_sbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_PKO_SBE]. */
+ uint64_t gmp_in_overfl : 1; /**< [ 18: 18](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[GMP_IN_OVERFL]. */
+ uint64_t smu_in_overfl : 1; /**< [ 19: 19](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[SMU_IN_OVERFL]. */
+ uint64_t txb_skid_m1_pko_dbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_DBE]. */
+ uint64_t txb_skid_m1_pko_sbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_PKO_SBE]. */
+ uint64_t txb_skid_m2_pko_dbe : 1; /**< [ 22: 22](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_DBE]. */
+ uint64_t txb_skid_m2_pko_sbe : 1; /**< [ 23: 23](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_PKO_SBE]. */
+ uint64_t txb_skid_m3_pko_dbe : 1; /**< [ 24: 24](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_DBE]. */
+ uint64_t txb_skid_m3_pko_sbe : 1; /**< [ 25: 25](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_PKO_SBE]. */
+ uint64_t txb_ncsi_dbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_NCSI_DBE]. */
+ uint64_t txb_ncsi_sbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_NCSI_SBE]. */
+ uint64_t txb_skid_m0_nic_dbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_DBE]. */
+ uint64_t txb_skid_m0_nic_sbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M0_NIC_SBE]. */
+ uint64_t txb_skid_m1_nic_dbe : 1; /**< [ 30: 30](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_DBE]. */
+ uint64_t txb_skid_m1_nic_sbe : 1; /**< [ 31: 31](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M1_NIC_SBE]. */
+ uint64_t txb_skid_m2_nic_dbe : 1; /**< [ 32: 32](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_DBE]. */
+ uint64_t txb_skid_m2_nic_sbe : 1; /**< [ 33: 33](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M2_NIC_SBE]. */
+ uint64_t txb_skid_m3_nic_dbe : 1; /**< [ 34: 34](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_DBE]. */
+ uint64_t txb_skid_m3_nic_sbe : 1; /**< [ 35: 35](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_SKID_M3_NIC_SBE]. */
+ uint64_t txb_fif_m2_dbe : 1; /**< [ 36: 36](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_DBE]. */
+ uint64_t txb_fif_m2_sbe : 1; /**< [ 37: 37](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M2_SBE]. */
+ uint64_t txb_fif_m3_dbe : 1; /**< [ 38: 38](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_DBE]. */
+ uint64_t txb_fif_m3_sbe : 1; /**< [ 39: 39](R/W1S/H) Reads or sets BGX(0..3)_CMR_MEM_INT[TXB_FIF_M3_SBE]. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmr_mem_int_w1s bdk_bgxx_cmr_mem_int_w1s_t;
+
+/* Return the CSR address of BGX(a)_CMR_MEM_INT_W1S for the running chip
+   model. Valid indices: a <= 1 on CN81XX/CN88XX, a <= 3 on CN83XX; any
+   other model/index combination reports a fatal CSR error via
+   __bdk_csr_fatal(). Each BGX instance is 0x1000000 apart. */
+static inline uint64_t BDK_BGXX_CMR_MEM_INT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_MEM_INT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0000018ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0000018ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0000018ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_MEM_INT_W1S", 1, a, 0, 0, 0);
+}
+
+/* Register attributes for BGXX_CMR_MEM_INT_W1S (type, bus, name, BAR). */
+#define typedef_BDK_BGXX_CMR_MEM_INT_W1S(a) bdk_bgxx_cmr_mem_int_w1s_t
+#define bustype_BDK_BGXX_CMR_MEM_INT_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_MEM_INT_W1S(a) "BGXX_CMR_MEM_INT_W1S"
+#define device_bar_BDK_BGXX_CMR_MEM_INT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_MEM_INT_W1S(a) (a)
+#define arguments_BDK_BGXX_CMR_MEM_INT_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_nic_nxc_adr
+ *
+ * BGX CMR NIC NXC Exception Registers
+ * Captures the channel and LMAC ID logged for NXC exceptions associated
+ * with NIC (both fields are RO/H, i.e. hardware-written).
+ */
+union bdk_bgxx_cmr_nic_nxc_adr
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_bgxx_cmr_nic_nxc_adr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t lmac_id : 4; /**< [ 15: 12](RO/H) Logged LMAC ID associated with NXC exceptions associated with NIC. */
+ uint64_t channel : 12; /**< [ 11: 0](RO/H) Logged channel for NXC exceptions associated with NIC. */
+#else /* Word 0 - Little Endian */
+ uint64_t channel : 12; /**< [ 11: 0](RO/H) Logged channel for NXC exceptions associated with NIC. */
+ uint64_t lmac_id : 4; /**< [ 15: 12](RO/H) Logged LMAC ID associated with NXC exceptions associated with NIC. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_nic_nxc_adr_s cn; */
+};
+typedef union bdk_bgxx_cmr_nic_nxc_adr bdk_bgxx_cmr_nic_nxc_adr_t;
+
+/* Return the CSR address of BGX(a)_CMR_NIC_NXC_ADR. Only CN81XX (a <= 1)
+   and CN83XX (a <= 3) provide this register; any other model or
+   out-of-range index reports a fatal CSR error. */
+static inline uint64_t BDK_BGXX_CMR_NIC_NXC_ADR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_NIC_NXC_ADR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0001030ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0001030ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("BGXX_CMR_NIC_NXC_ADR", 1, a, 0, 0, 0);
+}
+
+/* Register attributes for BGXX_CMR_NIC_NXC_ADR (type, bus, name, BAR). */
+#define typedef_BDK_BGXX_CMR_NIC_NXC_ADR(a) bdk_bgxx_cmr_nic_nxc_adr_t
+#define bustype_BDK_BGXX_CMR_NIC_NXC_ADR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_NIC_NXC_ADR(a) "BGXX_CMR_NIC_NXC_ADR"
+#define device_bar_BDK_BGXX_CMR_NIC_NXC_ADR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_NIC_NXC_ADR(a) (a)
+#define arguments_BDK_BGXX_CMR_NIC_NXC_ADR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_nxc_adr
+ *
+ * BGX CMR NXC Exception Registers
+ * Captures the channel and LMAC ID logged for NXC exceptions (both fields
+ * are RO/H, i.e. hardware-written).
+ */
+union bdk_bgxx_cmr_nxc_adr
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_bgxx_cmr_nxc_adr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t lmac_id : 4; /**< [ 15: 12](RO/H) Logged LMAC ID associated with NXC exceptions. */
+ uint64_t channel : 12; /**< [ 11: 0](RO/H) Logged channel for NXC exceptions. */
+#else /* Word 0 - Little Endian */
+ uint64_t channel : 12; /**< [ 11: 0](RO/H) Logged channel for NXC exceptions. */
+ uint64_t lmac_id : 4; /**< [ 15: 12](RO/H) Logged LMAC ID associated with NXC exceptions. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_nxc_adr_s cn; */
+};
+typedef union bdk_bgxx_cmr_nxc_adr bdk_bgxx_cmr_nxc_adr_t;
+
+/* Return the CSR address of BGX(a)_CMR_NXC_ADR. Only CN88XX (a <= 1)
+   provides this register; other models report a fatal CSR error. */
+static inline uint64_t BDK_BGXX_CMR_NXC_ADR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_NXC_ADR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0001018ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_NXC_ADR", 1, a, 0, 0, 0);
+}
+
+/* Register attributes for BGXX_CMR_NXC_ADR (type, bus, name, BAR). */
+#define typedef_BDK_BGXX_CMR_NXC_ADR(a) bdk_bgxx_cmr_nxc_adr_t
+#define bustype_BDK_BGXX_CMR_NXC_ADR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_NXC_ADR(a) "BGXX_CMR_NXC_ADR"
+#define device_bar_BDK_BGXX_CMR_NXC_ADR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_NXC_ADR(a) (a)
+#define arguments_BDK_BGXX_CMR_NXC_ADR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_pko_nxc_adr
+ *
+ * BGX CMR PKO NXC Exception Registers
+ * Captures the channel and LMAC ID logged for NXC exceptions associated
+ * with PKO (both fields are RO/H, i.e. hardware-written).
+ */
+union bdk_bgxx_cmr_pko_nxc_adr
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_bgxx_cmr_pko_nxc_adr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t lmac_id : 4; /**< [ 15: 12](RO/H) Logged LMAC ID associated with NXC exceptions associated with PKO. */
+ uint64_t channel : 12; /**< [ 11: 0](RO/H) Logged channel for NXC exceptions associated with PKO. */
+#else /* Word 0 - Little Endian */
+ uint64_t channel : 12; /**< [ 11: 0](RO/H) Logged channel for NXC exceptions associated with PKO. */
+ uint64_t lmac_id : 4; /**< [ 15: 12](RO/H) Logged LMAC ID associated with NXC exceptions associated with PKO. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_pko_nxc_adr_s cn; */
+};
+typedef union bdk_bgxx_cmr_pko_nxc_adr bdk_bgxx_cmr_pko_nxc_adr_t;
+
+/* Return the CSR address of BGX(a)_CMR_PKO_NXC_ADR. Only CN81XX (a <= 1)
+   and CN83XX (a <= 3) provide this register; any other model or
+   out-of-range index reports a fatal CSR error. */
+static inline uint64_t BDK_BGXX_CMR_PKO_NXC_ADR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_PKO_NXC_ADR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0001018ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0001018ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("BGXX_CMR_PKO_NXC_ADR", 1, a, 0, 0, 0);
+}
+
+/* Register attributes for BGXX_CMR_PKO_NXC_ADR (type, bus, name, BAR). */
+#define typedef_BDK_BGXX_CMR_PKO_NXC_ADR(a) bdk_bgxx_cmr_pko_nxc_adr_t
+#define bustype_BDK_BGXX_CMR_PKO_NXC_ADR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_PKO_NXC_ADR(a) "BGXX_CMR_PKO_NXC_ADR"
+#define device_bar_BDK_BGXX_CMR_PKO_NXC_ADR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_PKO_NXC_ADR(a) (a)
+#define arguments_BDK_BGXX_CMR_PKO_NXC_ADR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_rx_dmac#_cam
+ *
+ * BGX CMR Receive CAM Registers
+ * These registers provide access to the 32 DMAC CAM entries in BGX.
+ */
+union bdk_bgxx_cmr_rx_dmacx_cam
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_rx_dmacx_cam_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t id : 2; /**< [ 50: 49](R/W) Logical MAC ID that this DMAC CAM address applies to. BGX has 32 DMAC CAM entries that can
+ be accessed with the BGX()_CMR_RX_DMAC()_CAM CSRs. These 32 DMAC entries can be used by
+ any of the four SGMII MACs or the 10G/40G MACs using these register bits.
+
+ A typical configuration is to provide eight CAM entries per LMAC ID, which is configured
+ using the following settings:
+ * LMAC interface 0: BGX()_CMR_RX_DMAC(0..7)_CAM[ID] = 0x0.
+ * LMAC interface 1: BGX()_CMR_RX_DMAC(8..15)_CAM[ID] = 0x1.
+ * LMAC interface 2: BGX()_CMR_RX_DMAC(16..23)_CAM[ID] = 0x2.
+ * LMAC interface 3: BGX()_CMR_RX_DMAC(24..31)_CAM[ID] = 0x3. */
+ uint64_t en : 1; /**< [ 48: 48](R/W) CAM entry enable for this DMAC address.
+ 1 = Include this address in the matching algorithm.
+ 0 = Don't include this address in the matching algorithm. */
+ uint64_t adr : 48; /**< [ 47: 0](R/W) DMAC address in the CAM used for matching. Specified in network byte order, i.e.
+ ADR\<47:40\> is for the first DMAC byte on the wire. The CAM matches against unicast or
+ multicast DMAC addresses. All BGX()_CMR_RX_DMAC()_CAM CSRs can be used in any of the LMAC
+ combinations such that any BGX MAC can use any of the 32 common DMAC entries. */
+#else /* Word 0 - Little Endian */
+ uint64_t adr : 48; /**< [ 47: 0](R/W) DMAC address in the CAM used for matching. Specified in network byte order, i.e.
+ ADR\<47:40\> is for the first DMAC byte on the wire. The CAM matches against unicast or
+ multicast DMAC addresses. All BGX()_CMR_RX_DMAC()_CAM CSRs can be used in any of the LMAC
+ combinations such that any BGX MAC can use any of the 32 common DMAC entries. */
+ uint64_t en : 1; /**< [ 48: 48](R/W) CAM entry enable for this DMAC address.
+ 1 = Include this address in the matching algorithm.
+ 0 = Don't include this address in the matching algorithm. */
+ uint64_t id : 2; /**< [ 50: 49](R/W) Logical MAC ID that this DMAC CAM address applies to. BGX has 32 DMAC CAM entries that can
+ be accessed with the BGX()_CMR_RX_DMAC()_CAM CSRs. These 32 DMAC entries can be used by
+ any of the four SGMII MACs or the 10G/40G MACs using these register bits.
+
+ A typical configuration is to provide eight CAM entries per LMAC ID, which is configured
+ using the following settings:
+ * LMAC interface 0: BGX()_CMR_RX_DMAC(0..7)_CAM[ID] = 0x0.
+ * LMAC interface 1: BGX()_CMR_RX_DMAC(8..15)_CAM[ID] = 0x1.
+ * LMAC interface 2: BGX()_CMR_RX_DMAC(16..23)_CAM[ID] = 0x2.
+ * LMAC interface 3: BGX()_CMR_RX_DMAC(24..31)_CAM[ID] = 0x3. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_rx_dmacx_cam_s cn; */
+};
+typedef union bdk_bgxx_cmr_rx_dmacx_cam bdk_bgxx_cmr_rx_dmacx_cam_t;
+
+static inline uint64_t BDK_BGXX_CMR_RX_DMACX_CAM(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_RX_DMACX_CAM(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=31)))
+ return 0x87e0e0000200ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=31)))
+ return 0x87e0e0000200ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=31)))
+ return 0x87e0e0000200ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1f);
+ __bdk_csr_fatal("BGXX_CMR_RX_DMACX_CAM", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_RX_DMACX_CAM(a,b) bdk_bgxx_cmr_rx_dmacx_cam_t
+#define bustype_BDK_BGXX_CMR_RX_DMACX_CAM(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_RX_DMACX_CAM(a,b) "BGXX_CMR_RX_DMACX_CAM"
+#define device_bar_BDK_BGXX_CMR_RX_DMACX_CAM(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_RX_DMACX_CAM(a,b) (a)
+#define arguments_BDK_BGXX_CMR_RX_DMACX_CAM(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_rx_lmacs
+ *
+ * BGX CMR Receive Logical MACs Registers
+ */
+union bdk_bgxx_cmr_rx_lmacs
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_rx_lmacs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t lmacs : 3; /**< [ 2: 0](R/W) Number of LMACS. Specifies the number of LMACs that can be enabled.
+ This determines the logical RX buffer size per LMAC and the maximum
+ LMAC ID that can be used:
+
+ 0x0 = Reserved.
+ 0x1 = BGX()_CONST[TX_FIFOSZ] bytes per LMAC, maximum LMAC ID is 0.
+ 0x2 = BGX()_CONST[TX_FIFOSZ]/2 bytes per LMAC, maximum LMAC ID is 1.
+ 0x3 = BGX()_CONST[TX_FIFOSZ]/4 bytes per LMAC, maximum LMAC ID is 2.
+ 0x4 = BGX()_CONST[TX_FIFOSZ]/4 bytes per LMAC, maximum LMAC ID is 3.
+ 0x5-0x7 = Reserved.
+
+ Note the maximum LMAC ID is determined by the smaller of
+ BGX()_CMR_RX_LMACS[LMACS] and BGX()_CMR_TX_LMACS[LMACS]. The two fields
+ should be set to the same value for normal operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t lmacs : 3; /**< [ 2: 0](R/W) Number of LMACS. Specifies the number of LMACs that can be enabled.
+ This determines the logical RX buffer size per LMAC and the maximum
+ LMAC ID that can be used:
+
+ 0x0 = Reserved.
+ 0x1 = BGX()_CONST[TX_FIFOSZ] bytes per LMAC, maximum LMAC ID is 0.
+ 0x2 = BGX()_CONST[TX_FIFOSZ]/2 bytes per LMAC, maximum LMAC ID is 1.
+ 0x3 = BGX()_CONST[TX_FIFOSZ]/4 bytes per LMAC, maximum LMAC ID is 2.
+ 0x4 = BGX()_CONST[TX_FIFOSZ]/4 bytes per LMAC, maximum LMAC ID is 3.
+ 0x5-0x7 = Reserved.
+
+ Note the maximum LMAC ID is determined by the smaller of
+ BGX()_CMR_RX_LMACS[LMACS] and BGX()_CMR_TX_LMACS[LMACS]. The two fields
+ should be set to the same value for normal operation. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_rx_lmacs_s cn81xx; */
+ struct bdk_bgxx_cmr_rx_lmacs_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t lmacs : 3; /**< [ 2: 0](R/W) Number of LMACS. Specifies the number of LMACs that can be enabled.
+ This determines the logical RX buffer size per LMAC and the maximum
+ LMAC ID that can be used:
+
+ 0x0 = Reserved.
+ 0x1 = 64 KB per LMAC, maximum LMAC ID is 0.
+ 0x2 = 32 KB per LMAC, maximum LMAC ID is 1.
+ 0x3 = 16 KB per LMAC, maximum LMAC ID is 2.
+ 0x4 = 16 KB per LMAC, maximum LMAC ID is 3.
+ 0x5-0x7 = Reserved.
+
+ Note the maximum LMAC ID is determined by the smaller of
+ BGX()_CMR_RX_LMACS[LMACS] and BGX()_CMR_TX_LMACS[LMACS]. The two fields
+ should be set to the same value for normal operation. */
+#else /* Word 0 - Little Endian */
+ uint64_t lmacs : 3; /**< [ 2: 0](R/W) Number of LMACS. Specifies the number of LMACs that can be enabled.
+ This determines the logical RX buffer size per LMAC and the maximum
+ LMAC ID that can be used:
+
+ 0x0 = Reserved.
+ 0x1 = 64 KB per LMAC, maximum LMAC ID is 0.
+ 0x2 = 32 KB per LMAC, maximum LMAC ID is 1.
+ 0x3 = 16 KB per LMAC, maximum LMAC ID is 2.
+ 0x4 = 16 KB per LMAC, maximum LMAC ID is 3.
+ 0x5-0x7 = Reserved.
+
+ Note the maximum LMAC ID is determined by the smaller of
+ BGX()_CMR_RX_LMACS[LMACS] and BGX()_CMR_TX_LMACS[LMACS]. The two fields
+ should be set to the same value for normal operation. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_cmr_rx_lmacs_s cn83xx; */
+};
+typedef union bdk_bgxx_cmr_rx_lmacs bdk_bgxx_cmr_rx_lmacs_t;
+
+static inline uint64_t BDK_BGXX_CMR_RX_LMACS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_RX_LMACS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0000468ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0000468ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0000468ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_RX_LMACS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_RX_LMACS(a) bdk_bgxx_cmr_rx_lmacs_t
+#define bustype_BDK_BGXX_CMR_RX_LMACS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_RX_LMACS(a) "BGXX_CMR_RX_LMACS"
+#define device_bar_BDK_BGXX_CMR_RX_LMACS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_RX_LMACS(a) (a)
+#define arguments_BDK_BGXX_CMR_RX_LMACS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_rx_ovr_bp
+ *
+ * BGX CMR Receive-Ports Backpressure Override Registers
+ * BGX()_CMR_RX_OVR_BP[EN\<0\>] must be set to one and BGX()_CMR_RX_OVR_BP[BP\<0\>] must be
+ * cleared to zero (to forcibly disable hardware-automatic 802.3 PAUSE packet generation) with
+ * the HiGig2 Protocol when BGX()_SMU()_HG2_CONTROL[HG2TX_EN]=0. (The HiGig2 protocol is
+ * indicated by BGX()_SMU()_TX_CTL[HG_EN]=1 and BGX()_SMU()_RX_UDD_SKP[LEN]=16).
+ * Hardware can only auto-generate backpressure through HiGig2 messages (optionally, when
+ * BGX()_SMU()_HG2_CONTROL[HG2TX_EN]=1) with the HiGig2 protocol.
+ */
+union bdk_bgxx_cmr_rx_ovr_bp
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_rx_ovr_bp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t en : 4; /**< [ 11: 8](R/W) Per-LMAC enable backpressure override.
+ 0 = Don't enable.
+ 1 = Enable override.
+
+ Bit\<8\> represents LMAC 0, ..., bit\<11\> represents LMAC 3. */
+ uint64_t bp : 4; /**< [ 7: 4](R/W) Per-LMAC backpressure status to use:
+ 0 = LMAC is available.
+ 1 = LMAC should be backpressured.
+
+ Bit\<4\> represents LMAC 0, ..., bit\<7\> represents LMAC 3. */
+ uint64_t ign_fifo_bp : 4; /**< [ 3: 0](R/W) Ignore BGX()_CMR()_RX_BP_ON[MARK] when computing backpressure. CMR does not backpressure
+ the
+ MAC due to the FIFO length passing BGX()_CMR()_RX_BP_ON[MARK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ign_fifo_bp : 4; /**< [ 3: 0](R/W) Ignore BGX()_CMR()_RX_BP_ON[MARK] when computing backpressure. CMR does not backpressure
+ the
+ MAC due to the FIFO length passing BGX()_CMR()_RX_BP_ON[MARK]. */
+ uint64_t bp : 4; /**< [ 7: 4](R/W) Per-LMAC backpressure status to use:
+ 0 = LMAC is available.
+ 1 = LMAC should be backpressured.
+
+ Bit\<4\> represents LMAC 0, ..., bit\<7\> represents LMAC 3. */
+ uint64_t en : 4; /**< [ 11: 8](R/W) Per-LMAC enable backpressure override.
+ 0 = Don't enable.
+ 1 = Enable override.
+
+ Bit\<8\> represents LMAC 0, ..., bit\<11\> represents LMAC 3. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_cmr_rx_ovr_bp_s cn; */
+};
+typedef union bdk_bgxx_cmr_rx_ovr_bp bdk_bgxx_cmr_rx_ovr_bp_t;
+
+static inline uint64_t BDK_BGXX_CMR_RX_OVR_BP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_RX_OVR_BP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0000470ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0000470ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0000470ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_RX_OVR_BP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_RX_OVR_BP(a) bdk_bgxx_cmr_rx_ovr_bp_t
+#define bustype_BDK_BGXX_CMR_RX_OVR_BP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_RX_OVR_BP(a) "BGXX_CMR_RX_OVR_BP"
+#define device_bar_BDK_BGXX_CMR_RX_OVR_BP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_RX_OVR_BP(a) (a)
+#define arguments_BDK_BGXX_CMR_RX_OVR_BP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_rx_stat10
+ *
+ * BGX Receive Status Register 10
+ * This register provides a count of octets dropped at the NCSI interface.
+ */
+union bdk_bgxx_cmr_rx_stat10
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_rx_stat10_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of dropped NCSI packets. CNT will wrap and is cleared if the NCSI interface is
+ reset with BGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_RESET]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of dropped NCSI packets. CNT will wrap and is cleared if the NCSI interface is
+ reset with BGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_RESET]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_rx_stat10_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of dropped NCSI packets. [CNT] will wrap and is cleared if NCSI is reset with
+ BGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_RESET]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Octet count of dropped NCSI packets. [CNT] will wrap and is cleared if NCSI is reset with
+ BGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_RESET]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_bgxx_cmr_rx_stat10_s cn88xx; */
+ /* struct bdk_bgxx_cmr_rx_stat10_cn81xx cn83xx; */
+};
+typedef union bdk_bgxx_cmr_rx_stat10 bdk_bgxx_cmr_rx_stat10_t;
+
+static inline uint64_t BDK_BGXX_CMR_RX_STAT10(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_RX_STAT10(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e00000c0ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e00000c0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e00000c0ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_RX_STAT10", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_RX_STAT10(a) bdk_bgxx_cmr_rx_stat10_t
+#define bustype_BDK_BGXX_CMR_RX_STAT10(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_RX_STAT10(a) "BGXX_CMR_RX_STAT10"
+#define device_bar_BDK_BGXX_CMR_RX_STAT10(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_RX_STAT10(a) (a)
+#define arguments_BDK_BGXX_CMR_RX_STAT10(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_rx_stat9
+ *
+ * BGX Receive Status Register 9
+ * This register provides a count of packets dropped at the NCSI interface.
+ * The count of dropped NCSI packets is not accounted for in any other stats
+ * registers.
+ */
+union bdk_bgxx_cmr_rx_stat9
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_rx_stat9_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of dropped packets. CNT will wrap and is cleared if the NCSI interface is reset with
+ BGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_RESET]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of dropped packets. CNT will wrap and is cleared if the NCSI interface is reset with
+ BGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_RESET]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_rx_stat9_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of dropped packets. [CNT] will wrap and is cleared if NCSI is reset with
+ BGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_RESET]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 48; /**< [ 47: 0](R/W/H) Count of dropped packets. [CNT] will wrap and is cleared if NCSI is reset with
+ BGX()_CMR_GLOBAL_CONFIG[CMR_NCSI_RESET]. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_bgxx_cmr_rx_stat9_s cn88xx; */
+ /* struct bdk_bgxx_cmr_rx_stat9_cn81xx cn83xx; */
+};
+typedef union bdk_bgxx_cmr_rx_stat9 bdk_bgxx_cmr_rx_stat9_t;
+
+static inline uint64_t BDK_BGXX_CMR_RX_STAT9(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_RX_STAT9(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e00000b8ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e00000b8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e00000b8ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_RX_STAT9", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_RX_STAT9(a) bdk_bgxx_cmr_rx_stat9_t
+#define bustype_BDK_BGXX_CMR_RX_STAT9(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_RX_STAT9(a) "BGXX_CMR_RX_STAT9"
+#define device_bar_BDK_BGXX_CMR_RX_STAT9(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_RX_STAT9(a) (a)
+#define arguments_BDK_BGXX_CMR_RX_STAT9(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_rx_steering#
+ *
+ * BGX CMR Receive Steering Registers
+ * These registers, along with BGX()_CMR_RX_STEERING_VETYPE(), provide eight filters for
+ * identifying and steering NCSI receive traffic.
+ *
+ * Steering is done for the designated LMAC specified by BGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID].
+ * The steering algorithm is applied after the RX DMAC filter specified by
+ * BGX()_CMR()_RX_DMAC_CTL and BGX()_CMR_RX_DMAC()_CAM. As such, the DMAC filter and steering
+ * filters should be set in a consistent manner.
+ *
+ * Internal:
+ * "* ALGORITHM
+ * // Steering of RX packets for LMAC identified by BGX()_CMR_GLOBAL_CONFIG[NCSI_LMAC_ID].
+ * rx_steering(uint48 pkt_dmac, uint16 pkt_etype, uint16 pkt_vlan_id) {
+ * for (int i = 0; i \< 8; i++) {
+ * steer = BGX()_CMR_RX_STEERING(i);
+ * vetype = BGX()_CMR_RX_STEERING_VETYPE(i);
+ * if (steer[MCST_EN] || steer[DMAC_EN] || vetype[VLAN_EN] || vetype[VLAN_TAG_EN]) {
+ * // Filter is enabled.
+ * if ( (!steer[MCST_EN] || is_mcst(pkt_dmac))
+ * && (!steer[DMAC_EN] || pkt_dmac == steer[DMAC])
+ * && (!vetype[VLAN_EN] || pkt_vlan_id == vetype[VLAN_ID])
+ * && (!vetype[VLAN_TAG_EN] || pkt_etype == vetype[VLAN_ETYPE]) )
+ * {
+ * // Filter match (all enabled matching criteria are met).
+ * return steer[DEST];
+ * }
+ * }
+ * }
+ * return BGX()_CMR_RX_STEERING_DEFAULT[DEST]; // No match
+ * }"
+ */
+union bdk_bgxx_cmr_rx_steeringx
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_rx_steeringx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t dest : 2; /**< [ 51: 50](R/W) Destination for traffic that meets all criteria of the matching algorithm:
+ 0x0 = Steer this traffic exclusively to NCSI.
+ 0x1 = Steer this traffic exclusively to TNS/NIC.
+ 0x2 = Steer this traffic to BOTH TNS/NIC and NCSI.
+ 0x3 = Steer this traffic to the bit bucket (drop). */
+ uint64_t mcst_en : 1; /**< [ 49: 49](R/W) Enable for identifying multicast packets:
+ 1 = Include multicast packets in the matching algorithm.
+ 0 = Do not include multicast packets in the matching algorithm. */
+ uint64_t dmac_en : 1; /**< [ 48: 48](R/W) Enable DMAC address check:
+ 1 = Include DMAC address checking in the matching algorithm.
+ 0 = Do not include DMAC address checking in the matching algorithm. */
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) DMAC address used for the matching algorithm when [DMAC_EN] is set. Broadcast can be
+ specified with value 0xFFFF_FFFFFFFF. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) DMAC address used for the matching algorithm when [DMAC_EN] is set. Broadcast can be
+ specified with value 0xFFFF_FFFFFFFF. */
+ uint64_t dmac_en : 1; /**< [ 48: 48](R/W) Enable DMAC address check:
+ 1 = Include DMAC address checking in the matching algorithm.
+ 0 = Do not include DMAC address checking in the matching algorithm. */
+ uint64_t mcst_en : 1; /**< [ 49: 49](R/W) Enable for identifying multicast packets:
+ 1 = Include multicast packets in the matching algorithm.
+ 0 = Do not include multicast packets in the matching algorithm. */
+ uint64_t dest : 2; /**< [ 51: 50](R/W) Destination for traffic that meets all criteria of the matching algorithm:
+ 0x0 = Steer this traffic exclusively to NCSI.
+ 0x1 = Steer this traffic exclusively to TNS/NIC.
+ 0x2 = Steer this traffic to BOTH TNS/NIC and NCSI.
+ 0x3 = Steer this traffic to the bit bucket (drop). */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_rx_steeringx_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t dest : 2; /**< [ 51: 50](R/W) Destination for traffic that meets all criteria of the matching algorithm:
+ 0x0 = Steer this traffic exclusively to NCSI.
+ 0x1 = Steer this traffic exclusively to NIC.
+ 0x2 = Steer this traffic to BOTH NIC and NCSI.
+ 0x3 = Steer this traffic to the bit bucket (drop). */
+ uint64_t mcst_en : 1; /**< [ 49: 49](R/W) Enable for identifying multicast packets:
+ 1 = Include multicast packets in the matching algorithm.
+ 0 = Do not include multicast packets in the matching algorithm. */
+ uint64_t dmac_en : 1; /**< [ 48: 48](R/W) Enable DMAC address check:
+ 1 = Include DMAC address checking in the matching algorithm.
+ 0 = Do not include DMAC address checking in the matching algorithm. */
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) DMAC address used for the matching algorithm when [DMAC_EN] is set. Broadcast can be
+ specified with value 0xFFFF_FFFFFFFF. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) DMAC address used for the matching algorithm when [DMAC_EN] is set. Broadcast can be
+ specified with value 0xFFFF_FFFFFFFF. */
+ uint64_t dmac_en : 1; /**< [ 48: 48](R/W) Enable DMAC address check:
+ 1 = Include DMAC address checking in the matching algorithm.
+ 0 = Do not include DMAC address checking in the matching algorithm. */
+ uint64_t mcst_en : 1; /**< [ 49: 49](R/W) Enable for identifying multicast packets:
+ 1 = Include multicast packets in the matching algorithm.
+ 0 = Do not include multicast packets in the matching algorithm. */
+ uint64_t dest : 2; /**< [ 51: 50](R/W) Destination for traffic that meets all criteria of the matching algorithm:
+ 0x0 = Steer this traffic exclusively to NCSI.
+ 0x1 = Steer this traffic exclusively to NIC.
+ 0x2 = Steer this traffic to BOTH NIC and NCSI.
+ 0x3 = Steer this traffic to the bit bucket (drop). */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_bgxx_cmr_rx_steeringx_s cn88xx; */
+ struct bdk_bgxx_cmr_rx_steeringx_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t dest : 2; /**< [ 51: 50](R/W) Destination for traffic that meets all criteria of the matching algorithm:
+ 0x0 = Steer this traffic exclusively to NCSI.
+ 0x1 = Steer this traffic exclusively to PKO/NIC.
+ 0x2 = Steer this traffic to BOTH PKO/NIC and NCSI.
+ 0x3 = Steer this traffic to the bit bucket (drop). */
+ uint64_t mcst_en : 1; /**< [ 49: 49](R/W) Enable for identifying multicast packets:
+ 1 = Include multicast packets in the matching algorithm.
+ 0 = Do not include multicast packets in the matching algorithm. */
+ uint64_t dmac_en : 1; /**< [ 48: 48](R/W) Enable DMAC address check:
+ 1 = Include DMAC address checking in the matching algorithm.
+ 0 = Do not include DMAC address checking in the matching algorithm. */
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) DMAC address used for the matching algorithm when [DMAC_EN] is set. Broadcast can be
+ specified with value 0xFFFF_FFFFFFFF. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) DMAC address used for the matching algorithm when [DMAC_EN] is set. Broadcast can be
+ specified with value 0xFFFF_FFFFFFFF. */
+ uint64_t dmac_en : 1; /**< [ 48: 48](R/W) Enable DMAC address check:
+ 1 = Include DMAC address checking in the matching algorithm.
+ 0 = Do not include DMAC address checking in the matching algorithm. */
+ uint64_t mcst_en : 1; /**< [ 49: 49](R/W) Enable for identifying multicast packets:
+ 1 = Include multicast packets in the matching algorithm.
+ 0 = Do not include multicast packets in the matching algorithm. */
+ uint64_t dest : 2; /**< [ 51: 50](R/W) Destination for traffic that meets all criteria of the matching algorithm:
+ 0x0 = Steer this traffic exclusively to NCSI.
+ 0x1 = Steer this traffic exclusively to PKO/NIC.
+ 0x2 = Steer this traffic to BOTH PKO/NIC and NCSI.
+ 0x3 = Steer this traffic to the bit bucket (drop). */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmr_rx_steeringx bdk_bgxx_cmr_rx_steeringx_t;
+
+static inline uint64_t BDK_BGXX_CMR_RX_STEERINGX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_RX_STEERINGX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=7)))
+ return 0x87e0e0000300ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=7)))
+ return 0x87e0e0000300ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=7)))
+ return 0x87e0e0000300ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x7);
+ __bdk_csr_fatal("BGXX_CMR_RX_STEERINGX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_RX_STEERINGX(a,b) bdk_bgxx_cmr_rx_steeringx_t
+#define bustype_BDK_BGXX_CMR_RX_STEERINGX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_RX_STEERINGX(a,b) "BGXX_CMR_RX_STEERINGX"
+#define device_bar_BDK_BGXX_CMR_RX_STEERINGX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_RX_STEERINGX(a,b) (a)
+#define arguments_BDK_BGXX_CMR_RX_STEERINGX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_rx_steering_default
+ *
+ * BGX CMR Receive Steering Default Destination Register
+ */
+union bdk_bgxx_cmr_rx_steering_default
+{
+ uint64_t u;
+ struct bdk_bgxx_cmr_rx_steering_default_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t dest : 2; /**< [ 1: 0](R/W) Destination for traffic that does not match any of the steering filters specified by
+ BGX()_CMR_RX_STEERING() and BGX()_CMR_RX_STEERING_VETYPE():
+ 0x0 = Steer traffic exclusively to NCSI.
+ 0x1 = Steer traffic exclusively to TNS/NIC.
+ 0x2 = Steer traffic to BOTH TNS/NIC and NCSI.
+ 0x3 = Steer traffic to the bit bucket (drop). */
+#else /* Word 0 - Little Endian */
+ uint64_t dest : 2; /**< [ 1: 0](R/W) Destination for traffic that does not match any of the steering filters specified by
+ BGX()_CMR_RX_STEERING() and BGX()_CMR_RX_STEERING_VETYPE():
+ 0x0 = Steer traffic exclusively to NCSI.
+ 0x1 = Steer traffic exclusively to TNS/NIC.
+ 0x2 = Steer traffic to BOTH TNS/NIC and NCSI.
+ 0x3 = Steer traffic to the bit bucket (drop). */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_cmr_rx_steering_default_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t dest : 2; /**< [ 1: 0](R/W) Destination for traffic that does not match any of the steering filters specified by
+ BGX()_CMR_RX_STEERING() and BGX()_CMR_RX_STEERING_VETYPE():
+ 0x0 = Steer traffic exclusively to NCSI.
+ 0x1 = Steer traffic exclusively to NIC.
+ 0x2 = Steer traffic to BOTH NIC and NCSI.
+ 0x3 = Steer traffic to the bit bucket (drop). */
+#else /* Word 0 - Little Endian */
+ uint64_t dest : 2; /**< [ 1: 0](R/W) Destination for traffic that does not match any of the steering filters specified by
+ BGX()_CMR_RX_STEERING() and BGX()_CMR_RX_STEERING_VETYPE():
+ 0x0 = Steer traffic exclusively to NCSI.
+ 0x1 = Steer traffic exclusively to NIC.
+ 0x2 = Steer traffic to BOTH NIC and NCSI.
+ 0x3 = Steer traffic to the bit bucket (drop). */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_bgxx_cmr_rx_steering_default_s cn88xx; */
+ struct bdk_bgxx_cmr_rx_steering_default_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t dest : 2; /**< [ 1: 0](R/W) Destination for traffic that does not match any of the steering filters specified by
+ BGX()_CMR_RX_STEERING() and BGX()_CMR_RX_STEERING_VETYPE():
+ 0x0 = Steer traffic exclusively to NCSI.
+ 0x1 = Steer traffic exclusively to PKO/NIC.
+ 0x2 = Steer traffic to BOTH PKO/NIC and NCSI.
+ 0x3 = Steer traffic to the bit bucket (drop). */
+#else /* Word 0 - Little Endian */
+ uint64_t dest : 2; /**< [ 1: 0](R/W) Destination for traffic that does not match any of the steering filters specified by
+ BGX()_CMR_RX_STEERING() and BGX()_CMR_RX_STEERING_VETYPE():
+ 0x0 = Steer traffic exclusively to NCSI.
+ 0x1 = Steer traffic exclusively to PKO/NIC.
+ 0x2 = Steer traffic to BOTH PKO/NIC and NCSI.
+ 0x3 = Steer traffic to the bit bucket (drop). */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_cmr_rx_steering_default bdk_bgxx_cmr_rx_steering_default_t;
+
+static inline uint64_t BDK_BGXX_CMR_RX_STEERING_DEFAULT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_RX_STEERING_DEFAULT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0000448ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0000448ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0000448ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_RX_STEERING_DEFAULT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_RX_STEERING_DEFAULT(a) bdk_bgxx_cmr_rx_steering_default_t
+#define bustype_BDK_BGXX_CMR_RX_STEERING_DEFAULT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_RX_STEERING_DEFAULT(a) "BGXX_CMR_RX_STEERING_DEFAULT"
+#define device_bar_BDK_BGXX_CMR_RX_STEERING_DEFAULT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_RX_STEERING_DEFAULT(a) (a)
+#define arguments_BDK_BGXX_CMR_RX_STEERING_DEFAULT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_rx_steering_vetype#
+ *
+ * BGX CMR Receive VLAN Ethertype Register
+ * These registers, along with BGX()_CMR_RX_STEERING(), provide eight filters for identifying and
+ * steering NCSI receive traffic.
+ */
/* NOTE: the bit layout is emitted twice, mirrored, under __BYTE_ORDER so
   the named fields land on the same physical register bits on either
   endianness. The two halves must remain exact mirrors. */
union bdk_bgxx_cmr_rx_steering_vetypex
{
    uint64_t u; /* Entire register as a single 64-bit value. */
    struct bdk_bgxx_cmr_rx_steering_vetypex_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_30_63 : 34;
        uint64_t vlan_en : 1; /**< [ 29: 29](R/W) Enable VLAN ID check:
                                   1 = Include VLAN ID checking in the matching algorithm.
                                   0 = Do not include VLAN ID checking in the matching algorithm. */
        uint64_t vlan_id : 12; /**< [ 28: 17](R/W) VLAN ID used for the matching algorithm when [VLAN_EN] is set. */
        uint64_t vlan_tag_en : 1; /**< [ 16: 16](R/W) Enable VLAN tag Ethertype check:
                                       1 = Include VLAN tag Ethertype checking in the matching algorithm.
                                       0 = Do not include VLAN tag Ethertype checking in the matching algorithm. */
        uint64_t vlan_etype : 16; /**< [ 15: 0](R/W) VLAN Ethertype for the matching algorithm when [VLAN_TAG_EN] is set.
                                       802.1Q and 802.1ad specify several Ethertypes used to identify VLAN tagged
                                       and VLAN double tagged packets. BGX will always match against the tag
                                       immediately following the SMAC address of the L2 header. */
#else /* Word 0 - Little Endian */
        uint64_t vlan_etype : 16; /**< [ 15: 0](R/W) VLAN Ethertype for the matching algorithm when [VLAN_TAG_EN] is set.
                                       802.1Q and 802.1ad specify several Ethertypes used to identify VLAN tagged
                                       and VLAN double tagged packets. BGX will always match against the tag
                                       immediately following the SMAC address of the L2 header. */
        uint64_t vlan_tag_en : 1; /**< [ 16: 16](R/W) Enable VLAN tag Ethertype check:
                                       1 = Include VLAN tag Ethertype checking in the matching algorithm.
                                       0 = Do not include VLAN tag Ethertype checking in the matching algorithm. */
        uint64_t vlan_id : 12; /**< [ 28: 17](R/W) VLAN ID used for the matching algorithm when [VLAN_EN] is set. */
        uint64_t vlan_en : 1; /**< [ 29: 29](R/W) Enable VLAN ID check:
                                   1 = Include VLAN ID checking in the matching algorithm.
                                   0 = Do not include VLAN ID checking in the matching algorithm. */
        uint64_t reserved_30_63 : 34;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_bgxx_cmr_rx_steering_vetypex_s cn; */
};
typedef union bdk_bgxx_cmr_rx_steering_vetypex bdk_bgxx_cmr_rx_steering_vetypex_t;
+
+static inline uint64_t BDK_BGXX_CMR_RX_STEERING_VETYPEX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_RX_STEERING_VETYPEX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=7)))
+ return 0x87e0e0000400ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=7)))
+ return 0x87e0e0000400ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=7)))
+ return 0x87e0e0000400ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x7);
+ __bdk_csr_fatal("BGXX_CMR_RX_STEERING_VETYPEX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_RX_STEERING_VETYPEX(a,b) bdk_bgxx_cmr_rx_steering_vetypex_t
+#define bustype_BDK_BGXX_CMR_RX_STEERING_VETYPEX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_RX_STEERING_VETYPEX(a,b) "BGXX_CMR_RX_STEERING_VETYPEX"
+#define device_bar_BDK_BGXX_CMR_RX_STEERING_VETYPEX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_RX_STEERING_VETYPEX(a,b) (a)
+#define arguments_BDK_BGXX_CMR_RX_STEERING_VETYPEX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_cmr_tx_lmacs
+ *
+ * BGX CMR Transmit Logical MACs Registers
+ * This register sets the number of LMACs allowed on the TX interface. The value is important for
+ * defining the partitioning of the transmit FIFO.
+ */
/* NOTE: field layout is duplicated per endianness (__BYTE_ORDER) so the
   named bits are identical on big- and little-endian builds. */
union bdk_bgxx_cmr_tx_lmacs
{
    uint64_t u; /* Entire register as a single 64-bit value. */
    struct bdk_bgxx_cmr_tx_lmacs_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_3_63 : 61;
        uint64_t lmacs : 3; /**< [ 2: 0](R/W) Number of LMACS: Specifies the number of LMACs that can be enabled.
                                 This determines the logical TX buffer size per LMAC and the maximum
                                 LMAC ID that can be used:

                                 0x0 = Reserved.
                                 0x1 = 48 KB per LMAC, maximum LMAC ID is 0.
                                 0x2 = 24 KB per LMAC, maximum LMAC ID is 1.
                                 0x3 = 12 KB per LMAC, maximum LMAC ID is 2.
                                 0x4 = 12 KB per LMAC, maximum LMAC ID is 3.
                                 0x5-0x7 = Reserved.

                                 The maximum LMAC ID is determined by the smaller of BGX()_CMR_RX_LMACS[LMACS]
                                 and BGX()_CMR_TX_LMACS[LMACS]. The two fields should be set to the same value
                                 for normal operation.' */
#else /* Word 0 - Little Endian */
        uint64_t lmacs : 3; /**< [ 2: 0](R/W) Number of LMACS: Specifies the number of LMACs that can be enabled.
                                 This determines the logical TX buffer size per LMAC and the maximum
                                 LMAC ID that can be used:

                                 0x0 = Reserved.
                                 0x1 = 48 KB per LMAC, maximum LMAC ID is 0.
                                 0x2 = 24 KB per LMAC, maximum LMAC ID is 1.
                                 0x3 = 12 KB per LMAC, maximum LMAC ID is 2.
                                 0x4 = 12 KB per LMAC, maximum LMAC ID is 3.
                                 0x5-0x7 = Reserved.

                                 The maximum LMAC ID is determined by the smaller of BGX()_CMR_RX_LMACS[LMACS]
                                 and BGX()_CMR_TX_LMACS[LMACS]. The two fields should be set to the same value
                                 for normal operation.' */
        uint64_t reserved_3_63 : 61;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_bgxx_cmr_tx_lmacs_s cn; */
};
typedef union bdk_bgxx_cmr_tx_lmacs bdk_bgxx_cmr_tx_lmacs_t;
+
+static inline uint64_t BDK_BGXX_CMR_TX_LMACS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CMR_TX_LMACS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0001000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0001000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0001000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_CMR_TX_LMACS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CMR_TX_LMACS(a) bdk_bgxx_cmr_tx_lmacs_t
+#define bustype_BDK_BGXX_CMR_TX_LMACS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CMR_TX_LMACS(a) "BGXX_CMR_TX_LMACS"
+#define device_bar_BDK_BGXX_CMR_TX_LMACS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CMR_TX_LMACS(a) (a)
+#define arguments_BDK_BGXX_CMR_TX_LMACS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_const
+ *
+ * BGX CONST Registers
+ * This register contains constants for software discovery.
+ */
/* NOTE: field layout is duplicated per endianness (__BYTE_ORDER); the
   cn81xx variant differs from the generic layout only in the documented
   meaning of zero values, not in bit positions. */
union bdk_bgxx_const
{
    uint64_t u; /* Entire register as a single 64-bit value. */
    struct bdk_bgxx_const_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63 : 32;
        uint64_t lmacs : 8; /**< [ 31: 24](RO) Number of LMACs. */
        uint64_t tx_fifosz : 24; /**< [ 23: 0](RO) Number of bytes of transmit buffering in entire BGX. This buffering may be
                                      split between LMACs; see BGX()_CMR_TX_LMACS[LMACS]. */
#else /* Word 0 - Little Endian */
        uint64_t tx_fifosz : 24; /**< [ 23: 0](RO) Number of bytes of transmit buffering in entire BGX. This buffering may be
                                      split between LMACs; see BGX()_CMR_TX_LMACS[LMACS]. */
        uint64_t lmacs : 8; /**< [ 31: 24](RO) Number of LMACs. */
        uint64_t reserved_32_63 : 32;
#endif /* Word 0 - End */
    } s;
    struct bdk_bgxx_const_cn81xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63 : 32;
        uint64_t lmacs : 8; /**< [ 31: 24](RO) Number of LMACs.
                                 If 0x0, indicates 4 LMACs, otherwise, the number of LMACs. */
        uint64_t tx_fifosz : 24; /**< [ 23: 0](RO) Number of bytes of transmit buffering in entire BGX. This buffering may be
                                      split between LMACs; see BGX()_CMR_TX_LMACS[LMACS]. If 0x0, indicates size
                                      of 0xC000. */
#else /* Word 0 - Little Endian */
        uint64_t tx_fifosz : 24; /**< [ 23: 0](RO) Number of bytes of transmit buffering in entire BGX. This buffering may be
                                      split between LMACs; see BGX()_CMR_TX_LMACS[LMACS]. If 0x0, indicates size
                                      of 0xC000. */
        uint64_t lmacs : 8; /**< [ 31: 24](RO) Number of LMACs.
                                 If 0x0, indicates 4 LMACs, otherwise, the number of LMACs. */
        uint64_t reserved_32_63 : 32;
#endif /* Word 0 - End */
    } cn81xx;
    /* struct bdk_bgxx_const_s cn83xx; */
};
typedef union bdk_bgxx_const bdk_bgxx_const_t;
+
+static inline uint64_t BDK_BGXX_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0040000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0040000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("BGXX_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CONST(a) bdk_bgxx_const_t
+#define bustype_BDK_BGXX_CONST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CONST(a) "BGXX_CONST"
+#define device_bar_BDK_BGXX_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CONST(a) (a)
+#define arguments_BDK_BGXX_CONST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_const1
+ *
+ * BGX CONST1 Registers
+ * This register contains constants for software discovery.
+ */
/* NOTE: field layout is duplicated per endianness (__BYTE_ORDER); the
   cn81xx variant differs only in the documented meaning of [TYPES]=0,
   not in bit positions. */
union bdk_bgxx_const1
{
    uint64_t u; /* Entire register as a single 64-bit value. */
    struct bdk_bgxx_const1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63 : 32;
        uint64_t res_types : 24; /**< [ 31: 8](RO) Reserved for more LMAC TYPES. */
        uint64_t types : 8; /**< [ 7: 0](RO) LMAC types supported. Each bit if set corresponds to that value of
                                 BGX_LMAC_TYPES_E being supported.
                                 E.g. TYPES\<0\> if set indicates BGX_LMAC_TYPES_E::SGMII is supported. */
#else /* Word 0 - Little Endian */
        uint64_t types : 8; /**< [ 7: 0](RO) LMAC types supported. Each bit if set corresponds to that value of
                                 BGX_LMAC_TYPES_E being supported.
                                 E.g. TYPES\<0\> if set indicates BGX_LMAC_TYPES_E::SGMII is supported. */
        uint64_t res_types : 24; /**< [ 31: 8](RO) Reserved for more LMAC TYPES. */
        uint64_t reserved_32_63 : 32;
#endif /* Word 0 - End */
    } s;
    struct bdk_bgxx_const1_cn81xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63 : 32;
        uint64_t res_types : 24; /**< [ 31: 8](RO) Reserved for more LMAC TYPES. */
        uint64_t types : 8; /**< [ 7: 0](RO) LMAC types supported. Each bit if set corresponds to that value of
                                 BGX_LMAC_TYPES_E being supported. E.g. TYPES\<5\> if set indicates
                                 BGX_LMAC_TYPES_E::RGMII is supported. If 0x0, [TYPES] should be treated by
                                 software as if the read value was 0x5F (all but RGMII). */
#else /* Word 0 - Little Endian */
        uint64_t types : 8; /**< [ 7: 0](RO) LMAC types supported. Each bit if set corresponds to that value of
                                 BGX_LMAC_TYPES_E being supported. E.g. TYPES\<5\> if set indicates
                                 BGX_LMAC_TYPES_E::RGMII is supported. If 0x0, [TYPES] should be treated by
                                 software as if the read value was 0x5F (all but RGMII). */
        uint64_t res_types : 24; /**< [ 31: 8](RO) Reserved for more LMAC TYPES. */
        uint64_t reserved_32_63 : 32;
#endif /* Word 0 - End */
    } cn81xx;
    /* struct bdk_bgxx_const1_s cn83xx; */
};
typedef union bdk_bgxx_const1 bdk_bgxx_const1_t;
+
+static inline uint64_t BDK_BGXX_CONST1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_CONST1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0040008ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0040008ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("BGXX_CONST1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_CONST1(a) bdk_bgxx_const1_t
+#define bustype_BDK_BGXX_CONST1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_CONST1(a) "BGXX_CONST1"
+#define device_bar_BDK_BGXX_CONST1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_CONST1(a) (a)
+#define arguments_BDK_BGXX_CONST1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_prt#_cfg
+ *
+ * BGX GMP GMI LMAC Configuration Registers
+ * This register controls the configuration of the LMAC.
+ */
/* NOTE: field layout is duplicated per endianness (__BYTE_ORDER). The
   cn88xx variant has identical bit positions; only the per-field
   documentation differs (no QSGMII on CN88XX). */
union bdk_bgxx_gmp_gmi_prtx_cfg
{
    uint64_t u; /* Entire register as a single 64-bit value. */
    struct bdk_bgxx_gmp_gmi_prtx_cfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_14_63 : 50;
        uint64_t tx_idle : 1; /**< [ 13: 13](RO/H) TX machine is idle. */
        uint64_t rx_idle : 1; /**< [ 12: 12](RO/H) RX machine is idle. */
        uint64_t reserved_9_11 : 3;
        uint64_t speed_msb : 1; /**< [ 8: 8](R/W) Link speed MSB (SGMII/QSGMII/1000Base-X only). See [SPEED]. */
        uint64_t reserved_4_7 : 4;
        uint64_t slottime : 1; /**< [ 3: 3](R/W) Slot time for half-duplex operation
                                    (SGMII/QSGMII/1000Base-X only):
                                    0 = 512 bit times (10/100 Mb/s operation).
                                    1 = 4096 bit times (1000 Mb/s operation). */
        uint64_t duplex : 1; /**< [ 2: 2](R/W) Duplex mode
                                  (SGMII/QSGMII/1000Base-X only):
                                  0 = half-duplex (collisions/extensions/bursts):
                                  1 = full-duplex. */
        uint64_t speed : 1; /**< [ 1: 1](R/W) Link Speed LSB (SGMII/QSGMII/1000Base-X only):
                                 _ [SPEED_MSB,SPEED] = 0x0: 100 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x1: 1000 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x2: 10 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x3: Reserved. */
        uint64_t reserved_0 : 1;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0 : 1;
        uint64_t speed : 1; /**< [ 1: 1](R/W) Link Speed LSB (SGMII/QSGMII/1000Base-X only):
                                 _ [SPEED_MSB,SPEED] = 0x0: 100 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x1: 1000 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x2: 10 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x3: Reserved. */
        uint64_t duplex : 1; /**< [ 2: 2](R/W) Duplex mode
                                  (SGMII/QSGMII/1000Base-X only):
                                  0 = half-duplex (collisions/extensions/bursts):
                                  1 = full-duplex. */
        uint64_t slottime : 1; /**< [ 3: 3](R/W) Slot time for half-duplex operation
                                    (SGMII/QSGMII/1000Base-X only):
                                    0 = 512 bit times (10/100 Mb/s operation).
                                    1 = 4096 bit times (1000 Mb/s operation). */
        uint64_t reserved_4_7 : 4;
        uint64_t speed_msb : 1; /**< [ 8: 8](R/W) Link speed MSB (SGMII/QSGMII/1000Base-X only). See [SPEED]. */
        uint64_t reserved_9_11 : 3;
        uint64_t rx_idle : 1; /**< [ 12: 12](RO/H) RX machine is idle. */
        uint64_t tx_idle : 1; /**< [ 13: 13](RO/H) TX machine is idle. */
        uint64_t reserved_14_63 : 50;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_bgxx_gmp_gmi_prtx_cfg_s cn81xx; */
    struct bdk_bgxx_gmp_gmi_prtx_cfg_cn88xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_14_63 : 50;
        uint64_t tx_idle : 1; /**< [ 13: 13](RO/H) TX machine is idle. */
        uint64_t rx_idle : 1; /**< [ 12: 12](RO/H) RX machine is idle. */
        uint64_t reserved_9_11 : 3;
        uint64_t speed_msb : 1; /**< [ 8: 8](R/W) Link speed MSB (SGMII/1000Base-X only). See [SPEED]. */
        uint64_t reserved_4_7 : 4;
        uint64_t slottime : 1; /**< [ 3: 3](R/W) Slot time for half-duplex operation
                                    (SGMII/1000Base-X only):
                                    0 = 512 bit times (10/100 Mb/s operation).
                                    1 = 4096 bit times (1000 Mb/s operation). */
        uint64_t duplex : 1; /**< [ 2: 2](R/W) Duplex mode
                                  (SGMII/1000Base-X only):
                                  0 = half-duplex (collisions/extensions/bursts).
                                  1 = full-duplex. */
        uint64_t speed : 1; /**< [ 1: 1](R/W) Link Speed LSB (SGMII/1000Base-X only):
                                 _ [SPEED_MSB,SPEED] = 0x0: 100 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x1: 1000 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x2: 10 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x3: Reserved. */
        uint64_t reserved_0 : 1;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0 : 1;
        uint64_t speed : 1; /**< [ 1: 1](R/W) Link Speed LSB (SGMII/1000Base-X only):
                                 _ [SPEED_MSB,SPEED] = 0x0: 100 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x1: 1000 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x2: 10 Mb/s operation.
                                 _ [SPEED_MSB,SPEED] = 0x3: Reserved. */
        uint64_t duplex : 1; /**< [ 2: 2](R/W) Duplex mode
                                  (SGMII/1000Base-X only):
                                  0 = half-duplex (collisions/extensions/bursts).
                                  1 = full-duplex. */
        uint64_t slottime : 1; /**< [ 3: 3](R/W) Slot time for half-duplex operation
                                    (SGMII/1000Base-X only):
                                    0 = 512 bit times (10/100 Mb/s operation).
                                    1 = 4096 bit times (1000 Mb/s operation). */
        uint64_t reserved_4_7 : 4;
        uint64_t speed_msb : 1; /**< [ 8: 8](R/W) Link speed MSB (SGMII/1000Base-X only). See [SPEED]. */
        uint64_t reserved_9_11 : 3;
        uint64_t rx_idle : 1; /**< [ 12: 12](RO/H) RX machine is idle. */
        uint64_t tx_idle : 1; /**< [ 13: 13](RO/H) TX machine is idle. */
        uint64_t reserved_14_63 : 50;
#endif /* Word 0 - End */
    } cn88xx;
    /* struct bdk_bgxx_gmp_gmi_prtx_cfg_s cn83xx; */
};
typedef union bdk_bgxx_gmp_gmi_prtx_cfg bdk_bgxx_gmp_gmi_prtx_cfg_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_PRTX_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_PRTX_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038020ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038020ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038020ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_PRTX_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_PRTX_CFG(a,b) bdk_bgxx_gmp_gmi_prtx_cfg_t
+#define bustype_BDK_BGXX_GMP_GMI_PRTX_CFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_PRTX_CFG(a,b) "BGXX_GMP_GMI_PRTX_CFG"
+#define device_bar_BDK_BGXX_GMP_GMI_PRTX_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_PRTX_CFG(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_PRTX_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_decision
+ *
+ * BGX GMP Packet-Decision Registers
+ * This register specifies the byte count used to determine when to accept or to filter a packet.
+ * As each byte in a packet is received by GMI, the L2 byte count is compared against
+ * [CNT]. In normal operation, the L2 header begins after the
+ * PREAMBLE + SFD (BGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] = 1) and any optional UDD skip
+ * data (BGX()_GMP_GMI_RX()_UDD_SKP[LEN]).
+ *
+ * Internal:
+ * Notes:
+ * As each byte in a packet is received by GMI, the L2 byte count is compared
+ * against the [CNT]. The L2 byte count is the number of bytes
+ * from the beginning of the L2 header (DMAC). In normal operation, the L2
+ * header begins after the PREAMBLE+SFD (BGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK]=1) and any
+ * optional UDD skip data (BGX()_GMP_GMI_RX()_UDD_SKP[LEN]).
+ * When BGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear, PREAMBLE+SFD are prepended to the
+ * packet and would require UDD skip length to account for them.
+ *
+ * Full Duplex:
+ * _ L2 Size \< [CNT] - Accept packet. No filtering is applied.
+ * _ L2 Size \>= [CNT] - Apply filter. Accept packet based on PAUSE packet filter.
+ *
+ * Half Duplex:
+ * _ L2 Size \< [CNT] - Drop packet. Packet is unconditionally dropped.
+ * _ L2 Size \>= [CNT] - Accept packet.
+ *
+ * where L2_size = MAX(0, total_packet_size - BGX()_GMP_GMI_RX()_UDD_SKP[LEN] -
+ * ((BGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK]==1)*8))
+ */
/* NOTE: field layout is duplicated per endianness (__BYTE_ORDER) so the
   named bits are identical on big- and little-endian builds. */
union bdk_bgxx_gmp_gmi_rxx_decision
{
    uint64_t u; /* Entire register as a single 64-bit value. */
    struct bdk_bgxx_gmp_gmi_rxx_decision_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t cnt : 5; /**< [ 4: 0](R/W) The byte count used to decide when to accept or filter a packet. */
#else /* Word 0 - Little Endian */
        uint64_t cnt : 5; /**< [ 4: 0](R/W) The byte count used to decide when to accept or filter a packet. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_bgxx_gmp_gmi_rxx_decision_s cn; */
};
typedef union bdk_bgxx_gmp_gmi_rxx_decision bdk_bgxx_gmp_gmi_rxx_decision_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_DECISION(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_DECISION(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038040ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038040ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038040ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_DECISION", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_RXX_DECISION(a,b) bdk_bgxx_gmp_gmi_rxx_decision_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_DECISION(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_DECISION(a,b) "BGXX_GMP_GMI_RXX_DECISION"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_DECISION(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_DECISION(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_DECISION(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_frm_chk
+ *
+ * BGX GMP Frame Check Registers
+ */
/* NOTE: field layout is duplicated per endianness (__BYTE_ORDER). The
   cn88xx variant has identical bit positions; only the [CAREXT] field
   documentation differs (no QSGMII on CN88XX). */
union bdk_bgxx_gmp_gmi_rxx_frm_chk
{
    uint64_t u; /* Entire register as a single 64-bit value. */
    struct bdk_bgxx_gmp_gmi_rxx_frm_chk_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_9_63 : 55;
        uint64_t skperr : 1; /**< [ 8: 8](R/W) Skipper error. */
        uint64_t rcverr : 1; /**< [ 7: 7](R/W) Frame was received with data-reception error. */
        uint64_t reserved_5_6 : 2;
        uint64_t fcserr : 1; /**< [ 4: 4](R/W) Frame was received with FCS/CRC error. */
        uint64_t jabber : 1; /**< [ 3: 3](R/W) Frame was received with length \> sys_length. */
        uint64_t reserved_2 : 1;
        uint64_t carext : 1; /**< [ 1: 1](R/W) Carrier extend error. SGMII/QSGMII/1000Base-X only. */
        uint64_t minerr : 1; /**< [ 0: 0](R/W) PAUSE frame was received with length \< minFrameSize. */
#else /* Word 0 - Little Endian */
        uint64_t minerr : 1; /**< [ 0: 0](R/W) PAUSE frame was received with length \< minFrameSize. */
        uint64_t carext : 1; /**< [ 1: 1](R/W) Carrier extend error. SGMII/QSGMII/1000Base-X only. */
        uint64_t reserved_2 : 1;
        uint64_t jabber : 1; /**< [ 3: 3](R/W) Frame was received with length \> sys_length. */
        uint64_t fcserr : 1; /**< [ 4: 4](R/W) Frame was received with FCS/CRC error. */
        uint64_t reserved_5_6 : 2;
        uint64_t rcverr : 1; /**< [ 7: 7](R/W) Frame was received with data-reception error. */
        uint64_t skperr : 1; /**< [ 8: 8](R/W) Skipper error. */
        uint64_t reserved_9_63 : 55;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_bgxx_gmp_gmi_rxx_frm_chk_s cn81xx; */
    struct bdk_bgxx_gmp_gmi_rxx_frm_chk_cn88xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_9_63 : 55;
        uint64_t skperr : 1; /**< [ 8: 8](R/W) Skipper error. */
        uint64_t rcverr : 1; /**< [ 7: 7](R/W) Frame was received with data-reception error. */
        uint64_t reserved_5_6 : 2;
        uint64_t fcserr : 1; /**< [ 4: 4](R/W) Frame was received with FCS/CRC error. */
        uint64_t jabber : 1; /**< [ 3: 3](R/W) Frame was received with length \> sys_length. */
        uint64_t reserved_2 : 1;
        uint64_t carext : 1; /**< [ 1: 1](R/W) Carrier extend error. SGMII/1000Base-X only. */
        uint64_t minerr : 1; /**< [ 0: 0](R/W) PAUSE frame was received with length \< minFrameSize. */
#else /* Word 0 - Little Endian */
        uint64_t minerr : 1; /**< [ 0: 0](R/W) PAUSE frame was received with length \< minFrameSize. */
        uint64_t carext : 1; /**< [ 1: 1](R/W) Carrier extend error. SGMII/1000Base-X only. */
        uint64_t reserved_2 : 1;
        uint64_t jabber : 1; /**< [ 3: 3](R/W) Frame was received with length \> sys_length. */
        uint64_t fcserr : 1; /**< [ 4: 4](R/W) Frame was received with FCS/CRC error. */
        uint64_t reserved_5_6 : 2;
        uint64_t rcverr : 1; /**< [ 7: 7](R/W) Frame was received with data-reception error. */
        uint64_t skperr : 1; /**< [ 8: 8](R/W) Skipper error. */
        uint64_t reserved_9_63 : 55;
#endif /* Word 0 - End */
    } cn88xx;
    /* struct bdk_bgxx_gmp_gmi_rxx_frm_chk_s cn83xx; */
};
typedef union bdk_bgxx_gmp_gmi_rxx_frm_chk bdk_bgxx_gmp_gmi_rxx_frm_chk_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_FRM_CHK(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_FRM_CHK(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038030ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038030ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038030ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_FRM_CHK", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_RXX_FRM_CHK(a,b) bdk_bgxx_gmp_gmi_rxx_frm_chk_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_FRM_CHK(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_FRM_CHK(a,b) "BGXX_GMP_GMI_RXX_FRM_CHK"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_FRM_CHK(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_FRM_CHK(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_FRM_CHK(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_frm_ctl
+ *
+ * BGX GMP Frame Control Registers
+ * This register controls the handling of the frames.
+ * The [CTL_BCK] and [CTL_DRP] bits control how the hardware handles incoming PAUSE packets. The
+ * most
+ * common modes of operation:
+ * _ [CTL_BCK] = 1, [CTL_DRP] = 1: hardware handles everything.
+ * _ [CTL_BCK] = 0, [CTL_DRP] = 0: software sees all PAUSE frames.
+ * _ [CTL_BCK] = 0, [CTL_DRP] = 1: all PAUSE frames are completely ignored.
+ *
+ * These control bits should be set to [CTL_BCK] = 0, [CTL_DRP] = 0 in half-duplex mode. Since
+ * PAUSE
+ * packets only apply to full duplex operation, any PAUSE packet would constitute an exception
+ * which should be handled by the processing cores. PAUSE packets should not be forwarded.
+ *
+ * Internal:
+ * Notes:
+ * [PRE_STRP]:
+ * When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP]
+ * determines if the PREAMBLE+SFD bytes are thrown away or sent to the Octane
+ * core as part of the packet.
+ * In either mode, the PREAMBLE+SFD bytes are not counted toward the packet
+ * size when checking against the MIN and MAX bounds. Furthermore, the bytes
+ * are skipped when locating the start of the L2 header for DMAC and Control
+ * frame recognition.
+ */
+union bdk_bgxx_gmp_gmi_rxx_frm_ctl
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_rxx_frm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t rx_fc_type : 1; /**< [ 13: 13](R/W) Receive side flow control type select.
+ 0 = GMI MAC receives and processes ITU G.999.1 pause frames.
+ 1 = GMI MAC receives and processes 802.3 pause frames. */
+ uint64_t ptp_mode : 1; /**< [ 12: 12](R/W) Timestamp mode. When [PTP_MODE] is set, a 64-bit timestamp is prepended to every incoming
+ packet.
+
+ The timestamp bytes are added to the packet in such a way as to not modify the packet's
+ receive byte count. This implies that the BGX()_GMP_GMI_RX()_JABBER,
+ BGX()_GMP_GMI_RX()_DECISION, BGX()_GMP_GMI_RX()_UDD_SKP, and
+ BGX()_CMR()_RX_STAT0..BGX()_CMR()_RX_STAT8
+ do not require any adjustment as they operate on the received
+ packet size. When the packet reaches NIC, its size reflects the additional bytes. */
+ uint64_t reserved_11 : 1;
+ uint64_t null_dis : 1; /**< [ 10: 10](R/W) When set, do not modify the MOD bits on NULL ticks due to partial packets. */
+ uint64_t pre_align : 1; /**< [ 9: 9](R/W) When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous
+ PREAMBLE nibbles. In this mode, [PRE_STRP] should be set to account for the variable
+ nature of the PREAMBLE. [PRE_CHK] must be set to enable this and all PREAMBLE features.
+ SGMII at 10/100Mbs only. */
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_free : 1; /**< [ 6: 6](RO/H) When set, PREAMBLE checking is less strict. GMI will begin the frame at the first SFD.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features. SGMII/QSGMII/1000Base-X
+ only. */
+ uint64_t ctl_smac : 1; /**< [ 5: 5](R/W) Control PAUSE frames can match station SMAC. */
+ uint64_t ctl_mcst : 1; /**< [ 4: 4](R/W) Control PAUSE frames can match globally assigned multicast address. */
+ uint64_t ctl_bck : 1; /**< [ 3: 3](R/W) Forward PAUSE information to TX block. */
+ uint64_t ctl_drp : 1; /**< [ 2: 2](R/W) Drop control-PAUSE frames. */
+ uint64_t pre_strp : 1; /**< [ 1: 1](R/W) Strip off the preamble (when present).
+ 0 = PREAMBLE + SFD is sent to core as part of frame.
+ 1 = PREAMBLE + SFD is dropped.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features.
+
+ If [PTP_MODE]=1 and [PRE_CHK]=1, [PRE_STRP] must be 1.
+
+ When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP] determines
+ if
+ the PREAMBLE+SFD bytes are thrown away or sent to the core as part of the packet. In
+ either mode, the PREAMBLE+SFD bytes are not counted toward the packet size when checking
+ against the MIN and MAX bounds. Furthermore, the bytes are skipped when locating the start
+ of the L2 header for DMAC and control frame recognition. */
+ uint64_t pre_chk : 1; /**< [ 0: 0](R/W) Check the preamble for correctness. This port is configured to send a valid 802.3 PREAMBLE
+ to begin every frame. GMI checks that a valid PREAMBLE is received (based on [PRE_FREE]).
+ When a problem does occur within the PREAMBLE sequence, the frame is marked as bad and not
+ sent into the core. The BGX()_GMP()_RX_INT[PCTERR] interrupt is also raised. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_chk : 1; /**< [ 0: 0](R/W) Check the preamble for correctness. This port is configured to send a valid 802.3 PREAMBLE
+ to begin every frame. GMI checks that a valid PREAMBLE is received (based on [PRE_FREE]).
+ When a problem does occur within the PREAMBLE sequence, the frame is marked as bad and not
+ sent into the core. The BGX()_GMP()_RX_INT[PCTERR] interrupt is also raised. */
+ uint64_t pre_strp : 1; /**< [ 1: 1](R/W) Strip off the preamble (when present).
+ 0 = PREAMBLE + SFD is sent to core as part of frame.
+ 1 = PREAMBLE + SFD is dropped.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features.
+
+ If [PTP_MODE]=1 and [PRE_CHK]=1, [PRE_STRP] must be 1.
+
+ When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP] determines
+ if
+ the PREAMBLE+SFD bytes are thrown away or sent to the core as part of the packet. In
+ either mode, the PREAMBLE+SFD bytes are not counted toward the packet size when checking
+ against the MIN and MAX bounds. Furthermore, the bytes are skipped when locating the start
+ of the L2 header for DMAC and control frame recognition. */
+ uint64_t ctl_drp : 1; /**< [ 2: 2](R/W) Drop control-PAUSE frames. */
+ uint64_t ctl_bck : 1; /**< [ 3: 3](R/W) Forward PAUSE information to TX block. */
+ uint64_t ctl_mcst : 1; /**< [ 4: 4](R/W) Control PAUSE frames can match globally assigned multicast address. */
+ uint64_t ctl_smac : 1; /**< [ 5: 5](R/W) Control PAUSE frames can match station SMAC. */
+ uint64_t pre_free : 1; /**< [ 6: 6](RO/H) When set, PREAMBLE checking is less strict. GMI will begin the frame at the first SFD.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features. SGMII/QSGMII/1000Base-X
+ only. */
+ uint64_t reserved_7_8 : 2;
+ uint64_t pre_align : 1; /**< [ 9: 9](R/W) When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous
+ PREAMBLE nibbles. In this mode, [PRE_STRP] should be set to account for the variable
+ nature of the PREAMBLE. [PRE_CHK] must be set to enable this and all PREAMBLE features.
+ SGMII at 10/100Mbs only. */
+ uint64_t null_dis : 1; /**< [ 10: 10](R/W) When set, do not modify the MOD bits on NULL ticks due to partial packets. */
+ uint64_t reserved_11 : 1;
+ uint64_t ptp_mode : 1; /**< [ 12: 12](R/W) Timestamp mode. When [PTP_MODE] is set, a 64-bit timestamp is prepended to every incoming
+ packet.
+
+ The timestamp bytes are added to the packet in such a way as to not modify the packet's
+ receive byte count. This implies that the BGX()_GMP_GMI_RX()_JABBER,
+ BGX()_GMP_GMI_RX()_DECISION, BGX()_GMP_GMI_RX()_UDD_SKP, and
+ BGX()_CMR()_RX_STAT0..BGX()_CMR()_RX_STAT8
+ do not require any adjustment as they operate on the received
+ packet size. When the packet reaches NIC, its size reflects the additional bytes. */
+ uint64_t rx_fc_type : 1; /**< [ 13: 13](R/W) Receive side flow control type select.
+ 0 = GMI MAC receives and processes ITU G.999.1 pause frames.
+ 1 = GMI MAC receives and processes 802.3 pause frames. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_gmp_gmi_rxx_frm_ctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t rx_fc_type : 1; /**< [ 13: 13](R/W) Receive side flow control type select.
+ 0 = GMI MAC receives and processes ITU G.999.1 pause frames.
+ 1 = GMI MAC receives and processes 802.3 pause frames. */
+ uint64_t ptp_mode : 1; /**< [ 12: 12](R/W) Timestamp mode. When [PTP_MODE] is set, a 64-bit timestamp is prepended to every incoming
+ packet.
+
+ The timestamp bytes are added to the packet in such a way as to not modify the packet's
+ receive byte count. This implies that the BGX()_GMP_GMI_RX()_JABBER,
+ BGX()_GMP_GMI_RX()_DECISION, BGX()_GMP_GMI_RX()_UDD_SKP, and
+ BGX()_CMR()_RX_STAT0..BGX()_CMR()_RX_STAT8
+ do not require any adjustment as they operate on the received
+ packet size. When the packet reaches NIC, its size reflects the additional bytes. */
+ uint64_t reserved_11 : 1;
+ uint64_t null_dis : 1; /**< [ 10: 10](R/W) When set, do not modify the MOD bits on NULL ticks due to partial packets. */
+ uint64_t pre_align : 1; /**< [ 9: 9](R/W) When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous
+ PREAMBLE nibbles. In this mode, [PRE_STRP] should be set to account for the variable
+ nature of the PREAMBLE. [PRE_CHK] must be set to enable this and all PREAMBLE features.
+ SGMII at 10/100Mbs only. */
+ uint64_t reserved_8 : 1;
+ uint64_t reserved_7 : 1;
+ uint64_t pre_free : 1; /**< [ 6: 6](RO/H) When set, PREAMBLE checking is less strict. GMI will begin the frame at the first SFD.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features. SGMII/QSGMII/1000Base-X
+ only. */
+ uint64_t ctl_smac : 1; /**< [ 5: 5](R/W) Control PAUSE frames can match station SMAC. */
+ uint64_t ctl_mcst : 1; /**< [ 4: 4](R/W) Control PAUSE frames can match globally assigned multicast address. */
+ uint64_t ctl_bck : 1; /**< [ 3: 3](R/W) Forward PAUSE information to TX block. */
+ uint64_t ctl_drp : 1; /**< [ 2: 2](R/W) Drop control-PAUSE frames. */
+ uint64_t pre_strp : 1; /**< [ 1: 1](R/W) Strip off the preamble (when present).
+ 0 = PREAMBLE + SFD is sent to core as part of frame.
+ 1 = PREAMBLE + SFD is dropped.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features.
+
+ If [PTP_MODE]=1 and [PRE_CHK]=1, [PRE_STRP] must be 1.
+
+ When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP] determines
+ if
+ the PREAMBLE+SFD bytes are thrown away or sent to the core as part of the packet. In
+ either mode, the PREAMBLE+SFD bytes are not counted toward the packet size when checking
+ against the MIN and MAX bounds. Furthermore, the bytes are skipped when locating the start
+ of the L2 header for DMAC and control frame recognition. */
+ uint64_t pre_chk : 1; /**< [ 0: 0](R/W) Check the preamble for correctness. This port is configured to send a valid 802.3 PREAMBLE
+ to begin every frame. GMI checks that a valid PREAMBLE is received (based on [PRE_FREE]).
+ When a problem does occur within the PREAMBLE sequence, the frame is marked as bad and not
+ sent into the core. The BGX()_GMP()_RX_INT[PCTERR] interrupt is also raised. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_chk : 1; /**< [ 0: 0](R/W) Check the preamble for correctness. This port is configured to send a valid 802.3 PREAMBLE
+ to begin every frame. GMI checks that a valid PREAMBLE is received (based on [PRE_FREE]).
+ When a problem does occur within the PREAMBLE sequence, the frame is marked as bad and not
+ sent into the core. The BGX()_GMP()_RX_INT[PCTERR] interrupt is also raised. */
+ uint64_t pre_strp : 1; /**< [ 1: 1](R/W) Strip off the preamble (when present).
+ 0 = PREAMBLE + SFD is sent to core as part of frame.
+ 1 = PREAMBLE + SFD is dropped.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features.
+
+ If [PTP_MODE]=1 and [PRE_CHK]=1, [PRE_STRP] must be 1.
+
+ When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP] determines
+ if
+ the PREAMBLE+SFD bytes are thrown away or sent to the core as part of the packet. In
+ either mode, the PREAMBLE+SFD bytes are not counted toward the packet size when checking
+ against the MIN and MAX bounds. Furthermore, the bytes are skipped when locating the start
+ of the L2 header for DMAC and control frame recognition. */
+ uint64_t ctl_drp : 1; /**< [ 2: 2](R/W) Drop control-PAUSE frames. */
+ uint64_t ctl_bck : 1; /**< [ 3: 3](R/W) Forward PAUSE information to TX block. */
+ uint64_t ctl_mcst : 1; /**< [ 4: 4](R/W) Control PAUSE frames can match globally assigned multicast address. */
+ uint64_t ctl_smac : 1; /**< [ 5: 5](R/W) Control PAUSE frames can match station SMAC. */
+ uint64_t pre_free : 1; /**< [ 6: 6](RO/H) When set, PREAMBLE checking is less strict. GMI will begin the frame at the first SFD.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features. SGMII/QSGMII/1000Base-X
+ only. */
+ uint64_t reserved_7 : 1;
+ uint64_t reserved_8 : 1;
+ uint64_t pre_align : 1; /**< [ 9: 9](R/W) When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous
+ PREAMBLE nibbles. In this mode, [PRE_STRP] should be set to account for the variable
+ nature of the PREAMBLE. [PRE_CHK] must be set to enable this and all PREAMBLE features.
+ SGMII at 10/100Mbs only. */
+ uint64_t null_dis : 1; /**< [ 10: 10](R/W) When set, do not modify the MOD bits on NULL ticks due to partial packets. */
+ uint64_t reserved_11 : 1;
+ uint64_t ptp_mode : 1; /**< [ 12: 12](R/W) Timestamp mode. When [PTP_MODE] is set, a 64-bit timestamp is prepended to every incoming
+ packet.
+
+ The timestamp bytes are added to the packet in such a way as to not modify the packet's
+ receive byte count. This implies that the BGX()_GMP_GMI_RX()_JABBER,
+ BGX()_GMP_GMI_RX()_DECISION, BGX()_GMP_GMI_RX()_UDD_SKP, and
+ BGX()_CMR()_RX_STAT0..BGX()_CMR()_RX_STAT8
+ do not require any adjustment as they operate on the received
+ packet size. When the packet reaches NIC, its size reflects the additional bytes. */
+ uint64_t rx_fc_type : 1; /**< [ 13: 13](R/W) Receive side flow control type select.
+ 0 = GMI MAC receives and processes ITU G.999.1 pause frames.
+ 1 = GMI MAC receives and processes 802.3 pause frames. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_bgxx_gmp_gmi_rxx_frm_ctl_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t ptp_mode : 1; /**< [ 12: 12](R/W) Timestamp mode. When [PTP_MODE] is set, a 64-bit timestamp is prepended to every incoming
+ packet.
+
+ The timestamp bytes are added to the packet in such a way as to not modify the packet's
+ receive byte count. This implies that the BGX()_GMP_GMI_RX()_JABBER,
+ BGX()_GMP_GMI_RX()_DECISION, BGX()_GMP_GMI_RX()_UDD_SKP, and
+ BGX()_CMR()_RX_STAT0..BGX()_CMR()_RX_STAT8
+ do not require any adjustment as they operate on the received
+ packet size. When the packet reaches NIC, its size reflects the additional bytes. */
+ uint64_t reserved_11 : 1;
+ uint64_t null_dis : 1; /**< [ 10: 10](R/W) When set, do not modify the MOD bits on NULL ticks due to partial packets. */
+ uint64_t pre_align : 1; /**< [ 9: 9](R/W) When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous
+ PREAMBLE nibbles. In this mode, [PRE_STRP] should be set to account for the variable
+ nature of the PREAMBLE. [PRE_CHK] must be set to enable this and all PREAMBLE features.
+ SGMII at 10/100Mbs only. */
+ uint64_t reserved_8 : 1;
+ uint64_t reserved_7 : 1;
+ uint64_t pre_free : 1; /**< [ 6: 6](RO/H) When set, PREAMBLE checking is less strict. GMI will begin the frame at the first SFD.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features. SGMII/1000Base-X only. */
+ uint64_t ctl_smac : 1; /**< [ 5: 5](R/W) Control PAUSE frames can match station SMAC. */
+ uint64_t ctl_mcst : 1; /**< [ 4: 4](R/W) Control PAUSE frames can match globally assigned multicast address. */
+ uint64_t ctl_bck : 1; /**< [ 3: 3](R/W) Forward PAUSE information to TX block. */
+ uint64_t ctl_drp : 1; /**< [ 2: 2](R/W) Drop control-PAUSE frames. */
+ uint64_t pre_strp : 1; /**< [ 1: 1](R/W) Strip off the preamble (when present).
+ 0 = PREAMBLE + SFD is sent to core as part of frame.
+ 1 = PREAMBLE + SFD is dropped.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features.
+
+ If [PTP_MODE]=1 and [PRE_CHK]=1, [PRE_STRP] must be 1.
+
+ When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP] determines
+ if
+ the PREAMBLE+SFD bytes are thrown away or sent to the core as part of the packet. In
+ either mode, the PREAMBLE+SFD bytes are not counted toward the packet size when checking
+ against the MIN and MAX bounds. Furthermore, the bytes are skipped when locating the start
+ of the L2 header for DMAC and control frame recognition. */
+ uint64_t pre_chk : 1; /**< [ 0: 0](R/W) Check the preamble for correctness. This port is configured to send a valid 802.3 PREAMBLE
+ to begin every frame. GMI checks that a valid PREAMBLE is received (based on [PRE_FREE]).
+ When a problem does occur within the PREAMBLE sequence, the frame is marked as bad and not
+ sent into the core. The BGX()_SMU()_RX_INT[PCTERR] interrupt is also raised.
+
+ When BGX()_SMU()_TX_CTL[HG_EN] is set, [PRE_CHK] must be 0. If [PTP_MODE] = 1 and
+ [PRE_CHK] = 1, [PRE_STRP] must be 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_chk : 1; /**< [ 0: 0](R/W) Check the preamble for correctness. This port is configured to send a valid 802.3 PREAMBLE
+ to begin every frame. GMI checks that a valid PREAMBLE is received (based on [PRE_FREE]).
+ When a problem does occur within the PREAMBLE sequence, the frame is marked as bad and not
+ sent into the core. The BGX()_SMU()_RX_INT[PCTERR] interrupt is also raised.
+
+ When BGX()_SMU()_TX_CTL[HG_EN] is set, [PRE_CHK] must be 0. If [PTP_MODE] = 1 and
+ [PRE_CHK] = 1, [PRE_STRP] must be 1. */
+ uint64_t pre_strp : 1; /**< [ 1: 1](R/W) Strip off the preamble (when present).
+ 0 = PREAMBLE + SFD is sent to core as part of frame.
+ 1 = PREAMBLE + SFD is dropped.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features.
+
+ If [PTP_MODE]=1 and [PRE_CHK]=1, [PRE_STRP] must be 1.
+
+ When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP] determines
+ if
+ the PREAMBLE+SFD bytes are thrown away or sent to the core as part of the packet. In
+ either mode, the PREAMBLE+SFD bytes are not counted toward the packet size when checking
+ against the MIN and MAX bounds. Furthermore, the bytes are skipped when locating the start
+ of the L2 header for DMAC and control frame recognition. */
+ uint64_t ctl_drp : 1; /**< [ 2: 2](R/W) Drop control-PAUSE frames. */
+ uint64_t ctl_bck : 1; /**< [ 3: 3](R/W) Forward PAUSE information to TX block. */
+ uint64_t ctl_mcst : 1; /**< [ 4: 4](R/W) Control PAUSE frames can match globally assigned multicast address. */
+ uint64_t ctl_smac : 1; /**< [ 5: 5](R/W) Control PAUSE frames can match station SMAC. */
+ uint64_t pre_free : 1; /**< [ 6: 6](RO/H) When set, PREAMBLE checking is less strict. GMI will begin the frame at the first SFD.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features. SGMII/1000Base-X only. */
+ uint64_t reserved_7 : 1;
+ uint64_t reserved_8 : 1;
+ uint64_t pre_align : 1; /**< [ 9: 9](R/W) When set, PREAMBLE parser aligns the SFD byte regardless of the number of previous
+ PREAMBLE nibbles. In this mode, [PRE_STRP] should be set to account for the variable
+ nature of the PREAMBLE. [PRE_CHK] must be set to enable this and all PREAMBLE features.
+ SGMII at 10/100Mbs only. */
+ uint64_t null_dis : 1; /**< [ 10: 10](R/W) When set, do not modify the MOD bits on NULL ticks due to partial packets. */
+ uint64_t reserved_11 : 1;
+ uint64_t ptp_mode : 1; /**< [ 12: 12](R/W) Timestamp mode. When [PTP_MODE] is set, a 64-bit timestamp is prepended to every incoming
+ packet.
+
+ The timestamp bytes are added to the packet in such a way as to not modify the packet's
+ receive byte count. This implies that the BGX()_GMP_GMI_RX()_JABBER,
+ BGX()_GMP_GMI_RX()_DECISION, BGX()_GMP_GMI_RX()_UDD_SKP, and
+ BGX()_CMR()_RX_STAT0..BGX()_CMR()_RX_STAT8
+ do not require any adjustment as they operate on the received
+ packet size. When the packet reaches NIC, its size reflects the additional bytes. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_gmi_rxx_frm_ctl_cn81xx cn83xx; */
+};
+typedef union bdk_bgxx_gmp_gmi_rxx_frm_ctl bdk_bgxx_gmp_gmi_rxx_frm_ctl_t;
+
+/*
+ * Return the physical RSL address of BGX(a)_GMP_GMI_RX(b)_FRM_CTL for the
+ * chip model detected at runtime (a = BGX block, b = LMAC/port index).
+ * CN81XX/CN88XX expose 2 BGX blocks (a <= 1), CN83XX exposes 4 (a <= 3);
+ * all models have 4 LMACs per block (b <= 3).  An (a,b) pair outside the
+ * detected model's range falls through to __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_FRM_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_FRM_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038028ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038028ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038028ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_FRM_CTL", 2, a, b, 0, 0);
+}
+
+/* NOTE(review): token-pasted per-register attributes, presumably consumed by
+   the generic BDK CSR accessor macros — confirm against bdk-csr.h. */
+#define typedef_BDK_BGXX_GMP_GMI_RXX_FRM_CTL(a,b) bdk_bgxx_gmp_gmi_rxx_frm_ctl_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_FRM_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_FRM_CTL(a,b) "BGXX_GMP_GMI_RXX_FRM_CTL"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_FRM_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_FRM_CTL(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_FRM_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_ifg
+ *
+ * BGX GMI Minimum Interframe-Gap Cycles Registers
+ * This register specifies the minimum number of interframe-gap (IFG) cycles between packets.
+ */
+/* HRM-derived layout: a single 4-bit IFG field in word 0.  The cn88xx
+   variant is structurally identical and differs only in the supported-mode
+   text of the field description (no QSGMII on cn88xx). */
+union bdk_bgxx_gmp_gmi_rxx_ifg
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_rxx_ifg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ifg : 4; /**< [ 3: 0](R/W) Min IFG (in IFG * 8 bits) between packets used to determine IFGERR. Normally IFG is 96
+ bits. Values 0x1 or smaller are illegal.
+
+ Note that in some operating modes, IFG cycles can be inserted or removed in order to
+ achieve clock rate adaptation. For these reasons, the default value is slightly
+ conservative and does not check up to the full 96 bits of IFG.
+ (SGMII/QSGMII/1000Base-X only) */
+#else /* Word 0 - Little Endian */
+ uint64_t ifg : 4; /**< [ 3: 0](R/W) Min IFG (in IFG * 8 bits) between packets used to determine IFGERR. Normally IFG is 96
+ bits. Values 0x1 or smaller are illegal.
+
+ Note that in some operating modes, IFG cycles can be inserted or removed in order to
+ achieve clock rate adaptation. For these reasons, the default value is slightly
+ conservative and does not check up to the full 96 bits of IFG.
+ (SGMII/QSGMII/1000Base-X only) */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_rxx_ifg_s cn81xx; */
+ struct bdk_bgxx_gmp_gmi_rxx_ifg_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ifg : 4; /**< [ 3: 0](R/W) Min IFG (in IFG * 8 bits) between packets used to determine IFGERR. Normally IFG is 96
+ bits. Values 0x1 or smaller are illegal.
+
+ Note that in some operating modes, IFG cycles can be inserted or removed in order to
+ achieve clock rate adaptation. For these reasons, the default value is slightly
+ conservative and does not check up to the full 96 bits of IFG.
+ (SGMII/1000Base-X only) */
+#else /* Word 0 - Little Endian */
+ uint64_t ifg : 4; /**< [ 3: 0](R/W) Min IFG (in IFG * 8 bits) between packets used to determine IFGERR. Normally IFG is 96
+ bits. Values 0x1 or smaller are illegal.
+
+ Note that in some operating modes, IFG cycles can be inserted or removed in order to
+ achieve clock rate adaptation. For these reasons, the default value is slightly
+ conservative and does not check up to the full 96 bits of IFG.
+ (SGMII/1000Base-X only) */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_gmi_rxx_ifg_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_gmi_rxx_ifg bdk_bgxx_gmp_gmi_rxx_ifg_t;
+
+/*
+ * Return the physical RSL address of BGX(a)_GMP_GMI_RX(b)_IFG for the
+ * detected chip model (a = BGX block, b = LMAC/port index).  Stride is
+ * 0x1000000 per BGX block and 0x100000 per LMAC, same as the sibling
+ * FRM_CTL/INT accessors; invalid (a,b) falls through to __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_IFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_IFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038058ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_IFG", 2, a, b, 0, 0);
+}
+
+/* NOTE(review): token-pasted per-register attributes, presumably consumed by
+   the generic BDK CSR accessor macros — confirm against bdk-csr.h. */
+#define typedef_BDK_BGXX_GMP_GMI_RXX_IFG(a,b) bdk_bgxx_gmp_gmi_rxx_ifg_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_IFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_IFG(a,b) "BGXX_GMP_GMI_RXX_IFG"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_IFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_IFG(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_IFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_int
+ *
+ * BGX GMP GMI RX Interrupt Registers
+ * '"These registers allow interrupts to be sent to the control processor.
+ * * Exception conditions \<10:0\> can also set the rcv/opcode in the received packet's work-queue
+ * entry. BGX()_GMP_GMI_RX()_FRM_CHK provides a bit mask for configuring which conditions
+ * set the error.
+ * In half duplex operation, the expectation is that collisions will appear as either MINERR or
+ * CAREXT errors.'
+ *
+ * Internal:
+ * Notes:
+ * (1) exception conditions 10:0 can also set the rcv/opcode in the received
+ * packet's workQ entry. The BGX()_GMP_GMI_RX()_FRM_CHK register provides a bit mask
+ * for configuring which conditions set the error.
+ *
+ * (2) in half duplex operation, the expectation is that collisions will appear
+ * as either MINERR or CAREXT errors.
+ *
+ * (3) JABBER An RX jabber error indicates that a packet was received which
+ * is longer than the maximum allowed packet as defined by the
+ * system. GMI will truncate the packet at the JABBER count.
+ * Failure to do so could lead to system instability.
+ *
+ * (4) NIBERR This error is illegal at 1000Mbs speeds
+ * (BGX()_GMP_GMI_PRT()_CFG[SPEED]==0) and will never assert.
+ *
+ * (5) MINERR total frame DA+SA+TL+DATA+PAD+FCS \< 64
+ *
+ * (6) ALNERR Indicates that the packet received was not an integer number of
+ * bytes. If FCS checking is enabled, ALNERR will only assert if
+ * the FCS is bad. If FCS checking is disabled, ALNERR will
+ * assert in all non-integer frame cases.
+ *
+ * (7) Collisions Collisions can only occur in half-duplex mode. A collision
+ * is assumed by the receiver when the slottime
+ * (BGX()_GMP_GMI_PRT()_CFG[SLOTTIME]) is not satisfied. In 10/100 mode,
+ * this will result in a frame \< SLOTTIME. In 1000 mode, it
+ * could result either in frame \< SLOTTIME or a carrier extend
+ * error with the SLOTTIME. These conditions are visible by...
+ * . transfer ended before slottime COLDET
+ * . carrier extend error CAREXT
+ *
+ * (A) LENERR Length errors occur when the received packet does not match the
+ * length field. LENERR is only checked for packets between 64
+ * and 1500 bytes. For untagged frames, the length must exact
+ * match. For tagged frames the length or length+4 must match.
+ *
+ * (B) PCTERR checks that the frame begins with a valid PREAMBLE sequence.
+ * Does not check the number of PREAMBLE cycles.
+ *
+ * (C) OVRERR *DON'T PUT IN HRM*
+ * OVRERR is an architectural assertion check internal to GMI to
+ * make sure no assumption was violated. In a correctly operating
+ * system, this interrupt can never fire.
+ * GMI has an internal arbiter which selects which of four ports to
+ * buffer in the main RX FIFO. If we normally buffer eight bytes,
+ * then each port will typically push a tick every eight cycles if
+ * the packet interface is going as fast as possible. If there
+ * are four ports, they push every two cycles. So that's the
+ * assumption. That the inbound module will always be able to
+ * consume the tick before another is produced. If that doesn't
+ * happen that's when OVRERR will assert."
+ */
+/* HRM-derived layout: twelve W1C interrupt status bits in word 0.
+   cn81xx and cn83xx use the common layout below; the cn88xx variant is
+   bit-for-bit identical and differs only in the supported-mode wording
+   of the field descriptions (no QSGMII on cn88xx). */
+union bdk_bgxx_gmp_gmi_rxx_int
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_rxx_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1C/H) Interframe gap violation. Does not necessarily indicate a failure. SGMII/QSGMII/1000Base-X only. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1C/H) Collision detection. Collisions can only occur in half-duplex mode. A collision is assumed
+ by the receiver when the slottime (BGX()_GMP_GMI_PRT()_CFG[SLOTTIME]) is not
+ satisfied. In 10/100 mode, this will result in a frame \< SLOTTIME. In 1000 mode, it could
+ result either in frame \< SLOTTIME or a carrier extend error with the SLOTTIME. These
+ conditions are visible by 1) transfer ended before slottime - COLDET or 2) carrier extend
+ error - CAREXT. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1C/H) False-carrier error, or carrier-extend error after slottime is satisfied.
+ SGMII/QSGMII/1000Base-X only. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1C/H) Detected reserved opcode. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1C/H) Bad preamble/protocol error. Checks that the frame begins with a valid PREAMBLE sequence.
+ Does not check the number of PREAMBLE cycles. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1C/H) Internal data aggregation overflow. This interrupt should never assert.
+ SGMII/QSGMII/1000Base-X only. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1C/H) Skipper error. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1C/H) Data-reception error. Frame was received with data-reception error. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1C/H) FCS/CRC error. Frame was received with FCS/CRC error. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1C/H) System-length error: frame was received with length \> sys_length.
+ An RX Jabber error indicates that a packet was received which is longer than the maximum
+ allowed packet as defined by the system. GMI truncates the packet at the JABBER count.
+ Failure to do so could lead to system instability. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1C/H) Carrier-extend error. (SGMII/QSGMII/1000Base-X only) */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1C/H) PAUSE frame was received with length \< minFrameSize. Frame length checks are typically
+ handled in NIC, but PAUSE frames are normally discarded before being inspected by NIC.
+ Total frame DA+SA+TL+DATA+PAD+FCS \< 64. */
+#else /* Word 0 - Little Endian */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1C/H) PAUSE frame was received with length \< minFrameSize. Frame length checks are typically
+ handled in NIC, but PAUSE frames are normally discarded before being inspected by NIC.
+ Total frame DA+SA+TL+DATA+PAD+FCS \< 64. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1C/H) Carrier-extend error. (SGMII/QSGMII/1000Base-X only) */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1C/H) System-length error: frame was received with length \> sys_length.
+ An RX Jabber error indicates that a packet was received which is longer than the maximum
+ allowed packet as defined by the system. GMI truncates the packet at the JABBER count.
+ Failure to do so could lead to system instability. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1C/H) FCS/CRC error. Frame was received with FCS/CRC error. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1C/H) Data-reception error. Frame was received with data-reception error. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1C/H) Skipper error. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1C/H) Internal data aggregation overflow. This interrupt should never assert.
+ SGMII/QSGMII/1000Base-X only. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1C/H) Bad preamble/protocol error. Checks that the frame begins with a valid PREAMBLE sequence.
+ Does not check the number of PREAMBLE cycles. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1C/H) Detected reserved opcode. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1C/H) False-carrier error, or carrier-extend error after slottime is satisfied.
+ SGMII/QSGMII/1000Base-X only. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1C/H) Collision detection. Collisions can only occur in half-duplex mode. A collision is assumed
+ by the receiver when the slottime (BGX()_GMP_GMI_PRT()_CFG[SLOTTIME]) is not
+ satisfied. In 10/100 mode, this will result in a frame \< SLOTTIME. In 1000 mode, it could
+ result either in frame \< SLOTTIME or a carrier extend error with the SLOTTIME. These
+ conditions are visible by 1) transfer ended before slottime - COLDET or 2) carrier extend
+ error - CAREXT. */
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1C/H) Interframe gap violation. Does not necessarily indicate a failure. SGMII/QSGMII/1000Base-X only. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_rxx_int_s cn81xx; */
+ struct bdk_bgxx_gmp_gmi_rxx_int_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1C/H) Interframe gap violation. Does not necessarily indicate a failure. SGMII/1000Base-X only. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1C/H) Collision detection. Collisions can only occur in half-duplex mode. A collision is assumed
+ by the receiver when the slottime (BGX()_GMP_GMI_PRT()_CFG[SLOTTIME]) is not
+ satisfied. In 10/100 mode, this will result in a frame \< SLOTTIME. In 1000 mode, it could
+ result either in frame \< SLOTTIME or a carrier extend error with the SLOTTIME. These
+ conditions are visible by 1) transfer ended before slottime - COLDET or 2) carrier extend
+ error - CAREXT. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1C/H) False-carrier error, or carrier-extend error after slottime is satisfied. SGMII/1000Base-X only. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1C/H) Detected reserved opcode. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1C/H) Bad preamble/protocol error. Checks that the frame begins with a valid PREAMBLE sequence.
+ Does not check the number of PREAMBLE cycles. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1C/H) Internal data aggregation overflow. This interrupt should never assert. SGMII/1000Base-X only. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1C/H) Skipper error. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1C/H) Data-reception error. Frame was received with data-reception error. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1C/H) FCS/CRC error. Frame was received with FCS/CRC error. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1C/H) System-length error: frame was received with length \> sys_length.
+ An RX Jabber error indicates that a packet was received which is longer than the maximum
+ allowed packet as defined by the system. GMI truncates the packet at the JABBER count.
+ Failure to do so could lead to system instability. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1C/H) Carrier-extend error. (SGMII/1000Base-X only) */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1C/H) PAUSE frame was received with length \< minFrameSize. Frame length checks are typically
+ handled in NIC, but PAUSE frames are normally discarded before being inspected by NIC.
+ Total frame DA+SA+TL+DATA+PAD+FCS \< 64. */
+#else /* Word 0 - Little Endian */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1C/H) PAUSE frame was received with length \< minFrameSize. Frame length checks are typically
+ handled in NIC, but PAUSE frames are normally discarded before being inspected by NIC.
+ Total frame DA+SA+TL+DATA+PAD+FCS \< 64. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1C/H) Carrier-extend error. (SGMII/1000Base-X only) */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1C/H) System-length error: frame was received with length \> sys_length.
+ An RX Jabber error indicates that a packet was received which is longer than the maximum
+ allowed packet as defined by the system. GMI truncates the packet at the JABBER count.
+ Failure to do so could lead to system instability. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1C/H) FCS/CRC error. Frame was received with FCS/CRC error. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1C/H) Data-reception error. Frame was received with data-reception error. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1C/H) Skipper error. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1C/H) Internal data aggregation overflow. This interrupt should never assert. SGMII/1000Base-X only. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1C/H) Bad preamble/protocol error. Checks that the frame begins with a valid PREAMBLE sequence.
+ Does not check the number of PREAMBLE cycles. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1C/H) Detected reserved opcode. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1C/H) False-carrier error, or carrier-extend error after slottime is satisfied. SGMII/1000Base-X only. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1C/H) Collision detection. Collisions can only occur in half-duplex mode. A collision is assumed
+ by the receiver when the slottime (BGX()_GMP_GMI_PRT()_CFG[SLOTTIME]) is not
+ satisfied. In 10/100 mode, this will result in a frame \< SLOTTIME. In 1000 mode, it could
+ result either in frame \< SLOTTIME or a carrier extend error with the SLOTTIME. These
+ conditions are visible by 1) transfer ended before slottime - COLDET or 2) carrier extend
+ error - CAREXT. */
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1C/H) Interframe gap violation. Does not necessarily indicate a failure. SGMII/1000Base-X only. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_gmi_rxx_int_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_gmi_rxx_int bdk_bgxx_gmp_gmi_rxx_int_t;
+
+/*
+ * Return the physical RSL address of BGX(a)_GMP_GMI_RX(b)_INT for the
+ * detected chip model (a = BGX block, b = LMAC/port index).  Base 0x87e0e0038000
+ * with the same 0x1000000/0x100000 block/LMAC strides as the sibling
+ * FRM_CTL/IFG accessors; invalid (a,b) falls through to __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_INT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_INT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038000ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038000ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038000ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_INT", 2, a, b, 0, 0);
+}
+
+/* NOTE(review): token-pasted per-register attributes, presumably consumed by
+   the generic BDK CSR accessor macros — confirm against bdk-csr.h. */
+#define typedef_BDK_BGXX_GMP_GMI_RXX_INT(a,b) bdk_bgxx_gmp_gmi_rxx_int_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_INT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_INT(a,b) "BGXX_GMP_GMI_RXX_INT"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_INT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_INT(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_INT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_int_ena_w1c
+ *
+ * BGX GMP GMI RX Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_bgxx_gmp_gmi_rxx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_rxx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_rxx_int_ena_w1c_s cn81xx; */
+ /* struct bdk_bgxx_gmp_gmi_rxx_int_ena_w1c_s cn88xx; */
+ struct bdk_bgxx_gmp_gmi_rxx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_gmp_gmi_rxx_int_ena_w1c bdk_bgxx_gmp_gmi_rxx_int_ena_w1c_t;
+
+/* Physical RSL address of BGX(a)_GMP_GMI_RX(b)_INT_ENA_W1C.
+ * Index limits are per chip model: CN83XX has four BGX blocks (a<=3),
+ * CN81XX/CN88XX have two (a<=1); all have four LMACs (b<=3).
+ * Out-of-range arguments reach __bdk_csr_fatal(), which is presumably
+ * noreturn -- TODO confirm; otherwise falling off the end of this
+ * uint64_t-returning function would be undefined behavior. */
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038010ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038010ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038010ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_INT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1C(a,b) bdk_bgxx_gmp_gmi_rxx_int_ena_w1c_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1C(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1C(a,b) "BGXX_GMP_GMI_RXX_INT_ENA_W1C"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1C(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_int_ena_w1s
+ *
+ * BGX GMP GMI RX Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+/* NOTE(review): the cn83xx view below is bit-for-bit identical to the common
+ * "s" view; only the doxygen text differs (BGX index range 0..3 vs 0..1). */
+union bdk_bgxx_gmp_gmi_rxx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_rxx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_rxx_int_ena_w1s_s cn81xx; */
+ /* struct bdk_bgxx_gmp_gmi_rxx_int_ena_w1s_s cn88xx; */
+ struct bdk_bgxx_gmp_gmi_rxx_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_gmp_gmi_rxx_int_ena_w1s bdk_bgxx_gmp_gmi_rxx_int_ena_w1s_t;
+
+/* Physical RSL address of BGX(a)_GMP_GMI_RX(b)_INT_ENA_W1S; CN83XX allows
+ * a<=3, CN81XX/CN88XX a<=1. __bdk_csr_fatal() is presumably noreturn --
+ * TODO confirm, otherwise the missing return after it would be UB. */
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038018ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038018ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038018ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_INT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1S(a,b) bdk_bgxx_gmp_gmi_rxx_int_ena_w1s_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1S(a,b) "BGXX_GMP_GMI_RXX_INT_ENA_W1S"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1S(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_INT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_int_w1s
+ *
+ * BGX GMP GMI RX Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+/* NOTE(review): the cn83xx view below is bit-for-bit identical to the common
+ * "s" view; only the doxygen text differs (BGX index range 0..3 vs 0..1). */
+union bdk_bgxx_gmp_gmi_rxx_int_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_rxx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_rxx_int_w1s_s cn81xx; */
+ /* struct bdk_bgxx_gmp_gmi_rxx_int_w1s_s cn88xx; */
+ struct bdk_bgxx_gmp_gmi_rxx_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t minerr : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[MINERR]. */
+ uint64_t carext : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[CAREXT]. */
+ uint64_t jabber : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[SKPERR]. */
+ uint64_t ovrerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[OVRERR]. */
+ uint64_t pcterr : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[RSVERR]. */
+ uint64_t falerr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[FALERR]. */
+ uint64_t coldet : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[COLDET]. */
+ uint64_t ifgerr : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_RX(0..3)_INT[IFGERR]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_gmp_gmi_rxx_int_w1s bdk_bgxx_gmp_gmi_rxx_int_w1s_t;
+
+/* Physical RSL address of BGX(a)_GMP_GMI_RX(b)_INT_W1S; CN83XX allows a<=3,
+ * CN81XX/CN88XX a<=1. __bdk_csr_fatal() is presumably noreturn -- TODO
+ * confirm, otherwise the missing return after it would be UB. */
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_INT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_INT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038008ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038008ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038008ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_INT_W1S", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_GMI_RXX_INT_W1S(a,b) bdk_bgxx_gmp_gmi_rxx_int_w1s_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_INT_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_INT_W1S(a,b) "BGXX_GMP_GMI_RXX_INT_W1S"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_INT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_INT_W1S(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_INT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_jabber
+ *
+ * BGX GMP Maximum Packet-Size Registers
+ * This register specifies the maximum size for packets, beyond which the GMI truncates.
+ */
+union bdk_bgxx_gmp_gmi_rxx_jabber
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_rxx_jabber_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< [ 15: 0](R/W) Byte count for jabber check. Failing packets set the JABBER interrupt and are optionally
+ sent with opcode = JABBER. GMI truncates the packet to [CNT] bytes.
+ [CNT] must be 8-byte aligned such that CNT\<2:0\> = 000. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 16; /**< [ 15: 0](R/W) Byte count for jabber check. Failing packets set the JABBER interrupt and are optionally
+ sent with opcode = JABBER. GMI truncates the packet to [CNT] bytes.
+ [CNT] must be 8-byte aligned such that CNT\<2:0\> = 000. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_rxx_jabber_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_rxx_jabber bdk_bgxx_gmp_gmi_rxx_jabber_t;
+
+/* Physical RSL address of BGX(a)_GMP_GMI_RX(b)_JABBER; CN83XX allows a<=3,
+ * CN81XX/CN88XX a<=1. __bdk_csr_fatal() is presumably noreturn -- TODO
+ * confirm, otherwise the missing return after it would be UB. */
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_JABBER(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_JABBER(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038038ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038038ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038038ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_JABBER", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_GMI_RXX_JABBER(a,b) bdk_bgxx_gmp_gmi_rxx_jabber_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_JABBER(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_JABBER(a,b) "BGXX_GMP_GMI_RXX_JABBER"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_JABBER(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_JABBER(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_JABBER(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_rx#_udd_skp
+ *
+ * BGX GMP GMI User-Defined Data Skip Registers
+ * This register specifies the amount of user-defined data (UDD) added before the start of the
+ * L2C data.
+ *
+ * Internal:
+ * Notes:
+ * (1) The skip bytes are part of the packet and will be handled by NIC.
+ *
+ * (2) The system can determine if the UDD bytes are included in the FCS check
+ * by using the FCSSEL field - if the FCS check is enabled.
+ *
+ * (3) Assume that the preamble/sfd is always at the start of the frame - even
+ * before UDD bytes. In most cases, there will be no preamble in these
+ * cases since it will be packet interface in direct communication to
+ * another packet interface (MAC to MAC) without a PHY involved.
+ *
+ * (4) We can still do address filtering and control packet filtering is the
+ * user desires.
+ *
+ * (5) BGX()_GMP_GMI_RX()_UDD_SKP[LEN] must be 0 in half-duplex operation unless
+ * BGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is clear. If BGX()_GMP_GMI_RX()_FRM_CTL[PRE_CHK] is
+ * clear,
+ * then BGX()_GMP_GMI_RX()_UDD_SKP[LEN] will normally be 8.
+ *
+ * (6) In all cases, the UDD bytes will be sent down the packet interface as
+ * part of the packet. The UDD bytes are never stripped from the actual
+ * packet.
+ */
+union bdk_bgxx_gmp_gmi_rxx_udd_skp
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_rxx_udd_skp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t fcssel : 1; /**< [ 8: 8](R/W) Include the skip bytes in the FCS calculation.
+ 0 = All skip bytes are included in FCS.
+ 1 = The skip bytes are not included in FCS.
+
+ The skip bytes are part of the packet and are
+ handled by NIC. The system can determine if the UDD bytes are included in the FCS check by
+ using [FCSSEL], if the FCS check is enabled. */
+ uint64_t reserved_7 : 1;
+ uint64_t len : 7; /**< [ 6: 0](R/W) Amount of user-defined data before the start of the L2C data, in bytes.
+ Setting to 0 means L2C comes first; maximum value is 64.
+ LEN must be 0x0 in half-duplex operation.
+
+ If LEN != 0, then BGX()_GMP_GMI_RX()_FRM_CHK[MINERR] will be disabled and
+ BGX()_GMP_GMI_RX()_INT[MINERR] will be zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t len : 7; /**< [ 6: 0](R/W) Amount of user-defined data before the start of the L2C data, in bytes.
+ Setting to 0 means L2C comes first; maximum value is 64.
+ LEN must be 0x0 in half-duplex operation.
+
+ If LEN != 0, then BGX()_GMP_GMI_RX()_FRM_CHK[MINERR] will be disabled and
+ BGX()_GMP_GMI_RX()_INT[MINERR] will be zero. */
+ uint64_t reserved_7 : 1;
+ uint64_t fcssel : 1; /**< [ 8: 8](R/W) Include the skip bytes in the FCS calculation.
+ 0 = All skip bytes are included in FCS.
+ 1 = The skip bytes are not included in FCS.
+
+ The skip bytes are part of the packet and are
+ handled by NIC. The system can determine if the UDD bytes are included in the FCS check by
+ using [FCSSEL], if the FCS check is enabled. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_rxx_udd_skp_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_rxx_udd_skp bdk_bgxx_gmp_gmi_rxx_udd_skp_t;
+
+/* Physical RSL address of BGX(a)_GMP_GMI_RX(b)_UDD_SKP; CN83XX allows a<=3,
+ * CN81XX/CN88XX a<=1. __bdk_csr_fatal() is presumably noreturn -- TODO
+ * confirm, otherwise the missing return after it would be UB. */
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_UDD_SKP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_RXX_UDD_SKP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038048ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038048ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038048ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_RXX_UDD_SKP", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_GMI_RXX_UDD_SKP(a,b) bdk_bgxx_gmp_gmi_rxx_udd_skp_t
+#define bustype_BDK_BGXX_GMP_GMI_RXX_UDD_SKP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_RXX_UDD_SKP(a,b) "BGXX_GMP_GMI_RXX_UDD_SKP"
+#define device_bar_BDK_BGXX_GMP_GMI_RXX_UDD_SKP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_RXX_UDD_SKP(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_RXX_UDD_SKP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_smac#
+ *
+ * BGX GMI SMAC Registers
+ */
+union bdk_bgxx_gmp_gmi_smacx
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_smacx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t smac : 48; /**< [ 47: 0](R/W) The SMAC field is used for generating and accepting control PAUSE packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t smac : 48; /**< [ 47: 0](R/W) The SMAC field is used for generating and accepting control PAUSE packets. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_smacx_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_smacx bdk_bgxx_gmp_gmi_smacx_t;
+
+/* Physical RSL address of BGX(a)_GMP_GMI_SMAC(b); CN83XX allows a<=3,
+ * CN81XX/CN88XX a<=1. __bdk_csr_fatal() is presumably noreturn -- TODO
+ * confirm, otherwise the missing return after it would be UB. */
+static inline uint64_t BDK_BGXX_GMP_GMI_SMACX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_SMACX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038230ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038230ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038230ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_SMACX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_GMI_SMACX(a,b) bdk_bgxx_gmp_gmi_smacx_t
+#define bustype_BDK_BGXX_GMP_GMI_SMACX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_SMACX(a,b) "BGXX_GMP_GMI_SMACX"
+#define device_bar_BDK_BGXX_GMP_GMI_SMACX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_SMACX(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_SMACX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_append
+ *
+ * BGX GMI TX Append Control Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_append
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_txx_append_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t force_fcs : 1; /**< [ 3: 3](R/W) Append the Ethernet FCS on each PAUSE packet. */
+ uint64_t fcs : 1; /**< [ 2: 2](R/W) Append the Ethernet FCS on each packet. */
+ uint64_t pad : 1; /**< [ 1: 1](R/W) Append PAD bytes such that minimum-sized packet is transmitted. */
+ uint64_t preamble : 1; /**< [ 0: 0](R/W) Prepend the Ethernet preamble on each transfer. */
+#else /* Word 0 - Little Endian */
+ uint64_t preamble : 1; /**< [ 0: 0](R/W) Prepend the Ethernet preamble on each transfer. */
+ uint64_t pad : 1; /**< [ 1: 1](R/W) Append PAD bytes such that minimum-sized packet is transmitted. */
+ uint64_t fcs : 1; /**< [ 2: 2](R/W) Append the Ethernet FCS on each packet. */
+ uint64_t force_fcs : 1; /**< [ 3: 3](R/W) Append the Ethernet FCS on each PAUSE packet. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_txx_append_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_txx_append bdk_bgxx_gmp_gmi_txx_append_t;
+
+/* Physical RSL address of BGX(a)_GMP_GMI_TX(b)_APPEND; CN83XX allows a<=3,
+ * CN81XX/CN88XX a<=1. __bdk_csr_fatal() is presumably noreturn -- TODO
+ * confirm, otherwise the missing return after it would be UB. */
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_APPEND(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_APPEND(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038218ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038218ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038218ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TXX_APPEND", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_GMI_TXX_APPEND(a,b) bdk_bgxx_gmp_gmi_txx_append_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_APPEND(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_APPEND(a,b) "BGXX_GMP_GMI_TXX_APPEND"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_APPEND(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_APPEND(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_APPEND(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_burst
+ *
+ * BGX GMI TX Burst-Counter Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_burst
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_txx_burst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t burst : 16; /**< [ 15: 0](R/W) Burst (refer to 802.3 to set correctly). Only valid for 1000Mb/s half-duplex operation as
+ follows:
+ half duplex/1000Mb/s: 0x2000
+ all other modes: 0x0
+ SGMII/QSGMII/1000Base-X only. */
+#else /* Word 0 - Little Endian */
+ uint64_t burst : 16; /**< [ 15: 0](R/W) Burst (refer to 802.3 to set correctly). Only valid for 1000Mb/s half-duplex operation as
+ follows:
+ half duplex/1000Mb/s: 0x2000
+ all other modes: 0x0
+ SGMII/QSGMII/1000Base-X only. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_txx_burst_s cn81xx; */
+ struct bdk_bgxx_gmp_gmi_txx_burst_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t burst : 16; /**< [ 15: 0](R/W) Burst (refer to 802.3 to set correctly). Only valid for 1000Mb/s half-duplex operation as
+ follows:
+ half duplex/1000Mb/s: 0x2000
+ all other modes: 0x0
+ SGMII/1000Base-X only. */
+#else /* Word 0 - Little Endian */
+ uint64_t burst : 16; /**< [ 15: 0](R/W) Burst (refer to 802.3 to set correctly). Only valid for 1000Mb/s half-duplex operation as
+ follows:
+ half duplex/1000Mb/s: 0x2000
+ all other modes: 0x0
+ SGMII/1000Base-X only. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_gmi_txx_burst_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_gmi_txx_burst bdk_bgxx_gmp_gmi_txx_burst_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_BURST(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_BURST(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038228ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038228ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_BURST", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_BURST(a,b) bdk_bgxx_gmp_gmi_txx_burst_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_BURST(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_BURST(a,b) "BGXX_GMP_GMI_TXX_BURST"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_BURST(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_BURST(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_BURST(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_ctl
+ *
+ * BGX GMI Transmit Control Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_ctl
+{
+    uint64_t u;
+    struct bdk_bgxx_gmp_gmi_txx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t link_drain            : 1;  /**< [  3:  3](R/W) Enable dropping of full packets to allow BGX and PKO/NIC to drain their FIFOs.
+                                                                 For diagnostic use only. */
+        uint64_t tx_fc_type            : 1;  /**< [  2:  2](R/W) Transmit side flow control type select.
+                                                                 0 = GMI MAC transmits ITU G.999.1 pause frames.
+                                                                 1 = GMI MAC transmits 802.3 pause frames. */
+        uint64_t xsdef_en              : 1;  /**< [  1:  1](R/W) Enables the excessive-deferral check for statistics and interrupts. SGMII/1000Base-X half-
+                                                                 duplex only. */
+        uint64_t xscol_en              : 1;  /**< [  0:  0](R/W) Enables the excessive-collision check for statistics and interrupts. SGMII/1000Base-X
+                                                                 half-duplex only. */
+#else /* Word 0 - Little Endian */
+        uint64_t xscol_en              : 1;  /**< [  0:  0](R/W) Enables the excessive-collision check for statistics and interrupts. SGMII/1000Base-X
+                                                                 half-duplex only. */
+        uint64_t xsdef_en              : 1;  /**< [  1:  1](R/W) Enables the excessive-deferral check for statistics and interrupts. SGMII/1000Base-X half-
+                                                                 duplex only. */
+        uint64_t tx_fc_type            : 1;  /**< [  2:  2](R/W) Transmit side flow control type select.
+                                                                 0 = GMI MAC transmits ITU G.999.1 pause frames.
+                                                                 1 = GMI MAC transmits 802.3 pause frames. */
+        uint64_t link_drain            : 1;  /**< [  3:  3](R/W) Enable dropping of full packets to allow BGX and PKO/NIC to drain their FIFOs.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_bgxx_gmp_gmi_txx_ctl_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_3_63         : 61;
+        uint64_t tx_fc_type            : 1;  /**< [  2:  2](R/W) Transmit side flow control type select.
+                                                                 0 = GMI MAC transmits ITU G.999.1 pause frames.
+                                                                 1 = GMI MAC transmits 802.3 pause frames. */
+        uint64_t xsdef_en              : 1;  /**< [  1:  1](R/W) Enables the excessive-deferral check for statistics and interrupts. SGMII/1000Base-X half-
+                                                                 duplex only. */
+        uint64_t xscol_en              : 1;  /**< [  0:  0](R/W) Enables the excessive-collision check for statistics and interrupts. SGMII/1000Base-X
+                                                                 half-duplex only. */
+#else /* Word 0 - Little Endian */
+        uint64_t xscol_en              : 1;  /**< [  0:  0](R/W) Enables the excessive-collision check for statistics and interrupts. SGMII/1000Base-X
+                                                                 half-duplex only. */
+        uint64_t xsdef_en              : 1;  /**< [  1:  1](R/W) Enables the excessive-deferral check for statistics and interrupts. SGMII/1000Base-X half-
+                                                                 duplex only. */
+        uint64_t tx_fc_type            : 1;  /**< [  2:  2](R/W) Transmit side flow control type select.
+                                                                 0 = GMI MAC transmits ITU G.999.1 pause frames.
+                                                                 1 = GMI MAC transmits 802.3 pause frames. */
+        uint64_t reserved_3_63         : 61;
+#endif /* Word 0 - End */
+    } cn81xx;
+    struct bdk_bgxx_gmp_gmi_txx_ctl_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_2_63         : 62;
+        uint64_t xsdef_en              : 1;  /**< [  1:  1](R/W) Enables the excessive-deferral check for statistics and interrupts. SGMII/1000Base-X half-
+                                                                 duplex only. */
+        uint64_t xscol_en              : 1;  /**< [  0:  0](R/W) Enables the excessive-collision check for statistics and interrupts. SGMII/1000Base-X
+                                                                 half-duplex only. */
+#else /* Word 0 - Little Endian */
+        uint64_t xscol_en              : 1;  /**< [  0:  0](R/W) Enables the excessive-collision check for statistics and interrupts. SGMII/1000Base-X
+                                                                 half-duplex only. */
+        uint64_t xsdef_en              : 1;  /**< [  1:  1](R/W) Enables the excessive-deferral check for statistics and interrupts. SGMII/1000Base-X half-
+                                                                 duplex only. */
+        uint64_t reserved_2_63         : 62;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_bgxx_gmp_gmi_txx_ctl_s cn83xx; */
+};
+/* Shorthand typedef used by the typedef_BDK_* accessor macros below. */
+typedef union bdk_bgxx_gmp_gmi_txx_ctl bdk_bgxx_gmp_gmi_txx_ctl_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_CTL(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038270ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038270ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_CTL(a,b) bdk_bgxx_gmp_gmi_txx_ctl_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_CTL(a,b) "BGXX_GMP_GMI_TXX_CTL"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_CTL(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_int
+ *
+ * BGX GMI TX Interrupt Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_int
+{
+    uint64_t u;
+    struct bdk_bgxx_gmp_gmi_txx_int_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1C/H) A packet with a PTP request was not able to be sent due to XSCOL. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1C/H) TX late collision. (SGMII/1000BASE-X half-duplex only) */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1C/H) TX excessive deferral. (SGMII/1000BASE-X half-duplex only) */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1C/H) TX excessive collisions. (SGMII/1000BASE-X half-duplex only) */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1C/H) TX underflow. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1C/H) TX underflow. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1C/H) TX excessive collisions. (SGMII/1000BASE-X half-duplex only) */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1C/H) TX excessive deferral. (SGMII/1000BASE-X half-duplex only) */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1C/H) TX late collision. (SGMII/1000BASE-X half-duplex only) */
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1C/H) A packet with a PTP request was not able to be sent due to XSCOL. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_gmp_gmi_txx_int_s cn; */
+};
+/* Shorthand typedef used by the typedef_BDK_* accessor macros below. */
+typedef union bdk_bgxx_gmp_gmi_txx_int bdk_bgxx_gmp_gmi_txx_int_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_INT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_INT(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038500ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038500ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_INT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_INT(a,b) bdk_bgxx_gmp_gmi_txx_int_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_INT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_INT(a,b) "BGXX_GMP_GMI_TXX_INT"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_INT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_INT(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_INT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_int_ena_w1c
+ *
+ * BGX GMI TX Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_bgxx_gmp_gmi_txx_int_ena_w1c
+{
+    uint64_t u;
+    struct bdk_bgxx_gmp_gmi_txx_int_ena_w1c_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_gmp_gmi_txx_int_ena_w1c_s cn81xx; */
+    /* struct bdk_bgxx_gmp_gmi_txx_int_ena_w1c_s cn88xx; */
+    struct bdk_bgxx_gmp_gmi_txx_int_ena_w1c_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+/* Shorthand typedef used by the typedef_BDK_* accessor macros below. */
+typedef union bdk_bgxx_gmp_gmi_txx_int_ena_w1c bdk_bgxx_gmp_gmi_txx_int_ena_w1c_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1C(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038510ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038510ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_INT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1C(a,b) bdk_bgxx_gmp_gmi_txx_int_ena_w1c_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1C(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1C(a,b) "BGXX_GMP_GMI_TXX_INT_ENA_W1C"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1C(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_int_ena_w1s
+ *
+ * BGX GMI TX Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_bgxx_gmp_gmi_txx_int_ena_w1s
+{
+    uint64_t u;
+    struct bdk_bgxx_gmp_gmi_txx_int_ena_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_gmp_gmi_txx_int_ena_w1s_s cn81xx; */
+    /* struct bdk_bgxx_gmp_gmi_txx_int_ena_w1s_s cn88xx; */
+    struct bdk_bgxx_gmp_gmi_txx_int_ena_w1s_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+/* Shorthand typedef used by the typedef_BDK_* accessor macros below. */
+typedef union bdk_bgxx_gmp_gmi_txx_int_ena_w1s bdk_bgxx_gmp_gmi_txx_int_ena_w1s_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1S(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038518ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038518ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_INT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1S(a,b) bdk_bgxx_gmp_gmi_txx_int_ena_w1s_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1S(a,b) "BGXX_GMP_GMI_TXX_INT_ENA_W1S"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1S(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_INT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_int_w1s
+ *
+ * BGX GMI TX Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_bgxx_gmp_gmi_txx_int_w1s
+{
+    uint64_t u;
+    struct bdk_bgxx_gmp_gmi_txx_int_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets BGX(0..1)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_gmp_gmi_txx_int_w1s_s cn81xx; */
+    /* struct bdk_bgxx_gmp_gmi_txx_int_w1s_s cn88xx; */
+    struct bdk_bgxx_gmp_gmi_txx_int_w1s_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[UNDFLW]. */
+        uint64_t xscol                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSCOL]. */
+        uint64_t xsdef                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[XSDEF]. */
+        uint64_t late_col              : 1;  /**< [  3:  3](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[LATE_COL]. */
+        uint64_t ptp_lost              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets BGX(0..3)_GMP_GMI_TX(0..3)_INT[PTP_LOST]. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+/* Shorthand typedef used by the typedef_BDK_* accessor macros below. */
+typedef union bdk_bgxx_gmp_gmi_txx_int_w1s bdk_bgxx_gmp_gmi_txx_int_w1s_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_INT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_INT_W1S(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038508ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038508ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_INT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_INT_W1S(a,b) bdk_bgxx_gmp_gmi_txx_int_w1s_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_INT_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_INT_W1S(a,b) "BGXX_GMP_GMI_TXX_INT_W1S"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_INT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_INT_W1S(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_INT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_min_pkt
+ *
+ * BGX GMI TX Minimum-Size-Packet Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_min_pkt
+{
+    uint64_t u;
+    struct bdk_bgxx_gmp_gmi_txx_min_pkt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t min_size              : 8;  /**< [  7:  0](R/W) Minimum frame size in bytes before the FCS is applied.
+                                                                 Padding is only appended when BGX()_GMP_GMI_TX()_APPEND[PAD] for the corresponding
+                                                                 LMAC is set.
+
+                                                                 When LMAC_TYPE=SGMII/QSGMII, packets are padded to [MIN_SIZE]+1. The reset value pads to
+                                                                 60
+                                                                 bytes. */
+#else /* Word 0 - Little Endian */
+        uint64_t min_size              : 8;  /**< [  7:  0](R/W) Minimum frame size in bytes before the FCS is applied.
+                                                                 Padding is only appended when BGX()_GMP_GMI_TX()_APPEND[PAD] for the corresponding
+                                                                 LMAC is set.
+
+                                                                 When LMAC_TYPE=SGMII/QSGMII, packets are padded to [MIN_SIZE]+1. The reset value pads to
+                                                                 60
+                                                                 bytes. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_gmp_gmi_txx_min_pkt_s cn81xx; */
+    struct bdk_bgxx_gmp_gmi_txx_min_pkt_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t min_size              : 8;  /**< [  7:  0](R/W) Minimum frame size in bytes before the FCS is applied.
+                                                                 Padding is only appended when BGX()_GMP_GMI_TX()_APPEND[PAD] for the corresponding
+                                                                 LMAC is set.
+
+                                                                 In SGMII mode, packets are padded to [MIN_SIZE]+1. The reset value pads to 60 bytes. */
+#else /* Word 0 - Little Endian */
+        uint64_t min_size              : 8;  /**< [  7:  0](R/W) Minimum frame size in bytes before the FCS is applied.
+                                                                 Padding is only appended when BGX()_GMP_GMI_TX()_APPEND[PAD] for the corresponding
+                                                                 LMAC is set.
+
+                                                                 In SGMII mode, packets are padded to [MIN_SIZE]+1. The reset value pads to 60 bytes. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_bgxx_gmp_gmi_txx_min_pkt_s cn83xx; */
+};
+/* Shorthand typedef used by the typedef_BDK_* accessor macros below. */
+typedef union bdk_bgxx_gmp_gmi_txx_min_pkt bdk_bgxx_gmp_gmi_txx_min_pkt_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_MIN_PKT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_MIN_PKT(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038240ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038240ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_MIN_PKT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_MIN_PKT(a,b) bdk_bgxx_gmp_gmi_txx_min_pkt_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_MIN_PKT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_MIN_PKT(a,b) "BGXX_GMP_GMI_TXX_MIN_PKT"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_MIN_PKT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_MIN_PKT(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_MIN_PKT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_pause_pkt_interval
+ *
+ * BGX GMI TX PAUSE-Packet Transmission-Interval Registers
+ * This register specifies how often PAUSE packets are sent.
+ * Internal:
+ * Notes:
+ * Choosing proper values of BGX()_GMP_GMI_TX()_PAUSE_PKT_TIME[PTIME] and
+ * BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL[INTERVAL] can be challenging to the system
+ * designer.  It is suggested that TIME be much greater than INTERVAL and
+ * BGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] be set.  This allows a periodic refresh of the PAUSE
+ * count and then when the backpressure condition is lifted, a PAUSE packet
+ * with TIME==0 will be sent indicating that Octane is ready for additional
+ * data.
+ *
+ * If the system chooses to not set BGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND], then it is
+ * suggested that TIME and INTERVAL are programmed such that they satisify the
+ * following rule:
+ *
+ * _ INTERVAL \<= TIME - (largest_pkt_size + IFG + pause_pkt_size)
+ *
+ * where largest_pkt_size is that largest packet that the system can send
+ * (normally 1518B), IFG is the interframe gap and pause_pkt_size is the size
+ * of the PAUSE packet (normally 64B).
+ */
+union bdk_bgxx_gmp_gmi_txx_pause_pkt_interval
+{
+    uint64_t u;
+    struct bdk_bgxx_gmp_gmi_txx_pause_pkt_interval_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t interval              : 16; /**< [ 15:  0](R/W) Arbitrate for a 802.3 PAUSE packet every ([INTERVAL] * 512)
+                                                                 bit-times. Normally, 0 \< [INTERVAL] \< BGX()_GMP_GMI_TX()_PAUSE_PKT_TIME[PTIME].
+
+                                                                 [INTERVAL] = 0 only sends a single PAUSE packet for each backpressure event.
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] must be 1 when [INTERVAL] = 0.
+                                                                 [INTERVAL] should be 0x0 if BGX()_GMP_GMI_TX()_CTL[TX_FC_TYPE] is clear (G.999.1). */
+#else /* Word 0 - Little Endian */
+        uint64_t interval              : 16; /**< [ 15:  0](R/W) Arbitrate for a 802.3 PAUSE packet every ([INTERVAL] * 512)
+                                                                 bit-times. Normally, 0 \< [INTERVAL] \< BGX()_GMP_GMI_TX()_PAUSE_PKT_TIME[PTIME].
+
+                                                                 [INTERVAL] = 0 only sends a single PAUSE packet for each backpressure event.
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] must be 1 when [INTERVAL] = 0.
+                                                                 [INTERVAL] should be 0x0 if BGX()_GMP_GMI_TX()_CTL[TX_FC_TYPE] is clear (G.999.1). */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_gmp_gmi_txx_pause_pkt_interval_s cn81xx; */
+    struct bdk_bgxx_gmp_gmi_txx_pause_pkt_interval_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t interval              : 16; /**< [ 15:  0](R/W) Arbitrate for a 802.3 PAUSE packet every ([INTERVAL] * 512)
+                                                                 bit-times. Normally, 0 \< [INTERVAL] \< BGX()_GMP_GMI_TX()_PAUSE_PKT_TIME[PTIME].
+
+                                                                 [INTERVAL] = 0 only sends a single PAUSE packet for each backpressure event.
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] must be 1 when [INTERVAL] = 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t interval              : 16; /**< [ 15:  0](R/W) Arbitrate for a 802.3 PAUSE packet every ([INTERVAL] * 512)
+                                                                 bit-times. Normally, 0 \< [INTERVAL] \< BGX()_GMP_GMI_TX()_PAUSE_PKT_TIME[PTIME].
+
+                                                                 [INTERVAL] = 0 only sends a single PAUSE packet for each backpressure event.
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_ZERO[SEND] must be 1 when [INTERVAL] = 0. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_bgxx_gmp_gmi_txx_pause_pkt_interval_s cn83xx; */
+};
+/* Shorthand typedef used by the typedef_BDK_* accessor macros below. */
+typedef union bdk_bgxx_gmp_gmi_txx_pause_pkt_interval bdk_bgxx_gmp_gmi_txx_pause_pkt_interval_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038248ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038248ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(a,b) bdk_bgxx_gmp_gmi_txx_pause_pkt_interval_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(a,b) "BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_INTERVAL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_pause_pkt_time
+ *
+ * BGX GMI TX PAUSE Packet PAUSE-Time Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_pause_pkt_time
+{
+    uint64_t u;
+    struct bdk_bgxx_gmp_gmi_txx_pause_pkt_time_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t ptime                 : 16; /**< [ 15:  0](R/W) Provides the pause_time field placed in outbound 802.3 PAUSE packets
+                                                                 in 512 bit-times. Normally, [PTIME] \>
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL[INTERVAL]. For programming information see
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL.
+                                                                 [PTIME] should be 0x0 if BGX()_GMP_GMI_TX()_CTL[TX_FC_TYPE] is clear (G.999.1). */
+#else /* Word 0 - Little Endian */
+        uint64_t ptime                 : 16; /**< [ 15:  0](R/W) Provides the pause_time field placed in outbound 802.3 PAUSE packets
+                                                                 in 512 bit-times. Normally, [PTIME] \>
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL[INTERVAL]. For programming information see
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL.
+                                                                 [PTIME] should be 0x0 if BGX()_GMP_GMI_TX()_CTL[TX_FC_TYPE] is clear (G.999.1). */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_gmp_gmi_txx_pause_pkt_time_s cn81xx; */
+    struct bdk_bgxx_gmp_gmi_txx_pause_pkt_time_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t ptime                 : 16; /**< [ 15:  0](R/W) Provides the pause_time field placed in outbound 802.3 PAUSE packets
+                                                                 in 512 bit-times. Normally, [PTIME] \>
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL[INTERVAL]. For programming information see
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL. */
+#else /* Word 0 - Little Endian */
+        uint64_t ptime                 : 16; /**< [ 15:  0](R/W) Provides the pause_time field placed in outbound 802.3 PAUSE packets
+                                                                 in 512 bit-times. Normally, [PTIME] \>
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL[INTERVAL]. For programming information see
+                                                                 BGX()_GMP_GMI_TX()_PAUSE_PKT_INTERVAL. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_bgxx_gmp_gmi_txx_pause_pkt_time_s cn83xx; */
+};
+/* Shorthand typedef used by the typedef_BDK_* accessor macros below. */
+typedef union bdk_bgxx_gmp_gmi_txx_pause_pkt_time bdk_bgxx_gmp_gmi_txx_pause_pkt_time_t;
+
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address for BGX instance [a], LMAC [b]. The base offset
+       is identical on every supported model; only the number of BGX instances
+       (valid range of [a]) differs: CN81XX/CN88XX have 2, CN83XX has 4. */
+    if (b<=3)
+    {
+        if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return 0x87e0e0038238ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+        if ((a<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return 0x87e0e0038238ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(a,b) bdk_bgxx_gmp_gmi_txx_pause_pkt_time_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(a,b) "BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_PAUSE_PKT_TIME(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_pause_togo
+ *
+ * BGX GMI TX Time-to-Backpressure Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_pause_togo
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_txx_pause_togo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptime : 16; /**< [ 15: 0](RO/H) Amount of time remaining to backpressure, from the standard 802.3 PAUSE timer. */
+#else /* Word 0 - Little Endian */
+ uint64_t ptime : 16; /**< [ 15: 0](RO/H) Amount of time remaining to backpressure, from the standard 802.3 PAUSE timer. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_txx_pause_togo_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_txx_pause_togo bdk_bgxx_gmp_gmi_txx_pause_togo_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX(b)_PAUSE_TOGO; invalid
+   (a,b) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_PAUSE_TOGO(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_PAUSE_TOGO(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038258ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038258ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038258ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TXX_PAUSE_TOGO", 2, a, b, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TXX_PAUSE_TOGO(a,b) bdk_bgxx_gmp_gmi_txx_pause_togo_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_PAUSE_TOGO(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_PAUSE_TOGO(a,b) "BGXX_GMP_GMI_TXX_PAUSE_TOGO"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_PAUSE_TOGO(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_PAUSE_TOGO(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_PAUSE_TOGO(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_pause_zero
+ *
+ * BGX GMI TX PAUSE-Zero-Enable Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_pause_zero
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_txx_pause_zero_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t send : 1; /**< [ 0: 0](R/W) Send PAUSE-zero enable. When this bit is set, and the backpressure condition is clear, it
+ allows sending a PAUSE packet with pause_time of 0 to enable the channel.
+ [SEND] should be set if BGX()_GMP_GMI_TX()_CTL[TX_FC_TYPE] is clear (G.999.1). */
+#else /* Word 0 - Little Endian */
+ uint64_t send : 1; /**< [ 0: 0](R/W) Send PAUSE-zero enable. When this bit is set, and the backpressure condition is clear, it
+ allows sending a PAUSE packet with pause_time of 0 to enable the channel.
+ [SEND] should be set if BGX()_GMP_GMI_TX()_CTL[TX_FC_TYPE] is clear (G.999.1). */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_txx_pause_zero_s cn81xx; */
+ /* CN88XX layout: same bit positions, description lacks the G.999.1 note. */
+ struct bdk_bgxx_gmp_gmi_txx_pause_zero_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t send : 1; /**< [ 0: 0](R/W) Send PAUSE-zero enable.When this bit is set, and the backpressure condition is clear, it
+ allows sending a PAUSE packet with pause_time of 0 to enable the channel. */
+#else /* Word 0 - Little Endian */
+ uint64_t send : 1; /**< [ 0: 0](R/W) Send PAUSE-zero enable.When this bit is set, and the backpressure condition is clear, it
+ allows sending a PAUSE packet with pause_time of 0 to enable the channel. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_gmi_txx_pause_zero_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_gmi_txx_pause_zero bdk_bgxx_gmp_gmi_txx_pause_zero_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX(b)_PAUSE_ZERO; invalid
+   (a,b) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_PAUSE_ZERO(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_PAUSE_ZERO(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038260ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038260ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038260ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TXX_PAUSE_ZERO", 2, a, b, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TXX_PAUSE_ZERO(a,b) bdk_bgxx_gmp_gmi_txx_pause_zero_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_PAUSE_ZERO(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_PAUSE_ZERO(a,b) "BGXX_GMP_GMI_TXX_PAUSE_ZERO"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_PAUSE_ZERO(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_PAUSE_ZERO(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_PAUSE_ZERO(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_sgmii_ctl
+ *
+ * BGX SGMII Control Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_sgmii_ctl
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_txx_sgmii_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t align : 1; /**< [ 0: 0](R/W) Align the transmission to even cycles: (SGMII/1000BASE-X half-duplex only)
+ Recommended value is: ALIGN = !BGX()_GMP_GMI_TX()_APPEND[PREAMBLE].
+ (See Transmit Conversion to Code groups, Transmit Conversion to Code Groups for a complete
+ discussion.)
+
+ _ 0 = Data can be sent on any cycle. In this mode, the interface functions at maximum
+ bandwidth. It is possible for the TX PCS machine to drop the first byte of the TX frame.
+ When BGX()_GMP_GMI_TX()_APPEND[PREAMBLE] is set, the first byte is a preamble
+ byte, which can be dropped to compensate for an extended IPG.
+
+ _ 1 = Data is only sent on even cycles. In this mode, there can be bandwidth implications
+ when sending odd-byte packets as the IPG can extend an extra cycle. There will be no loss
+ of data. */
+#else /* Word 0 - Little Endian */
+ uint64_t align : 1; /**< [ 0: 0](R/W) Align the transmission to even cycles: (SGMII/1000BASE-X half-duplex only)
+ Recommended value is: ALIGN = !BGX()_GMP_GMI_TX()_APPEND[PREAMBLE].
+ (See Transmit Conversion to Code groups, Transmit Conversion to Code Groups for a complete
+ discussion.)
+
+ _ 0 = Data can be sent on any cycle. In this mode, the interface functions at maximum
+ bandwidth. It is possible for the TX PCS machine to drop the first byte of the TX frame.
+ When BGX()_GMP_GMI_TX()_APPEND[PREAMBLE] is set, the first byte is a preamble
+ byte, which can be dropped to compensate for an extended IPG.
+
+ _ 1 = Data is only sent on even cycles. In this mode, there can be bandwidth implications
+ when sending odd-byte packets as the IPG can extend an extra cycle. There will be no loss
+ of data. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_txx_sgmii_ctl_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_txx_sgmii_ctl bdk_bgxx_gmp_gmi_txx_sgmii_ctl_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX(b)_SGMII_CTL; invalid
+   (a,b) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_SGMII_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_SGMII_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038300ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038300ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038300ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TXX_SGMII_CTL", 2, a, b, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TXX_SGMII_CTL(a,b) bdk_bgxx_gmp_gmi_txx_sgmii_ctl_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_SGMII_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_SGMII_CTL(a,b) "BGXX_GMP_GMI_TXX_SGMII_CTL"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_SGMII_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_SGMII_CTL(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_SGMII_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_slot
+ *
+ * BGX GMI TX Slottime Counter Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_slot
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_txx_slot_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t slot : 10; /**< [ 9: 0](R/W) Slottime (refer to IEEE 802.3 to set correctly):
+ 10/100 Mbs: Set SLOT to 0x40.
+ 1000 Mbs: Set SLOT to 0x200.
+
+ SGMII/QSGMII/1000Base-X only. */
+#else /* Word 0 - Little Endian */
+ uint64_t slot : 10; /**< [ 9: 0](R/W) Slottime (refer to IEEE 802.3 to set correctly):
+ 10/100 Mbs: Set SLOT to 0x40.
+ 1000 Mbs: Set SLOT to 0x200.
+
+ SGMII/QSGMII/1000Base-X only. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_txx_slot_s cn81xx; */
+ /* CN88XX layout: same bit positions; description omits QSGMII. */
+ struct bdk_bgxx_gmp_gmi_txx_slot_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t slot : 10; /**< [ 9: 0](R/W) Slottime (refer to IEEE 802.3 to set correctly):
+ 10/100 Mbs: Set SLOT to 0x40.
+ 1000 Mbs: Set SLOT to 0x200.
+
+ SGMII/1000Base-X only. */
+#else /* Word 0 - Little Endian */
+ uint64_t slot : 10; /**< [ 9: 0](R/W) Slottime (refer to IEEE 802.3 to set correctly):
+ 10/100 Mbs: Set SLOT to 0x40.
+ 1000 Mbs: Set SLOT to 0x200.
+
+ SGMII/1000Base-X only. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_gmi_txx_slot_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_gmi_txx_slot bdk_bgxx_gmp_gmi_txx_slot_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX(b)_SLOT; invalid
+   (a,b) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_SLOT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_SLOT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038220ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038220ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038220ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TXX_SLOT", 2, a, b, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TXX_SLOT(a,b) bdk_bgxx_gmp_gmi_txx_slot_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_SLOT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_SLOT(a,b) "BGXX_GMP_GMI_TXX_SLOT"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_SLOT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_SLOT(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_SLOT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_soft_pause
+ *
+ * BGX GMI TX Software PAUSE Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_soft_pause
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_txx_soft_pause_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptime : 16; /**< [ 15: 0](R/W) Back off the TX bus for ([PTIME] * 512) bit-times.
+ [PTIME] should be 0x0 if BGX()_GMP_GMI_TX()_CTL[TX_FC_TYPE] is clear (G.999.1). */
+#else /* Word 0 - Little Endian */
+ uint64_t ptime : 16; /**< [ 15: 0](R/W) Back off the TX bus for ([PTIME] * 512) bit-times.
+ [PTIME] should be 0x0 if BGX()_GMP_GMI_TX()_CTL[TX_FC_TYPE] is clear (G.999.1). */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_txx_soft_pause_s cn81xx; */
+ /* CN88XX layout: same bit positions, description lacks the G.999.1 note. */
+ struct bdk_bgxx_gmp_gmi_txx_soft_pause_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptime : 16; /**< [ 15: 0](R/W) Back off the TX bus for ([PTIME] * 512) bit-times. */
+#else /* Word 0 - Little Endian */
+ uint64_t ptime : 16; /**< [ 15: 0](R/W) Back off the TX bus for ([PTIME] * 512) bit-times. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_gmi_txx_soft_pause_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_gmi_txx_soft_pause bdk_bgxx_gmp_gmi_txx_soft_pause_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX(b)_SOFT_PAUSE; invalid
+   (a,b) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_SOFT_PAUSE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_SOFT_PAUSE(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038250ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038250ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038250ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TXX_SOFT_PAUSE", 2, a, b, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TXX_SOFT_PAUSE(a,b) bdk_bgxx_gmp_gmi_txx_soft_pause_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_SOFT_PAUSE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_SOFT_PAUSE(a,b) "BGXX_GMP_GMI_TXX_SOFT_PAUSE"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_SOFT_PAUSE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_SOFT_PAUSE(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_SOFT_PAUSE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx#_thresh
+ *
+ * BGX GMI TX Threshold Registers
+ */
+union bdk_bgxx_gmp_gmi_txx_thresh
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_txx_thresh_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ /* NOTE(review): the generated description below ends mid-sentence
+    ("The recommended setting for"); the recommended value was lost
+    upstream — consult the chip HRM for the full text. */
+ uint64_t cnt : 11; /**< [ 10: 0](R/W) Number of 128-bit words to accumulate in the TX FIFO before sending on the packet
+ interface. This field should be large enough to prevent underflow on the packet interface
+ and must never be set to 0x0. The recommended setting for
+
+ In all modes, this register cannot exceed the TX FIFO depth configured by
+ BGX()_CMR_TX_LMACS[LMACS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 11; /**< [ 10: 0](R/W) Number of 128-bit words to accumulate in the TX FIFO before sending on the packet
+ interface. This field should be large enough to prevent underflow on the packet interface
+ and must never be set to 0x0. The recommended setting for
+
+ In all modes, this register cannot exceed the TX FIFO depth configured by
+ BGX()_CMR_TX_LMACS[LMACS]. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_txx_thresh_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_txx_thresh bdk_bgxx_gmp_gmi_txx_thresh_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX(b)_THRESH; invalid
+   (a,b) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_THRESH(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TXX_THRESH(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038210ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0038210ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0038210ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TXX_THRESH", 2, a, b, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TXX_THRESH(a,b) bdk_bgxx_gmp_gmi_txx_thresh_t
+#define bustype_BDK_BGXX_GMP_GMI_TXX_THRESH(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TXX_THRESH(a,b) "BGXX_GMP_GMI_TXX_THRESH"
+#define device_bar_BDK_BGXX_GMP_GMI_TXX_THRESH(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TXX_THRESH(a,b) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TXX_THRESH(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx_col_attempt
+ *
+ * BGX TX Collision Attempts Before Dropping Frame Registers
+ */
+union bdk_bgxx_gmp_gmi_tx_col_attempt
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_tx_col_attempt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t limit : 5; /**< [ 4: 0](R/W) Number of collision attempts allowed. (SGMII/1000BASE-X half-duplex only.) */
+#else /* Word 0 - Little Endian */
+ uint64_t limit : 5; /**< [ 4: 0](R/W) Number of collision attempts allowed. (SGMII/1000BASE-X half-duplex only.) */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_tx_col_attempt_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_tx_col_attempt bdk_bgxx_gmp_gmi_tx_col_attempt_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX_COL_ATTEMPT; invalid
+   (a) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_COL_ATTEMPT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_COL_ATTEMPT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0039010ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0039010ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0039010ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TX_COL_ATTEMPT", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TX_COL_ATTEMPT(a) bdk_bgxx_gmp_gmi_tx_col_attempt_t
+#define bustype_BDK_BGXX_GMP_GMI_TX_COL_ATTEMPT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TX_COL_ATTEMPT(a) "BGXX_GMP_GMI_TX_COL_ATTEMPT"
+#define device_bar_BDK_BGXX_GMP_GMI_TX_COL_ATTEMPT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TX_COL_ATTEMPT(a) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TX_COL_ATTEMPT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx_ifg
+ *
+ * BGX GMI TX Interframe-Gap Cycles Registers
+ * Consider the following when programming IFG1 and IFG2:
+ * * For 10/100/1000 Mb/s half-duplex systems that require IEEE 802.3 compatibility, IFG1 must be
+ * in the range of 1-8, IFG2 must be in the range of 4-12, and the IFG1 + IFG2 sum must be 12.
+ * * For 10/100/1000 Mb/s full-duplex systems that require IEEE 802.3 compatibility, IFG1 must be
+ * in the range of 1-11, IFG2 must be in the range of 1-11, and the IFG1 + IFG2 sum must be 12.
+ * For all other systems, IFG1 and IFG2 can be any value in the range of 1-15, allowing for a
+ * total possible IFG sum of 2-30.
+ */
+union bdk_bgxx_gmp_gmi_tx_ifg
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_tx_ifg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t ifg2 : 4; /**< [ 7: 4](R/W) Remainder of interFrameGap timing, equal to interFrameGap - IFG1 (in IFG2 * 8 bits). If
+ CRS is detected during IFG2, the interFrameSpacing timer is not reset and a frame is
+ transmitted once the timer expires. */
+ uint64_t ifg1 : 4; /**< [ 3: 0](R/W) First portion of interFrameGap timing, in the range of 0 to 2/3 (in IFG2 * 8 bits). If CRS
+ is detected during IFG1, the interFrameSpacing timer is reset and a frame is not
+ transmitted. */
+#else /* Word 0 - Little Endian */
+ uint64_t ifg1 : 4; /**< [ 3: 0](R/W) First portion of interFrameGap timing, in the range of 0 to 2/3 (in IFG2 * 8 bits). If CRS
+ is detected during IFG1, the interFrameSpacing timer is reset and a frame is not
+ transmitted. */
+ uint64_t ifg2 : 4; /**< [ 7: 4](R/W) Remainder of interFrameGap timing, equal to interFrameGap - IFG1 (in IFG2 * 8 bits). If
+ CRS is detected during IFG2, the interFrameSpacing timer is not reset and a frame is
+ transmitted once the timer expires. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_tx_ifg_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_tx_ifg bdk_bgxx_gmp_gmi_tx_ifg_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX_IFG; invalid
+   (a) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_IFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_IFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0039000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0039000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0039000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TX_IFG", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TX_IFG(a) bdk_bgxx_gmp_gmi_tx_ifg_t
+#define bustype_BDK_BGXX_GMP_GMI_TX_IFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TX_IFG(a) "BGXX_GMP_GMI_TX_IFG"
+#define device_bar_BDK_BGXX_GMP_GMI_TX_IFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TX_IFG(a) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TX_IFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx_jam
+ *
+ * BGX GMI TX JAM Pattern Registers
+ * This register provides the pattern used in JAM bytes.
+ */
+union bdk_bgxx_gmp_gmi_tx_jam
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_tx_jam_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t jam : 8; /**< [ 7: 0](R/W) JAM pattern. (SGMII/1000BASE-X half-duplex only.) */
+#else /* Word 0 - Little Endian */
+ uint64_t jam : 8; /**< [ 7: 0](R/W) JAM pattern. (SGMII/1000BASE-X half-duplex only.) */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_tx_jam_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_tx_jam bdk_bgxx_gmp_gmi_tx_jam_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX_JAM; invalid
+   (a) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_JAM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_JAM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0039008ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0039008ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0039008ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TX_JAM", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TX_JAM(a) bdk_bgxx_gmp_gmi_tx_jam_t
+#define bustype_BDK_BGXX_GMP_GMI_TX_JAM(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TX_JAM(a) "BGXX_GMP_GMI_TX_JAM"
+#define device_bar_BDK_BGXX_GMP_GMI_TX_JAM(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TX_JAM(a) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TX_JAM(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx_lfsr
+ *
+ * BGX GMI TX LFSR Registers
+ * This register shows the contents of the linear feedback shift register (LFSR), which is used
+ * to implement truncated binary exponential backoff.
+ */
+union bdk_bgxx_gmp_gmi_tx_lfsr
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_tx_lfsr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t lfsr : 16; /**< [ 15: 0](R/W/H) Contains the current state of the LFSR, which is used to feed random numbers to compute
+ truncated binary exponential backoff. (SGMII/1000Base-X half-duplex only.) */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr : 16; /**< [ 15: 0](R/W/H) Contains the current state of the LFSR, which is used to feed random numbers to compute
+ truncated binary exponential backoff. (SGMII/1000Base-X half-duplex only.) */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_tx_lfsr_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_tx_lfsr bdk_bgxx_gmp_gmi_tx_lfsr_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX_LFSR; invalid
+   (a) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_LFSR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_LFSR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0039028ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0039028ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0039028ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TX_LFSR", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TX_LFSR(a) bdk_bgxx_gmp_gmi_tx_lfsr_t
+#define bustype_BDK_BGXX_GMP_GMI_TX_LFSR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TX_LFSR(a) "BGXX_GMP_GMI_TX_LFSR"
+#define device_bar_BDK_BGXX_GMP_GMI_TX_LFSR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TX_LFSR(a) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TX_LFSR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx_pause_pkt_dmac
+ *
+ * BGX TX PAUSE-Packet DMAC-Field Registers
+ */
+union bdk_bgxx_gmp_gmi_tx_pause_pkt_dmac
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_tx_pause_pkt_dmac_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) The DMAC field, which is placed in outbound PAUSE packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) The DMAC field, which is placed in outbound PAUSE packets. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_tx_pause_pkt_dmac_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_tx_pause_pkt_dmac bdk_bgxx_gmp_gmi_tx_pause_pkt_dmac_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX_PAUSE_PKT_DMAC; invalid
+   (a) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0039018ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0039018ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0039018ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(a) bdk_bgxx_gmp_gmi_tx_pause_pkt_dmac_t
+#define bustype_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(a) "BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC"
+#define device_bar_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(a) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_DMAC(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_gmi_tx_pause_pkt_type
+ *
+ * BGX GMI TX PAUSE-Packet-PTYPE Field Registers
+ * This register provides the PTYPE field that is placed in outbound PAUSE packets.
+ */
+union bdk_bgxx_gmp_gmi_tx_pause_pkt_type
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_gmi_tx_pause_pkt_type_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ptype : 16; /**< [ 15: 0](R/W) The PTYPE field placed in outbound PAUSE packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t ptype : 16; /**< [ 15: 0](R/W) The PTYPE field placed in outbound PAUSE packets. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_gmi_tx_pause_pkt_type_s cn; */
+};
+typedef union bdk_bgxx_gmp_gmi_tx_pause_pkt_type bdk_bgxx_gmp_gmi_tx_pause_pkt_type_t;
+
+/* Return the CSR address of BGX(a)_GMP_GMI_TX_PAUSE_PKT_TYPE; invalid
+   (a) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0039020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0039020ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0039020ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(a) bdk_bgxx_gmp_gmi_tx_pause_pkt_type_t
+#define bustype_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(a) "BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE"
+#define device_bar_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(a) (a)
+#define arguments_BDK_BGXX_GMP_GMI_TX_PAUSE_PKT_TYPE(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_an#_adv
+ *
+ * BGX GMP PCS Autonegotiation Advertisement Registers
+ */
+union bdk_bgxx_gmp_pcs_anx_adv
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_anx_adv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t np : 1; /**< [ 15: 15](RO/H) Next page capable. This feature is not supported; this field is always 0. */
+ uint64_t reserved_14 : 1;
+ uint64_t rem_flt : 2; /**< [ 13: 12](R/W/H) Remote fault.
+ 0x0 = Link OK, XMIT = DATA.
+ 0x1 = Link failure (loss of sync, XMIT !=DATA).
+ 0x2 = Local device offline.
+ 0x3 = Autonegotiation error; failure to complete autonegotiation. AN error is set if
+ resolution function precludes operation with link partner. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t pause : 2; /**< [ 8: 7](R/W) PAUSE frame flow capability across link, exchanged during autonegotiation as follows:
+ 0x0 = No PAUSE.
+ 0x1 = Symmetric PAUSE.
+ 0x2 = Asymmetric PAUSE.
+ 0x3 = Both symmetric and asymmetric PAUSE to local device. */
+ uint64_t hfd : 1; /**< [ 6: 6](R/W) Half-duplex. When set, local device is half-duplex capable. */
+ uint64_t fd : 1; /**< [ 5: 5](R/W) Full-duplex. When set, local device is full-duplex capable. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t fd : 1; /**< [ 5: 5](R/W) Full-duplex. When set, local device is full-duplex capable. */
+ uint64_t hfd : 1; /**< [ 6: 6](R/W) Half-duplex. When set, local device is half-duplex capable. */
+ uint64_t pause : 2; /**< [ 8: 7](R/W) PAUSE frame flow capability across link, exchanged during autonegotiation as follows:
+ 0x0 = No PAUSE.
+ 0x1 = Symmetric PAUSE.
+ 0x2 = Asymmetric PAUSE.
+ 0x3 = Both symmetric and asymmetric PAUSE to local device. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t rem_flt : 2; /**< [ 13: 12](R/W/H) Remote fault.
+ 0x0 = Link OK, XMIT = DATA.
+ 0x1 = Link failure (loss of sync, XMIT !=DATA).
+ 0x2 = Local device offline.
+ 0x3 = Autonegotiation error; failure to complete autonegotiation. AN error is set if
+ resolution function precludes operation with link partner. */
+ uint64_t reserved_14 : 1;
+ uint64_t np : 1; /**< [ 15: 15](RO/H) Next page capable. This feature is not supported; this field is always 0. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_anx_adv_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_anx_adv bdk_bgxx_gmp_pcs_anx_adv_t;
+
+/* Return the CSR address of BGX(a)_GMP_PCS_AN(b)_ADV; invalid
+   (a,b) for the detected chip model is fatal via __bdk_csr_fatal(). */
+static inline uint64_t BDK_BGXX_GMP_PCS_ANX_ADV(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_ANX_ADV(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030010ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030010ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030010ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_ANX_ADV", 2, a, b, 0, 0);
+}
+
+/* Per-register accessor metadata keyed off the accessor name. */
+#define typedef_BDK_BGXX_GMP_PCS_ANX_ADV(a,b) bdk_bgxx_gmp_pcs_anx_adv_t
+#define bustype_BDK_BGXX_GMP_PCS_ANX_ADV(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_ANX_ADV(a,b) "BGXX_GMP_PCS_ANX_ADV"
+#define device_bar_BDK_BGXX_GMP_PCS_ANX_ADV(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_ANX_ADV(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_ANX_ADV(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_an#_ext_st
+ *
+ * BGX GMO PCS Autonegotiation Extended Status Registers
+ */
+union bdk_bgxx_gmp_pcs_anx_ext_st
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_anx_ext_st_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t thou_xfd : 1; /**< [ 15: 15](RO/H) When set, PHY is 1000 BASE-X full duplex capable. */
+ uint64_t thou_xhd : 1; /**< [ 14: 14](RO/H) When set, PHY is 1000 BASE-X half duplex capable. */
+ uint64_t thou_tfd : 1; /**< [ 13: 13](RO/H) When set, PHY is 1000 BASE-T full duplex capable. */
+ uint64_t thou_thd : 1; /**< [ 12: 12](RO/H) When set, PHY is 1000 BASE-T half duplex capable. */
+ uint64_t reserved_0_11 : 12;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_11 : 12;
+ uint64_t thou_thd : 1; /**< [ 12: 12](RO/H) When set, PHY is 1000 BASE-T half duplex capable. */
+ uint64_t thou_tfd : 1; /**< [ 13: 13](RO/H) When set, PHY is 1000 BASE-T full duplex capable. */
+ uint64_t thou_xhd : 1; /**< [ 14: 14](RO/H) When set, PHY is 1000 BASE-X half duplex capable. */
+ uint64_t thou_xfd : 1; /**< [ 15: 15](RO/H) When set, PHY is 1000 BASE-X full duplex capable. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_anx_ext_st_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_anx_ext_st bdk_bgxx_gmp_pcs_anx_ext_st_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_ANX_EXT_ST(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_ANX_EXT_ST(unsigned long a, unsigned long b)
+{
+    /* CSR address: base 0x87e0e0030028, BGX instance stride 0x1000000, index
+       stride 0x100000. CN83XX allows a <= 3; CN81XX/CN88XX allow a <= 1. */
+    if (b <= 3) {
+        if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 1))
+            return 0x87e0e0030028ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+            return 0x87e0e0030028ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+            return 0x87e0e0030028ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_PCS_ANX_EXT_ST", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_PCS_ANX_EXT_ST(a,b) bdk_bgxx_gmp_pcs_anx_ext_st_t
+#define bustype_BDK_BGXX_GMP_PCS_ANX_EXT_ST(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_ANX_EXT_ST(a,b) "BGXX_GMP_PCS_ANX_EXT_ST"
+#define device_bar_BDK_BGXX_GMP_PCS_ANX_EXT_ST(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_ANX_EXT_ST(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_ANX_EXT_ST(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_an#_lp_abil
+ *
+ * BGX GMP PCS Autonegotiation Link Partner Ability Registers
+ * This is the autonegotiation link partner ability register 5 as per IEEE 802.3, Clause 37.
+ */
+union bdk_bgxx_gmp_pcs_anx_lp_abil
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_anx_lp_abil_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t np : 1; /**< [ 15: 15](RO/H) Next page capable:
+ 0 = Link partner not next page capable.
+ 1 = Link partner next page capable. */
+ uint64_t ack : 1; /**< [ 14: 14](RO/H) When set, indicates acknowledgement received. */
+ uint64_t rem_flt : 2; /**< [ 13: 12](RO/H) Link partner's link status as follows:
+ 0x0 = Link OK.
+ 0x1 = Offline.
+ 0x2 = Link failure.
+ 0x3 = Autonegotiation error. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t pause : 2; /**< [ 8: 7](RO/H) Link partner PAUSE setting as follows:
+ 0x0 = No PAUSE.
+ 0x1 = Symmetric PAUSE.
+ 0x2 = Asymmetric PAUSE.
+ 0x3 = Both symmetric and asymmetric PAUSE to local device. */
+ uint64_t hfd : 1; /**< [ 6: 6](RO/H) Half-duplex. When set, link partner is half-duplex capable. */
+ uint64_t fd : 1; /**< [ 5: 5](RO/H) Full-duplex. When set, link partner is full-duplex capable. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t fd : 1; /**< [ 5: 5](RO/H) Full-duplex. When set, link partner is full-duplex capable. */
+ uint64_t hfd : 1; /**< [ 6: 6](RO/H) Half-duplex. When set, link partner is half-duplex capable. */
+ uint64_t pause : 2; /**< [ 8: 7](RO/H) Link partner PAUSE setting as follows:
+ 0x0 = No PAUSE.
+ 0x1 = Symmetric PAUSE.
+ 0x2 = Asymmetric PAUSE.
+ 0x3 = Both symmetric and asymmetric PAUSE to local device. */
+ uint64_t reserved_9_11 : 3;
+ uint64_t rem_flt : 2; /**< [ 13: 12](RO/H) Link partner's link status as follows:
+ 0x0 = Link OK.
+ 0x1 = Offline.
+ 0x2 = Link failure.
+ 0x3 = Autonegotiation error. */
+ uint64_t ack : 1; /**< [ 14: 14](RO/H) When set, indicates acknowledgement received. */
+ uint64_t np : 1; /**< [ 15: 15](RO/H) Next page capable:
+ 0 = Link partner not next page capable.
+ 1 = Link partner next page capable. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_anx_lp_abil_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_anx_lp_abil bdk_bgxx_gmp_pcs_anx_lp_abil_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_ANX_LP_ABIL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_ANX_LP_ABIL(unsigned long a, unsigned long b)
+{
+    /* CSR address: base 0x87e0e0030018, BGX instance stride 0x1000000, index
+       stride 0x100000. CN83XX allows a <= 3; CN81XX/CN88XX allow a <= 1. */
+    if (b <= 3) {
+        if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 1))
+            return 0x87e0e0030018ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+            return 0x87e0e0030018ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+            return 0x87e0e0030018ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_PCS_ANX_LP_ABIL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_PCS_ANX_LP_ABIL(a,b) bdk_bgxx_gmp_pcs_anx_lp_abil_t
+#define bustype_BDK_BGXX_GMP_PCS_ANX_LP_ABIL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_ANX_LP_ABIL(a,b) "BGXX_GMP_PCS_ANX_LP_ABIL"
+#define device_bar_BDK_BGXX_GMP_PCS_ANX_LP_ABIL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_ANX_LP_ABIL(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_ANX_LP_ABIL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_an#_results
+ *
+ * BGX GMP PCS Autonegotiation Results Registers
+ * This register is not valid when BGX()_GMP_PCS_MISC()_CTL[AN_OVRD] is set to 1. If
+ * BGX()_GMP_PCS_MISC()_CTL[AN_OVRD] is set to 0 and
+ * BGX()_GMP_PCS_AN()_RESULTS[AN_CPT] is set to 1, this register is valid.
+ */
+union bdk_bgxx_gmp_pcs_anx_results
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_anx_results_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t pause : 2; /**< [ 6: 5](RO/H) PAUSE selection ('don't care' for SGMII/QSGMII) as follows:
+ 0x0 = Disable PAUSE, TX and RX.
+ 0x1 = Enable PAUSE frames, RX only.
+ 0x2 = Enable PAUSE frames, TX only.
+ 0x3 = Enable PAUSE frames, TX and RX. */
+ uint64_t spd : 2; /**< [ 4: 3](RO/H) Link speed selection as follows:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t an_cpt : 1; /**< [ 2: 2](RO/H) Autonegotiation completed.
+ 1 = Autonegotiation completed.
+ 0 = Autonegotiation not completed or failed. */
+ uint64_t dup : 1; /**< [ 1: 1](RO/H) Duplex mode. 1 = full duplex, 0 = half duplex. */
+ uint64_t link_ok : 1; /**< [ 0: 0](RO/H) Link status: 1 = link up (OK), 1 = link down. */
+#else /* Word 0 - Little Endian */
+ uint64_t link_ok : 1; /**< [ 0: 0](RO/H) Link status: 1 = link up (OK), 1 = link down. */
+ uint64_t dup : 1; /**< [ 1: 1](RO/H) Duplex mode. 1 = full duplex, 0 = half duplex. */
+ uint64_t an_cpt : 1; /**< [ 2: 2](RO/H) Autonegotiation completed.
+ 1 = Autonegotiation completed.
+ 0 = Autonegotiation not completed or failed. */
+ uint64_t spd : 2; /**< [ 4: 3](RO/H) Link speed selection as follows:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t pause : 2; /**< [ 6: 5](RO/H) PAUSE selection ('don't care' for SGMII/QSGMII) as follows:
+ 0x0 = Disable PAUSE, TX and RX.
+ 0x1 = Enable PAUSE frames, RX only.
+ 0x2 = Enable PAUSE frames, TX only.
+ 0x3 = Enable PAUSE frames, TX and RX. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_anx_results_s cn81xx; */
+ struct bdk_bgxx_gmp_pcs_anx_results_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t pause : 2; /**< [ 6: 5](RO/H) PAUSE selection ('don't care' for SGMII) as follows:
+ 0x0 = Disable PAUSE, TX and RX.
+ 0x1 = Enable PAUSE frames, RX only.
+ 0x2 = Enable PAUSE frames, TX only.
+ 0x3 = Enable PAUSE frames, TX and RX. */
+ uint64_t spd : 2; /**< [ 4: 3](RO/H) Link speed selection as follows:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t an_cpt : 1; /**< [ 2: 2](RO/H) Autonegotiation completed.
+ 1 = Autonegotiation completed.
+ 0 = Autonegotiation not completed or failed. */
+ uint64_t dup : 1; /**< [ 1: 1](RO/H) Duplex mode. 1 = full duplex, 0 = half duplex. */
+ uint64_t link_ok : 1; /**< [ 0: 0](RO/H) Link status: 1 = link up (OK), 1 = link down. */
+#else /* Word 0 - Little Endian */
+ uint64_t link_ok : 1; /**< [ 0: 0](RO/H) Link status: 1 = link up (OK), 1 = link down. */
+ uint64_t dup : 1; /**< [ 1: 1](RO/H) Duplex mode. 1 = full duplex, 0 = half duplex. */
+ uint64_t an_cpt : 1; /**< [ 2: 2](RO/H) Autonegotiation completed.
+ 1 = Autonegotiation completed.
+ 0 = Autonegotiation not completed or failed. */
+ uint64_t spd : 2; /**< [ 4: 3](RO/H) Link speed selection as follows:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t pause : 2; /**< [ 6: 5](RO/H) PAUSE selection ('don't care' for SGMII) as follows:
+ 0x0 = Disable PAUSE, TX and RX.
+ 0x1 = Enable PAUSE frames, RX only.
+ 0x2 = Enable PAUSE frames, TX only.
+ 0x3 = Enable PAUSE frames, TX and RX. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_pcs_anx_results_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_pcs_anx_results bdk_bgxx_gmp_pcs_anx_results_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_ANX_RESULTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_ANX_RESULTS(unsigned long a, unsigned long b)
+{
+    /* CSR address: base 0x87e0e0030020, BGX instance stride 0x1000000, index
+       stride 0x100000. CN83XX allows a <= 3; CN81XX/CN88XX allow a <= 1. */
+    if (b <= 3) {
+        if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 1))
+            return 0x87e0e0030020ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+            return 0x87e0e0030020ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+            return 0x87e0e0030020ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_PCS_ANX_RESULTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_PCS_ANX_RESULTS(a,b) bdk_bgxx_gmp_pcs_anx_results_t
+#define bustype_BDK_BGXX_GMP_PCS_ANX_RESULTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_ANX_RESULTS(a,b) "BGXX_GMP_PCS_ANX_RESULTS"
+#define device_bar_BDK_BGXX_GMP_PCS_ANX_RESULTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_ANX_RESULTS(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_ANX_RESULTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_int#
+ *
+ * BGX GMP PCS Interrupt Registers
+ */
+union bdk_bgxx_gmp_pcs_intx
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_intx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1C/H) Code group sync failure debug help. BGX()_GMP_PCS_INT()[DBG_SYNC] interrupt fires when
+ code group
+ synchronization state machine makes a transition from SYNC_ACQUIRED_1 state to
+ SYNC_ACQUIRED_2 state. (See IEEE 802.3-2005, figure 37-9). It indicates that a bad code
+ group was received after code group synchronization was achieved. This interrupt should be
+ disabled during normal link operation. Use it as a debug help feature only. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1C/H) Set whenever duplex mode changes on the link. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1C/H) Set by hardware whenever RX sync state machine reaches a bad state. Should never be set
+ during normal operation. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1C/H) Set by hardware whenever autonegotiation state machine reaches a bad state. Should never
+ be set during normal operation. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1C/H) Set by hardware whenever code group sync or bit lock failure occurs. Cannot fire in loopback1 mode. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1C/H) Set by hardware whenever RX state machine reaches a bad state. Should never be set during
+ normal operation. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1C/H) Set whenever RX receives a code group error in 10-bit to 8-bit decode logic. Cannot fire
+ in loopback1 mode. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1C/H) Set by hardware whenever TX state machine reaches a bad state. Should never be set during
+ normal operation. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1C/H) Set whenever hardware detects a TX FIFO overflow condition. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1C/H) Set whenever hardware detects a TX FIFO underflow condition. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1C/H) Autonegotiation error; AN resolution function failed. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1C/H) Set whenever hardware detects a change in the XMIT variable. XMIT variable states are
+ IDLE, CONFIG and DATA. */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1C/H) Set by hardware whenever link speed has changed. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1C/H) Set by hardware whenever link speed has changed. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1C/H) Set whenever hardware detects a change in the XMIT variable. XMIT variable states are
+ IDLE, CONFIG and DATA. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1C/H) Autonegotiation error; AN resolution function failed. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1C/H) Set whenever hardware detects a TX FIFO underflow condition. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1C/H) Set whenever hardware detects a TX FIFO overflow condition. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1C/H) Set by hardware whenever TX state machine reaches a bad state. Should never be set during
+ normal operation. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1C/H) Set whenever RX receives a code group error in 10-bit to 8-bit decode logic. Cannot fire
+ in loopback1 mode. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1C/H) Set by hardware whenever RX state machine reaches a bad state. Should never be set during
+ normal operation. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1C/H) Set by hardware whenever code group sync or bit lock failure occurs. Cannot fire in loopback1 mode. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1C/H) Set by hardware whenever autonegotiation state machine reaches a bad state. Should never
+ be set during normal operation. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1C/H) Set by hardware whenever RX sync state machine reaches a bad state. Should never be set
+ during normal operation. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1C/H) Set whenever duplex mode changes on the link. */
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1C/H) Code group sync failure debug help. BGX()_GMP_PCS_INT()[DBG_SYNC] interrupt fires when
+ code group
+ synchronization state machine makes a transition from SYNC_ACQUIRED_1 state to
+ SYNC_ACQUIRED_2 state. (See IEEE 802.3-2005, figure 37-9). It indicates that a bad code
+ group was received after code group synchronization was achieved. This interrupt should be
+ disabled during normal link operation. Use it as a debug help feature only. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_intx_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_intx bdk_bgxx_gmp_pcs_intx_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_INTX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_INTX(unsigned long a, unsigned long b)
+{
+    /* CSR address: base 0x87e0e0030080, BGX instance stride 0x1000000, index
+       stride 0x100000. CN83XX allows a <= 3; CN81XX/CN88XX allow a <= 1. */
+    if (b <= 3) {
+        if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 1))
+            return 0x87e0e0030080ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+            return 0x87e0e0030080ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+            return 0x87e0e0030080ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_PCS_INTX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_PCS_INTX(a,b) bdk_bgxx_gmp_pcs_intx_t
+#define bustype_BDK_BGXX_GMP_PCS_INTX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_INTX(a,b) "BGXX_GMP_PCS_INTX"
+#define device_bar_BDK_BGXX_GMP_PCS_INTX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_INTX(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_INTX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_int#_ena_w1c
+ *
+ * BGX GMP PCS Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_bgxx_gmp_pcs_intx_ena_w1c
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_intx_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[LNKSPD]. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[LNKSPD]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..1)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_intx_ena_w1c_s cn81xx; */
+ /* struct bdk_bgxx_gmp_pcs_intx_ena_w1c_s cn88xx; */
+ struct bdk_bgxx_gmp_pcs_intx_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[LNKSPD]. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[LNKSPD]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..3)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_gmp_pcs_intx_ena_w1c bdk_bgxx_gmp_pcs_intx_ena_w1c_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_INTX_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_INTX_ENA_W1C(unsigned long a, unsigned long b)
+{
+    /* CSR address: base 0x87e0e0030090, BGX instance stride 0x1000000, index
+       stride 0x100000. CN83XX allows a <= 3; CN81XX/CN88XX allow a <= 1. */
+    if (b <= 3) {
+        if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 1))
+            return 0x87e0e0030090ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+            return 0x87e0e0030090ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+            return 0x87e0e0030090ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_PCS_INTX_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_PCS_INTX_ENA_W1C(a,b) bdk_bgxx_gmp_pcs_intx_ena_w1c_t
+#define bustype_BDK_BGXX_GMP_PCS_INTX_ENA_W1C(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_INTX_ENA_W1C(a,b) "BGXX_GMP_PCS_INTX_ENA_W1C"
+#define device_bar_BDK_BGXX_GMP_PCS_INTX_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_INTX_ENA_W1C(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_INTX_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_int#_ena_w1s
+ *
+ * BGX GMP PCS Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_bgxx_gmp_pcs_intx_ena_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_intx_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[LNKSPD]. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[LNKSPD]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..1)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_intx_ena_w1s_s cn81xx; */
+ /* struct bdk_bgxx_gmp_pcs_intx_ena_w1s_s cn88xx; */
+ struct bdk_bgxx_gmp_pcs_intx_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[LNKSPD]. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[LNKSPD]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..3)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_gmp_pcs_intx_ena_w1s bdk_bgxx_gmp_pcs_intx_ena_w1s_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_INTX_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_INTX_ENA_W1S(unsigned long a, unsigned long b)
+{
+    /* CSR address: base 0x87e0e0030098, BGX instance stride 0x1000000, index
+       stride 0x100000. CN83XX allows a <= 3; CN81XX/CN88XX allow a <= 1. */
+    if (b <= 3) {
+        if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 1))
+            return 0x87e0e0030098ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+            return 0x87e0e0030098ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+        if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+            return 0x87e0e0030098ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_GMP_PCS_INTX_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_GMP_PCS_INTX_ENA_W1S(a,b) bdk_bgxx_gmp_pcs_intx_ena_w1s_t
+#define bustype_BDK_BGXX_GMP_PCS_INTX_ENA_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_INTX_ENA_W1S(a,b) "BGXX_GMP_PCS_INTX_ENA_W1S"
+#define device_bar_BDK_BGXX_GMP_PCS_INTX_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_INTX_ENA_W1S(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_INTX_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_int#_w1s
+ *
+ * BGX GMP PCS Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_bgxx_gmp_pcs_intx_w1s
+{
+ uint64_t u;
+ /* NOTE(review): appears auto-generated from the CSR database; the
+    big-endian and little-endian halves below are exact mirrors and must
+    stay in sync — do not edit fields by hand. */
+ struct bdk_bgxx_gmp_pcs_intx_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[LNKSPD]. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[LNKSPD]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..1)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_intx_w1s_s cn81xx; */
+ /* struct bdk_bgxx_gmp_pcs_intx_w1s_s cn88xx; */
+ /* cn83xx: bit layout is identical to _s; only the per-field comment text
+    differs (CN83XX exposes BGX(0..3) rather than BGX(0..1)). */
+ struct bdk_bgxx_gmp_pcs_intx_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[LNKSPD]. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnkspd : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[LNKSPD]. */
+ uint64_t xmit : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[XMIT]. */
+ uint64_t an_err : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[AN_ERR]. */
+ uint64_t txfifu : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFU]. */
+ uint64_t txfifo : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[TXFIFO]. */
+ uint64_t txbad : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[TXBAD]. */
+ uint64_t rxerr : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[RXERR]. */
+ uint64_t rxbad : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[RXBAD]. */
+ uint64_t rxlock : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[RXLOCK]. */
+ uint64_t an_bad : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[AN_BAD]. */
+ uint64_t sync_bad : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[SYNC_BAD]. */
+ uint64_t dup : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[DUP]. */
+ uint64_t dbg_sync : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..3)_GMP_PCS_INT(0..3)[DBG_SYNC]. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_gmp_pcs_intx_w1s bdk_bgxx_gmp_pcs_intx_w1s_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_INTX_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_INTX_W1S(unsigned long a, unsigned long b)
+{
+ /* CSR address of BGX(a)_GMP_PCS_INT(b)_W1S: base 0x87e0e0030088,
+    BGX block stride 0x1000000, LMAC stride 0x100000. CN83XX allows
+    a<=3; CN81XX/CN88XX allow a<=1. */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030088ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030088ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030088ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ /* Out-of-range indices: report fatal CSR error (presumably noreturn —
+    declared in bdk-csr.h; verify). */
+ __bdk_csr_fatal("BGXX_GMP_PCS_INTX_W1S", 2, a, b, 0, 0);
+}
+
+/* Generated accessor plumbing used by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_PCS_INTX_W1S(a,b) bdk_bgxx_gmp_pcs_intx_w1s_t
+#define bustype_BDK_BGXX_GMP_PCS_INTX_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_INTX_W1S(a,b) "BGXX_GMP_PCS_INTX_W1S"
+#define device_bar_BDK_BGXX_GMP_PCS_INTX_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_INTX_W1S(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_INTX_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_link#_timer
+ *
+ * BGX GMP PCS Link Timer Registers
+ * This is the 1.6 ms nominal link timer register.
+ */
+union bdk_bgxx_gmp_pcs_linkx_timer
+{
+ uint64_t u;
+ /* NOTE(review): appears auto-generated; endian halves are mirrors. */
+ struct bdk_bgxx_gmp_pcs_linkx_timer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t count : 16; /**< [ 15: 0](R/W) (Coprocessor clock period * 1024) * [COUNT] should be 1.6 ms for SGMII/QSGMII and 10 ms
+ otherwise,
+ which is the link timer used in autonegotiation. Reset assumes a 700 MHz coprocessor
+ clock for 1.6 ms link timer. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 16; /**< [ 15: 0](R/W) (Coprocessor clock period * 1024) * [COUNT] should be 1.6 ms for SGMII/QSGMII and 10 ms
+ otherwise,
+ which is the link timer used in autonegotiation. Reset assumes a 700 MHz coprocessor
+ clock for 1.6 ms link timer. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_linkx_timer_s cn81xx; */
+ /* cn88xx: same bit layout as _s; only the comment differs (CN88XX
+    field description mentions SGMII only, not QSGMII). */
+ struct bdk_bgxx_gmp_pcs_linkx_timer_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t count : 16; /**< [ 15: 0](R/W) (Coprocessor clock period * 1024) * COUNT should be 1.6 ms for SGMII and 10 ms otherwise,
+ which is the link timer used in autonegotiation. Reset assumes a 700 MHz coprocessor
+ clock for 1.6 ms link timer. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 16; /**< [ 15: 0](R/W) (Coprocessor clock period * 1024) * COUNT should be 1.6 ms for SGMII and 10 ms otherwise,
+ which is the link timer used in autonegotiation. Reset assumes a 700 MHz coprocessor
+ clock for 1.6 ms link timer. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_pcs_linkx_timer_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_pcs_linkx_timer bdk_bgxx_gmp_pcs_linkx_timer_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_LINKX_TIMER(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_LINKX_TIMER(unsigned long a, unsigned long b)
+{
+ /* CSR address of BGX(a)_GMP_PCS_LINK(b)_TIMER: base 0x87e0e0030040,
+    BGX block stride 0x1000000, LMAC stride 0x100000. */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030040ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030040ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030040ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ /* Out-of-range indices: report fatal CSR error (presumably noreturn —
+    declared in bdk-csr.h; verify). */
+ __bdk_csr_fatal("BGXX_GMP_PCS_LINKX_TIMER", 2, a, b, 0, 0);
+}
+
+/* Generated accessor plumbing used by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_PCS_LINKX_TIMER(a,b) bdk_bgxx_gmp_pcs_linkx_timer_t
+#define bustype_BDK_BGXX_GMP_PCS_LINKX_TIMER(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_LINKX_TIMER(a,b) "BGXX_GMP_PCS_LINKX_TIMER"
+#define device_bar_BDK_BGXX_GMP_PCS_LINKX_TIMER(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_LINKX_TIMER(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_LINKX_TIMER(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_misc#_ctl
+ *
+ * BGX GMP SGMII Miscellaneous Control Registers
+ * Internal:
+ * SGMII bit [12] is really a misnomer, it is a decode of pi_qlm_cfg pins to indicate SGMII or
+ * 1000Base-X modes.
+ *
+ * Note: The SGMII AN Advertisement Register above will be sent during Auto Negotiation if
+ * [MAC_PHY] is set (1=PHY mode). If the bit is not set (0=MAC mode), the
+ * tx_Config_Reg\<14\> becomes ACK bit and tx_Config_Reg\<0\> is always 1.
+ * All other bits in tx_Config_Reg sent will be 0. The PHY dictates the Auto Negotiation results.
+ */
+union bdk_bgxx_gmp_pcs_miscx_ctl
+{
+ uint64_t u;
+ /* NOTE(review): appears auto-generated; the big-endian and little-endian
+    halves below are exact mirrors and must stay in sync. */
+ struct bdk_bgxx_gmp_pcs_miscx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t qsgmii_comma_wd_en : 1; /**< [ 32: 32](R/W) QSGMII comma watchdog byte counter enable. */
+ uint64_t qsgmii_comma_wd : 16; /**< [ 31: 16](R/W) QSGMII comma watchdog byte counter. This counter is used in QSGMII mode and
+ counts incoming bytes to ensure state transitions in the PCS receive side state
+ machine when disparity enable is turned off and bad code groups and commas are
+ not communicated from the code group processor after code group lock. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t disp_en : 1; /**< [ 13: 13](R/W) Disparity check enable. When LMAC_TYPE=QSGMII the running disparity check should be
+ disabled
+ to
+ prevent propogation across ports.
+ 0 = disable disparity check
+ 1 = enable disparity checking
+
+ See GSER()_LANE_MODE[LMODE]. */
+ uint64_t sgmii : 1; /**< [ 12: 12](RO/H) Reserved. Always 1. */
+ uint64_t gmxeno : 1; /**< [ 11: 11](R/W) GMI enable override. When set, forces GMI to appear disabled. The enable/disable status of
+ GMI is checked only at SOP of every packet. */
+ uint64_t loopbck2 : 1; /**< [ 10: 10](R/W) Sets external loopback mode to return RX data back out via the TX data path. 0 = No
+ loopback. 1 = Loopback.
+ LOOPBCK1 and LOOPBCK2 modes may not be supported simultaneously. */
+ uint64_t mac_phy : 1; /**< [ 9: 9](R/W) MAC/PHY.
+ 0 = MAC.
+ 1 = PHY decides the tx_Config_Reg value to be sent during autonegotiation. */
+ uint64_t mode : 1; /**< [ 8: 8](R/W) Mode bit.
+
+ _ 0 = SGMII mode is selected and the following note applies.
+ The SGMII AN advertisement register (BGX()_GMP_PCS_SGM()_AN_ADV) is sent during
+ autonegotiation if BGX()_GMP_PCS_MISC()_CTL[MAC_PHY] = 1 (PHY mode). If [MAC_PHY]
+ = 0 (MAC mode), the tx_Config_Reg\<14\> becomes ACK bit and \<0\> is always 1. All other bits
+ in tx_Config_Reg sent are 0. The PHY dictates the autonegotiation results.
+
+ _ 1 = 1000Base-X mode is selected. Autonegotiation follows IEEE 802.3 clause 37. */
+ uint64_t an_ovrd : 1; /**< [ 7: 7](R/W) Autonegotiation results override:
+ 0 = Disable.
+ 1 = Enable override. Autonegotiation is allowed to happen but the results are ignored
+ when this bit is set. Duplex and Link speed values are set from BGX()_GMP_PCS_MISC()_CTL. */
+ uint64_t samp_pt : 7; /**< [ 6: 0](R/W) Byte number in elongated frames for 10/100 Mb/s operation for data sampling on RX side in
+ PCS. Recommended values are 0x5 for 100 Mb/s operation and 0x32 for 10 Mb/s operation.
+
+ For 10 Mb/s operation, this field should be set to a value less than 99 and greater than
+ 0.
+ If set out of this range, a value of 50 is used for actual sampling internally without
+ affecting the CSR field.
+
+ For 100 Mb/s operation this field should be set to a value less than 9 and greater than 0.
+ If set out of this range, a value of 5 is used for actual sampling internally without
+ affecting the CSR field. */
+#else /* Word 0 - Little Endian */
+ uint64_t samp_pt : 7; /**< [ 6: 0](R/W) Byte number in elongated frames for 10/100 Mb/s operation for data sampling on RX side in
+ PCS. Recommended values are 0x5 for 100 Mb/s operation and 0x32 for 10 Mb/s operation.
+
+ For 10 Mb/s operation, this field should be set to a value less than 99 and greater than
+ 0.
+ If set out of this range, a value of 50 is used for actual sampling internally without
+ affecting the CSR field.
+
+ For 100 Mb/s operation this field should be set to a value less than 9 and greater than 0.
+ If set out of this range, a value of 5 is used for actual sampling internally without
+ affecting the CSR field. */
+ uint64_t an_ovrd : 1; /**< [ 7: 7](R/W) Autonegotiation results override:
+ 0 = Disable.
+ 1 = Enable override. Autonegotiation is allowed to happen but the results are ignored
+ when this bit is set. Duplex and Link speed values are set from BGX()_GMP_PCS_MISC()_CTL. */
+ uint64_t mode : 1; /**< [ 8: 8](R/W) Mode bit.
+
+ _ 0 = SGMII mode is selected and the following note applies.
+ The SGMII AN advertisement register (BGX()_GMP_PCS_SGM()_AN_ADV) is sent during
+ autonegotiation if BGX()_GMP_PCS_MISC()_CTL[MAC_PHY] = 1 (PHY mode). If [MAC_PHY]
+ = 0 (MAC mode), the tx_Config_Reg\<14\> becomes ACK bit and \<0\> is always 1. All other bits
+ in tx_Config_Reg sent are 0. The PHY dictates the autonegotiation results.
+
+ _ 1 = 1000Base-X mode is selected. Autonegotiation follows IEEE 802.3 clause 37. */
+ uint64_t mac_phy : 1; /**< [ 9: 9](R/W) MAC/PHY.
+ 0 = MAC.
+ 1 = PHY decides the tx_Config_Reg value to be sent during autonegotiation. */
+ uint64_t loopbck2 : 1; /**< [ 10: 10](R/W) Sets external loopback mode to return RX data back out via the TX data path. 0 = No
+ loopback. 1 = Loopback.
+ LOOPBCK1 and LOOPBCK2 modes may not be supported simultaneously. */
+ uint64_t gmxeno : 1; /**< [ 11: 11](R/W) GMI enable override. When set, forces GMI to appear disabled. The enable/disable status of
+ GMI is checked only at SOP of every packet. */
+ uint64_t sgmii : 1; /**< [ 12: 12](RO/H) Reserved. Always 1. */
+ uint64_t disp_en : 1; /**< [ 13: 13](R/W) Disparity check enable. When LMAC_TYPE=QSGMII the running disparity check should be
+ disabled
+ to
+ prevent propogation across ports.
+ 0 = disable disparity check
+ 1 = enable disparity checking
+
+ See GSER()_LANE_MODE[LMODE]. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t qsgmii_comma_wd : 16; /**< [ 31: 16](R/W) QSGMII comma watchdog byte counter. This counter is used in QSGMII mode and
+ counts incoming bytes to ensure state transitions in the PCS receive side state
+ machine when disparity enable is turned off and bad code groups and commas are
+ not communicated from the code group processor after code group lock. */
+ uint64_t qsgmii_comma_wd_en : 1; /**< [ 32: 32](R/W) QSGMII comma watchdog byte counter enable. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_miscx_ctl_s cn81xx; */
+ /* cn88xx: unlike _s, bits [32:13] ([DISP_EN]/[QSGMII_COMMA_WD]/
+    [QSGMII_COMMA_WD_EN]) are absent — reserved_13_63 covers them. */
+ struct bdk_bgxx_gmp_pcs_miscx_ctl_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t sgmii : 1; /**< [ 12: 12](RO/H) Reserved. Always 1. */
+ uint64_t gmxeno : 1; /**< [ 11: 11](R/W) GMI enable override. When set, forces GMI to appear disabled. The enable/disable status of
+ GMI is checked only at SOP of every packet. */
+ uint64_t loopbck2 : 1; /**< [ 10: 10](R/W) Sets external loopback mode to return RX data back out via the TX data path. 0 = No
+ loopback. 1 = Loopback.
+ LOOPBCK1 and LOOPBCK2 modes may not be supported simultaneously. */
+ uint64_t mac_phy : 1; /**< [ 9: 9](R/W) MAC/PHY.
+ 0 = MAC.
+ 1 = PHY decides the tx_Config_Reg value to be sent during autonegotiation. */
+ uint64_t mode : 1; /**< [ 8: 8](R/W) Mode bit.
+
+ _ 0 = SGMII mode is selected and the following note applies.
+ The SGMII AN advertisement register (BGX()_GMP_PCS_SGM()_AN_ADV) is sent during
+ autonegotiation if BGX()_GMP_PCS_MISC()_CTL[MAC_PHY] = 1 (PHY mode). If [MAC_PHY]
+ = 0 (MAC mode), the tx_Config_Reg\<14\> becomes ACK bit and \<0\> is always 1. All other bits
+ in tx_Config_Reg sent are 0. The PHY dictates the autonegotiation results.
+
+ _ 1 = 1000Base-X mode is selected. Autonegotiation follows IEEE 802.3 clause 37. */
+ uint64_t an_ovrd : 1; /**< [ 7: 7](R/W) Autonegotiation results override:
+ 0 = Disable.
+ 1 = Enable override. Autonegotiation is allowed to happen but the results are ignored
+ when this bit is set. Duplex and Link speed values are set from BGX()_GMP_PCS_MISC()_CTL. */
+ uint64_t samp_pt : 7; /**< [ 6: 0](R/W) Byte number in elongated frames for 10/100 Mb/s operation for data sampling on RX side in
+ PCS. Recommended values are 0x5 for 100 Mb/s operation and 0x32 for 10 Mb/s operation.
+
+ For 10 Mb/s operation, this field should be set to a value less than 99 and greater than
+ 0.
+ If set out of this range, a value of 50 is used for actual sampling internally without
+ affecting the CSR field.
+
+ For 100 Mb/s operation this field should be set to a value less than 9 and greater than 0.
+ If set out of this range, a value of 5 is used for actual sampling internally without
+ affecting the CSR field. */
+#else /* Word 0 - Little Endian */
+ uint64_t samp_pt : 7; /**< [ 6: 0](R/W) Byte number in elongated frames for 10/100 Mb/s operation for data sampling on RX side in
+ PCS. Recommended values are 0x5 for 100 Mb/s operation and 0x32 for 10 Mb/s operation.
+
+ For 10 Mb/s operation, this field should be set to a value less than 99 and greater than
+ 0.
+ If set out of this range, a value of 50 is used for actual sampling internally without
+ affecting the CSR field.
+
+ For 100 Mb/s operation this field should be set to a value less than 9 and greater than 0.
+ If set out of this range, a value of 5 is used for actual sampling internally without
+ affecting the CSR field. */
+ uint64_t an_ovrd : 1; /**< [ 7: 7](R/W) Autonegotiation results override:
+ 0 = Disable.
+ 1 = Enable override. Autonegotiation is allowed to happen but the results are ignored
+ when this bit is set. Duplex and Link speed values are set from BGX()_GMP_PCS_MISC()_CTL. */
+ uint64_t mode : 1; /**< [ 8: 8](R/W) Mode bit.
+
+ _ 0 = SGMII mode is selected and the following note applies.
+ The SGMII AN advertisement register (BGX()_GMP_PCS_SGM()_AN_ADV) is sent during
+ autonegotiation if BGX()_GMP_PCS_MISC()_CTL[MAC_PHY] = 1 (PHY mode). If [MAC_PHY]
+ = 0 (MAC mode), the tx_Config_Reg\<14\> becomes ACK bit and \<0\> is always 1. All other bits
+ in tx_Config_Reg sent are 0. The PHY dictates the autonegotiation results.
+
+ _ 1 = 1000Base-X mode is selected. Autonegotiation follows IEEE 802.3 clause 37. */
+ uint64_t mac_phy : 1; /**< [ 9: 9](R/W) MAC/PHY.
+ 0 = MAC.
+ 1 = PHY decides the tx_Config_Reg value to be sent during autonegotiation. */
+ uint64_t loopbck2 : 1; /**< [ 10: 10](R/W) Sets external loopback mode to return RX data back out via the TX data path. 0 = No
+ loopback. 1 = Loopback.
+ LOOPBCK1 and LOOPBCK2 modes may not be supported simultaneously. */
+ uint64_t gmxeno : 1; /**< [ 11: 11](R/W) GMI enable override. When set, forces GMI to appear disabled. The enable/disable status of
+ GMI is checked only at SOP of every packet. */
+ uint64_t sgmii : 1; /**< [ 12: 12](RO/H) Reserved. Always 1. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_pcs_miscx_ctl_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_pcs_miscx_ctl bdk_bgxx_gmp_pcs_miscx_ctl_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_MISCX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_GMP_PCS_MISCX_CTL(unsigned long a, unsigned long b)
+{
+ /* CSR address of BGX(a)_GMP_PCS_MISC(b)_CTL: base 0x87e0e0030078,
+    BGX block stride 0x1000000, LMAC stride 0x100000. */
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030078ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030078ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030078ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ /* Out-of-range indices: report fatal CSR error (presumably noreturn —
+    declared in bdk-csr.h; verify). */
+ __bdk_csr_fatal("BGXX_GMP_PCS_MISCX_CTL", 2, a, b, 0, 0);
+}
+
+/* Generated accessor plumbing used by the BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_GMP_PCS_MISCX_CTL(a,b) bdk_bgxx_gmp_pcs_miscx_ctl_t
+#define bustype_BDK_BGXX_GMP_PCS_MISCX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_MISCX_CTL(a,b) "BGXX_GMP_PCS_MISCX_CTL"
+#define device_bar_BDK_BGXX_GMP_PCS_MISCX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_MISCX_CTL(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_MISCX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_mr#_control
+ *
+ * BGX GMP PCS Control Registers
+ */
+union bdk_bgxx_gmp_pcs_mrx_control
+{
+ uint64_t u;
+ /* NOTE(review): appears auto-generated; the big-endian and little-endian
+    halves below are exact mirrors and must stay in sync. */
+ struct bdk_bgxx_gmp_pcs_mrx_control_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t reset : 1; /**< [ 15: 15](R/W/H) Set to reset.
+ 0 = Normal operation.
+ 1 = Software PCS reset.
+
+ The bit returns to 0 after PCS has been reset. Takes 32 coprocessor-clock cycles to reset
+ PCS. This bit, when set, also drains the tx gmi fifo and can be used as a fifo draining
+ mechanism for both SerDes reset conditions and for XCV reset conditions. */
+ uint64_t loopbck1 : 1; /**< [ 14: 14](R/W) Enable loopback:
+ 0 = Normal operation.
+ 1 = Internal loopback mode.
+
+ The loopback mode returns loopback TX data from GMII TX back to GMII RX interface. The
+ loopback happens in the PCS module. Autonegotiation is disabled even if [AN_EN] is set
+ during loopback. */
+ uint64_t spdlsb : 1; /**< [ 13: 13](R/W) Least-significant bit of the link-speed field, i.e. SPD\<0\>. Refer to SPDMSB. */
+ uint64_t an_en : 1; /**< [ 12: 12](R/W) Autonegotiation enable. */
+ uint64_t pwr_dn : 1; /**< [ 11: 11](R/W) Power down:
+ 0 = Normal operation.
+ 1 = Power down (hardware reset). */
+ uint64_t reserved_10 : 1;
+ uint64_t rst_an : 1; /**< [ 9: 9](R/W/H) Reset autonegotiation. When set, if [AN_EN] = 1 and
+ BGX()_GMP_PCS_MR()_STATUS[AN_ABIL] = 1, autonegotiation begins. Otherwise,
+ software write requests are ignored and this bit remains at 0. This bit clears itself to
+ 0, when autonegotiation starts. */
+ uint64_t dup : 1; /**< [ 8: 8](R/W) Duplex mode:
+ 0 = half duplex; effective only if autonegotiation is disabled.
+ 1 = full duplex.
+
+ If BGX()_GMP_PCS_MR()_STATUS \<15:9\> and
+ BGX()_GMP_PCS_AN()_ADV\<15:12\> allow only one duplex mode, this bit corresponds to
+ that value and any attempts to write are ignored. */
+ uint64_t coltst : 1; /**< [ 7: 7](R/W) Enable COL signal test.
+ During COL test, the COL signal reflects the GMII TX_EN signal with less than 16BT delay. */
+ uint64_t spdmsb : 1; /**< [ 6: 6](R/W) Link speed most-significant bit, i.e SPD\<1\>; effective only if autonegotiation is
+ disabled.
+
+ \<pre\>
+ [SPDMSB] [SPDLSB] Link Speed
+ 0 0 10 Mb/s
+ 0 1 100 Mb/s
+ 1 0 1000 Mb/s
+ 1 1 reserved
+ \</pre\> */
+ uint64_t uni : 1; /**< [ 5: 5](R/W) Unidirectional (IEEE 802.3-2005, Clause 66.2). When set to 1, this bit overrides [AN_EN]
+ and
+ disables the autonegotiation variable mr_an_enable. Used in both 1000BASE-X and
+ SGMII/QSGMII
+ modes. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t uni : 1; /**< [ 5: 5](R/W) Unidirectional (IEEE 802.3-2005, Clause 66.2). When set to 1, this bit overrides [AN_EN]
+ and
+ disables the autonegotiation variable mr_an_enable. Used in both 1000BASE-X and
+ SGMII/QSGMII
+ modes. */
+ uint64_t spdmsb : 1; /**< [ 6: 6](R/W) Link speed most-significant bit, i.e SPD\<1\>; effective only if autonegotiation is
+ disabled.
+
+ \<pre\>
+ [SPDMSB] [SPDLSB] Link Speed
+ 0 0 10 Mb/s
+ 0 1 100 Mb/s
+ 1 0 1000 Mb/s
+ 1 1 reserved
+ \</pre\> */
+ uint64_t coltst : 1; /**< [ 7: 7](R/W) Enable COL signal test.
+ During COL test, the COL signal reflects the GMII TX_EN signal with less than 16BT delay. */
+ uint64_t dup : 1; /**< [ 8: 8](R/W) Duplex mode:
+ 0 = half duplex; effective only if autonegotiation is disabled.
+ 1 = full duplex.
+
+ If BGX()_GMP_PCS_MR()_STATUS \<15:9\> and
+ BGX()_GMP_PCS_AN()_ADV\<15:12\> allow only one duplex mode, this bit corresponds to
+ that value and any attempts to write are ignored. */
+ uint64_t rst_an : 1; /**< [ 9: 9](R/W/H) Reset autonegotiation. When set, if [AN_EN] = 1 and
+ BGX()_GMP_PCS_MR()_STATUS[AN_ABIL] = 1, autonegotiation begins. Otherwise,
+ software write requests are ignored and this bit remains at 0. This bit clears itself to
+ 0, when autonegotiation starts. */
+ uint64_t reserved_10 : 1;
+ uint64_t pwr_dn : 1; /**< [ 11: 11](R/W) Power down:
+ 0 = Normal operation.
+ 1 = Power down (hardware reset). */
+ uint64_t an_en : 1; /**< [ 12: 12](R/W) Autonegotiation enable. */
+ uint64_t spdlsb : 1; /**< [ 13: 13](R/W) Least-significant bit of the link-speed field, i.e. SPD\<0\>. Refer to SPDMSB. */
+ uint64_t loopbck1 : 1; /**< [ 14: 14](R/W) Enable loopback:
+ 0 = Normal operation.
+ 1 = Internal loopback mode.
+
+ The loopback mode returns loopback TX data from GMII TX back to GMII RX interface. The
+ loopback happens in the PCS module. Autonegotiation is disabled even if [AN_EN] is set
+ during loopback. */
+ uint64_t reset : 1; /**< [ 15: 15](R/W/H) Set to reset.
+ 0 = Normal operation.
+ 1 = Software PCS reset.
+
+ The bit returns to 0 after PCS has been reset. Takes 32 coprocessor-clock cycles to reset
+ PCS. This bit, when set, also drains the tx gmi fifo and can be used as a fifo draining
+ mechanism for both SerDes reset conditions and for XCV reset conditions. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_mrx_control_s cn81xx; */
+ /* cn88xx: bit layout identical to _s; only the [UNI] description differs
+    (CN88XX mentions SGMII only, not QSGMII). */
+ struct bdk_bgxx_gmp_pcs_mrx_control_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t reset : 1; /**< [ 15: 15](R/W/H) Set to reset.
+ 0 = Normal operation.
+ 1 = Software PCS reset.
+
+ The bit returns to 0 after PCS has been reset. Takes 32 coprocessor-clock cycles to reset
+ PCS. This bit, when set, also drains the tx gmi fifo and can be used as a fifo draining
+ mechanism for both SerDes reset conditions and for XCV reset conditions. */
+ uint64_t loopbck1 : 1; /**< [ 14: 14](R/W) Enable loopback:
+ 0 = Normal operation.
+ 1 = Internal loopback mode.
+
+ The loopback mode returns loopback TX data from GMII TX back to GMII RX interface. The
+ loopback happens in the PCS module. Autonegotiation is disabled even if [AN_EN] is set
+ during loopback. */
+ uint64_t spdlsb : 1; /**< [ 13: 13](R/W) Least-significant bit of the link-speed field, i.e. SPD\<0\>. Refer to SPDMSB. */
+ uint64_t an_en : 1; /**< [ 12: 12](R/W) Autonegotiation enable. */
+ uint64_t pwr_dn : 1; /**< [ 11: 11](R/W) Power down:
+ 0 = Normal operation.
+ 1 = Power down (hardware reset). */
+ uint64_t reserved_10 : 1;
+ uint64_t rst_an : 1; /**< [ 9: 9](R/W/H) Reset autonegotiation. When set, if [AN_EN] = 1 and
+ BGX()_GMP_PCS_MR()_STATUS[AN_ABIL] = 1, autonegotiation begins. Otherwise,
+ software write requests are ignored and this bit remains at 0. This bit clears itself to
+ 0, when autonegotiation starts. */
+ uint64_t dup : 1; /**< [ 8: 8](R/W) Duplex mode:
+ 0 = half duplex; effective only if autonegotiation is disabled.
+ 1 = full duplex.
+
+ If BGX()_GMP_PCS_MR()_STATUS \<15:9\> and
+ BGX()_GMP_PCS_AN()_ADV\<15:12\> allow only one duplex mode, this bit corresponds to
+ that value and any attempts to write are ignored. */
+ uint64_t coltst : 1; /**< [ 7: 7](R/W) Enable COL signal test.
+ During COL test, the COL signal reflects the GMII TX_EN signal with less than 16BT delay. */
+ uint64_t spdmsb : 1; /**< [ 6: 6](R/W) Link speed most-significant bit, i.e SPD\<1\>; effective only if autonegotiation is
+ disabled.
+
+ \<pre\>
+ [SPDMSB] [SPDLSB] Link Speed
+ 0 0 10 Mb/s
+ 0 1 100 Mb/s
+ 1 0 1000 Mb/s
+ 1 1 reserved
+ \</pre\> */
+ uint64_t uni : 1; /**< [ 5: 5](R/W) Unidirectional (IEEE 802.3-2005, Clause 66.2). When set to 1, this bit overrides [AN_EN]
+ and
+ disables the autonegotiation variable mr_an_enable. Used in both 1000BASE-X and SGMII
+ modes. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t uni : 1; /**< [ 5: 5](R/W) Unidirectional (IEEE 802.3-2005, Clause 66.2). When set to 1, this bit overrides [AN_EN]
+ and
+ disables the autonegotiation variable mr_an_enable. Used in both 1000BASE-X and SGMII
+ modes. */
+ uint64_t spdmsb : 1; /**< [ 6: 6](R/W) Link speed most-significant bit, i.e SPD\<1\>; effective only if autonegotiation is
+ disabled.
+
+ \<pre\>
+ [SPDMSB] [SPDLSB] Link Speed
+ 0 0 10 Mb/s
+ 0 1 100 Mb/s
+ 1 0 1000 Mb/s
+ 1 1 reserved
+ \</pre\> */
+ uint64_t coltst : 1; /**< [ 7: 7](R/W) Enable COL signal test.
+ During COL test, the COL signal reflects the GMII TX_EN signal with less than 16BT delay. */
+ uint64_t dup : 1; /**< [ 8: 8](R/W) Duplex mode:
+ 0 = half duplex; effective only if autonegotiation is disabled.
+ 1 = full duplex.
+
+ If BGX()_GMP_PCS_MR()_STATUS \<15:9\> and
+ BGX()_GMP_PCS_AN()_ADV\<15:12\> allow only one duplex mode, this bit corresponds to
+ that value and any attempts to write are ignored. */
+ uint64_t rst_an : 1; /**< [ 9: 9](R/W/H) Reset autonegotiation. When set, if [AN_EN] = 1 and
+ BGX()_GMP_PCS_MR()_STATUS[AN_ABIL] = 1, autonegotiation begins. Otherwise,
+ software write requests are ignored and this bit remains at 0. This bit clears itself to
+ 0, when autonegotiation starts. */
+ uint64_t reserved_10 : 1;
+ uint64_t pwr_dn : 1; /**< [ 11: 11](R/W) Power down:
+ 0 = Normal operation.
+ 1 = Power down (hardware reset). */
+ uint64_t an_en : 1; /**< [ 12: 12](R/W) Autonegotiation enable. */
+ uint64_t spdlsb : 1; /**< [ 13: 13](R/W) Least-significant bit of the link-speed field, i.e. SPD\<0\>. Refer to SPDMSB. */
+ uint64_t loopbck1 : 1; /**< [ 14: 14](R/W) Enable loopback:
+ 0 = Normal operation.
+ 1 = Internal loopback mode.
+
+ The loopback mode returns loopback TX data from GMII TX back to GMII RX interface. The
+ loopback happens in the PCS module. Autonegotiation is disabled even if [AN_EN] is set
+ during loopback. */
+ uint64_t reset : 1; /**< [ 15: 15](R/W/H) Set to reset.
+ 0 = Normal operation.
+ 1 = Software PCS reset.
+
+ The bit returns to 0 after PCS has been reset. Takes 32 coprocessor-clock cycles to reset
+ PCS. This bit, when set, also drains the tx gmi fifo and can be used as a fifo draining
+ mechanism for both SerDes reset conditions and for XCV reset conditions. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_gmp_pcs_mrx_control_s cn83xx; */
+};
+typedef union bdk_bgxx_gmp_pcs_mrx_control bdk_bgxx_gmp_pcs_mrx_control_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_MRX_CONTROL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_GMP_PCS_MR(b)_CONTROL
+   for the chip model detected at run time. Ranges are checked per model
+   (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX; b<=3 on all). Out-of-range
+   arguments or an unknown model fall through to __bdk_csr_fatal(), which
+   presumably does not return -- the function otherwise ends without a
+   return value (NOTE(review): confirm it is declared noreturn). */
+static inline uint64_t BDK_BGXX_GMP_PCS_MRX_CONTROL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030000ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030000ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030000ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_MRX_CONTROL", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_GMP_PCS_MRX_CONTROL(a,b) bdk_bgxx_gmp_pcs_mrx_control_t
+#define bustype_BDK_BGXX_GMP_PCS_MRX_CONTROL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_MRX_CONTROL(a,b) "BGXX_GMP_PCS_MRX_CONTROL"
+#define device_bar_BDK_BGXX_GMP_PCS_MRX_CONTROL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_MRX_CONTROL(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_MRX_CONTROL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_mr#_status
+ *
+ * BGX GMP PCS Status Registers
+ * Bits \<15:9\> in this register indicate the ability to operate when
+ * BGX()_GMP_PCS_MISC()_CTL[MAC_PHY] is set to MAC mode. Bits \<15:9\> are always read as
+ * 0, indicating that the chip cannot operate in the corresponding modes. The field [RM_FLT] is a
+ * 'don't care' when the selected mode is SGMII.
+ */
+union bdk_bgxx_gmp_pcs_mrx_status
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_mrx_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t hun_t4 : 1; /**< [ 15: 15](RO/H) Indicates 100BASE-T4 capable. */
+ uint64_t hun_xfd : 1; /**< [ 14: 14](RO/H) Indicates 100BASE-X full duplex. */
+ uint64_t hun_xhd : 1; /**< [ 13: 13](RO/H) Indicates 100BASE-X half duplex. */
+ uint64_t ten_fd : 1; /**< [ 12: 12](RO/H) Indicates 10Mb/s full duplex. */
+ uint64_t ten_hd : 1; /**< [ 11: 11](RO/H) Indicates 10Mb/s half duplex. */
+ uint64_t hun_t2fd : 1; /**< [ 10: 10](RO/H) Indicates 100BASE-T2 full duplex. */
+ uint64_t hun_t2hd : 1; /**< [ 9: 9](RO/H) Indicates 100BASE-T2 half duplex. */
+ uint64_t ext_st : 1; /**< [ 8: 8](RO/H) Extended status information. When set to 1, indicates that additional status data is
+ available in BGX()_GMP_PCS_AN()_EXT_ST. */
+ uint64_t reserved_7 : 1;
+ uint64_t prb_sup : 1; /**< [ 6: 6](RO/H) Preamble not needed.
+ 0 = Cannot accept frames without preamble bytes.
+ 1 = Can work without preamble bytes at the beginning of frames. */
+ uint64_t an_cpt : 1; /**< [ 5: 5](RO/H) Indicates autonegotiation is complete; the contents of the
+ BGX()_GMP_PCS_AN()_RESULTS are valid. */
+ uint64_t rm_flt : 1; /**< [ 4: 4](RO/H) Indicates remote fault condition occurred. This bit implements a latching-high behavior.
+ It is cleared when software reads this register or when
+ BGX()_GMP_PCS_MR()_CONTROL[RESET] is asserted.
+ See BGX()_GMP_PCS_AN()_ADV[REM_FLT] for fault conditions. */
+ uint64_t an_abil : 1; /**< [ 3: 3](RO/H) Indicates autonegotiation capable. */
+ uint64_t lnk_st : 1; /**< [ 2: 2](RO/H) Link state:
+ 0 = link down.
+ 1 = link up.
+
+ Set during autonegotiation process. Set whenever XMIT = DATA. Latching-low behavior when
+ link goes down. Link down value of the bit stays low until software reads the register. */
+ uint64_t reserved_1 : 1;
+ uint64_t extnd : 1; /**< [ 0: 0](RO/H) This field is always 0, extended capability registers not present. */
+#else /* Word 0 - Little Endian */
+ uint64_t extnd : 1; /**< [ 0: 0](RO/H) This field is always 0, extended capability registers not present. */
+ uint64_t reserved_1 : 1;
+ uint64_t lnk_st : 1; /**< [ 2: 2](RO/H) Link state:
+ 0 = link down.
+ 1 = link up.
+
+ Set during autonegotiation process. Set whenever XMIT = DATA. Latching-low behavior when
+ link goes down. Link down value of the bit stays low until software reads the register. */
+ uint64_t an_abil : 1; /**< [ 3: 3](RO/H) Indicates autonegotiation capable. */
+ uint64_t rm_flt : 1; /**< [ 4: 4](RO/H) Indicates remote fault condition occurred. This bit implements a latching-high behavior.
+ It is cleared when software reads this register or when
+ BGX()_GMP_PCS_MR()_CONTROL[RESET] is asserted.
+ See BGX()_GMP_PCS_AN()_ADV[REM_FLT] for fault conditions. */
+ uint64_t an_cpt : 1; /**< [ 5: 5](RO/H) Indicates autonegotiation is complete; the contents of the
+ BGX()_GMP_PCS_AN()_RESULTS are valid. */
+ uint64_t prb_sup : 1; /**< [ 6: 6](RO/H) Preamble not needed.
+ 0 = Cannot accept frames without preamble bytes.
+ 1 = Can work without preamble bytes at the beginning of frames. */
+ uint64_t reserved_7 : 1;
+ uint64_t ext_st : 1; /**< [ 8: 8](RO/H) Extended status information. When set to 1, indicates that additional status data is
+ available in BGX()_GMP_PCS_AN()_EXT_ST. */
+ uint64_t hun_t2hd : 1; /**< [ 9: 9](RO/H) Indicates 100BASE-T2 half duplex. */
+ uint64_t hun_t2fd : 1; /**< [ 10: 10](RO/H) Indicates 100BASE-T2 full duplex. */
+ uint64_t ten_hd : 1; /**< [ 11: 11](RO/H) Indicates 10Mb/s half duplex. */
+ uint64_t ten_fd : 1; /**< [ 12: 12](RO/H) Indicates 10Mb/s full duplex. */
+ uint64_t hun_xhd : 1; /**< [ 13: 13](RO/H) Indicates 100BASE-X half duplex. */
+ uint64_t hun_xfd : 1; /**< [ 14: 14](RO/H) Indicates 100BASE-X full duplex. */
+ uint64_t hun_t4 : 1; /**< [ 15: 15](RO/H) Indicates 100BASE-T4 capable. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_mrx_status_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_mrx_status bdk_bgxx_gmp_pcs_mrx_status_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_MRX_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_GMP_PCS_MR(b)_STATUS
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX; b<=3).
+   Invalid arguments or an unknown model fall through to __bdk_csr_fatal(),
+   which presumably does not return (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_GMP_PCS_MRX_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030008ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030008ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030008ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_MRX_STATUS", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_GMP_PCS_MRX_STATUS(a,b) bdk_bgxx_gmp_pcs_mrx_status_t
+#define bustype_BDK_BGXX_GMP_PCS_MRX_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_MRX_STATUS(a,b) "BGXX_GMP_PCS_MRX_STATUS"
+#define device_bar_BDK_BGXX_GMP_PCS_MRX_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_MRX_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_MRX_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_rx#_states
+ *
+ * BGX GMP PCS RX State-Machines States Registers
+ */
+union bdk_bgxx_gmp_pcs_rxx_states
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_rxx_states_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t rx_bad : 1; /**< [ 15: 15](RO/H) Receive state machine is in an illegal state. */
+ uint64_t rx_st : 5; /**< [ 14: 10](RO/H) Receive state-machine state. */
+ uint64_t sync_bad : 1; /**< [ 9: 9](RO/H) Receive synchronization state machine is in an illegal state. */
+ uint64_t sync : 4; /**< [ 8: 5](RO/H) Receive synchronization state-machine state. */
+ uint64_t an_bad : 1; /**< [ 4: 4](RO/H) Autonegotiation state machine is in an illegal state. */
+ uint64_t an_st : 4; /**< [ 3: 0](RO/H) Autonegotiation state-machine state. */
+#else /* Word 0 - Little Endian */
+ uint64_t an_st : 4; /**< [ 3: 0](RO/H) Autonegotiation state-machine state. */
+ uint64_t an_bad : 1; /**< [ 4: 4](RO/H) Autonegotiation state machine is in an illegal state. */
+ uint64_t sync : 4; /**< [ 8: 5](RO/H) Receive synchronization state-machine state. */
+ uint64_t sync_bad : 1; /**< [ 9: 9](RO/H) Receive synchronization state machine is in an illegal state. */
+ uint64_t rx_st : 5; /**< [ 14: 10](RO/H) Receive state-machine state. */
+ uint64_t rx_bad : 1; /**< [ 15: 15](RO/H) Receive state machine is in an illegal state. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_rxx_states_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_rxx_states bdk_bgxx_gmp_pcs_rxx_states_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_RXX_STATES(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_GMP_PCS_RX(b)_STATES
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX; b<=3).
+   Invalid arguments or an unknown model fall through to __bdk_csr_fatal(),
+   which presumably does not return (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_GMP_PCS_RXX_STATES(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030058ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_RXX_STATES", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_GMP_PCS_RXX_STATES(a,b) bdk_bgxx_gmp_pcs_rxx_states_t
+#define bustype_BDK_BGXX_GMP_PCS_RXX_STATES(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_RXX_STATES(a,b) "BGXX_GMP_PCS_RXX_STATES"
+#define device_bar_BDK_BGXX_GMP_PCS_RXX_STATES(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_RXX_STATES(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_RXX_STATES(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_rx#_sync
+ *
+ * BGX GMP PCS Code Group Synchronization Registers
+ */
+union bdk_bgxx_gmp_pcs_rxx_sync
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_rxx_sync_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t sync : 1; /**< [ 1: 1](RO/H) When set, code group synchronization achieved. */
+ uint64_t bit_lock : 1; /**< [ 0: 0](RO/H) When set, bit lock achieved. */
+#else /* Word 0 - Little Endian */
+ uint64_t bit_lock : 1; /**< [ 0: 0](RO/H) When set, bit lock achieved. */
+ uint64_t sync : 1; /**< [ 1: 1](RO/H) When set, code group synchronization achieved. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_rxx_sync_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_rxx_sync bdk_bgxx_gmp_pcs_rxx_sync_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_RXX_SYNC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_GMP_PCS_RX(b)_SYNC
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX; b<=3).
+   Invalid arguments or an unknown model fall through to __bdk_csr_fatal(),
+   which presumably does not return (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_GMP_PCS_RXX_SYNC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030050ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030050ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030050ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_RXX_SYNC", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_GMP_PCS_RXX_SYNC(a,b) bdk_bgxx_gmp_pcs_rxx_sync_t
+#define bustype_BDK_BGXX_GMP_PCS_RXX_SYNC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_RXX_SYNC(a,b) "BGXX_GMP_PCS_RXX_SYNC"
+#define device_bar_BDK_BGXX_GMP_PCS_RXX_SYNC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_RXX_SYNC(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_RXX_SYNC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_sgm#_an_adv
+ *
+ * BGX GMP PCS SGMII Autonegotiation Advertisement Registers
+ * This is the SGMII autonegotiation advertisement register (sent out as tx_Config_Reg\<15:0\> as
+ * defined in IEEE 802.3 clause 37). This register is sent during autonegotiation if
+ * BGX()_GMP_PCS_MISC()_CTL[MAC_PHY] is set (1 = PHY mode). If the bit is not set (0 =
+ * MAC mode), then tx_Config_Reg\<14\> becomes ACK bit and tx_Config_Reg\<0\> is always 1. All other
+ * bits in tx_Config_Reg sent will be 0. The PHY dictates the autonegotiation results.
+ */
+union bdk_bgxx_gmp_pcs_sgmx_an_adv
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_sgmx_an_adv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t link : 1; /**< [ 15: 15](R/W) Link status: 1 = Link up. 0 = Link down. */
+ uint64_t ack : 1; /**< [ 14: 14](RO/H) Autonegotiation acknowledgement. */
+ uint64_t reserved_13 : 1;
+ uint64_t dup : 1; /**< [ 12: 12](R/W) Duplex mode: 1 = full duplex, 0 = half duplex. */
+ uint64_t speed : 2; /**< [ 11: 10](R/W) Link speed:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t reserved_1_9 : 9;
+ uint64_t one : 1; /**< [ 0: 0](RO/H) Always set to match tx_Config_Reg\<0\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t one : 1; /**< [ 0: 0](RO/H) Always set to match tx_Config_Reg\<0\>. */
+ uint64_t reserved_1_9 : 9;
+ uint64_t speed : 2; /**< [ 11: 10](R/W) Link speed:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t dup : 1; /**< [ 12: 12](R/W) Duplex mode: 1 = full duplex, 0 = half duplex. */
+ uint64_t reserved_13 : 1;
+ uint64_t ack : 1; /**< [ 14: 14](RO/H) Autonegotiation acknowledgement. */
+ uint64_t link : 1; /**< [ 15: 15](R/W) Link status: 1 = Link up. 0 = Link down. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_sgmx_an_adv_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_sgmx_an_adv bdk_bgxx_gmp_pcs_sgmx_an_adv_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_SGMX_AN_ADV(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_GMP_PCS_SGM(b)_AN_ADV
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX; b<=3).
+   Invalid arguments or an unknown model fall through to __bdk_csr_fatal(),
+   which presumably does not return (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_GMP_PCS_SGMX_AN_ADV(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030068ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030068ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030068ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_SGMX_AN_ADV", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_GMP_PCS_SGMX_AN_ADV(a,b) bdk_bgxx_gmp_pcs_sgmx_an_adv_t
+#define bustype_BDK_BGXX_GMP_PCS_SGMX_AN_ADV(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_SGMX_AN_ADV(a,b) "BGXX_GMP_PCS_SGMX_AN_ADV"
+#define device_bar_BDK_BGXX_GMP_PCS_SGMX_AN_ADV(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_SGMX_AN_ADV(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_SGMX_AN_ADV(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_sgm#_lp_adv
+ *
+ * BGX GMP PCS SGMII Link-Partner-Advertisement Registers
+ * This is the SGMII link partner advertisement register (received as rx_Config_Reg\<15:0\> as
+ * defined in IEEE 802.3 clause 37).
+ */
+union bdk_bgxx_gmp_pcs_sgmx_lp_adv
+{
+ /* The two #if arms in each struct are endian-mirrored views of the same 64-bit register word.
+    The 'cn' variant differs from 's' only in how bits \<14:13\> are named as reserved fields. */
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_sgmx_lp_adv_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t link : 1; /**< [ 15: 15](RO/H) Link status: 1 = Link up. 0 = Link down. */
+ uint64_t reserved_13_14 : 2;
+ uint64_t dup : 1; /**< [ 12: 12](RO/H) Duplex mode: 1 = Full duplex, 0 = Half duplex. */
+ uint64_t speed : 2; /**< [ 11: 10](RO/H) Link speed:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t reserved_1_9 : 9;
+ uint64_t one : 1; /**< [ 0: 0](RO/H) Always set to match tx_Config_Reg\<0\> */
+#else /* Word 0 - Little Endian */
+ uint64_t one : 1; /**< [ 0: 0](RO/H) Always set to match tx_Config_Reg\<0\> */
+ uint64_t reserved_1_9 : 9;
+ uint64_t speed : 2; /**< [ 11: 10](RO/H) Link speed:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t dup : 1; /**< [ 12: 12](RO/H) Duplex mode: 1 = Full duplex, 0 = Half duplex. */
+ uint64_t reserved_13_14 : 2;
+ uint64_t link : 1; /**< [ 15: 15](RO/H) Link status: 1 = Link up. 0 = Link down. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_bgxx_gmp_pcs_sgmx_lp_adv_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t link : 1; /**< [ 15: 15](RO/H) Link status: 1 = Link up. 0 = Link down. */
+ uint64_t reserved_14 : 1;
+ uint64_t reserved_13 : 1;
+ uint64_t dup : 1; /**< [ 12: 12](RO/H) Duplex mode: 1 = Full duplex, 0 = Half duplex. */
+ uint64_t speed : 2; /**< [ 11: 10](RO/H) Link speed:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t reserved_1_9 : 9;
+ uint64_t one : 1; /**< [ 0: 0](RO/H) Always set to match tx_Config_Reg\<0\> */
+#else /* Word 0 - Little Endian */
+ uint64_t one : 1; /**< [ 0: 0](RO/H) Always set to match tx_Config_Reg\<0\> */
+ uint64_t reserved_1_9 : 9;
+ uint64_t speed : 2; /**< [ 11: 10](RO/H) Link speed:
+ 0x0 = 10 Mb/s.
+ 0x1 = 100 Mb/s.
+ 0x2 = 1000 Mb/s.
+ 0x3 = Reserved. */
+ uint64_t dup : 1; /**< [ 12: 12](RO/H) Duplex mode: 1 = Full duplex, 0 = Half duplex. */
+ uint64_t reserved_13 : 1;
+ uint64_t reserved_14 : 1;
+ uint64_t link : 1; /**< [ 15: 15](RO/H) Link status: 1 = Link up. 0 = Link down. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_bgxx_gmp_pcs_sgmx_lp_adv bdk_bgxx_gmp_pcs_sgmx_lp_adv_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_SGMX_LP_ADV(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_GMP_PCS_SGM(b)_LP_ADV
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX; b<=3).
+   Invalid arguments or an unknown model fall through to __bdk_csr_fatal(),
+   which presumably does not return (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_GMP_PCS_SGMX_LP_ADV(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030070ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030070ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030070ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_SGMX_LP_ADV", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_GMP_PCS_SGMX_LP_ADV(a,b) bdk_bgxx_gmp_pcs_sgmx_lp_adv_t
+#define bustype_BDK_BGXX_GMP_PCS_SGMX_LP_ADV(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_SGMX_LP_ADV(a,b) "BGXX_GMP_PCS_SGMX_LP_ADV"
+#define device_bar_BDK_BGXX_GMP_PCS_SGMX_LP_ADV(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_SGMX_LP_ADV(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_SGMX_LP_ADV(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_tx#_states
+ *
+ * BGX GMP PCS TX State-Machines States Registers
+ */
+union bdk_bgxx_gmp_pcs_txx_states
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_txx_states_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t xmit : 2; /**< [ 6: 5](RO/H) 0x0 = Undefined.
+ 0x1 = Config.
+ 0x2 = Idle.
+ 0x3 = Data. */
+ uint64_t tx_bad : 1; /**< [ 4: 4](RO/H) Transmit state machine in an illegal state. */
+ uint64_t ord_st : 4; /**< [ 3: 0](RO/H) Transmit ordered set state-machine state. */
+#else /* Word 0 - Little Endian */
+ uint64_t ord_st : 4; /**< [ 3: 0](RO/H) Transmit ordered set state-machine state. */
+ uint64_t tx_bad : 1; /**< [ 4: 4](RO/H) Transmit state machine in an illegal state. */
+ uint64_t xmit : 2; /**< [ 6: 5](RO/H) 0x0 = Undefined.
+ 0x1 = Config.
+ 0x2 = Idle.
+ 0x3 = Data. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_txx_states_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_txx_states bdk_bgxx_gmp_pcs_txx_states_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_TXX_STATES(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_GMP_PCS_TX(b)_STATES
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX; b<=3).
+   Invalid arguments or an unknown model fall through to __bdk_csr_fatal(),
+   which presumably does not return (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_GMP_PCS_TXX_STATES(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030060ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030060ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030060ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_TXX_STATES", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_GMP_PCS_TXX_STATES(a,b) bdk_bgxx_gmp_pcs_txx_states_t
+#define bustype_BDK_BGXX_GMP_PCS_TXX_STATES(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_TXX_STATES(a,b) "BGXX_GMP_PCS_TXX_STATES"
+#define device_bar_BDK_BGXX_GMP_PCS_TXX_STATES(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_TXX_STATES(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_TXX_STATES(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_gmp_pcs_tx_rx#_polarity
+ *
+ * BGX GMP PCS TX/RX Polarity Registers
+ * BGX()_GMP_PCS_TX_RX()_POLARITY[AUTORXPL] shows correct polarity needed on the link
+ * receive path after code group synchronization is achieved.
+ */
+union bdk_bgxx_gmp_pcs_tx_rxx_polarity
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_gmp_pcs_tx_rxx_polarity_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t rxovrd : 1; /**< [ 3: 3](R/W) RX polarity override.
+ 0 = AUTORXPL determines polarity.
+ 1 = [RXPLRT] determines polarity. */
+ uint64_t autorxpl : 1; /**< [ 2: 2](RO/H) Auto RX polarity detected:
+ 0 = Normal polarity.
+ 1 = Inverted polarity.
+
+ This bit always represents the correct RX polarity setting needed for successful RX path
+ operation, once a successful code group sync is obtained. */
+ uint64_t rxplrt : 1; /**< [ 1: 1](R/W) RX polarity: 0 = Normal polarity, 1 = Inverted polarity. */
+ uint64_t txplrt : 1; /**< [ 0: 0](R/W) TX polarity: 0 = Normal polarity, 1 = Inverted polarity. */
+#else /* Word 0 - Little Endian */
+ uint64_t txplrt : 1; /**< [ 0: 0](R/W) TX polarity: 0 = Normal polarity, 1 = Inverted polarity. */
+ uint64_t rxplrt : 1; /**< [ 1: 1](R/W) RX polarity: 0 = Normal polarity, 1 = Inverted polarity. */
+ uint64_t autorxpl : 1; /**< [ 2: 2](RO/H) Auto RX polarity detected:
+ 0 = Normal polarity.
+ 1 = Inverted polarity.
+
+ This bit always represents the correct RX polarity setting needed for successful RX path
+ operation, once a successful code group sync is obtained. */
+ uint64_t rxovrd : 1; /**< [ 3: 3](R/W) RX polarity override.
+ 0 = AUTORXPL determines polarity.
+ 1 = [RXPLRT] determines polarity. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_gmp_pcs_tx_rxx_polarity_s cn; */
+};
+typedef union bdk_bgxx_gmp_pcs_tx_rxx_polarity bdk_bgxx_gmp_pcs_tx_rxx_polarity_t;
+
+static inline uint64_t BDK_BGXX_GMP_PCS_TX_RXX_POLARITY(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_GMP_PCS_TX_RX(b)_POLARITY
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX; b<=3).
+   Invalid arguments or an unknown model fall through to __bdk_csr_fatal(),
+   which presumably does not return (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_GMP_PCS_TX_RXX_POLARITY(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030048ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0030048ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0030048ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_GMP_PCS_TX_RXX_POLARITY", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_GMP_PCS_TX_RXX_POLARITY(a,b) bdk_bgxx_gmp_pcs_tx_rxx_polarity_t
+#define bustype_BDK_BGXX_GMP_PCS_TX_RXX_POLARITY(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_GMP_PCS_TX_RXX_POLARITY(a,b) "BGXX_GMP_PCS_TX_RXX_POLARITY"
+#define device_bar_BDK_BGXX_GMP_PCS_TX_RXX_POLARITY(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_GMP_PCS_TX_RXX_POLARITY(a,b) (a)
+#define arguments_BDK_BGXX_GMP_PCS_TX_RXX_POLARITY(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_msix_pba#
+ *
+ * BGX MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table, the bit number is indexed by the BGX_INT_VEC_E
+ * enumeration.
+ */
+union bdk_bgxx_msix_pbax
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated BGX()_MSIX_VEC()_CTL, enumerated by BGX_INT_VEC_E.
+ Bits that have no associated BGX_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated BGX()_MSIX_VEC()_CTL, enumerated by BGX_INT_VEC_E.
+ Bits that have no associated BGX_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_msix_pbax_s cn; */
+};
+typedef union bdk_bgxx_msix_pbax bdk_bgxx_msix_pbax_t;
+
+static inline uint64_t BDK_BGXX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_MSIX_PBA(b)
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX;
+   b must be 0 -- there is a single PBA word). Invalid arguments or an
+   unknown model fall through to __bdk_csr_fatal(), which presumably does
+   not return (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x87e0e04f0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b==0)))
+ return 0x87e0e04f0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x87e0e04f0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("BGXX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_MSIX_PBAX(a,b) bdk_bgxx_msix_pbax_t
+#define bustype_BDK_BGXX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_MSIX_PBAX(a,b) "BGXX_MSIX_PBAX"
+#define device_bar_BDK_BGXX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_BGXX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_BGXX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_msix_vec#_addr
+ *
+ * BGX MSI-X Vector Table Address Registers
+ * This register is the MSI-X vector table, indexed by the BGX_INT_VEC_E enumeration.
+ */
+union bdk_bgxx_msix_vecx_addr
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's BGX()_MSIX_VEC()_ADDR, BGX()_MSIX_VEC()_CTL, and corresponding
+ bit of BGX()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_BGX()_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+ is set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's BGX()_MSIX_VEC()_ADDR, BGX()_MSIX_VEC()_CTL, and corresponding
+ bit of BGX()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_BGX()_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+ is set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_msix_vecx_addr_s cn; */
+};
+typedef union bdk_bgxx_msix_vecx_addr bdk_bgxx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_BGXX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_MSIX_VEC(b)_ADDR
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX;
+   b<=29 vectors, 16 bytes apart). Invalid arguments or an unknown model
+   fall through to __bdk_csr_fatal(), which presumably does not return
+   (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=29)))
+ return 0x87e0e0400000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=29)))
+ return 0x87e0e0400000ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=29)))
+ return 0x87e0e0400000ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1f);
+ __bdk_csr_fatal("BGXX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_MSIX_VECX_ADDR(a,b) bdk_bgxx_msix_vecx_addr_t
+#define bustype_BDK_BGXX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_MSIX_VECX_ADDR(a,b) "BGXX_MSIX_VECX_ADDR"
+#define device_bar_BDK_BGXX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_BGXX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_BGXX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_msix_vec#_ctl
+ *
+ * BGX MSI-X Vector Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the BGX_INT_VEC_E enumeration.
+ */
+union bdk_bgxx_msix_vecx_ctl
+{
+ /* The two #if arms below are endian-mirrored views of the same 64-bit register word. */
+ uint64_t u;
+ struct bdk_bgxx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_msix_vecx_ctl_s cn; */
+};
+typedef union bdk_bgxx_msix_vecx_ctl bdk_bgxx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_BGXX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Address helper: returns the physical RSL address of BGX(a)_MSIX_VEC(b)_CTL
+   for the detected chip model (a<=1 on CN81XX/CN88XX, a<=3 on CN83XX;
+   b<=29 vectors, 16 bytes apart). Invalid arguments or an unknown model
+   fall through to __bdk_csr_fatal(), which presumably does not return
+   (NOTE(review): confirm noreturn). */
+static inline uint64_t BDK_BGXX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=29)))
+ return 0x87e0e0400008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=29)))
+ return 0x87e0e0400008ll + 0x1000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=29)))
+ return 0x87e0e0400008ll + 0x1000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1f);
+ __bdk_csr_fatal("BGXX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros:
+   C type, bus type, printable name, PCI BAR, and argument list. */
+#define typedef_BDK_BGXX_MSIX_VECX_CTL(a,b) bdk_bgxx_msix_vecx_ctl_t
+#define bustype_BDK_BGXX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_MSIX_VECX_CTL(a,b) "BGXX_MSIX_VECX_CTL"
+#define device_bar_BDK_BGXX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_BGXX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_BGXX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_cbfc_ctl
+ *
+ * BGX SMU PFC Control Registers
+ * Internal:
+ * XOFF for a specific class/channel \<i\> is XOFF\<i\> = ([PHYS_EN]\<i\> & cmr_rx_phys_bp) |
+ * ([LOGL_EN]\<i\> & cmr_rx_logl_xoff\<i\>).
+ */
+union bdk_bgxx_smux_cbfc_ctl
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Bitfield view; the two endian orderings describe the same layout. */
+ struct bdk_bgxx_smux_cbfc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t phys_en : 16; /**< [ 63: 48](R/W) Physical backpressure enable. Determines which classes/channels in the class enable vector
+ field of a transmitted PFC packet can be asserted due to RX physical backpressure. */
+ uint64_t logl_en : 16; /**< [ 47: 32](R/W) Logical backpressure enable. Determines which classes/channels in the class enable vector
+ field of a transmitted PFC packet can be asserted due to RX logical backpressure. */
+ uint64_t reserved_4_31 : 28;
+ uint64_t bck_en : 1; /**< [ 3: 3](R/W) Forward PFC information to the backpressure block. */
+ uint64_t drp_en : 1; /**< [ 2: 2](R/W) Drop-control enable. When set, drop PFC frames. */
+ uint64_t tx_en : 1; /**< [ 1: 1](R/W) Transmit enable. When set, allow for PFC packets. Must be clear in HiGig2 mode
+ i.e. when BGX()_SMU()_TX_CTL[HG_EN] = 1 and BGX()_SMU()_RX_UDD_SKP[LEN] =
+ 16. */
+ uint64_t rx_en : 1; /**< [ 0: 0](R/W) Receive enable. When set, allow for PFC packets. Must be clear in HiGig2 mode
+ i.e. when BGX()_SMU()_TX_CTL[HG_EN] = 1 and BGX()_SMU()_RX_UDD_SKP[LEN] =
+ 16. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_en : 1; /**< [ 0: 0](R/W) Receive enable. When set, allow for PFC packets. Must be clear in HiGig2 mode
+ i.e. when BGX()_SMU()_TX_CTL[HG_EN] = 1 and BGX()_SMU()_RX_UDD_SKP[LEN] =
+ 16. */
+ uint64_t tx_en : 1; /**< [ 1: 1](R/W) Transmit enable. When set, allow for PFC packets. Must be clear in HiGig2 mode
+ i.e. when BGX()_SMU()_TX_CTL[HG_EN] = 1 and BGX()_SMU()_RX_UDD_SKP[LEN] =
+ 16. */
+ uint64_t drp_en : 1; /**< [ 2: 2](R/W) Drop-control enable. When set, drop PFC frames. */
+ uint64_t bck_en : 1; /**< [ 3: 3](R/W) Forward PFC information to the backpressure block. */
+ uint64_t reserved_4_31 : 28;
+ uint64_t logl_en : 16; /**< [ 47: 32](R/W) Logical backpressure enable. Determines which classes/channels in the class enable vector
+ field of a transmitted PFC packet can be asserted due to RX logical backpressure. */
+ uint64_t phys_en : 16; /**< [ 63: 48](R/W) Physical backpressure enable. Determines which classes/channels in the class enable vector
+ field of a transmitted PFC packet can be asserted due to RX physical backpressure. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_cbfc_ctl_s cn; */
+};
+typedef union bdk_bgxx_smux_cbfc_ctl bdk_bgxx_smux_cbfc_ctl_t;
+
+/* Physical address of BGX(a)_SMU(b)_CBFC_CTL. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_CBFC_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_CBFC_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020218ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020218ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020218ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_CBFC_CTL", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_CBFC_CTL(a,b) bdk_bgxx_smux_cbfc_ctl_t
+#define bustype_BDK_BGXX_SMUX_CBFC_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_CBFC_CTL(a,b) "BGXX_SMUX_CBFC_CTL"
+#define device_bar_BDK_BGXX_SMUX_CBFC_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_CBFC_CTL(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_CBFC_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_ctrl
+ *
+ * BGX SMU Control Registers
+ */
+union bdk_bgxx_smux_ctrl
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Bitfield view; the two endian orderings describe the same layout. */
+ struct bdk_bgxx_smux_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t tx_idle : 1; /**< [ 1: 1](RO/H) TX machine is idle. This indication pertains to the framer FSM and ignores the effects on
+ the data-path controls or values which occur when BGX()_SMU()_TX_CTL[LS_BYP] is
+ set. */
+ uint64_t rx_idle : 1; /**< [ 0: 0](RO/H) RX machine is idle. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_idle : 1; /**< [ 0: 0](RO/H) RX machine is idle. */
+ uint64_t tx_idle : 1; /**< [ 1: 1](RO/H) TX machine is idle. This indication pertains to the framer FSM and ignores the effects on
+ the data-path controls or values which occur when BGX()_SMU()_TX_CTL[LS_BYP] is
+ set. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_ctrl_s cn; */
+};
+typedef union bdk_bgxx_smux_ctrl bdk_bgxx_smux_ctrl_t;
+
+/* Physical address of BGX(a)_SMU(b)_CTRL. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020200ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020200ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020200ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_CTRL", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_CTRL(a,b) bdk_bgxx_smux_ctrl_t
+#define bustype_BDK_BGXX_SMUX_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_CTRL(a,b) "BGXX_SMUX_CTRL"
+#define device_bar_BDK_BGXX_SMUX_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_CTRL(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_ext_loopback
+ *
+ * BGX SMU External Loopback Registers
+ * In loopback mode, the IFG1+IFG2 of local and remote parties must match exactly; otherwise one
+ * of the two sides' loopback FIFO will overrun: BGX()_SMU()_TX_INT[LB_OVRFLW].
+ */
+union bdk_bgxx_smux_ext_loopback
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Bitfield view; the two endian orderings describe the same layout. */
+ struct bdk_bgxx_smux_ext_loopback_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t en : 1; /**< [ 4: 4](R/W) Loopback enable. Puts the packet interface in external loopback mode where the RX lines
+ are reflected on the TX lines. */
+ uint64_t thresh : 4; /**< [ 3: 0](R/W) Threshold on the TX FIFO. Software must only write the typical value. Any other value
+ causes loopback mode not to function correctly. */
+#else /* Word 0 - Little Endian */
+ uint64_t thresh : 4; /**< [ 3: 0](R/W) Threshold on the TX FIFO. Software must only write the typical value. Any other value
+ causes loopback mode not to function correctly. */
+ uint64_t en : 1; /**< [ 4: 4](R/W) Loopback enable. Puts the packet interface in external loopback mode where the RX lines
+ are reflected on the TX lines. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_ext_loopback_s cn; */
+};
+typedef union bdk_bgxx_smux_ext_loopback bdk_bgxx_smux_ext_loopback_t;
+
+/* Physical address of BGX(a)_SMU(b)_EXT_LOOPBACK. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_EXT_LOOPBACK(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_EXT_LOOPBACK(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020208ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020208ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020208ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_EXT_LOOPBACK", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_EXT_LOOPBACK(a,b) bdk_bgxx_smux_ext_loopback_t
+#define bustype_BDK_BGXX_SMUX_EXT_LOOPBACK(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_EXT_LOOPBACK(a,b) "BGXX_SMUX_EXT_LOOPBACK"
+#define device_bar_BDK_BGXX_SMUX_EXT_LOOPBACK(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_EXT_LOOPBACK(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_EXT_LOOPBACK(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_hg2_control
+ *
+ * BGX SMU HiGig2 Control Registers
+ * HiGig2 TX- and RX-enable are normally set together for HiGig2 messaging. Setting just the TX
+ * or RX bit results in only the HG2 message transmit or receive capability.
+ *
+ * Setting [PHYS_EN] and [LOGL_EN] to 1 allows link PAUSE or backpressure to TNS/NIC as per the
+ * received HiGig2 message. Setting these fields to 0 disables link PAUSE and backpressure to
+ * TNS/NIC
+ * in response to received messages.
+ *
+ * BGX()_SMU()_TX_CTL[HG_EN] must be set (to enable HiGig) whenever either [HG2TX_EN] or
+ * [HG2RX_EN] are set. BGX()_SMU()_RX_UDD_SKP[LEN] must be set to 16 (to select HiGig2)
+ * whenever either [HG2TX_EN] or [HG2RX_EN] are set.
+ *
+ * BGX()_CMR_RX_OVR_BP[EN]\<0\> must be set and BGX()_CMR_RX_OVR_BP[BP]\<0\> must be cleared
+ * to 0 (to forcibly disable hardware-automatic 802.3 PAUSE packet generation) with the HiGig2
+ * Protocol when [HG2TX_EN] = 0. (The HiGig2 protocol is indicated
+ * by BGX()_SMU()_TX_CTL[HG_EN] = 1 and BGX()_SMU()_RX_UDD_SKP[LEN]=16.) Hardware
+ * can only autogenerate backpressure via HiGig2 messages (optionally, when [HG2TX_EN] = 1) with
+ * the HiGig2 protocol.
+ */
+union bdk_bgxx_smux_hg2_control
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Common layout (per the trailing aliases, shared by CN81XX and CN83XX);
+    field comments reference PKO as the backpressure target. */
+ struct bdk_bgxx_smux_hg2_control_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t hg2tx_en : 1; /**< [ 18: 18](R/W) Enable transmission of HG2 physical and logical messages. When set, also disables hardware
+ autogenerated 802.3 PAUSE and PFC frames. (CNXXXX cannot generate proper 802.3 or
+ PFC frames in HiGig2 mode.) */
+ uint64_t hg2rx_en : 1; /**< [ 17: 17](R/W) Enable extraction and processing of HG2 message packet from RX flow. Physical and logical
+ PAUSE information is used to PAUSE physical-link, backpressure PKO. This field must be set
+ when HiGig2 messages are present in the receive stream. This bit is also forwarded to CMR
+ so it can generate the required deferring signals to SMU TX and backpressure signals to
+ PKO. */
+ uint64_t phys_en : 1; /**< [ 16: 16](R/W) Physical-link backpressure enable for received physical HiGig2 messages. This bit enables
+ the SMU TX to CMR HG2 deferring counter to be set every time SMU RX receives and filters
+ out a valid physical HG2 message. */
+ uint64_t logl_en : 16; /**< [ 15: 0](R/W) 16-bit logical-link backpressure enables for received HiGig2 messages or PFC packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t logl_en : 16; /**< [ 15: 0](R/W) 16-bit logical-link backpressure enables for received HiGig2 messages or PFC packets. */
+ uint64_t phys_en : 1; /**< [ 16: 16](R/W) Physical-link backpressure enable for received physical HiGig2 messages. This bit enables
+ the SMU TX to CMR HG2 deferring counter to be set every time SMU RX receives and filters
+ out a valid physical HG2 message. */
+ uint64_t hg2rx_en : 1; /**< [ 17: 17](R/W) Enable extraction and processing of HG2 message packet from RX flow. Physical and logical
+ PAUSE information is used to PAUSE physical-link, backpressure PKO. This field must be set
+ when HiGig2 messages are present in the receive stream. This bit is also forwarded to CMR
+ so it can generate the required deferring signals to SMU TX and backpressure signals to
+ PKO. */
+ uint64_t hg2tx_en : 1; /**< [ 18: 18](R/W) Enable transmission of HG2 physical and logical messages. When set, also disables hardware
+ autogenerated 802.3 PAUSE and PFC frames. (CNXXXX cannot generate proper 802.3 or
+ PFC frames in HiGig2 mode.) */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_hg2_control_s cn81xx; */
+ /* CN88XX variant: identical bit layout, but backpressure is directed at
+    TNS/NIC rather than PKO (documentation-only difference). */
+ struct bdk_bgxx_smux_hg2_control_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t hg2tx_en : 1; /**< [ 18: 18](R/W) Enable transmission of HG2 physical and logical messages. When set, also disables hardware
+ autogenerated 802.3 PAUSE and PFC frames. (CNXXXX cannot generate proper 802.3 or
+ PFC frames in HiGig2 mode.) */
+ uint64_t hg2rx_en : 1; /**< [ 17: 17](R/W) Enable extraction and processing of HG2 message packet from RX flow. Physical and logical
+ PAUSE information is used to PAUSE physical-link, backpressure TNS/NIC. This field must be
+ set
+ when HiGig2 messages are present in the receive stream. This bit is also forwarded to CMR
+ so it can generate the required deferring signals to SMU TX and backpressure signals to
+ TNS/NIC. */
+ uint64_t phys_en : 1; /**< [ 16: 16](R/W) Physical-link backpressure enable for received physical HiGig2 messages. This bit enables
+ the SMU TX to CMR HG2 deferring counter to be set every time SMU RX receives and filters
+ out a valid physical HG2 message. */
+ uint64_t logl_en : 16; /**< [ 15: 0](R/W) 16-bit logical-link backpressure enables for received HiGig2 messages or PFC packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t logl_en : 16; /**< [ 15: 0](R/W) 16-bit logical-link backpressure enables for received HiGig2 messages or PFC packets. */
+ uint64_t phys_en : 1; /**< [ 16: 16](R/W) Physical-link backpressure enable for received physical HiGig2 messages. This bit enables
+ the SMU TX to CMR HG2 deferring counter to be set every time SMU RX receives and filters
+ out a valid physical HG2 message. */
+ uint64_t hg2rx_en : 1; /**< [ 17: 17](R/W) Enable extraction and processing of HG2 message packet from RX flow. Physical and logical
+ PAUSE information is used to PAUSE physical-link, backpressure TNS/NIC. This field must be
+ set
+ when HiGig2 messages are present in the receive stream. This bit is also forwarded to CMR
+ so it can generate the required deferring signals to SMU TX and backpressure signals to
+ TNS/NIC. */
+ uint64_t hg2tx_en : 1; /**< [ 18: 18](R/W) Enable transmission of HG2 physical and logical messages. When set, also disables hardware
+ autogenerated 802.3 PAUSE and PFC frames. (CNXXXX cannot generate proper 802.3 or
+ PFC frames in HiGig2 mode.) */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_bgxx_smux_hg2_control_s cn83xx; */
+};
+typedef union bdk_bgxx_smux_hg2_control bdk_bgxx_smux_hg2_control_t;
+
+/* Physical address of BGX(a)_SMU(b)_HG2_CONTROL. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_HG2_CONTROL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_HG2_CONTROL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020210ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020210ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020210ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_HG2_CONTROL", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_HG2_CONTROL(a,b) bdk_bgxx_smux_hg2_control_t
+#define bustype_BDK_BGXX_SMUX_HG2_CONTROL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_HG2_CONTROL(a,b) "BGXX_SMUX_HG2_CONTROL"
+#define device_bar_BDK_BGXX_SMUX_HG2_CONTROL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_HG2_CONTROL(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_HG2_CONTROL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_bad_col_hi
+ *
+ * BGX SMU RX Bad Column High Registers
+ */
+union bdk_bgxx_smux_rx_bad_col_hi
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Bitfield view; the two endian orderings describe the same layout. */
+ struct bdk_bgxx_smux_rx_bad_col_hi_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ uint64_t val : 1; /**< [ 16: 16](R/W1C/H) Set when BGX()_SMU()_RX_INT[PCTERR] is set. */
+ uint64_t state : 8; /**< [ 15: 8](RO/H) When BGX()_SMU()_RX_INT[PCTERR] is set, contains the receive state at the time of
+ the error. */
+ uint64_t lane_rxc : 8; /**< [ 7: 0](RO/H) When BGX()_SMU()_RX_INT[PCTERR] is set, contains the column at the time of the error. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_rxc : 8; /**< [ 7: 0](RO/H) When BGX()_SMU()_RX_INT[PCTERR] is set, contains the column at the time of the error. */
+ uint64_t state : 8; /**< [ 15: 8](RO/H) When BGX()_SMU()_RX_INT[PCTERR] is set, contains the receive state at the time of
+ the error. */
+ uint64_t val : 1; /**< [ 16: 16](R/W1C/H) Set when BGX()_SMU()_RX_INT[PCTERR] is set. */
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_bad_col_hi_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_bad_col_hi bdk_bgxx_smux_rx_bad_col_hi_t;
+
+/* Physical address of BGX(a)_SMU(b)_RX_BAD_COL_HI. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_BAD_COL_HI(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_RX_BAD_COL_HI(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020058ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_BAD_COL_HI", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_RX_BAD_COL_HI(a,b) bdk_bgxx_smux_rx_bad_col_hi_t
+#define bustype_BDK_BGXX_SMUX_RX_BAD_COL_HI(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_BAD_COL_HI(a,b) "BGXX_SMUX_RX_BAD_COL_HI"
+#define device_bar_BDK_BGXX_SMUX_RX_BAD_COL_HI(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_BAD_COL_HI(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_BAD_COL_HI(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_bad_col_lo
+ *
+ * BGX SMU RX Bad Column Low Registers
+ */
+union bdk_bgxx_smux_rx_bad_col_lo
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Single full-width field, so both endian orderings are identical. */
+ struct bdk_bgxx_smux_rx_bad_col_lo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t lane_rxd : 64; /**< [ 63: 0](RO/H) When BGX()_SMU()_RX_INT[PCTERR] is set, [LANE_RXD] contains the XAUI/RXAUI column at
+ the time of the error. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_rxd : 64; /**< [ 63: 0](RO/H) When BGX()_SMU()_RX_INT[PCTERR] is set, [LANE_RXD] contains the XAUI/RXAUI column at
+ the time of the error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_bad_col_lo_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_bad_col_lo bdk_bgxx_smux_rx_bad_col_lo_t;
+
+/* Physical address of BGX(a)_SMU(b)_RX_BAD_COL_LO. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_BAD_COL_LO(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_RX_BAD_COL_LO(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020050ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020050ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020050ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_BAD_COL_LO", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_RX_BAD_COL_LO(a,b) bdk_bgxx_smux_rx_bad_col_lo_t
+#define bustype_BDK_BGXX_SMUX_RX_BAD_COL_LO(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_BAD_COL_LO(a,b) "BGXX_SMUX_RX_BAD_COL_LO"
+#define device_bar_BDK_BGXX_SMUX_RX_BAD_COL_LO(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_BAD_COL_LO(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_BAD_COL_LO(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_ctl
+ *
+ * BGX SMU RX Control Registers
+ */
+union bdk_bgxx_smux_rx_ctl
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Bitfield view; the two endian orderings describe the same layout. */
+ struct bdk_bgxx_smux_rx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t status : 2; /**< [ 1: 0](RO/H) Link status.
+ 0x0 = Link OK.
+ 0x1 = Local fault.
+ 0x2 = Remote fault.
+ 0x3 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 2; /**< [ 1: 0](RO/H) Link status.
+ 0x0 = Link OK.
+ 0x1 = Local fault.
+ 0x2 = Remote fault.
+ 0x3 = Reserved. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_ctl_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_ctl bdk_bgxx_smux_rx_ctl_t;
+
+/* Physical address of BGX(a)_SMU(b)_RX_CTL. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_RX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020048ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020048ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020048ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_CTL", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_RX_CTL(a,b) bdk_bgxx_smux_rx_ctl_t
+#define bustype_BDK_BGXX_SMUX_RX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_CTL(a,b) "BGXX_SMUX_RX_CTL"
+#define device_bar_BDK_BGXX_SMUX_RX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_CTL(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_decision
+ *
+ * BGX SMU Packet Decision Registers
+ * This register specifies the byte count used to determine when to accept or to filter a packet.
+ * As each byte in a packet is received by BGX, the L2 byte count (i.e. the number of bytes from
+ * the beginning of the L2 header (DMAC)) is compared against CNT. In normal operation, the L2
+ * header begins after the PREAMBLE + SFD (BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 1) and any
+ * optional UDD skip data (BGX()_SMU()_RX_UDD_SKP[LEN]).
+ */
+union bdk_bgxx_smux_rx_decision
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Bitfield view; the two endian orderings describe the same layout. */
+ struct bdk_bgxx_smux_rx_decision_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t cnt : 5; /**< [ 4: 0](R/W) The byte count to decide when to accept or filter a packet. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 5; /**< [ 4: 0](R/W) The byte count to decide when to accept or filter a packet. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_decision_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_decision bdk_bgxx_smux_rx_decision_t;
+
+/* Physical address of BGX(a)_SMU(b)_RX_DECISION. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_DECISION(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_RX_DECISION(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020038ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020038ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020038ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_DECISION", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_RX_DECISION(a,b) bdk_bgxx_smux_rx_decision_t
+#define bustype_BDK_BGXX_SMUX_RX_DECISION(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_DECISION(a,b) "BGXX_SMUX_RX_DECISION"
+#define device_bar_BDK_BGXX_SMUX_RX_DECISION(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_DECISION(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_DECISION(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_frm_chk
+ *
+ * BGX SMU RX Frame Check Registers
+ * The CSRs provide the enable bits for a subset of errors passed to CMR encoded.
+ */
+union bdk_bgxx_smux_rx_frm_chk
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Bitfield view; the two endian orderings describe the same layout. */
+ struct bdk_bgxx_smux_rx_frm_chk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t skperr : 1; /**< [ 8: 8](R/W) Skipper error. */
+ uint64_t rcverr : 1; /**< [ 7: 7](R/W) Frame was received with data-reception error. */
+ uint64_t reserved_6 : 1;
+ uint64_t fcserr_c : 1; /**< [ 5: 5](R/W) Control frame was received with FCS/CRC error. */
+ uint64_t fcserr_d : 1; /**< [ 4: 4](R/W) Data frame was received with FCS/CRC error. */
+ uint64_t jabber : 1; /**< [ 3: 3](R/W) Frame was received with length \> sys_length. */
+ uint64_t reserved_0_2 : 3;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_2 : 3;
+ uint64_t jabber : 1; /**< [ 3: 3](R/W) Frame was received with length \> sys_length. */
+ uint64_t fcserr_d : 1; /**< [ 4: 4](R/W) Data frame was received with FCS/CRC error. */
+ uint64_t fcserr_c : 1; /**< [ 5: 5](R/W) Control frame was received with FCS/CRC error. */
+ uint64_t reserved_6 : 1;
+ uint64_t rcverr : 1; /**< [ 7: 7](R/W) Frame was received with data-reception error. */
+ uint64_t skperr : 1; /**< [ 8: 8](R/W) Skipper error. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_frm_chk_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_frm_chk bdk_bgxx_smux_rx_frm_chk_t;
+
+/* Physical address of BGX(a)_SMU(b)_RX_FRM_CHK. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_FRM_CHK(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_RX_FRM_CHK(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020028ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020028ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020028ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_FRM_CHK", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_RX_FRM_CHK(a,b) bdk_bgxx_smux_rx_frm_chk_t
+#define bustype_BDK_BGXX_SMUX_RX_FRM_CHK(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_FRM_CHK(a,b) "BGXX_SMUX_RX_FRM_CHK"
+#define device_bar_BDK_BGXX_SMUX_RX_FRM_CHK(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_FRM_CHK(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_FRM_CHK(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_frm_ctl
+ *
+ * BGX SMU RX Frame Control Registers
+ * This register controls the handling of the frames.
+ * The [CTL_BCK] and [CTL_DRP] bits control how the hardware handles incoming PAUSE packets. The
+ * most
+ * common modes of operation:
+ * _ [CTL_BCK] = 1, [CTL_DRP] = 1: hardware handles everything
+ * _ [CTL_BCK] = 0, [CTL_DRP] = 0: software sees all PAUSE frames
+ * _ [CTL_BCK] = 0, [CTL_DRP] = 1: all PAUSE frames are completely ignored
+ *
+ * These control bits should be set to [CTL_BCK] = 0, [CTL_DRP] = 0 in half-duplex mode. Since
+ * PAUSE
+ * packets only apply to full duplex operation, any PAUSE packet would constitute an exception
+ * which should be handled by the processing cores. PAUSE packets should not be forwarded.
+ */
+union bdk_bgxx_smux_rx_frm_ctl
+{
+ /* Raw 64-bit register value. */
+ uint64_t u;
+ /* Bitfield view; the two endian orderings describe the same layout. */
+ struct bdk_bgxx_smux_rx_frm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t ptp_mode : 1; /**< [ 12: 12](R/W) Timestamp mode. When [PTP_MODE] is set, a 64-bit timestamp is prepended to every incoming
+ packet.
+
+ The timestamp bytes are added to the packet in such a way as to not modify the packet's
+ receive byte count. This implies that the BGX()_SMU()_RX_JABBER,
+ BGX()_SMU()_RX_DECISION, and BGX()_SMU()_RX_UDD_SKP do not require any
+ adjustment as they operate on the received packet size. When the packet reaches NIC, its
+ size reflects the additional bytes. */
+ uint64_t reserved_6_11 : 6;
+ uint64_t ctl_smac : 1; /**< [ 5: 5](R/W) Control PAUSE frames can match station SMAC. */
+ uint64_t ctl_mcst : 1; /**< [ 4: 4](R/W) Control PAUSE frames can match globally assigned multicast address. */
+ uint64_t ctl_bck : 1; /**< [ 3: 3](R/W) Forward PAUSE information to TX block. */
+ uint64_t ctl_drp : 1; /**< [ 2: 2](R/W) Drop control PAUSE frames. */
+ uint64_t pre_strp : 1; /**< [ 1: 1](R/W) Strip off the preamble (when present).
+
+ 0 = PREAMBLE + SFD is sent to core as part of frame.
+ 1 = PREAMBLE + SFD is dropped.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features.
+
+ If [PTP_MODE] = 1 and [PRE_CHK] = 1, [PRE_STRP] must be 1.
+
+ When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP] determines
+ if
+ the PREAMBLE+SFD bytes are thrown away or sent to the core as part of the packet. In
+ either mode, the PREAMBLE+SFD bytes are not counted toward the packet size when checking
+ against the MIN and MAX bounds. Furthermore, the bytes are skipped when locating the start
+ of the L2 header for DMAC and control frame recognition. */
+ uint64_t pre_chk : 1; /**< [ 0: 0](R/W) Check the preamble for correctness.
+ This port is configured to send a valid 802.3 PREAMBLE to begin every frame. BGX checks
+ that a valid PREAMBLE is received. When a problem does occur within
+ the PREAMBLE sequence, the frame is marked as bad and not sent into the core. The
+ BGX()_SMU()_RX_INT[PCTERR] interrupt is also raised.
+
+ When BGX()_SMU()_TX_CTL[HG_EN] is set, [PRE_CHK] must be 0.
+
+ If [PTP_MODE] = 1 and [PRE_CHK] = 1, [PRE_STRP] must be 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_chk : 1; /**< [ 0: 0](R/W) Check the preamble for correctness.
+ This port is configured to send a valid 802.3 PREAMBLE to begin every frame. BGX checks
+ that a valid PREAMBLE is received. When a problem does occur within
+ the PREAMBLE sequence, the frame is marked as bad and not sent into the core. The
+ BGX()_SMU()_RX_INT[PCTERR] interrupt is also raised.
+
+ When BGX()_SMU()_TX_CTL[HG_EN] is set, [PRE_CHK] must be 0.
+
+ If [PTP_MODE] = 1 and [PRE_CHK] = 1, [PRE_STRP] must be 1. */
+ uint64_t pre_strp : 1; /**< [ 1: 1](R/W) Strip off the preamble (when present).
+
+ 0 = PREAMBLE + SFD is sent to core as part of frame.
+ 1 = PREAMBLE + SFD is dropped.
+ [PRE_CHK] must be set to enable this and all PREAMBLE features.
+
+ If [PTP_MODE] = 1 and [PRE_CHK] = 1, [PRE_STRP] must be 1.
+
+ When [PRE_CHK] is set (indicating that the PREAMBLE will be sent), [PRE_STRP] determines
+ if
+ the PREAMBLE+SFD bytes are thrown away or sent to the core as part of the packet. In
+ either mode, the PREAMBLE+SFD bytes are not counted toward the packet size when checking
+ against the MIN and MAX bounds. Furthermore, the bytes are skipped when locating the start
+ of the L2 header for DMAC and control frame recognition. */
+ uint64_t ctl_drp : 1; /**< [ 2: 2](R/W) Drop control PAUSE frames. */
+ uint64_t ctl_bck : 1; /**< [ 3: 3](R/W) Forward PAUSE information to TX block. */
+ uint64_t ctl_mcst : 1; /**< [ 4: 4](R/W) Control PAUSE frames can match globally assigned multicast address. */
+ uint64_t ctl_smac : 1; /**< [ 5: 5](R/W) Control PAUSE frames can match station SMAC. */
+ uint64_t reserved_6_11 : 6;
+ uint64_t ptp_mode : 1; /**< [ 12: 12](R/W) Timestamp mode. When [PTP_MODE] is set, a 64-bit timestamp is prepended to every incoming
+ packet.
+
+ The timestamp bytes are added to the packet in such a way as to not modify the packet's
+ receive byte count. This implies that the BGX()_SMU()_RX_JABBER,
+ BGX()_SMU()_RX_DECISION, and BGX()_SMU()_RX_UDD_SKP do not require any
+ adjustment as they operate on the received packet size. When the packet reaches NIC, its
+ size reflects the additional bytes. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_frm_ctl_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_frm_ctl bdk_bgxx_smux_rx_frm_ctl_t;
+
+/* Physical address of BGX(a)_SMU(b)_RX_FRM_CTL. Out-of-range indices reach
+   __bdk_csr_fatal() (assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_FRM_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_RX_FRM_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020020ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020020ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020020ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_FRM_CTL", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the BDK CSR accessor macros. */
+#define typedef_BDK_BGXX_SMUX_RX_FRM_CTL(a,b) bdk_bgxx_smux_rx_frm_ctl_t
+#define bustype_BDK_BGXX_SMUX_RX_FRM_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_FRM_CTL(a,b) "BGXX_SMUX_RX_FRM_CTL"
+#define device_bar_BDK_BGXX_SMUX_RX_FRM_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_FRM_CTL(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_FRM_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_int
+ *
+ * BGX SMU Receive Interrupt Registers
+ * SMU Interrupt Register.
+ * Internal:
+ * Exception conditions \<9\> and \<4:0\> can also set the rcv/opcode in the received packet's work
+ * queue entry. BGX()_SMU()_RX_FRM_CHK provides a bit mask for configuring which
+ * conditions set the error.
+ */
+/* The bit-field list appears twice -- once per byte order -- so that field
+ * declaration order matches the hardware layout on either endianness. */
+union bdk_bgxx_smux_rx_int
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_rx_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1C/H) HiGig2 received message CRC or control-character error. Set when either a CRC8 error is
+ detected, or when a control character is found in the message bytes after the K.SOM.
+ HG2CC has higher priority than HG2FLD, which means that a HiGig2 message that results in
+ HG2CC getting set never sets HG2FLD. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1C/H) HiGig2 received message field error:
+
+ MSG_TYPE field not 0x0, i.e. it is not a flow-control message, which is the only defined
+ type for HiGig2.
+
+ FWD_TYPE field not 0x0, i.e. it is not a link-level message, which is the only defined
+ type for HiGig2.
+
+ FC_OBJECT field is neither 0x0 for physical link, nor 0x2 for logical link. Those are the
+ only two defined types in HiGig2 */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1C/H) Frame is terminated by control character other than /T/. The error
+ propagation control character /E/ will be included as part of the frame and does not cause
+ a frame termination. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1C/H) Detected reserved sequence. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1C/H) Remote-fault sequence detected. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1C/H) Local-fault sequence detected. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1C/H) Detected reserved opcode. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1C/H) Bad preamble/protocol. In XAUI/RXAUI mode, the column of data that was bad is logged in
+ BGX()_SMU()_RX_BAD_COL_LO and BGX()_SMU()_RX_BAD_COL_HI.
+ PCTERR checks that the frame begins with a valid
+ PREAMBLE sequence. Does not check the number of PREAMBLE cycles. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1C/H) Skipper error. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1C/H) Frame was received with data-reception error. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1C/H) Frame was received with FCS/CRC error */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1C/H) Frame was received with length \> sys_length. An RX Jabber error indicates that a packet
+ was received which is longer than the maximum allowed packet as defined by the system. BGX
+ terminates the packet with an EOP on the beat on which JABBER was exceeded. The beat on
+ which JABBER was exceeded is left unchanged and all subsequent data beats are dropped.
+ Failure to truncate could lead to system instability. */
+#else /* Word 0 - Little Endian */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1C/H) Frame was received with length \> sys_length. An RX Jabber error indicates that a packet
+ was received which is longer than the maximum allowed packet as defined by the system. BGX
+ terminates the packet with an EOP on the beat on which JABBER was exceeded. The beat on
+ which JABBER was exceeded is left unchanged and all subsequent data beats are dropped.
+ Failure to truncate could lead to system instability. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1C/H) Frame was received with FCS/CRC error */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1C/H) Frame was received with data-reception error. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1C/H) Skipper error. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1C/H) Bad preamble/protocol. In XAUI/RXAUI mode, the column of data that was bad is logged in
+ BGX()_SMU()_RX_BAD_COL_LO and BGX()_SMU()_RX_BAD_COL_HI.
+ PCTERR checks that the frame begins with a valid
+ PREAMBLE sequence. Does not check the number of PREAMBLE cycles. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1C/H) Detected reserved opcode. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1C/H) Local-fault sequence detected. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1C/H) Remote-fault sequence detected. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1C/H) Detected reserved sequence. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1C/H) Frame is terminated by control character other than /T/. The error
+ propagation control character /E/ will be included as part of the frame and does not cause
+ a frame termination. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1C/H) HiGig2 received message field error:
+
+ MSG_TYPE field not 0x0, i.e. it is not a flow-control message, which is the only defined
+ type for HiGig2.
+
+ FWD_TYPE field not 0x0, i.e. it is not a link-level message, which is the only defined
+ type for HiGig2.
+
+ FC_OBJECT field is neither 0x0 for physical link, nor 0x2 for logical link. Those are the
+ only two defined types in HiGig2 */
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1C/H) HiGig2 received message CRC or control-character error. Set when either a CRC8 error is
+ detected, or when a control character is found in the message bytes after the K.SOM.
+ HG2CC has higher priority than HG2FLD, which means that a HiGig2 message that results in
+ HG2CC getting set never sets HG2FLD. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_int_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_int bdk_bgxx_smux_rx_int_t;
+
+static inline uint64_t BDK_BGXX_SMUX_RX_INT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Physical CSR address of BGX(a)_SMU(b)_RX_INT for the current model.
+ * Index bounds are model-specific (a<=3 on CN83XX, else a<=1; b<=3);
+ * bad indices or an unknown model reach __bdk_csr_fatal()
+ * (NOTE(review): assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_INT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020000ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020000ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020000ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_INT", 2, a, b, 0, 0);
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* machinery. */
+#define typedef_BDK_BGXX_SMUX_RX_INT(a,b) bdk_bgxx_smux_rx_int_t
+#define bustype_BDK_BGXX_SMUX_RX_INT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_INT(a,b) "BGXX_SMUX_RX_INT"
+#define device_bar_BDK_BGXX_SMUX_RX_INT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_INT(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_INT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_int_ena_w1c
+ *
+ * BGX SMU Receive Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+/* The cn83xx variant below is bit-for-bit identical to the common layout;
+ * it differs only in the documented index range (BGX 0..3 vs 0..1). */
+union bdk_bgxx_smux_rx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_rx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[JABBER]. */
+#else /* Word 0 - Little Endian */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_int_ena_w1c_s cn81xx; */
+ /* struct bdk_bgxx_smux_rx_int_ena_w1c_s cn88xx; */
+ struct bdk_bgxx_smux_rx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[JABBER]. */
+#else /* Word 0 - Little Endian */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_smux_rx_int_ena_w1c bdk_bgxx_smux_rx_int_ena_w1c_t;
+
+static inline uint64_t BDK_BGXX_SMUX_RX_INT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Physical CSR address of BGX(a)_SMU(b)_RX_INT_ENA_W1C for the current
+ * model (a<=3 on CN83XX, else a<=1; b<=3).  Bad indices or an unknown
+ * model reach __bdk_csr_fatal() (NOTE(review): assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_INT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020010ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020010ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020010ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_INT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* machinery. */
+#define typedef_BDK_BGXX_SMUX_RX_INT_ENA_W1C(a,b) bdk_bgxx_smux_rx_int_ena_w1c_t
+#define bustype_BDK_BGXX_SMUX_RX_INT_ENA_W1C(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_INT_ENA_W1C(a,b) "BGXX_SMUX_RX_INT_ENA_W1C"
+#define device_bar_BDK_BGXX_SMUX_RX_INT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_INT_ENA_W1C(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_INT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_int_ena_w1s
+ *
+ * BGX SMU Receive Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+/* The cn83xx variant below is bit-for-bit identical to the common layout;
+ * it differs only in the documented index range (BGX 0..3 vs 0..1). */
+union bdk_bgxx_smux_rx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_rx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[JABBER]. */
+#else /* Word 0 - Little Endian */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_int_ena_w1s_s cn81xx; */
+ /* struct bdk_bgxx_smux_rx_int_ena_w1s_s cn88xx; */
+ struct bdk_bgxx_smux_rx_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[JABBER]. */
+#else /* Word 0 - Little Endian */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_smux_rx_int_ena_w1s bdk_bgxx_smux_rx_int_ena_w1s_t;
+
+static inline uint64_t BDK_BGXX_SMUX_RX_INT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Physical CSR address of BGX(a)_SMU(b)_RX_INT_ENA_W1S for the current
+ * model (a<=3 on CN83XX, else a<=1; b<=3).  Bad indices or an unknown
+ * model reach __bdk_csr_fatal() (NOTE(review): assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_INT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020018ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020018ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020018ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_INT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* machinery. */
+#define typedef_BDK_BGXX_SMUX_RX_INT_ENA_W1S(a,b) bdk_bgxx_smux_rx_int_ena_w1s_t
+#define bustype_BDK_BGXX_SMUX_RX_INT_ENA_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_INT_ENA_W1S(a,b) "BGXX_SMUX_RX_INT_ENA_W1S"
+#define device_bar_BDK_BGXX_SMUX_RX_INT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_INT_ENA_W1S(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_INT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_int_w1s
+ *
+ * BGX SMU Receive Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+/* The cn83xx variant below is bit-for-bit identical to the common layout;
+ * it differs only in the documented index range (BGX 0..3 vs 0..1). */
+union bdk_bgxx_smux_rx_int_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_rx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[JABBER]. */
+#else /* Word 0 - Little Endian */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_int_w1s_s cn81xx; */
+ /* struct bdk_bgxx_smux_rx_int_w1s_s cn88xx; */
+ struct bdk_bgxx_smux_rx_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[JABBER]. */
+#else /* Word 0 - Little Endian */
+ uint64_t jabber : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[JABBER]. */
+ uint64_t fcserr : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[FCSERR]. */
+ uint64_t rcverr : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[RCVERR]. */
+ uint64_t skperr : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[SKPERR]. */
+ uint64_t pcterr : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[PCTERR]. */
+ uint64_t rsverr : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[RSVERR]. */
+ uint64_t loc_fault : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[LOC_FAULT]. */
+ uint64_t rem_fault : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[REM_FAULT]. */
+ uint64_t bad_seq : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[BAD_SEQ]. */
+ uint64_t bad_term : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[BAD_TERM]. */
+ uint64_t hg2fld : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[HG2FLD]. */
+ uint64_t hg2cc : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_RX_INT[HG2CC]. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_smux_rx_int_w1s bdk_bgxx_smux_rx_int_w1s_t;
+
+static inline uint64_t BDK_BGXX_SMUX_RX_INT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Physical CSR address of BGX(a)_SMU(b)_RX_INT_W1S for the current model
+ * (a<=3 on CN83XX, else a<=1; b<=3).  Bad indices or an unknown model
+ * reach __bdk_csr_fatal() (NOTE(review): assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_INT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020008ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020008ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020008ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_INT_W1S", 2, a, b, 0, 0);
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* machinery. */
+#define typedef_BDK_BGXX_SMUX_RX_INT_W1S(a,b) bdk_bgxx_smux_rx_int_w1s_t
+#define bustype_BDK_BGXX_SMUX_RX_INT_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_INT_W1S(a,b) "BGXX_SMUX_RX_INT_W1S"
+#define device_bar_BDK_BGXX_SMUX_RX_INT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_INT_W1S(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_INT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_jabber
+ *
+ * BGX SMU Maximum Packet-Size Registers
+ * This register specifies the maximum size for packets, beyond which the SMU truncates. In
+ * XAUI/RXAUI mode, port 0 is used for checking.
+ *
+ * Internal:
+ * The packet that will be sent to the packet input logic will have an
+ * additionl 8 bytes if BGX()_SMU()_RX_FRM_CTL[PRE_CHK] is set and
+ * BGX()_SMU()_RX_FRM_CTL[PRE_STRP] is clear. The max packet that will be sent is
+ * defined as:
+ *
+ * _ max_sized_packet = BGX()_SMU()_RX_JABBER[CNT]+((BGX()_SMU()_RX_FRM_CTL[PRE_CHK] &
+ * !BGX()_SMU()_RX_FRM_CTL[PRE_STRP])*8)
+ */
+/* Single 16-bit field; layout is identical for all supported models. */
+union bdk_bgxx_smux_rx_jabber
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_rx_jabber_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t cnt : 16; /**< [ 15: 0](R/W) Byte count for jabber check. Failing packets set the JABBER interrupt and are optionally
+ sent with opcode = JABBER. BGX truncates the packet to CNT+1 to CNT+8 bytes.
+ CNT must be 8-byte aligned such that CNT\<2:0\> = 000. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 16; /**< [ 15: 0](R/W) Byte count for jabber check. Failing packets set the JABBER interrupt and are optionally
+ sent with opcode = JABBER. BGX truncates the packet to CNT+1 to CNT+8 bytes.
+ CNT must be 8-byte aligned such that CNT\<2:0\> = 000. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_rx_jabber_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_jabber bdk_bgxx_smux_rx_jabber_t;
+
+static inline uint64_t BDK_BGXX_SMUX_RX_JABBER(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Physical CSR address of BGX(a)_SMU(b)_RX_JABBER for the current model
+ * (a<=3 on CN83XX, else a<=1; b<=3).  Bad indices or an unknown model
+ * reach __bdk_csr_fatal() (NOTE(review): assumed noreturn -- confirm). */
+static inline uint64_t BDK_BGXX_SMUX_RX_JABBER(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020030ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020030ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020030ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_RX_JABBER", 2, a, b, 0, 0);
+}
+
+/* Accessor helper macros consumed by the generic BDK_CSR_* machinery. */
+#define typedef_BDK_BGXX_SMUX_RX_JABBER(a,b) bdk_bgxx_smux_rx_jabber_t
+#define bustype_BDK_BGXX_SMUX_RX_JABBER(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_JABBER(a,b) "BGXX_SMUX_RX_JABBER"
+#define device_bar_BDK_BGXX_SMUX_RX_JABBER(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_JABBER(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_JABBER(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_rx_udd_skp
+ *
+ * BGX SMU User-Defined Data Skip Registers
+ * Internal:
+ * (1) The skip bytes are part of the packet and will be sent down the NCB
+ * packet interface and will be handled by NIC.
+ * (2) The system can determine if the UDD bytes are included in the FCS check
+ * by using the FCSSEL field if the FCS check is enabled.
+ *
+ * (3) Assume that the preamble/sfd is always at the start of the frame even
+ * before UDD bytes. In most cases, there will be no preamble in these
+ * cases since it will be packet interface in direct communication to
+ * another packet interface (MAC to MAC) without a PHY involved.
+ *
+ * (4) We can still do address filtering and control packet filtering if the
+ * user desires.
+ *
+ * NOTE(review): there is no item (5) in this list; the gap exists in the
+ * original vendor text and the numbering is kept to match it.
+ *
+ * (6) In all cases, the UDD bytes will be sent down the packet interface as
+ * part of the packet. The UDD bytes are never stripped from the actual
+ * packet.
+ */
+union bdk_bgxx_smux_rx_udd_skp
+{
+    uint64_t u;
+    /* NOTE(review): exact hardware bit layout — the big- and little-endian
+       views must always describe the same fields in mirrored order. */
+    struct bdk_bgxx_smux_rx_udd_skp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_9_63         : 55;
+        uint64_t fcssel                : 1;  /**< [  8:  8](R/W) Include the skip bytes in the FCS calculation.
+                                                                 0 = All skip bytes are included in FCS.
+                                                                 1 = The skip bytes are not included in FCS.
+
+                                                                 When BGX()_SMU()_TX_CTL[HG_EN] is set, this field must be 0.
+                                                                 The skip bytes are part of the packet and are sent through the NCB packet interface and
+                                                                 are handled by NIC. The system can determine if the UDD bytes are included in the FCS
+                                                                 check by using the FCSSEL field, if the FCS check is enabled. */
+        uint64_t reserved_7            : 1;
+        uint64_t len                   : 7;  /**< [  6:  0](R/W) Amount of user-defined data before the start of the L2C data, in bytes.
+                                                                 Setting to 0 means L2C comes first; maximum value is 64.
+                                                                 LEN must be 0x0 in half-duplex operation.
+
+                                                                 When BGX()_SMU()_TX_CTL[HG_EN] is set, this field must be set to 12 or 16
+                                                                 (depending on HiGig header size) to account for the HiGig header.
+                                                                 LEN = 12 selects HiGig/HiGig+; LEN = 16 selects HiGig2. */
+#else /* Word 0 - Little Endian */
+        uint64_t len                   : 7;  /**< [  6:  0](R/W) Amount of user-defined data before the start of the L2C data, in bytes.
+                                                                 Setting to 0 means L2C comes first; maximum value is 64.
+                                                                 LEN must be 0x0 in half-duplex operation.
+
+                                                                 When BGX()_SMU()_TX_CTL[HG_EN] is set, this field must be set to 12 or 16
+                                                                 (depending on HiGig header size) to account for the HiGig header.
+                                                                 LEN = 12 selects HiGig/HiGig+; LEN = 16 selects HiGig2. */
+        uint64_t reserved_7            : 1;
+        uint64_t fcssel                : 1;  /**< [  8:  8](R/W) Include the skip bytes in the FCS calculation.
+                                                                 0 = All skip bytes are included in FCS.
+                                                                 1 = The skip bytes are not included in FCS.
+
+                                                                 When BGX()_SMU()_TX_CTL[HG_EN] is set, this field must be 0.
+                                                                 The skip bytes are part of the packet and are sent through the NCB packet interface and
+                                                                 are handled by NIC. The system can determine if the UDD bytes are included in the FCS
+                                                                 check by using the FCSSEL field, if the FCS check is enabled. */
+        uint64_t reserved_9_63         : 55;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_smux_rx_udd_skp_s cn; */
+};
+typedef union bdk_bgxx_smux_rx_udd_skp bdk_bgxx_smux_rx_udd_skp_t;
+
+static inline uint64_t BDK_BGXX_SMUX_RX_UDD_SKP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_RX_UDD_SKP(unsigned long a, unsigned long b)
+{
+    /* CSR address = base + 0x1000000 per BGX instance (a) + 0x100000 per LMAC (b).
+       CN81XX/CN88XX expose BGX(0..1); CN83XX exposes BGX(0..3); all have LMAC(0..3). */
+    const uint64_t base = 0x87e0e0020040ll;
+    const uint64_t bgx_step = 0x1000000ll;
+    const uint64_t lmac_step = 0x100000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3 && b <= 3)
+        return base + bgx_step * (a & 0x3) + lmac_step * (b & 0x3);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && a <= 1 && b <= 3)
+        return base + bgx_step * (a & 0x1) + lmac_step * (b & 0x3);
+    __bdk_csr_fatal("BGXX_SMUX_RX_UDD_SKP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_RX_UDD_SKP(a,b) bdk_bgxx_smux_rx_udd_skp_t
+#define bustype_BDK_BGXX_SMUX_RX_UDD_SKP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_RX_UDD_SKP(a,b) "BGXX_SMUX_RX_UDD_SKP"
+#define device_bar_BDK_BGXX_SMUX_RX_UDD_SKP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_RX_UDD_SKP(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_RX_UDD_SKP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_smac
+ *
+ * BGX SMU SMAC Registers
+ */
+union bdk_bgxx_smux_smac
+{
+    uint64_t u;
+    /* 48-bit MAC address used for generating and accepting control PAUSE
+       packets; bits <63:48> are reserved. */
+    struct bdk_bgxx_smux_smac_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t smac                  : 48; /**< [ 47:  0](R/W) The SMAC field is used for generating and accepting control PAUSE packets. */
+#else /* Word 0 - Little Endian */
+        uint64_t smac                  : 48; /**< [ 47:  0](R/W) The SMAC field is used for generating and accepting control PAUSE packets. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_smux_smac_s cn; */
+};
+typedef union bdk_bgxx_smux_smac bdk_bgxx_smux_smac_t;
+
+static inline uint64_t BDK_BGXX_SMUX_SMAC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_SMAC(unsigned long a, unsigned long b)
+{
+    /* CSR address = base + 0x1000000 per BGX instance (a) + 0x100000 per LMAC (b).
+       CN81XX/CN88XX expose BGX(0..1); CN83XX exposes BGX(0..3); all have LMAC(0..3). */
+    const uint64_t base = 0x87e0e0020108ll;
+    const uint64_t bgx_step = 0x1000000ll;
+    const uint64_t lmac_step = 0x100000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3 && b <= 3)
+        return base + bgx_step * (a & 0x3) + lmac_step * (b & 0x3);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && a <= 1 && b <= 3)
+        return base + bgx_step * (a & 0x1) + lmac_step * (b & 0x3);
+    __bdk_csr_fatal("BGXX_SMUX_SMAC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_SMAC(a,b) bdk_bgxx_smux_smac_t
+#define bustype_BDK_BGXX_SMUX_SMAC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_SMAC(a,b) "BGXX_SMUX_SMAC"
+#define device_bar_BDK_BGXX_SMUX_SMAC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_SMAC(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_SMAC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_append
+ *
+ * BGX SMU TX Append Control Registers
+ * For more details on the interactions between FCS and PAD, see also the description of
+ * BGX()_SMU()_TX_MIN_PKT[MIN_SIZE].
+ */
+union bdk_bgxx_smux_tx_append
+{
+    uint64_t u;
+    /* Per-LMAC TX framing controls: FCS append for data ([FCS_D]) and PAUSE
+       ([FCS_C]) packets, minimum-size padding, and preamble insertion. */
+    struct bdk_bgxx_smux_tx_append_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t fcs_c                 : 1;  /**< [  3:  3](R/W) Append the Ethernet FCS on each PAUSE packet. PAUSE packets are normally padded to 60
+                                                                 bytes. If BGX()_SMU()_TX_MIN_PKT[MIN_SIZE] exceeds 59, then [FCS_C] is not used. */
+        uint64_t fcs_d                 : 1;  /**< [  2:  2](R/W) Append the Ethernet FCS on each data packet. */
+        uint64_t pad                   : 1;  /**< [  1:  1](R/W) Append PAD bytes such that minimum-sized packet is transmitted. */
+        uint64_t preamble              : 1;  /**< [  0:  0](R/W) Prepend the Ethernet preamble on each transfer. When BGX()_SMU()_TX_CTL[HG_EN] is
+                                                                 set, PREAMBLE must be 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t preamble              : 1;  /**< [  0:  0](R/W) Prepend the Ethernet preamble on each transfer. When BGX()_SMU()_TX_CTL[HG_EN] is
+                                                                 set, PREAMBLE must be 0. */
+        uint64_t pad                   : 1;  /**< [  1:  1](R/W) Append PAD bytes such that minimum-sized packet is transmitted. */
+        uint64_t fcs_d                 : 1;  /**< [  2:  2](R/W) Append the Ethernet FCS on each data packet. */
+        uint64_t fcs_c                 : 1;  /**< [  3:  3](R/W) Append the Ethernet FCS on each PAUSE packet. PAUSE packets are normally padded to 60
+                                                                 bytes. If BGX()_SMU()_TX_MIN_PKT[MIN_SIZE] exceeds 59, then [FCS_C] is not used. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_smux_tx_append_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_append bdk_bgxx_smux_tx_append_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_APPEND(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_APPEND(unsigned long a, unsigned long b)
+{
+    /* CSR address = base + 0x1000000 per BGX instance (a) + 0x100000 per LMAC (b).
+       CN81XX/CN88XX expose BGX(0..1); CN83XX exposes BGX(0..3); all have LMAC(0..3). */
+    const uint64_t base = 0x87e0e0020100ll;
+    const uint64_t bgx_step = 0x1000000ll;
+    const uint64_t lmac_step = 0x100000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3 && b <= 3)
+        return base + bgx_step * (a & 0x3) + lmac_step * (b & 0x3);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && a <= 1 && b <= 3)
+        return base + bgx_step * (a & 0x1) + lmac_step * (b & 0x3);
+    __bdk_csr_fatal("BGXX_SMUX_TX_APPEND", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_APPEND(a,b) bdk_bgxx_smux_tx_append_t
+#define bustype_BDK_BGXX_SMUX_TX_APPEND(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_APPEND(a,b) "BGXX_SMUX_TX_APPEND"
+#define device_bar_BDK_BGXX_SMUX_TX_APPEND(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_APPEND(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_APPEND(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_ctl
+ *
+ * BGX SMU Transmit Control Registers
+ */
+union bdk_bgxx_smux_tx_ctl
+{
+    uint64_t u;
+    /* Common layout, used as-is by CN81XX/CN83XX (see aliases below). */
+    struct bdk_bgxx_smux_tx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_31_63        : 33;
+        uint64_t spu_mrk_cnt           : 20; /**< [ 30: 11](R/W) 40GBASE-R transmit marker interval count. Specifies the interval (number of 66-bit BASE-R
+                                                                 blocks) at which the LMAC transmit logic inserts 40GBASE-R alignment markers. An internal
+                                                                 counter in SMU is initialized to this value, counts down for each BASE-R block transmitted
+                                                                 by the LMAC, and wraps back to the initial value from 0. The LMAC transmit logic inserts
+                                                                 alignment markers for lanes 0, 1, 2 and 3, respectively, in the last four BASE-R blocks
+                                                                 before the counter wraps (3, 2, 1, 0). The default value corresponds to an alignment
+                                                                 marker period of 16363 blocks (exclusive) per lane, as specified in 802.3ba-2010. The
+                                                                 default value should always be used for normal operation. */
+        uint64_t hg_pause_hgi          : 2;  /**< [ 10:  9](R/W) HGI field for hardware-generated HiGig PAUSE packets. */
+        uint64_t hg_en                 : 1;  /**< [  8:  8](R/W) Enable HiGig mode.
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 12, the interface is in
+                                                                 HiGig/HiGig+ mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 12.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 16, the interface is in
+                                                                 HiGig2 mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 16.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[RX_EN] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[TX_EN] = 0. */
+        uint64_t l2p_bp_conv           : 1;  /**< [  7:  7](R/W) If set, causes TX to generate 802.3 pause packets when CMR applies logical backpressure
+                                                                 (XOFF), if and only if BGX()_SMU()_CBFC_CTL[TX_EN] is clear and
+                                                                 BGX()_SMU()_HG2_CONTROL[HG2TX_EN] is clear. */
+        uint64_t ls_byp                : 1;  /**< [  6:  6](R/W) Bypass the link status, as determined by the XGMII receiver, and set the link status of
+                                                                 the transmitter to LS. */
+        uint64_t ls                    : 2;  /**< [  5:  4](R/W) Link status.
+                                                                 0 = Link OK; link runs normally. RS passes MAC data to PCS.
+                                                                 1 = Local fault. RS layer sends continuous remote fault sequences.
+                                                                 2 = Remote fault. RS layer sends continuous idle sequences.
+                                                                 3 = Link drain. RS layer drops full packets to allow BGX and PKO to drain their FIFOs. */
+        uint64_t reserved_3            : 1;
+        uint64_t x4a_dis               : 1;  /**< [  2:  2](R/W) Disable 4-byte SOP align (effectively force 8-byte SOP align) for all 10G variants
+                                                                 (XAUI, RXAUI, 10G). */
+        uint64_t uni_en                : 1;  /**< [  1:  1](R/W) Enable unidirectional mode (IEEE Clause 66). */
+        uint64_t dic_en                : 1;  /**< [  0:  0](R/W) Enable the deficit idle counter for IFG averaging. */
+#else /* Word 0 - Little Endian */
+        uint64_t dic_en                : 1;  /**< [  0:  0](R/W) Enable the deficit idle counter for IFG averaging. */
+        uint64_t uni_en                : 1;  /**< [  1:  1](R/W) Enable unidirectional mode (IEEE Clause 66). */
+        uint64_t x4a_dis               : 1;  /**< [  2:  2](R/W) Disable 4-byte SOP align (effectively force 8-byte SOP align) for all 10G variants
+                                                                 (XAUI, RXAUI, 10G). */
+        uint64_t reserved_3            : 1;
+        uint64_t ls                    : 2;  /**< [  5:  4](R/W) Link status.
+                                                                 0 = Link OK; link runs normally. RS passes MAC data to PCS.
+                                                                 1 = Local fault. RS layer sends continuous remote fault sequences.
+                                                                 2 = Remote fault. RS layer sends continuous idle sequences.
+                                                                 3 = Link drain. RS layer drops full packets to allow BGX and PKO to drain their FIFOs. */
+        uint64_t ls_byp                : 1;  /**< [  6:  6](R/W) Bypass the link status, as determined by the XGMII receiver, and set the link status of
+                                                                 the transmitter to LS. */
+        uint64_t l2p_bp_conv           : 1;  /**< [  7:  7](R/W) If set, causes TX to generate 802.3 pause packets when CMR applies logical backpressure
+                                                                 (XOFF), if and only if BGX()_SMU()_CBFC_CTL[TX_EN] is clear and
+                                                                 BGX()_SMU()_HG2_CONTROL[HG2TX_EN] is clear. */
+        uint64_t hg_en                 : 1;  /**< [  8:  8](R/W) Enable HiGig mode.
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 12, the interface is in
+                                                                 HiGig/HiGig+ mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 12.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 16, the interface is in
+                                                                 HiGig2 mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 16.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[RX_EN] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[TX_EN] = 0. */
+        uint64_t hg_pause_hgi          : 2;  /**< [ 10:  9](R/W) HGI field for hardware-generated HiGig PAUSE packets. */
+        uint64_t spu_mrk_cnt           : 20; /**< [ 30: 11](R/W) 40GBASE-R transmit marker interval count. Specifies the interval (number of 66-bit BASE-R
+                                                                 blocks) at which the LMAC transmit logic inserts 40GBASE-R alignment markers. An internal
+                                                                 counter in SMU is initialized to this value, counts down for each BASE-R block transmitted
+                                                                 by the LMAC, and wraps back to the initial value from 0. The LMAC transmit logic inserts
+                                                                 alignment markers for lanes 0, 1, 2 and 3, respectively, in the last four BASE-R blocks
+                                                                 before the counter wraps (3, 2, 1, 0). The default value corresponds to an alignment
+                                                                 marker period of 16363 blocks (exclusive) per lane, as specified in 802.3ba-2010. The
+                                                                 default value should always be used for normal operation. */
+        uint64_t reserved_31_63        : 33;
+#endif /* Word 0 - End */
+    } s;
+    /* CN88XX pass 1: [X4A_DIS] is reserved (RAZ) rather than R/W, and the
+       [LS]=3 drain description refers to TNS/NIC instead of PKO. */
+    struct bdk_bgxx_smux_tx_ctl_cn88xxp1
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_31_63        : 33;
+        uint64_t spu_mrk_cnt           : 20; /**< [ 30: 11](R/W) 40GBASE-R transmit marker interval count. Specifies the interval (number of 66-bit BASE-R
+                                                                 blocks) at which the LMAC transmit logic inserts 40GBASE-R alignment markers. An internal
+                                                                 counter in SMU is initialized to this value, counts down for each BASE-R block transmitted
+                                                                 by the LMAC, and wraps back to the initial value from 0. The LMAC transmit logic inserts
+                                                                 alignment markers for lanes 0, 1, 2 and 3, respectively, in the last four BASE-R blocks
+                                                                 before the counter wraps (3, 2, 1, 0). The default value corresponds to an alignment
+                                                                 marker period of 16363 blocks (exclusive) per lane, as specified in 802.3ba-2010. The
+                                                                 default value should always be used for normal operation. */
+        uint64_t hg_pause_hgi          : 2;  /**< [ 10:  9](R/W) HGI field for hardware-generated HiGig PAUSE packets. */
+        uint64_t hg_en                 : 1;  /**< [  8:  8](R/W) Enable HiGig mode.
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 12, the interface is in
+                                                                 HiGig/HiGig+ mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 12.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 16, the interface is in
+                                                                 HiGig2 mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 16.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[RX_EN] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[TX_EN] = 0. */
+        uint64_t l2p_bp_conv           : 1;  /**< [  7:  7](R/W) If set, causes TX to generate 802.3 pause packets when CMR applies logical backpressure
+                                                                 (XOFF), if and only if BGX()_SMU()_CBFC_CTL[TX_EN] is clear and
+                                                                 BGX()_SMU()_HG2_CONTROL[HG2TX_EN] is clear. */
+        uint64_t ls_byp                : 1;  /**< [  6:  6](R/W) Bypass the link status, as determined by the XGMII receiver, and set the link status of
+                                                                 the transmitter to LS. */
+        uint64_t ls                    : 2;  /**< [  5:  4](R/W) Link status.
+                                                                 0 = Link OK; link runs normally. RS passes MAC data to PCS.
+                                                                 1 = Local fault. RS layer sends continuous remote fault sequences.
+                                                                 2 = Remote fault. RS layer sends continuous idle sequences.
+                                                                 3 = Link drain. RS layer drops full packets to allow BGX and TNS/NIC to drain their FIFOs. */
+        uint64_t reserved_3            : 1;
+        uint64_t x4a_dis               : 1;  /**< [  2:  2](RAZ) Reserved. */
+        uint64_t uni_en                : 1;  /**< [  1:  1](R/W) Enable unidirectional mode (IEEE Clause 66). */
+        uint64_t dic_en                : 1;  /**< [  0:  0](R/W) Enable the deficit idle counter for IFG averaging. */
+#else /* Word 0 - Little Endian */
+        uint64_t dic_en                : 1;  /**< [  0:  0](R/W) Enable the deficit idle counter for IFG averaging. */
+        uint64_t uni_en                : 1;  /**< [  1:  1](R/W) Enable unidirectional mode (IEEE Clause 66). */
+        uint64_t x4a_dis               : 1;  /**< [  2:  2](RAZ) Reserved. */
+        uint64_t reserved_3            : 1;
+        uint64_t ls                    : 2;  /**< [  5:  4](R/W) Link status.
+                                                                 0 = Link OK; link runs normally. RS passes MAC data to PCS.
+                                                                 1 = Local fault. RS layer sends continuous remote fault sequences.
+                                                                 2 = Remote fault. RS layer sends continuous idle sequences.
+                                                                 3 = Link drain. RS layer drops full packets to allow BGX and TNS/NIC to drain their FIFOs. */
+        uint64_t ls_byp                : 1;  /**< [  6:  6](R/W) Bypass the link status, as determined by the XGMII receiver, and set the link status of
+                                                                 the transmitter to LS. */
+        uint64_t l2p_bp_conv           : 1;  /**< [  7:  7](R/W) If set, causes TX to generate 802.3 pause packets when CMR applies logical backpressure
+                                                                 (XOFF), if and only if BGX()_SMU()_CBFC_CTL[TX_EN] is clear and
+                                                                 BGX()_SMU()_HG2_CONTROL[HG2TX_EN] is clear. */
+        uint64_t hg_en                 : 1;  /**< [  8:  8](R/W) Enable HiGig mode.
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 12, the interface is in
+                                                                 HiGig/HiGig+ mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 12.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 16, the interface is in
+                                                                 HiGig2 mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 16.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[RX_EN] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[TX_EN] = 0. */
+        uint64_t hg_pause_hgi          : 2;  /**< [ 10:  9](R/W) HGI field for hardware-generated HiGig PAUSE packets. */
+        uint64_t spu_mrk_cnt           : 20; /**< [ 30: 11](R/W) 40GBASE-R transmit marker interval count. Specifies the interval (number of 66-bit BASE-R
+                                                                 blocks) at which the LMAC transmit logic inserts 40GBASE-R alignment markers. An internal
+                                                                 counter in SMU is initialized to this value, counts down for each BASE-R block transmitted
+                                                                 by the LMAC, and wraps back to the initial value from 0. The LMAC transmit logic inserts
+                                                                 alignment markers for lanes 0, 1, 2 and 3, respectively, in the last four BASE-R blocks
+                                                                 before the counter wraps (3, 2, 1, 0). The default value corresponds to an alignment
+                                                                 marker period of 16363 blocks (exclusive) per lane, as specified in 802.3ba-2010. The
+                                                                 default value should always be used for normal operation. */
+        uint64_t reserved_31_63        : 33;
+#endif /* Word 0 - End */
+    } cn88xxp1;
+    /* struct bdk_bgxx_smux_tx_ctl_s cn81xx; */
+    /* struct bdk_bgxx_smux_tx_ctl_s cn83xx; */
+    /* CN88XX pass 2+: [X4A_DIS] implemented as R/W (matches the common
+       layout except for the TNS/NIC wording on [LS]=3). */
+    struct bdk_bgxx_smux_tx_ctl_cn88xxp2
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_31_63        : 33;
+        uint64_t spu_mrk_cnt           : 20; /**< [ 30: 11](R/W) 40GBASE-R transmit marker interval count. Specifies the interval (number of 66-bit BASE-R
+                                                                 blocks) at which the LMAC transmit logic inserts 40GBASE-R alignment markers. An internal
+                                                                 counter in SMU is initialized to this value, counts down for each BASE-R block transmitted
+                                                                 by the LMAC, and wraps back to the initial value from 0. The LMAC transmit logic inserts
+                                                                 alignment markers for lanes 0, 1, 2 and 3, respectively, in the last four BASE-R blocks
+                                                                 before the counter wraps (3, 2, 1, 0). The default value corresponds to an alignment
+                                                                 marker period of 16363 blocks (exclusive) per lane, as specified in 802.3ba-2010. The
+                                                                 default value should always be used for normal operation. */
+        uint64_t hg_pause_hgi          : 2;  /**< [ 10:  9](R/W) HGI field for hardware-generated HiGig PAUSE packets. */
+        uint64_t hg_en                 : 1;  /**< [  8:  8](R/W) Enable HiGig mode.
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 12, the interface is in
+                                                                 HiGig/HiGig+ mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 12.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 16, the interface is in
+                                                                 HiGig2 mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 16.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[RX_EN] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[TX_EN] = 0. */
+        uint64_t l2p_bp_conv           : 1;  /**< [  7:  7](R/W) If set, causes TX to generate 802.3 pause packets when CMR applies logical backpressure
+                                                                 (XOFF), if and only if BGX()_SMU()_CBFC_CTL[TX_EN] is clear and
+                                                                 BGX()_SMU()_HG2_CONTROL[HG2TX_EN] is clear. */
+        uint64_t ls_byp                : 1;  /**< [  6:  6](R/W) Bypass the link status, as determined by the XGMII receiver, and set the link status of
+                                                                 the transmitter to LS. */
+        uint64_t ls                    : 2;  /**< [  5:  4](R/W) Link status.
+                                                                 0 = Link OK; link runs normally. RS passes MAC data to PCS.
+                                                                 1 = Local fault. RS layer sends continuous remote fault sequences.
+                                                                 2 = Remote fault. RS layer sends continuous idle sequences.
+                                                                 3 = Link drain. RS layer drops full packets to allow BGX and TNS/NIC to drain their FIFOs. */
+        uint64_t reserved_3            : 1;
+        uint64_t x4a_dis               : 1;  /**< [  2:  2](R/W) Disable 4-byte SOP align (effectively force 8-byte SOP align) for all 10G variants
+                                                                 (XAUI, RXAUI, 10G). */
+        uint64_t uni_en                : 1;  /**< [  1:  1](R/W) Enable unidirectional mode (IEEE Clause 66). */
+        uint64_t dic_en                : 1;  /**< [  0:  0](R/W) Enable the deficit idle counter for IFG averaging. */
+#else /* Word 0 - Little Endian */
+        uint64_t dic_en                : 1;  /**< [  0:  0](R/W) Enable the deficit idle counter for IFG averaging. */
+        uint64_t uni_en                : 1;  /**< [  1:  1](R/W) Enable unidirectional mode (IEEE Clause 66). */
+        uint64_t x4a_dis               : 1;  /**< [  2:  2](R/W) Disable 4-byte SOP align (effectively force 8-byte SOP align) for all 10G variants
+                                                                 (XAUI, RXAUI, 10G). */
+        uint64_t reserved_3            : 1;
+        uint64_t ls                    : 2;  /**< [  5:  4](R/W) Link status.
+                                                                 0 = Link OK; link runs normally. RS passes MAC data to PCS.
+                                                                 1 = Local fault. RS layer sends continuous remote fault sequences.
+                                                                 2 = Remote fault. RS layer sends continuous idle sequences.
+                                                                 3 = Link drain. RS layer drops full packets to allow BGX and TNS/NIC to drain their FIFOs. */
+        uint64_t ls_byp                : 1;  /**< [  6:  6](R/W) Bypass the link status, as determined by the XGMII receiver, and set the link status of
+                                                                 the transmitter to LS. */
+        uint64_t l2p_bp_conv           : 1;  /**< [  7:  7](R/W) If set, causes TX to generate 802.3 pause packets when CMR applies logical backpressure
+                                                                 (XOFF), if and only if BGX()_SMU()_CBFC_CTL[TX_EN] is clear and
+                                                                 BGX()_SMU()_HG2_CONTROL[HG2TX_EN] is clear. */
+        uint64_t hg_en                 : 1;  /**< [  8:  8](R/W) Enable HiGig mode.
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 12, the interface is in
+                                                                 HiGig/HiGig+ mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 12.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+
+                                                                 When this field is set and BGX()_SMU()_RX_UDD_SKP[LEN] = 16, the interface is in
+                                                                 HiGig2 mode and the following must be set:
+                                                                 * BGX()_SMU()_RX_FRM_CTL[PRE_CHK] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[FCSSEL] = 0.
+                                                                 * BGX()_SMU()_RX_UDD_SKP[LEN] = 16.
+                                                                 * BGX()_SMU()_TX_APPEND[PREAMBLE] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[RX_EN] = 0.
+                                                                 * BGX()_SMU()_CBFC_CTL[TX_EN] = 0. */
+        uint64_t hg_pause_hgi          : 2;  /**< [ 10:  9](R/W) HGI field for hardware-generated HiGig PAUSE packets. */
+        uint64_t spu_mrk_cnt           : 20; /**< [ 30: 11](R/W) 40GBASE-R transmit marker interval count. Specifies the interval (number of 66-bit BASE-R
+                                                                 blocks) at which the LMAC transmit logic inserts 40GBASE-R alignment markers. An internal
+                                                                 counter in SMU is initialized to this value, counts down for each BASE-R block transmitted
+                                                                 by the LMAC, and wraps back to the initial value from 0. The LMAC transmit logic inserts
+                                                                 alignment markers for lanes 0, 1, 2 and 3, respectively, in the last four BASE-R blocks
+                                                                 before the counter wraps (3, 2, 1, 0). The default value corresponds to an alignment
+                                                                 marker period of 16363 blocks (exclusive) per lane, as specified in 802.3ba-2010. The
+                                                                 default value should always be used for normal operation. */
+        uint64_t reserved_31_63        : 33;
+#endif /* Word 0 - End */
+    } cn88xxp2;
+};
+typedef union bdk_bgxx_smux_tx_ctl bdk_bgxx_smux_tx_ctl_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_CTL(unsigned long a, unsigned long b)
+{
+    /* CSR address = base + 0x1000000 per BGX instance (a) + 0x100000 per LMAC (b).
+       CN81XX/CN88XX expose BGX(0..1); CN83XX exposes BGX(0..3); all have LMAC(0..3). */
+    const uint64_t base = 0x87e0e0020178ll;
+    const uint64_t bgx_step = 0x1000000ll;
+    const uint64_t lmac_step = 0x100000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3 && b <= 3)
+        return base + bgx_step * (a & 0x3) + lmac_step * (b & 0x3);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && a <= 1 && b <= 3)
+        return base + bgx_step * (a & 0x1) + lmac_step * (b & 0x3);
+    __bdk_csr_fatal("BGXX_SMUX_TX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_CTL(a,b) bdk_bgxx_smux_tx_ctl_t
+#define bustype_BDK_BGXX_SMUX_TX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_CTL(a,b) "BGXX_SMUX_TX_CTL"
+#define device_bar_BDK_BGXX_SMUX_TX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_CTL(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_ifg
+ *
+ * BGX SMU TX Interframe-Gap Cycles Registers
+ * Programming IFG1 and IFG2:
+ * * For XAUI/RXAUI/10Gbs/40Gbs systems that require IEEE 802.3 compatibility, the IFG1+IFG2 sum
+ * must be 12.
+ * * In loopback mode, the IFG1+IFG2 of local and remote parties must match exactly; otherwise
+ * one of the two sides' loopback FIFO will overrun: BGX()_SMU()_TX_INT[LB_OVRFLW].
+ */
+union bdk_bgxx_smux_tx_ifg
+{
+    uint64_t u;
+    /* [IFG1] and [IFG2] each hold half of the interframe gap, expressed in
+       units of 8 bits (see programming note above). */
+    struct bdk_bgxx_smux_tx_ifg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t ifg2                  : 4;  /**< [  7:  4](R/W) 1/2 of the interframe gap timing (in IFG2*8 bits). */
+        uint64_t ifg1                  : 4;  /**< [  3:  0](R/W) 1/2 of the interframe gap timing (in IFG1*8 bits). */
+#else /* Word 0 - Little Endian */
+        uint64_t ifg1                  : 4;  /**< [  3:  0](R/W) 1/2 of the interframe gap timing (in IFG1*8 bits). */
+        uint64_t ifg2                  : 4;  /**< [  7:  4](R/W) 1/2 of the interframe gap timing (in IFG2*8 bits). */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_smux_tx_ifg_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_ifg bdk_bgxx_smux_tx_ifg_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_IFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_IFG(unsigned long a, unsigned long b)
+{
+    /* CSR address = base + 0x1000000 per BGX instance (a) + 0x100000 per LMAC (b).
+       CN81XX/CN88XX expose BGX(0..1); CN83XX exposes BGX(0..3); all have LMAC(0..3). */
+    const uint64_t base = 0x87e0e0020160ll;
+    const uint64_t bgx_step = 0x1000000ll;
+    const uint64_t lmac_step = 0x100000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3 && b <= 3)
+        return base + bgx_step * (a & 0x3) + lmac_step * (b & 0x3);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && a <= 1 && b <= 3)
+        return base + bgx_step * (a & 0x1) + lmac_step * (b & 0x3);
+    __bdk_csr_fatal("BGXX_SMUX_TX_IFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_IFG(a,b) bdk_bgxx_smux_tx_ifg_t
+#define bustype_BDK_BGXX_SMUX_TX_IFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_IFG(a,b) "BGXX_SMUX_TX_IFG"
+#define device_bar_BDK_BGXX_SMUX_TX_IFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_IFG(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_IFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_int
+ *
+ * BGX SMU TX Interrupt Registers
+ */
+union bdk_bgxx_smux_tx_int
+{
+    uint64_t u;
+    /* Latched TX event bits; all fields are R/W1C/H (write 1 to clear). */
+    struct bdk_bgxx_smux_tx_int_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t lb_ovrflw             : 1;  /**< [  4:  4](R/W1C/H) TX loopback overflow. */
+        uint64_t lb_undflw             : 1;  /**< [  3:  3](R/W1C/H) TX loopback underflow. */
+        uint64_t fake_commit           : 1;  /**< [  2:  2](R/W1C/H) TX SMU started a packet with PTP on SOP and has not seen a commit for it from TX SPU after
+                                                                 256 cycles so it faked a commit to CMR. */
+        uint64_t xchange               : 1;  /**< [  1:  1](R/W1C/H) Link status changed. This denotes a change to BGX()_SMU()_RX_CTL[STATUS]. */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1C/H) TX underflow. */
+#else /* Word 0 - Little Endian */
+        uint64_t undflw                : 1;  /**< [  0:  0](R/W1C/H) TX underflow. */
+        uint64_t xchange               : 1;  /**< [  1:  1](R/W1C/H) Link status changed. This denotes a change to BGX()_SMU()_RX_CTL[STATUS]. */
+        uint64_t fake_commit           : 1;  /**< [  2:  2](R/W1C/H) TX SMU started a packet with PTP on SOP and has not seen a commit for it from TX SPU after
+                                                                 256 cycles so it faked a commit to CMR. */
+        uint64_t lb_undflw             : 1;  /**< [  3:  3](R/W1C/H) TX loopback underflow. */
+        uint64_t lb_ovrflw             : 1;  /**< [  4:  4](R/W1C/H) TX loopback overflow. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_smux_tx_int_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_int bdk_bgxx_smux_tx_int_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_INT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_INT(unsigned long a, unsigned long b)
+{
+    /* CSR address = base + 0x1000000 per BGX instance (a) + 0x100000 per LMAC (b).
+       CN81XX/CN88XX expose BGX(0..1); CN83XX exposes BGX(0..3); all have LMAC(0..3). */
+    const uint64_t base = 0x87e0e0020140ll;
+    const uint64_t bgx_step = 0x1000000ll;
+    const uint64_t lmac_step = 0x100000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3 && b <= 3)
+        return base + bgx_step * (a & 0x3) + lmac_step * (b & 0x3);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && a <= 1 && b <= 3)
+        return base + bgx_step * (a & 0x1) + lmac_step * (b & 0x3);
+    __bdk_csr_fatal("BGXX_SMUX_TX_INT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_INT(a,b) bdk_bgxx_smux_tx_int_t
+#define bustype_BDK_BGXX_SMUX_TX_INT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_INT(a,b) "BGXX_SMUX_TX_INT"
+#define device_bar_BDK_BGXX_SMUX_TX_INT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_INT(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_INT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_int_ena_w1c
+ *
+ * BGX SMU TX Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_bgxx_smux_tx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[UNDFLW]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_int_ena_w1c_s cn81xx; */
+ /* struct bdk_bgxx_smux_tx_int_ena_w1c_s cn88xx; */
+ struct bdk_bgxx_smux_tx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[UNDFLW]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_smux_tx_int_ena_w1c bdk_bgxx_smux_tx_int_ena_w1c_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_INT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_INT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020150ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020150ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020150ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_INT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_INT_ENA_W1C(a,b) bdk_bgxx_smux_tx_int_ena_w1c_t
+#define bustype_BDK_BGXX_SMUX_TX_INT_ENA_W1C(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_INT_ENA_W1C(a,b) "BGXX_SMUX_TX_INT_ENA_W1C"
+#define device_bar_BDK_BGXX_SMUX_TX_INT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_INT_ENA_W1C(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_INT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_int_ena_w1s
+ *
+ * BGX SMU TX Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_bgxx_smux_tx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[UNDFLW]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_int_ena_w1s_s cn81xx; */
+ /* struct bdk_bgxx_smux_tx_int_ena_w1s_s cn88xx; */
+ struct bdk_bgxx_smux_tx_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[UNDFLW]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_smux_tx_int_ena_w1s bdk_bgxx_smux_tx_int_ena_w1s_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_INT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_INT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020158ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020158ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020158ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_INT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_INT_ENA_W1S(a,b) bdk_bgxx_smux_tx_int_ena_w1s_t
+#define bustype_BDK_BGXX_SMUX_TX_INT_ENA_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_INT_ENA_W1S(a,b) "BGXX_SMUX_TX_INT_ENA_W1S"
+#define device_bar_BDK_BGXX_SMUX_TX_INT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_INT_ENA_W1S(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_INT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_int_w1s
+ *
+ * BGX SMU TX Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_bgxx_smux_tx_int_w1s
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[UNDFLW]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_int_w1s_s cn81xx; */
+ /* struct bdk_bgxx_smux_tx_int_w1s_s cn88xx; */
+ struct bdk_bgxx_smux_tx_int_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[UNDFLW]. */
+#else /* Word 0 - Little Endian */
+ uint64_t undflw : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[UNDFLW]. */
+ uint64_t xchange : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[XCHANGE]. */
+ uint64_t fake_commit : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[FAKE_COMMIT]. */
+ uint64_t lb_undflw : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[LB_UNDFLW]. */
+ uint64_t lb_ovrflw : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_SMU(0..3)_TX_INT[LB_OVRFLW]. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_bgxx_smux_tx_int_w1s bdk_bgxx_smux_tx_int_w1s_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_INT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_INT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020148ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020148ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020148ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_INT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_INT_W1S(a,b) bdk_bgxx_smux_tx_int_w1s_t
+#define bustype_BDK_BGXX_SMUX_TX_INT_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_INT_W1S(a,b) "BGXX_SMUX_TX_INT_W1S"
+#define device_bar_BDK_BGXX_SMUX_TX_INT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_INT_W1S(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_INT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_min_pkt
+ *
+ * BGX SMU TX Minimum-Size-Packet Registers
+ */
+union bdk_bgxx_smux_tx_min_pkt
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_min_pkt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t min_size : 8; /**< [ 7: 0](R/W) Min frame in bytes inclusive of FCS, if applied. Padding is only appended when
+ BGX()_SMU()_TX_APPEND[PAD] for the corresponding port is set. When FCS is added to
+ a packet which was padded, the FCS always appears in the four octets preceding /T/ or /E/. */
+#else /* Word 0 - Little Endian */
+ uint64_t min_size : 8; /**< [ 7: 0](R/W) Min frame in bytes inclusive of FCS, if applied. Padding is only appended when
+ BGX()_SMU()_TX_APPEND[PAD] for the corresponding port is set. When FCS is added to
+ a packet which was padded, the FCS always appears in the four octets preceding /T/ or /E/. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_min_pkt_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_min_pkt bdk_bgxx_smux_tx_min_pkt_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_MIN_PKT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_MIN_PKT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020118ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020118ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020118ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_MIN_PKT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_MIN_PKT(a,b) bdk_bgxx_smux_tx_min_pkt_t
+#define bustype_BDK_BGXX_SMUX_TX_MIN_PKT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_MIN_PKT(a,b) "BGXX_SMUX_TX_MIN_PKT"
+#define device_bar_BDK_BGXX_SMUX_TX_MIN_PKT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_MIN_PKT(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_MIN_PKT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_pause_pkt_dmac
+ *
+ * BGX SMU TX PAUSE-Packet DMAC-Field Registers
+ * This register provides the DMAC value that is placed in outbound PAUSE packets.
+ */
+union bdk_bgxx_smux_tx_pause_pkt_dmac
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_pause_pkt_dmac_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) The DMAC field that is placed in outbound PAUSE packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmac : 48; /**< [ 47: 0](R/W) The DMAC field that is placed in outbound PAUSE packets. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_pause_pkt_dmac_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_pause_pkt_dmac bdk_bgxx_smux_tx_pause_pkt_dmac_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_PKT_DMAC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_PKT_DMAC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020168ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020168ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020168ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_PAUSE_PKT_DMAC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_PAUSE_PKT_DMAC(a,b) bdk_bgxx_smux_tx_pause_pkt_dmac_t
+#define bustype_BDK_BGXX_SMUX_TX_PAUSE_PKT_DMAC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_PAUSE_PKT_DMAC(a,b) "BGXX_SMUX_TX_PAUSE_PKT_DMAC"
+#define device_bar_BDK_BGXX_SMUX_TX_PAUSE_PKT_DMAC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_PAUSE_PKT_DMAC(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_PAUSE_PKT_DMAC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_pause_pkt_interval
+ *
+ * BGX SMU TX PAUSE-Packet Transmission-Interval Registers
+ * This register specifies how often PAUSE packets are sent.
+ */
+union bdk_bgxx_smux_tx_pause_pkt_interval
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_pause_pkt_interval_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t hg2_intra_en : 1; /**< [ 32: 32](R/W) Allow intrapacket HiGig2 message generation. Relevant only if HiGig2 message generation is enabled. */
+ uint64_t hg2_intra_interval : 16; /**< [ 31: 16](R/W) Arbitrate for a HiGig2 message, every (INTERVAL*512) bit-times whilst sending regular
+ packet data. Relevant only if HiGig2 message generation and [HG2_INTRA_EN] are both set.
+ Normally, 0 \< INTERVAL \< BGX()_SMU()_TX_PAUSE_PKT_TIME.
+
+ INTERVAL = 0 only sends a single PAUSE packet for each backpressure event. */
+ uint64_t interval : 16; /**< [ 15: 0](R/W) Arbitrate for a 802.3 PAUSE packet, HiGig2 message, or PFC packet every
+ (INTERVAL * 512) bit-times.
+ Normally, 0 \< INTERVAL \< BGX()_SMU()_TX_PAUSE_PKT_TIME[P_TIME].
+
+ INTERVAL = 0 only sends a single PAUSE packet for each backpressure event. */
+#else /* Word 0 - Little Endian */
+ uint64_t interval : 16; /**< [ 15: 0](R/W) Arbitrate for a 802.3 PAUSE packet, HiGig2 message, or PFC packet every
+ (INTERVAL * 512) bit-times.
+ Normally, 0 \< INTERVAL \< BGX()_SMU()_TX_PAUSE_PKT_TIME[P_TIME].
+
+ INTERVAL = 0 only sends a single PAUSE packet for each backpressure event. */
+ uint64_t hg2_intra_interval : 16; /**< [ 31: 16](R/W) Arbitrate for a HiGig2 message, every (INTERVAL*512) bit-times whilst sending regular
+ packet data. Relevant only if HiGig2 message generation and [HG2_INTRA_EN] are both set.
+ Normally, 0 \< INTERVAL \< BGX()_SMU()_TX_PAUSE_PKT_TIME.
+
+ INTERVAL = 0 only sends a single PAUSE packet for each backpressure event. */
+ uint64_t hg2_intra_en : 1; /**< [ 32: 32](R/W) Allow intrapacket HiGig2 message generation. Relevant only if HiGig2 message generation is enabled. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_pause_pkt_interval_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_pause_pkt_interval bdk_bgxx_smux_tx_pause_pkt_interval_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020120ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020120ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020120ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_PAUSE_PKT_INTERVAL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(a,b) bdk_bgxx_smux_tx_pause_pkt_interval_t
+#define bustype_BDK_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(a,b) "BGXX_SMUX_TX_PAUSE_PKT_INTERVAL"
+#define device_bar_BDK_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_PAUSE_PKT_INTERVAL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_pause_pkt_time
+ *
+ * BGX SMU TX PAUSE Packet Time Registers
+ */
+union bdk_bgxx_smux_tx_pause_pkt_time
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_pause_pkt_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t p_time : 16; /**< [ 15: 0](R/W) Provides the pause_time field placed in outbound 802.3 PAUSE packets, HiGig2 messages, or
+ PFC packets in 512 bit-times. Normally, [P_TIME] \>
+ BGX()_SMU()_TX_PAUSE_PKT_INTERVAL[INTERVAL]. See programming notes in
+ BGX()_SMU()_TX_PAUSE_PKT_INTERVAL. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_time : 16; /**< [ 15: 0](R/W) Provides the pause_time field placed in outbound 802.3 PAUSE packets, HiGig2 messages, or
+ PFC packets in 512 bit-times. Normally, [P_TIME] \>
+ BGX()_SMU()_TX_PAUSE_PKT_INTERVAL[INTERVAL]. See programming notes in
+ BGX()_SMU()_TX_PAUSE_PKT_INTERVAL. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_pause_pkt_time_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_pause_pkt_time bdk_bgxx_smux_tx_pause_pkt_time_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_PKT_TIME(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_PKT_TIME(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020110ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020110ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020110ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_PAUSE_PKT_TIME", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_PAUSE_PKT_TIME(a,b) bdk_bgxx_smux_tx_pause_pkt_time_t
+#define bustype_BDK_BGXX_SMUX_TX_PAUSE_PKT_TIME(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_PAUSE_PKT_TIME(a,b) "BGXX_SMUX_TX_PAUSE_PKT_TIME"
+#define device_bar_BDK_BGXX_SMUX_TX_PAUSE_PKT_TIME(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_PAUSE_PKT_TIME(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_PAUSE_PKT_TIME(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_pause_pkt_type
+ *
+ * BGX SMU TX PAUSE-Packet P_TYPE-Field Registers
+ * This register provides the P_TYPE field that is placed in outbound PAUSE packets.
+ */
+union bdk_bgxx_smux_tx_pause_pkt_type
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_pause_pkt_type_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t p_type : 16; /**< [ 15: 0](R/W) The P_TYPE field that is placed in outbound PAUSE packets. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_type : 16; /**< [ 15: 0](R/W) The P_TYPE field that is placed in outbound PAUSE packets. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_pause_pkt_type_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_pause_pkt_type bdk_bgxx_smux_tx_pause_pkt_type_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_PKT_TYPE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_PKT_TYPE(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020170ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020170ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020170ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_PAUSE_PKT_TYPE", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_PAUSE_PKT_TYPE(a,b) bdk_bgxx_smux_tx_pause_pkt_type_t
+#define bustype_BDK_BGXX_SMUX_TX_PAUSE_PKT_TYPE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_PAUSE_PKT_TYPE(a,b) "BGXX_SMUX_TX_PAUSE_PKT_TYPE"
+#define device_bar_BDK_BGXX_SMUX_TX_PAUSE_PKT_TYPE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_PAUSE_PKT_TYPE(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_PAUSE_PKT_TYPE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_pause_togo
+ *
+ * BGX SMU TX Time-to-Backpressure Registers
+ */
+union bdk_bgxx_smux_tx_pause_togo
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_pause_togo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t msg_time : 16; /**< [ 31: 16](RO/H) Amount of time remaining to backpressure, from the HiGig2 physical message PAUSE timer
+ (only valid on port0). */
+ uint64_t p_time : 16; /**< [ 15: 0](RO/H) Amount of time remaining to backpressure, from the standard 802.3 PAUSE timer. */
+#else /* Word 0 - Little Endian */
+ uint64_t p_time : 16; /**< [ 15: 0](RO/H) Amount of time remaining to backpressure, from the standard 802.3 PAUSE timer. */
+ uint64_t msg_time : 16; /**< [ 31: 16](RO/H) Amount of time remaining to backpressure, from the HiGig2 physical message PAUSE timer
+ (only valid on port0). */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_pause_togo_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_pause_togo bdk_bgxx_smux_tx_pause_togo_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_TOGO(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_TOGO(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020130ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020130ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020130ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_PAUSE_TOGO", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_PAUSE_TOGO(a,b) bdk_bgxx_smux_tx_pause_togo_t
+#define bustype_BDK_BGXX_SMUX_TX_PAUSE_TOGO(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_PAUSE_TOGO(a,b) "BGXX_SMUX_TX_PAUSE_TOGO"
+#define device_bar_BDK_BGXX_SMUX_TX_PAUSE_TOGO(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_PAUSE_TOGO(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_PAUSE_TOGO(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_pause_zero
+ *
+ * BGX SMU TX PAUSE Zero Registers
+ */
+union bdk_bgxx_smux_tx_pause_zero
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_pause_zero_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t send : 1; /**< [ 0: 0](R/W) Send PAUSE-zero enable. When this bit is set, and the backpressure condition is clear, it
+ allows sending a PAUSE packet with pause_time of 0 to enable the channel. */
+#else /* Word 0 - Little Endian */
+ uint64_t send : 1; /**< [ 0: 0](R/W) Send PAUSE-zero enable. When this bit is set, and the backpressure condition is clear, it
+ allows sending a PAUSE packet with pause_time of 0 to enable the channel. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_pause_zero_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_pause_zero bdk_bgxx_smux_tx_pause_zero_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_ZERO(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_PAUSE_ZERO(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020138ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020138ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020138ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_PAUSE_ZERO", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_PAUSE_ZERO(a,b) bdk_bgxx_smux_tx_pause_zero_t
+#define bustype_BDK_BGXX_SMUX_TX_PAUSE_ZERO(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_PAUSE_ZERO(a,b) "BGXX_SMUX_TX_PAUSE_ZERO"
+#define device_bar_BDK_BGXX_SMUX_TX_PAUSE_ZERO(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_PAUSE_ZERO(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_PAUSE_ZERO(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_soft_pause
+ *
+ * BGX SMU TX Soft PAUSE Registers
+ */
+union bdk_bgxx_smux_tx_soft_pause
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_soft_pause_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t p_time : 16; /**< [ 15: 0](R/W) Back off the TX bus for ([P_TIME] * 512) bit-times */
+#else /* Word 0 - Little Endian */
+ uint64_t p_time : 16; /**< [ 15: 0](R/W) Back off the TX bus for ([P_TIME] * 512) bit-times */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_soft_pause_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_soft_pause bdk_bgxx_smux_tx_soft_pause_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_SOFT_PAUSE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_SOFT_PAUSE(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020128ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020128ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020128ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_SOFT_PAUSE", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_SOFT_PAUSE(a,b) bdk_bgxx_smux_tx_soft_pause_t
+#define bustype_BDK_BGXX_SMUX_TX_SOFT_PAUSE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_SOFT_PAUSE(a,b) "BGXX_SMUX_TX_SOFT_PAUSE"
+#define device_bar_BDK_BGXX_SMUX_TX_SOFT_PAUSE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_SOFT_PAUSE(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_SOFT_PAUSE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_smu#_tx_thresh
+ *
+ * BGX SMU TX Threshold Registers
+ */
+union bdk_bgxx_smux_tx_thresh
+{
+ uint64_t u;
+ struct bdk_bgxx_smux_tx_thresh_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t cnt : 11; /**< [ 10: 0](R/W) Number of 128-bit words to accumulate in the TX FIFO before sending on the packet
+ interface. This field should be large enough to prevent underflow on the packet interface
+ and must never be set to 0x0.
+
+ In all modes, this register cannot exceed the TX FIFO depth configured by
+ BGX()_CMR_TX_LMACS[LMACS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 11; /**< [ 10: 0](R/W) Number of 128-bit words to accumulate in the TX FIFO before sending on the packet
+ interface. This field should be large enough to prevent underflow on the packet interface
+ and must never be set to 0x0.
+
+ In all modes, this register cannot exceed the TX FIFO depth configured by
+ BGX()_CMR_TX_LMACS[LMACS]. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_smux_tx_thresh_s cn; */
+};
+typedef union bdk_bgxx_smux_tx_thresh bdk_bgxx_smux_tx_thresh_t;
+
+static inline uint64_t BDK_BGXX_SMUX_TX_THRESH(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SMUX_TX_THRESH(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020180ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0020180ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0020180ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SMUX_TX_THRESH", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SMUX_TX_THRESH(a,b) bdk_bgxx_smux_tx_thresh_t
+#define bustype_BDK_BGXX_SMUX_TX_THRESH(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SMUX_TX_THRESH(a,b) "BGXX_SMUX_TX_THRESH"
+#define device_bar_BDK_BGXX_SMUX_TX_THRESH(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SMUX_TX_THRESH(a,b) (a)
+#define arguments_BDK_BGXX_SMUX_TX_THRESH(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_an_adv
+ *
+ * BGX SPU Autonegotiation Advertisement Registers
+ * Software programs this register with the contents of the AN-link code word base page to be
+ * transmitted during autonegotiation. (See IEEE 802.3 section 73.6 for details.) Any write
+ * operations to this register prior to completion of autonegotiation, as indicated by
+ * BGX()_SPU()_AN_STATUS[AN_COMPLETE], should be followed by a renegotiation in order for
+ * the new values to take effect. Renegotiation is initiated by setting
+ * BGX()_SPU()_AN_CONTROL[AN_RESTART]. Once autonegotiation has completed, software can
+ * examine this register along with BGX()_SPU()_AN_LP_BASE to determine the highest
+ * common denominator technology.
+ */
+union bdk_bgxx_spux_an_adv
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_an_adv_s
+    {
+        /* NOTE: the field order below encodes the hardware bit layout; the
+           big- and little-endian views must remain exact mirrors. Do not reorder. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t fec_req               : 1;  /**< [ 47: 47](R/W) FEC requested. */
+        uint64_t fec_able              : 1;  /**< [ 46: 46](R/W) FEC ability. */
+        uint64_t arsv                  : 19; /**< [ 45: 27](R/W) Technology ability. Reserved bits, should always be 0. */
+        uint64_t a100g_cr10            : 1;  /**< [ 26: 26](R/W) 100GBASE-CR10 ability. Should always be 0; 100GBASE-R is not supported. */
+        uint64_t a40g_cr4              : 1;  /**< [ 25: 25](R/W) 40GBASE-CR4 ability. */
+        uint64_t a40g_kr4              : 1;  /**< [ 24: 24](R/W) 40GBASE-KR4 ability. */
+        uint64_t a10g_kr               : 1;  /**< [ 23: 23](R/W) 10GBASE-KR ability. */
+        uint64_t a10g_kx4              : 1;  /**< [ 22: 22](R/W) 10GBASE-KX4 ability. */
+        uint64_t a1g_kx                : 1;  /**< [ 21: 21](R/W) 1000BASE-KX ability. Should always be 0; autonegotiation is not supported for 1000Base-KX. */
+        uint64_t t                     : 5;  /**< [ 20: 16](R/W/H) Transmitted nonce. This field is automatically updated with a pseudo-random value on entry
+                                                                 to the AN ability detect state. */
+        uint64_t np                    : 1;  /**< [ 15: 15](R/W) Next page. Always 0; extended next pages are not used for 10G+ autonegotiation. */
+        uint64_t ack                   : 1;  /**< [ 14: 14](RO/H) Acknowledge. Always 0 in this register. */
+        uint64_t rf                    : 1;  /**< [ 13: 13](R/W) Remote fault. */
+        uint64_t xnp_able              : 1;  /**< [ 12: 12](R/W) Extended next page ability. */
+        uint64_t asm_dir               : 1;  /**< [ 11: 11](R/W) Asymmetric PAUSE. */
+        uint64_t pause                 : 1;  /**< [ 10: 10](R/W) PAUSE ability. */
+        uint64_t e                     : 5;  /**< [  9:  5](R/W) Echoed nonce. Provides the echoed-nonce value to use when ACK = 0 in transmitted DME page.
+                                                                 Should always be 0x0. */
+        uint64_t s                     : 5;  /**< [  4:  0](R/W) Selector. Should be 0x1 (encoding for IEEE 802.3). */
+#else /* Word 0 - Little Endian */
+        uint64_t s                     : 5;  /**< [  4:  0](R/W) Selector. Should be 0x1 (encoding for IEEE 802.3). */
+        uint64_t e                     : 5;  /**< [  9:  5](R/W) Echoed nonce. Provides the echoed-nonce value to use when ACK = 0 in transmitted DME page.
+                                                                 Should always be 0x0. */
+        uint64_t pause                 : 1;  /**< [ 10: 10](R/W) PAUSE ability. */
+        uint64_t asm_dir               : 1;  /**< [ 11: 11](R/W) Asymmetric PAUSE. */
+        uint64_t xnp_able              : 1;  /**< [ 12: 12](R/W) Extended next page ability. */
+        uint64_t rf                    : 1;  /**< [ 13: 13](R/W) Remote fault. */
+        uint64_t ack                   : 1;  /**< [ 14: 14](RO/H) Acknowledge. Always 0 in this register. */
+        uint64_t np                    : 1;  /**< [ 15: 15](R/W) Next page. Always 0; extended next pages are not used for 10G+ autonegotiation. */
+        uint64_t t                     : 5;  /**< [ 20: 16](R/W/H) Transmitted nonce. This field is automatically updated with a pseudo-random value on entry
+                                                                 to the AN ability detect state. */
+        uint64_t a1g_kx                : 1;  /**< [ 21: 21](R/W) 1000BASE-KX ability. Should always be 0; autonegotiation is not supported for 1000Base-KX. */
+        uint64_t a10g_kx4              : 1;  /**< [ 22: 22](R/W) 10GBASE-KX4 ability. */
+        uint64_t a10g_kr               : 1;  /**< [ 23: 23](R/W) 10GBASE-KR ability. */
+        uint64_t a40g_kr4              : 1;  /**< [ 24: 24](R/W) 40GBASE-KR4 ability. */
+        uint64_t a40g_cr4              : 1;  /**< [ 25: 25](R/W) 40GBASE-CR4 ability. */
+        uint64_t a100g_cr10            : 1;  /**< [ 26: 26](R/W) 100GBASE-CR10 ability. Should always be 0; 100GBASE-R is not supported. */
+        uint64_t arsv                  : 19; /**< [ 45: 27](R/W) Technology ability. Reserved bits, should always be 0. */
+        uint64_t fec_able              : 1;  /**< [ 46: 46](R/W) FEC ability. */
+        uint64_t fec_req               : 1;  /**< [ 47: 47](R/W) FEC requested. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_an_adv_s cn; */
+};
+typedef union bdk_bgxx_spux_an_adv bdk_bgxx_spux_an_adv_t;
+
+static inline uint64_t BDK_BGXX_SPUX_AN_ADV(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_AN_ADV(unsigned long a, unsigned long b)
+{
+    /* CSR address for BGX(a)_SPU(b)_AN_ADV. Same base on every supported
+       part; only the valid BGX range differs per model. Bad indices fault. */
+    const uint64_t base = 0x87e0e00100d8ll;
+    const uint64_t lmac = 0x100000ll * (b & 0x3);
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    if ((a<=3) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return base + 0x1000000ll * (a & 0x3) + lmac;
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    __bdk_csr_fatal("BGXX_SPUX_AN_ADV", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_AN_ADV(a,b) bdk_bgxx_spux_an_adv_t
+#define bustype_BDK_BGXX_SPUX_AN_ADV(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_AN_ADV(a,b) "BGXX_SPUX_AN_ADV"
+#define device_bar_BDK_BGXX_SPUX_AN_ADV(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_AN_ADV(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_AN_ADV(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_an_bp_status
+ *
+ * BGX SPU Autonegotiation Backplane Ethernet & BASE-R Copper Status Registers
+ * The contents of this register are updated
+ * during autonegotiation and are valid when BGX()_SPU()_AN_STATUS[AN_COMPLETE] is set.
+ * At that time, one of the port type bits ([N100G_CR10], [N40G_CR4], [N40G_KR4], [N10G_KR],
+ * [N10G_KX4],
+ * [N1G_KX]) will be set depending on the AN priority resolution. If a BASE-R type is negotiated,
+ * then [FEC] will be set to indicate that FEC operation has been negotiated, and will be
+ * clear otherwise.
+ */
+union bdk_bgxx_spux_an_bp_status
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_an_bp_status_s
+    {
+        /* NOTE: the field order below encodes the hardware bit layout; the
+           big- and little-endian views must remain exact mirrors. Do not reorder. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_9_63         : 55;
+        uint64_t n100g_cr10            : 1;  /**< [  8:  8](RO/H) 100GBASE-CR10 negotiated; expected to always be 0; 100GBASE-R is not supported. */
+        uint64_t reserved_7            : 1;
+        uint64_t n40g_cr4              : 1;  /**< [  6:  6](RO/H) 40GBASE-CR4 negotiated. */
+        uint64_t n40g_kr4              : 1;  /**< [  5:  5](RO/H) 40GBASE-KR4 negotiated. */
+        uint64_t fec                   : 1;  /**< [  4:  4](RO/H) BASE-R FEC negotiated. */
+        uint64_t n10g_kr               : 1;  /**< [  3:  3](RO/H) 10GBASE-KR negotiated. */
+        uint64_t n10g_kx4              : 1;  /**< [  2:  2](RO/H) 10GBASE-KX4 or CX4 negotiated (XAUI). */
+        uint64_t n1g_kx                : 1;  /**< [  1:  1](RO/H) 1000BASE-KX negotiated. */
+        uint64_t bp_an_able            : 1;  /**< [  0:  0](RO) Backplane or BASE-R copper AN Ability; always 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t bp_an_able            : 1;  /**< [  0:  0](RO) Backplane or BASE-R copper AN Ability; always 1. */
+        uint64_t n1g_kx                : 1;  /**< [  1:  1](RO/H) 1000BASE-KX negotiated. */
+        uint64_t n10g_kx4              : 1;  /**< [  2:  2](RO/H) 10GBASE-KX4 or CX4 negotiated (XAUI). */
+        uint64_t n10g_kr               : 1;  /**< [  3:  3](RO/H) 10GBASE-KR negotiated. */
+        uint64_t fec                   : 1;  /**< [  4:  4](RO/H) BASE-R FEC negotiated. */
+        uint64_t n40g_kr4              : 1;  /**< [  5:  5](RO/H) 40GBASE-KR4 negotiated. */
+        uint64_t n40g_cr4              : 1;  /**< [  6:  6](RO/H) 40GBASE-CR4 negotiated. */
+        uint64_t reserved_7            : 1;
+        uint64_t n100g_cr10            : 1;  /**< [  8:  8](RO/H) 100GBASE-CR10 negotiated; expected to always be 0; 100GBASE-R is not supported. */
+        uint64_t reserved_9_63         : 55;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_an_bp_status_s cn; */
+};
+typedef union bdk_bgxx_spux_an_bp_status bdk_bgxx_spux_an_bp_status_t;
+
+static inline uint64_t BDK_BGXX_SPUX_AN_BP_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_AN_BP_STATUS(unsigned long a, unsigned long b)
+{
+    /* CSR address for BGX(a)_SPU(b)_AN_BP_STATUS. Same base on every
+       supported part; only the valid BGX range differs per model. */
+    const uint64_t base = 0x87e0e00100f8ll;
+    const uint64_t lmac = 0x100000ll * (b & 0x3);
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    if ((a<=3) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return base + 0x1000000ll * (a & 0x3) + lmac;
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    __bdk_csr_fatal("BGXX_SPUX_AN_BP_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_AN_BP_STATUS(a,b) bdk_bgxx_spux_an_bp_status_t
+#define bustype_BDK_BGXX_SPUX_AN_BP_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_AN_BP_STATUS(a,b) "BGXX_SPUX_AN_BP_STATUS"
+#define device_bar_BDK_BGXX_SPUX_AN_BP_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_AN_BP_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_AN_BP_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_an_control
+ *
+ * BGX SPU Autonegotiation Control Registers
+ */
+union bdk_bgxx_spux_an_control
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_an_control_s
+    {
+        /* NOTE: the field order below encodes the hardware bit layout; the
+           big- and little-endian views must remain exact mirrors. Do not reorder. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t an_reset              : 1;  /**< [ 15: 15](R/W1S/H) Autonegotiation reset. Setting this bit or BGX()_SPU()_CONTROL1[RESET] to 1
+                                                                 causes the following to happen:
+                                                                 * Resets the logical PCS (LPCS)
+                                                                 * Sets the IEEE 802.3 PCS, FEC and AN registers for the LPCS to their default states
+                                                                 * Resets the associated SerDes lanes.
+
+                                                                 It takes up to 32 coprocessor-clock cycles to reset the LPCS, after which RESET is
+                                                                 automatically cleared. */
+        uint64_t reserved_14           : 1;
+        uint64_t xnp_en                : 1;  /**< [ 13: 13](R/W) Extended next-page enable. */
+        uint64_t an_en                 : 1;  /**< [ 12: 12](R/W) Autonegotiation enable. This bit should not be set when
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is set to RXAUI; autonegotiation is not supported
+                                                                 in RXAUI mode. */
+        uint64_t reserved_10_11        : 2;
+        uint64_t an_restart            : 1;  /**< [  9:  9](R/W1S/H) Autonegotiation restart. Writing a 1 to this bit restarts the autonegotiation process if
+                                                                 [AN_EN] is also set. This is a self-clearing bit. */
+        uint64_t reserved_0_8          : 9;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_8          : 9;
+        uint64_t an_restart            : 1;  /**< [  9:  9](R/W1S/H) Autonegotiation restart. Writing a 1 to this bit restarts the autonegotiation process if
+                                                                 [AN_EN] is also set. This is a self-clearing bit. */
+        uint64_t reserved_10_11        : 2;
+        uint64_t an_en                 : 1;  /**< [ 12: 12](R/W) Autonegotiation enable. This bit should not be set when
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is set to RXAUI; autonegotiation is not supported
+                                                                 in RXAUI mode. */
+        uint64_t xnp_en                : 1;  /**< [ 13: 13](R/W) Extended next-page enable. */
+        uint64_t reserved_14           : 1;
+        uint64_t an_reset              : 1;  /**< [ 15: 15](R/W1S/H) Autonegotiation reset. Setting this bit or BGX()_SPU()_CONTROL1[RESET] to 1
+                                                                 causes the following to happen:
+                                                                 * Resets the logical PCS (LPCS)
+                                                                 * Sets the IEEE 802.3 PCS, FEC and AN registers for the LPCS to their default states
+                                                                 * Resets the associated SerDes lanes.
+
+                                                                 It takes up to 32 coprocessor-clock cycles to reset the LPCS, after which RESET is
+                                                                 automatically cleared. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_an_control_s cn; */
+};
+typedef union bdk_bgxx_spux_an_control bdk_bgxx_spux_an_control_t;
+
+static inline uint64_t BDK_BGXX_SPUX_AN_CONTROL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_AN_CONTROL(unsigned long a, unsigned long b)
+{
+    /* CSR address for BGX(a)_SPU(b)_AN_CONTROL. Same base on every
+       supported part; only the valid BGX range differs per model. */
+    const uint64_t base = 0x87e0e00100c8ll;
+    const uint64_t lmac = 0x100000ll * (b & 0x3);
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    if ((a<=3) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return base + 0x1000000ll * (a & 0x3) + lmac;
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    __bdk_csr_fatal("BGXX_SPUX_AN_CONTROL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_AN_CONTROL(a,b) bdk_bgxx_spux_an_control_t
+#define bustype_BDK_BGXX_SPUX_AN_CONTROL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_AN_CONTROL(a,b) "BGXX_SPUX_AN_CONTROL"
+#define device_bar_BDK_BGXX_SPUX_AN_CONTROL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_AN_CONTROL(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_AN_CONTROL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_an_lp_base
+ *
+ * BGX SPU Autonegotiation Link-Partner Base-Page Ability Registers
+ * This register captures the contents of the latest AN link code word base page received from
+ * the link partner during autonegotiation. (See IEEE 802.3 section 73.6 for details.)
+ * BGX()_SPU()_AN_STATUS[PAGE_RX] is set when this register is updated by hardware.
+ */
+union bdk_bgxx_spux_an_lp_base
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_an_lp_base_s
+    {
+        /* NOTE: the field order below encodes the hardware bit layout; the
+           big- and little-endian views must remain exact mirrors. Do not reorder. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t fec_req               : 1;  /**< [ 47: 47](RO/H) FEC requested. */
+        uint64_t fec_able              : 1;  /**< [ 46: 46](RO/H) FEC ability. */
+        uint64_t arsv                  : 19; /**< [ 45: 27](RO/H) Technology ability. Reserved bits, should always be 0. */
+        uint64_t a100g_cr10            : 1;  /**< [ 26: 26](RO/H) 100GBASE-CR10 ability. */
+        uint64_t a40g_cr4              : 1;  /**< [ 25: 25](RO/H) 40GBASE-CR4 ability. */
+        uint64_t a40g_kr4              : 1;  /**< [ 24: 24](RO/H) 40GBASE-KR4 ability. */
+        uint64_t a10g_kr               : 1;  /**< [ 23: 23](RO/H) 10GBASE-KR ability. */
+        uint64_t a10g_kx4              : 1;  /**< [ 22: 22](RO/H) 10GBASE-KX4 ability. */
+        uint64_t a1g_kx                : 1;  /**< [ 21: 21](RO/H) 1000BASE-KX ability. */
+        uint64_t t                     : 5;  /**< [ 20: 16](RO/H) Transmitted nonce. */
+        uint64_t np                    : 1;  /**< [ 15: 15](RO/H) Next page. */
+        uint64_t ack                   : 1;  /**< [ 14: 14](RO/H) Acknowledge. */
+        uint64_t rf                    : 1;  /**< [ 13: 13](RO/H) Remote fault. */
+        uint64_t xnp_able              : 1;  /**< [ 12: 12](RO/H) Extended next page ability. */
+        uint64_t asm_dir               : 1;  /**< [ 11: 11](RO/H) Asymmetric PAUSE. */
+        uint64_t pause                 : 1;  /**< [ 10: 10](RO/H) PAUSE ability. */
+        uint64_t e                     : 5;  /**< [  9:  5](RO/H) Echoed nonce. */
+        uint64_t s                     : 5;  /**< [  4:  0](RO/H) Selector. */
+#else /* Word 0 - Little Endian */
+        uint64_t s                     : 5;  /**< [  4:  0](RO/H) Selector. */
+        uint64_t e                     : 5;  /**< [  9:  5](RO/H) Echoed nonce. */
+        uint64_t pause                 : 1;  /**< [ 10: 10](RO/H) PAUSE ability. */
+        uint64_t asm_dir               : 1;  /**< [ 11: 11](RO/H) Asymmetric PAUSE. */
+        uint64_t xnp_able              : 1;  /**< [ 12: 12](RO/H) Extended next page ability. */
+        uint64_t rf                    : 1;  /**< [ 13: 13](RO/H) Remote fault. */
+        uint64_t ack                   : 1;  /**< [ 14: 14](RO/H) Acknowledge. */
+        uint64_t np                    : 1;  /**< [ 15: 15](RO/H) Next page. */
+        uint64_t t                     : 5;  /**< [ 20: 16](RO/H) Transmitted nonce. */
+        uint64_t a1g_kx                : 1;  /**< [ 21: 21](RO/H) 1000BASE-KX ability. */
+        uint64_t a10g_kx4              : 1;  /**< [ 22: 22](RO/H) 10GBASE-KX4 ability. */
+        uint64_t a10g_kr               : 1;  /**< [ 23: 23](RO/H) 10GBASE-KR ability. */
+        uint64_t a40g_kr4              : 1;  /**< [ 24: 24](RO/H) 40GBASE-KR4 ability. */
+        uint64_t a40g_cr4              : 1;  /**< [ 25: 25](RO/H) 40GBASE-CR4 ability. */
+        uint64_t a100g_cr10            : 1;  /**< [ 26: 26](RO/H) 100GBASE-CR10 ability. */
+        uint64_t arsv                  : 19; /**< [ 45: 27](RO/H) Technology ability. Reserved bits, should always be 0. */
+        uint64_t fec_able              : 1;  /**< [ 46: 46](RO/H) FEC ability. */
+        uint64_t fec_req               : 1;  /**< [ 47: 47](RO/H) FEC requested. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_an_lp_base_s cn; */
+};
+typedef union bdk_bgxx_spux_an_lp_base bdk_bgxx_spux_an_lp_base_t;
+
+static inline uint64_t BDK_BGXX_SPUX_AN_LP_BASE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_AN_LP_BASE(unsigned long a, unsigned long b)
+{
+    /* CSR address for BGX(a)_SPU(b)_AN_LP_BASE. Same base on every
+       supported part; only the valid BGX range differs per model. */
+    const uint64_t base = 0x87e0e00100e0ll;
+    const uint64_t lmac = 0x100000ll * (b & 0x3);
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    if ((a<=3) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return base + 0x1000000ll * (a & 0x3) + lmac;
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    __bdk_csr_fatal("BGXX_SPUX_AN_LP_BASE", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_AN_LP_BASE(a,b) bdk_bgxx_spux_an_lp_base_t
+#define bustype_BDK_BGXX_SPUX_AN_LP_BASE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_AN_LP_BASE(a,b) "BGXX_SPUX_AN_LP_BASE"
+#define device_bar_BDK_BGXX_SPUX_AN_LP_BASE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_AN_LP_BASE(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_AN_LP_BASE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_an_lp_xnp
+ *
+ * BGX SPU Autonegotiation Link Partner Extended Next Page Ability Registers
+ * This register captures the contents of the latest next page code word received from the link
+ * partner during autonegotiation, if any. See IEEE 802.3 section 73.7.7 for details.
+ */
+union bdk_bgxx_spux_an_lp_xnp
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_an_lp_xnp_s
+    {
+        /* NOTE: the field order below encodes the hardware bit layout; the
+           big- and little-endian views must remain exact mirrors. Do not reorder. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t u                     : 32; /**< [ 47: 16](RO/H) Unformatted code field. */
+        uint64_t np                    : 1;  /**< [ 15: 15](RO/H) Next page. */
+        uint64_t ack                   : 1;  /**< [ 14: 14](RO/H) Acknowledge. */
+        uint64_t mp                    : 1;  /**< [ 13: 13](RO/H) Message page. */
+        uint64_t ack2                  : 1;  /**< [ 12: 12](RO/H) Acknowledge 2. */
+        uint64_t toggle                : 1;  /**< [ 11: 11](RO/H) Toggle. */
+        uint64_t m_u                   : 11; /**< [ 10:  0](RO/H) Message/unformatted code field. */
+#else /* Word 0 - Little Endian */
+        uint64_t m_u                   : 11; /**< [ 10:  0](RO/H) Message/unformatted code field. */
+        uint64_t toggle                : 1;  /**< [ 11: 11](RO/H) Toggle. */
+        uint64_t ack2                  : 1;  /**< [ 12: 12](RO/H) Acknowledge 2. */
+        uint64_t mp                    : 1;  /**< [ 13: 13](RO/H) Message page. */
+        uint64_t ack                   : 1;  /**< [ 14: 14](RO/H) Acknowledge. */
+        uint64_t np                    : 1;  /**< [ 15: 15](RO/H) Next page. */
+        uint64_t u                     : 32; /**< [ 47: 16](RO/H) Unformatted code field. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_an_lp_xnp_s cn; */
+};
+typedef union bdk_bgxx_spux_an_lp_xnp bdk_bgxx_spux_an_lp_xnp_t;
+
+static inline uint64_t BDK_BGXX_SPUX_AN_LP_XNP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_AN_LP_XNP(unsigned long a, unsigned long b)
+{
+    /* CSR address for BGX(a)_SPU(b)_AN_LP_XNP. Same base on every
+       supported part; only the valid BGX range differs per model. */
+    const uint64_t base = 0x87e0e00100f0ll;
+    const uint64_t lmac = 0x100000ll * (b & 0x3);
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    if ((a<=3) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return base + 0x1000000ll * (a & 0x3) + lmac;
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    __bdk_csr_fatal("BGXX_SPUX_AN_LP_XNP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_AN_LP_XNP(a,b) bdk_bgxx_spux_an_lp_xnp_t
+#define bustype_BDK_BGXX_SPUX_AN_LP_XNP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_AN_LP_XNP(a,b) "BGXX_SPUX_AN_LP_XNP"
+#define device_bar_BDK_BGXX_SPUX_AN_LP_XNP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_AN_LP_XNP(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_AN_LP_XNP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_an_status
+ *
+ * BGX SPU Autonegotiation Status Registers
+ */
+union bdk_bgxx_spux_an_status
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_an_status_s
+    {
+        /* NOTE: the field order below encodes the hardware bit layout; the
+           big- and little-endian views must remain exact mirrors. Do not reorder. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_10_63        : 54;
+        uint64_t prl_flt               : 1;  /**< [  9:  9](RO) Parallel detection fault. Always 0; SPU does not support parallel detection as part of the
+                                                                 autonegotiation protocol. */
+        uint64_t reserved_8            : 1;
+        uint64_t xnp_stat              : 1;  /**< [  7:  7](RO/H) Extended next-page status. */
+        uint64_t page_rx               : 1;  /**< [  6:  6](R/W1C/H) Page received. This latching-high bit is set when a new page has been received and stored
+                                                                 in BGX()_SPU()_AN_LP_BASE or BGX()_SPU()_AN_LP_XNP; stays set until a 1 is
+                                                                 written by software, autonegotiation is disabled or restarted, or next page exchange is
+                                                                 initiated. Note that in order to avoid read side effects, this is implemented as a
+                                                                 write-1-to-clear bit, rather than latching high read-only as specified in 802.3. */
+        uint64_t an_complete           : 1;  /**< [  5:  5](RO/H) Autonegotiation complete. Set when the autonegotiation process has been completed and
+                                                                 the link is up and running using the negotiated highest common denominator (HCD)
+                                                                 technology. If AN is enabled (BGX()_SPU()_AN_CONTROL[AN_EN] = 1) and this bit is
+                                                                 read as a zero, it indicates that the AN process has not been completed, and the contents
+                                                                 of BGX()_SPU()_AN_LP_BASE, BGX()_SPU()_AN_XNP_TX, and
+                                                                 BGX()_SPU()_AN_LP_XNP are as defined by the current state of the autonegotiation
+                                                                 protocol, or as written for manual configuration. This bit is always zero when AN is
+                                                                 disabled (BGX()_SPU()_AN_CONTROL[AN_EN] = 0). */
+        uint64_t rmt_flt               : 1;  /**< [  4:  4](RO) Remote fault: Always 0. */
+        uint64_t an_able               : 1;  /**< [  3:  3](RO) Autonegotiation ability: Always 1. */
+        uint64_t link_status           : 1;  /**< [  2:  2](R/W1S/H) Link status. This bit captures the state of the link_status variable as defined in 802.3
+                                                                 section 73.9.1. When set, indicates that a valid link has been established. When clear,
+                                                                 indicates that the link has been invalid after this bit was last set by software. Latching
+                                                                 low bit; stays clear until a 1 is written by software. Note that in order to avoid read
+                                                                 side effects, this is implemented as a write-1-to-set bit, rather than latching low read-
+                                                                 only as specified in 802.3. */
+        uint64_t reserved_1            : 1;
+        uint64_t lp_an_able            : 1;  /**< [  0:  0](RO/H) Link partner autonegotiation ability. Set to indicate that the link partner is able to
+                                                                 participate in the autonegotiation function, and cleared otherwise. */
+#else /* Word 0 - Little Endian */
+        uint64_t lp_an_able            : 1;  /**< [  0:  0](RO/H) Link partner autonegotiation ability. Set to indicate that the link partner is able to
+                                                                 participate in the autonegotiation function, and cleared otherwise. */
+        uint64_t reserved_1            : 1;
+        uint64_t link_status           : 1;  /**< [  2:  2](R/W1S/H) Link status. This bit captures the state of the link_status variable as defined in 802.3
+                                                                 section 73.9.1. When set, indicates that a valid link has been established. When clear,
+                                                                 indicates that the link has been invalid after this bit was last set by software. Latching
+                                                                 low bit; stays clear until a 1 is written by software. Note that in order to avoid read
+                                                                 side effects, this is implemented as a write-1-to-set bit, rather than latching low read-
+                                                                 only as specified in 802.3. */
+        uint64_t an_able               : 1;  /**< [  3:  3](RO) Autonegotiation ability: Always 1. */
+        uint64_t rmt_flt               : 1;  /**< [  4:  4](RO) Remote fault: Always 0. */
+        uint64_t an_complete           : 1;  /**< [  5:  5](RO/H) Autonegotiation complete. Set when the autonegotiation process has been completed and
+                                                                 the link is up and running using the negotiated highest common denominator (HCD)
+                                                                 technology. If AN is enabled (BGX()_SPU()_AN_CONTROL[AN_EN] = 1) and this bit is
+                                                                 read as a zero, it indicates that the AN process has not been completed, and the contents
+                                                                 of BGX()_SPU()_AN_LP_BASE, BGX()_SPU()_AN_XNP_TX, and
+                                                                 BGX()_SPU()_AN_LP_XNP are as defined by the current state of the autonegotiation
+                                                                 protocol, or as written for manual configuration. This bit is always zero when AN is
+                                                                 disabled (BGX()_SPU()_AN_CONTROL[AN_EN] = 0). */
+        uint64_t page_rx               : 1;  /**< [  6:  6](R/W1C/H) Page received. This latching-high bit is set when a new page has been received and stored
+                                                                 in BGX()_SPU()_AN_LP_BASE or BGX()_SPU()_AN_LP_XNP; stays set until a 1 is
+                                                                 written by software, autonegotiation is disabled or restarted, or next page exchange is
+                                                                 initiated. Note that in order to avoid read side effects, this is implemented as a
+                                                                 write-1-to-clear bit, rather than latching high read-only as specified in 802.3. */
+        uint64_t xnp_stat              : 1;  /**< [  7:  7](RO/H) Extended next-page status. */
+        uint64_t reserved_8            : 1;
+        uint64_t prl_flt               : 1;  /**< [  9:  9](RO) Parallel detection fault. Always 0; SPU does not support parallel detection as part of the
+                                                                 autonegotiation protocol. */
+        uint64_t reserved_10_63        : 54;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_an_status_s cn; */
+};
+typedef union bdk_bgxx_spux_an_status bdk_bgxx_spux_an_status_t;
+
+static inline uint64_t BDK_BGXX_SPUX_AN_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_AN_STATUS(unsigned long a, unsigned long b)
+{
+    /* CSR address for BGX(a)_SPU(b)_AN_STATUS. Same base on every
+       supported part; only the valid BGX range differs per model. */
+    const uint64_t base = 0x87e0e00100d0ll;
+    const uint64_t lmac = 0x100000ll * (b & 0x3);
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    if ((a<=3) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return base + 0x1000000ll * (a & 0x3) + lmac;
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    __bdk_csr_fatal("BGXX_SPUX_AN_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_AN_STATUS(a,b) bdk_bgxx_spux_an_status_t
+#define bustype_BDK_BGXX_SPUX_AN_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_AN_STATUS(a,b) "BGXX_SPUX_AN_STATUS"
+#define device_bar_BDK_BGXX_SPUX_AN_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_AN_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_AN_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_an_xnp_tx
+ *
+ * BGX SPU Autonegotiation Extended Next Page Transmit Registers
+ * Software programs this register with the contents of the AN message next page or unformatted
+ * next page link code word to be transmitted during autonegotiation. Next page exchange occurs
+ * after the base link code words have been exchanged if either end of the link segment sets the
+ * NP bit to 1, indicating that it has at least one next page to send. Once initiated, next page
+ * exchange continues until both ends of the link segment set their NP bits to 0. See
+ * IEEE 802.3 section 73.7.7 for details.
+ */
+union bdk_bgxx_spux_an_xnp_tx
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_an_xnp_tx_s
+    {
+        /* NOTE: the field order below encodes the hardware bit layout; the
+           big- and little-endian views must remain exact mirrors. Do not reorder. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t u                     : 32; /**< [ 47: 16](R/W) Unformatted code field. When the MP bit is set, this field contains the 32-bit unformatted
+                                                                 code field of the message next page. When MP is clear, this field contains the upper 32
+                                                                 bits of the 43-bit unformatted code field of the unformatted next page. */
+        uint64_t np                    : 1;  /**< [ 15: 15](R/W) Next page. */
+        uint64_t ack                   : 1;  /**< [ 14: 14](RO/H) Acknowledge: Always 0 in this register. */
+        uint64_t mp                    : 1;  /**< [ 13: 13](R/W) Message page. Set to indicate that this register contains a message next page. Clear to
+                                                                 indicate that the register contains an unformatted next page. */
+        uint64_t ack2                  : 1;  /**< [ 12: 12](R/W) Acknowledge 2. Indicates that the receiver is able to act on the information (or perform
+                                                                 the task) defined in the message. */
+        uint64_t toggle                : 1;  /**< [ 11: 11](R/W) This bit is ignored by hardware. The value of the TOGGLE bit in transmitted next pages is
+                                                                 automatically generated by hardware. */
+        uint64_t m_u                   : 11; /**< [ 10:  0](R/W) Message/unformatted code field: When the MP bit is set, this field contains the message
+                                                                 code field (M) of the message next page. When MP is clear, this field contains the lower
+                                                                 11 bits of the 43-bit unformatted code field of the unformatted next page. */
+#else /* Word 0 - Little Endian */
+        uint64_t m_u                   : 11; /**< [ 10:  0](R/W) Message/unformatted code field: When the MP bit is set, this field contains the message
+                                                                 code field (M) of the message next page. When MP is clear, this field contains the lower
+                                                                 11 bits of the 43-bit unformatted code field of the unformatted next page. */
+        uint64_t toggle                : 1;  /**< [ 11: 11](R/W) This bit is ignored by hardware. The value of the TOGGLE bit in transmitted next pages is
+                                                                 automatically generated by hardware. */
+        uint64_t ack2                  : 1;  /**< [ 12: 12](R/W) Acknowledge 2. Indicates that the receiver is able to act on the information (or perform
+                                                                 the task) defined in the message. */
+        uint64_t mp                    : 1;  /**< [ 13: 13](R/W) Message page. Set to indicate that this register contains a message next page. Clear to
+                                                                 indicate that the register contains an unformatted next page. */
+        uint64_t ack                   : 1;  /**< [ 14: 14](RO/H) Acknowledge: Always 0 in this register. */
+        uint64_t np                    : 1;  /**< [ 15: 15](R/W) Next page. */
+        uint64_t u                     : 32; /**< [ 47: 16](R/W) Unformatted code field. When the MP bit is set, this field contains the 32-bit unformatted
+                                                                 code field of the message next page. When MP is clear, this field contains the upper 32
+                                                                 bits of the 43-bit unformatted code field of the unformatted next page. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_an_xnp_tx_s cn; */
+};
+typedef union bdk_bgxx_spux_an_xnp_tx bdk_bgxx_spux_an_xnp_tx_t;
+
+static inline uint64_t BDK_BGXX_SPUX_AN_XNP_TX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_AN_XNP_TX(unsigned long a, unsigned long b)
+{
+    /* CSR address for BGX(a)_SPU(b)_AN_XNP_TX. Same base on every
+       supported part; only the valid BGX range differs per model. */
+    const uint64_t base = 0x87e0e00100e8ll;
+    const uint64_t lmac = 0x100000ll * (b & 0x3);
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    if ((a<=3) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return base + 0x1000000ll * (a & 0x3) + lmac;
+    if ((a<=1) && (b<=3) && CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return base + 0x1000000ll * (a & 0x1) + lmac;
+    __bdk_csr_fatal("BGXX_SPUX_AN_XNP_TX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_AN_XNP_TX(a,b) bdk_bgxx_spux_an_xnp_tx_t
+#define bustype_BDK_BGXX_SPUX_AN_XNP_TX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_AN_XNP_TX(a,b) "BGXX_SPUX_AN_XNP_TX"
+#define device_bar_BDK_BGXX_SPUX_AN_XNP_TX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_AN_XNP_TX(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_AN_XNP_TX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_algn_status
+ *
+ * BGX SPU Multilane BASE-R PCS Alignment-Status Registers
+ * This register implements the IEEE 802.3 multilane BASE-R PCS alignment status 1-4 registers
+ * (3.50-3.53). It is valid only when the LPCS type is 40GBASE-R
+ * (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x4), and always returns 0x0 for all other LPCS
+ * types. IEEE 802.3 bits that are not applicable to 40GBASE-R (e.g. status bits for PCS lanes
+ * 19-4) are not implemented and marked as reserved. PCS lanes 3-0 are valid and are mapped to
+ * physical SerDes lanes based on the programming of BGX()_CMR()_CONFIG[LANE_TO_SDS].
+ */
+union bdk_bgxx_spux_br_algn_status
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_br_algn_status_s
+    {
+        /* NOTE: the field order below encodes the hardware bit layout; the
+           big- and little-endian views must remain exact mirrors. Do not reorder. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t marker_lock           : 4;  /**< [ 35: 32](RO/H) Marker-locked status for PCS lanes 3-0.
+                                                                 0 = Not locked.
+                                                                 1 = Locked. */
+        uint64_t reserved_13_31        : 19;
+        uint64_t alignd                : 1;  /**< [ 12: 12](RO/H) All lanes are locked and aligned. This bit returns 1 when the logical PCS has locked and
+                                                                 aligned all associated receive lanes; returns 0 otherwise. For all other PCS types, this
+                                                                 bit always returns 0. */
+        uint64_t reserved_4_11         : 8;
+        uint64_t block_lock            : 4;  /**< [  3:  0](RO/H) Block-lock status for PCS lanes 3-0:
+                                                                 0 = Not locked.
+                                                                 1 = Locked. */
+#else /* Word 0 - Little Endian */
+        uint64_t block_lock            : 4;  /**< [  3:  0](RO/H) Block-lock status for PCS lanes 3-0:
+                                                                 0 = Not locked.
+                                                                 1 = Locked. */
+        uint64_t reserved_4_11         : 8;
+        uint64_t alignd                : 1;  /**< [ 12: 12](RO/H) All lanes are locked and aligned. This bit returns 1 when the logical PCS has locked and
+                                                                 aligned all associated receive lanes; returns 0 otherwise. For all other PCS types, this
+                                                                 bit always returns 0. */
+        uint64_t reserved_13_31        : 19;
+        uint64_t marker_lock           : 4;  /**< [ 35: 32](RO/H) Marker-locked status for PCS lanes 3-0.
+                                                                 0 = Not locked.
+                                                                 1 = Locked. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_br_algn_status_s cn; */
+};
+typedef union bdk_bgxx_spux_br_algn_status bdk_bgxx_spux_br_algn_status_t;
+
+static inline uint64_t BDK_BGXX_SPUX_BR_ALGN_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_ALGN_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010050ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010050ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010050ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_ALGN_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_ALGN_STATUS(a,b) bdk_bgxx_spux_br_algn_status_t
+#define bustype_BDK_BGXX_SPUX_BR_ALGN_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_ALGN_STATUS(a,b) "BGXX_SPUX_BR_ALGN_STATUS"
+#define device_bar_BDK_BGXX_SPUX_BR_ALGN_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_ALGN_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_ALGN_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_bip_err_cnt
+ *
+ * BGX SPU 40GBASE-R BIP Error-Counter Registers
+ * This register implements the IEEE 802.3 BIP error-counter registers for PCS lanes 0-3
+ * (3.200-3.203). It is valid only when the LPCS type is 40GBASE-R
+ * (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x4), and always returns 0x0 for all other LPCS
+ * types. The counters are indexed by the RX PCS lane number based on the Alignment Marker
+ * detected on each lane and captured in BGX()_SPU()_BR_LANE_MAP. Each counter counts the
+ * BIP errors for its PCS lane, and is held at all ones in case of overflow. The counters are
+ * reset to all 0s when this register is read by software.
+ *
+ * The reset operation takes precedence over the increment operation; if the register is read on
+ * the same clock cycle as an increment operation, the counter is reset to all 0s and the
+ * increment operation is lost. The counters are writable for test purposes, rather than read-
+ * only as specified in IEEE 802.3.
+ */
+union bdk_bgxx_spux_br_bip_err_cnt
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_bip_err_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t bip_err_cnt_ln3 : 16; /**< [ 63: 48](RC/W/H) BIP error counter for lane on which PCS lane 3 markers are received. */
+ uint64_t bip_err_cnt_ln2 : 16; /**< [ 47: 32](RC/W/H) BIP error counter for lane on which PCS lane 2 markers are received. */
+ uint64_t bip_err_cnt_ln1 : 16; /**< [ 31: 16](RC/W/H) BIP error counter for lane on which PCS lane 1 markers are received. */
+ uint64_t bip_err_cnt_ln0 : 16; /**< [ 15: 0](RC/W/H) BIP error counter for lane on which PCS lane 0 markers are received. */
+#else /* Word 0 - Little Endian */
+ uint64_t bip_err_cnt_ln0 : 16; /**< [ 15: 0](RC/W/H) BIP error counter for lane on which PCS lane 0 markers are received. */
+ uint64_t bip_err_cnt_ln1 : 16; /**< [ 31: 16](RC/W/H) BIP error counter for lane on which PCS lane 1 markers are received. */
+ uint64_t bip_err_cnt_ln2 : 16; /**< [ 47: 32](RC/W/H) BIP error counter for lane on which PCS lane 2 markers are received. */
+ uint64_t bip_err_cnt_ln3 : 16; /**< [ 63: 48](RC/W/H) BIP error counter for lane on which PCS lane 3 markers are received. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_bip_err_cnt_s cn; */
+};
+typedef union bdk_bgxx_spux_br_bip_err_cnt bdk_bgxx_spux_br_bip_err_cnt_t;
+
+/* Returns the CSR address of BGX(a)_SPU(b)_BR_BIP_ERR_CNT for the detected
+   chip model; calls __bdk_csr_fatal() when (a,b) is out of range. */
+static inline uint64_t BDK_BGXX_SPUX_BR_BIP_ERR_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_BIP_ERR_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010058ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010058ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_BIP_ERR_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_BIP_ERR_CNT(a,b) bdk_bgxx_spux_br_bip_err_cnt_t
+#define bustype_BDK_BGXX_SPUX_BR_BIP_ERR_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_BIP_ERR_CNT(a,b) "BGXX_SPUX_BR_BIP_ERR_CNT"
+#define device_bar_BDK_BGXX_SPUX_BR_BIP_ERR_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_BIP_ERR_CNT(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_BIP_ERR_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_lane_map
+ *
+ * BGX SPU 40GBASE-R Lane-Mapping Registers
+ * This register implements the IEEE 802.3 lane 0-3 mapping registers (3.400-3.403). It is valid
+ * only when the LPCS type is 40GBASE-R (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x4), and always
+ * returns 0x0 for all other LPCS types. The LNx_MAPPING field for each programmed PCS lane
+ * (called service interface in 802.3ba-2010) is valid when that lane has achieved alignment
+ * marker lock on the receive side (i.e. the associated
+ * BGX()_SPU()_BR_ALGN_STATUS[MARKER_LOCK] = 1), and is invalid otherwise. When valid, it
+ * returns the actual detected receive PCS lane number based on the received alignment marker
+ * contents received on that service interface.
+ *
+ * The mapping is flexible because IEEE 802.3 allows multilane BASE-R receive lanes to be re-
+ * ordered. Note that for the transmit side, each PCS lane is mapped to a physical SerDes lane
+ * based on the programming of BGX()_CMR()_CONFIG[LANE_TO_SDS]. For the receive side,
+ * BGX()_CMR()_CONFIG[LANE_TO_SDS] specifies the service interface to physical SerDes
+ * lane mapping, and this register specifies the service interface to PCS lane mapping.
+ */
+union bdk_bgxx_spux_br_lane_map
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_lane_map_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t ln3_mapping : 6; /**< [ 53: 48](RO/H) PCS lane number received on service interface 3 */
+ uint64_t reserved_38_47 : 10;
+ uint64_t ln2_mapping : 6; /**< [ 37: 32](RO/H) PCS lane number received on service interface 2 */
+ uint64_t reserved_22_31 : 10;
+ uint64_t ln1_mapping : 6; /**< [ 21: 16](RO/H) PCS lane number received on service interface 1 */
+ uint64_t reserved_6_15 : 10;
+ uint64_t ln0_mapping : 6; /**< [ 5: 0](RO/H) PCS lane number received on service interface 0 */
+#else /* Word 0 - Little Endian */
+ uint64_t ln0_mapping : 6; /**< [ 5: 0](RO/H) PCS lane number received on service interface 0 */
+ uint64_t reserved_6_15 : 10;
+ uint64_t ln1_mapping : 6; /**< [ 21: 16](RO/H) PCS lane number received on service interface 1 */
+ uint64_t reserved_22_31 : 10;
+ uint64_t ln2_mapping : 6; /**< [ 37: 32](RO/H) PCS lane number received on service interface 2 */
+ uint64_t reserved_38_47 : 10;
+ uint64_t ln3_mapping : 6; /**< [ 53: 48](RO/H) PCS lane number received on service interface 3 */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_lane_map_s cn; */
+};
+typedef union bdk_bgxx_spux_br_lane_map bdk_bgxx_spux_br_lane_map_t;
+
+/* Returns the CSR address of BGX(a)_SPU(b)_BR_LANE_MAP for the detected
+   chip model; calls __bdk_csr_fatal() when (a,b) is out of range. */
+static inline uint64_t BDK_BGXX_SPUX_BR_LANE_MAP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_LANE_MAP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010060ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010060ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010060ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_LANE_MAP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_LANE_MAP(a,b) bdk_bgxx_spux_br_lane_map_t
+#define bustype_BDK_BGXX_SPUX_BR_LANE_MAP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_LANE_MAP(a,b) "BGXX_SPUX_BR_LANE_MAP"
+#define device_bar_BDK_BGXX_SPUX_BR_LANE_MAP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_LANE_MAP(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_LANE_MAP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_pmd_control
+ *
+ * BGX SPU 40GBASE-R PMD Control Registers
+ */
+union bdk_bgxx_spux_br_pmd_control
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_pmd_control_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t train_en : 1; /**< [ 1: 1](R/W) BASE-R training enable. */
+ uint64_t train_restart : 1; /**< [ 0: 0](R/W1S/H) BASE-R training restart. Writing a 1 to this bit restarts the training process if
+ [TRAIN_EN] is also set. This is a self-clearing bit. Software should
+ wait a minimum of 1.7ms after BGX()_SPU()_INT[TRAINING_FAILURE] is set before
+ restarting the training process. */
+#else /* Word 0 - Little Endian */
+ uint64_t train_restart : 1; /**< [ 0: 0](R/W1S/H) BASE-R training restart. Writing a 1 to this bit restarts the training process if
+ [TRAIN_EN] is also set. This is a self-clearing bit. Software should
+ wait a minimum of 1.7ms after BGX()_SPU()_INT[TRAINING_FAILURE] is set before
+ restarting the training process. */
+ uint64_t train_en : 1; /**< [ 1: 1](R/W) BASE-R training enable. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_pmd_control_s cn; */
+};
+typedef union bdk_bgxx_spux_br_pmd_control bdk_bgxx_spux_br_pmd_control_t;
+
+/* Returns the CSR address of BGX(a)_SPU(b)_BR_PMD_CONTROL for the detected
+   chip model; calls __bdk_csr_fatal() when (a,b) is out of range. */
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_CONTROL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_CONTROL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010068ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010068ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010068ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_PMD_CONTROL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_PMD_CONTROL(a,b) bdk_bgxx_spux_br_pmd_control_t
+#define bustype_BDK_BGXX_SPUX_BR_PMD_CONTROL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_PMD_CONTROL(a,b) "BGXX_SPUX_BR_PMD_CONTROL"
+#define device_bar_BDK_BGXX_SPUX_BR_PMD_CONTROL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_PMD_CONTROL(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_PMD_CONTROL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_pmd_ld_cup
+ *
+ * BGX SPU 40GBASE-R PMD Local Device Coefficient Update Registers
+ * This register implements 802.3 MDIO register 1.153 for 10GBASE-R (when
+ * BGX()_CMR()_CONFIG[LMAC_TYPE] = 10G_R)
+ * and MDIO registers 1.1300-1.1303 for 40GBASE-R (when
+ * BGX()_CMR()_CONFIG[LMAC_TYPE] = 40G_R). It is automatically cleared at the start of training.
+ * When link training
+ * is in progress, each field reflects the contents of the coefficient update field in the
+ * associated lane's outgoing training frame. The fields in this register are read/write even
+ * though they are specified as read-only in 802.3.
+ *
+ * If BGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is set, then this register must be updated
+ * by software during link training and hardware updates are disabled. If
+ * BGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is clear, this register is automatically
+ * updated by hardware, and it should not be written by software. The lane fields in this
+ * register are indexed by logical PCS lane ID.
+ *
+ * The lane 0 field (LN0_*) is valid for both
+ * 10GBASE-R and 40GBASE-R. The remaining fields (LN1_*, LN2_*, LN3_*) are only valid for
+ * 40GBASE-R.
+ */
+union bdk_bgxx_spux_br_pmd_ld_cup
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_pmd_ld_cup_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ln3_cup : 16; /**< [ 63: 48](R/W/H) PCS lane 3 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln2_cup : 16; /**< [ 47: 32](R/W/H) PCS lane 2 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln1_cup : 16; /**< [ 31: 16](R/W/H) PCS lane 1 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln0_cup : 16; /**< [ 15: 0](R/W/H) PCS lane 0 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. */
+#else /* Word 0 - Little Endian */
+ uint64_t ln0_cup : 16; /**< [ 15: 0](R/W/H) PCS lane 0 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. */
+ uint64_t ln1_cup : 16; /**< [ 31: 16](R/W/H) PCS lane 1 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln2_cup : 16; /**< [ 47: 32](R/W/H) PCS lane 2 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln3_cup : 16; /**< [ 63: 48](R/W/H) PCS lane 3 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_pmd_ld_cup_s cn; */
+};
+typedef union bdk_bgxx_spux_br_pmd_ld_cup bdk_bgxx_spux_br_pmd_ld_cup_t;
+
+/* Returns the CSR address of BGX(a)_SPU(b)_BR_PMD_LD_CUP for the detected
+   chip model; calls __bdk_csr_fatal() when (a,b) is out of range. */
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_LD_CUP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_LD_CUP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010088ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010088ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010088ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_PMD_LD_CUP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_PMD_LD_CUP(a,b) bdk_bgxx_spux_br_pmd_ld_cup_t
+#define bustype_BDK_BGXX_SPUX_BR_PMD_LD_CUP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_PMD_LD_CUP(a,b) "BGXX_SPUX_BR_PMD_LD_CUP"
+#define device_bar_BDK_BGXX_SPUX_BR_PMD_LD_CUP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_PMD_LD_CUP(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_PMD_LD_CUP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_pmd_ld_rep
+ *
+ * BGX SPU 40GBASE-R PMD Local Device Status Report Registers
+ * This register implements 802.3 MDIO register 1.154 for 10GBASE-R (when
+ * BGX()_CMR()_CONFIG[LMAC_TYPE] = 10G_R) and MDIO registers 1.1400-1.1403 for 40GBASE-R
+ * (when BGX()_CMR()_CONFIG[LMAC_TYPE] = 40G_R). It is automatically cleared at the start of
+ * training. Each field
+ * reflects the contents of the status report field in the associated lane's outgoing training
+ * frame. The fields in this register are read/write even though they are specified as read-only
+ * in 802.3. If BGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is set, then this register must
+ * be updated by software during link training and hardware updates are disabled. If
+ * BGX()_SPU_DBG_CONTROL[BR_PMD_TRAIN_SOFT_EN] is clear, this register is automatically
+ * updated by hardware, and it should not be written by software. The lane fields in this
+ * register are indexed by logical PCS lane ID.
+ *
+ * The lane 0 field (LN0_*) is valid for both
+ * 10GBASE-R and 40GBASE-R. The remaining fields (LN1_*, LN2_*, LN3_*) are only valid for
+ * 40GBASE-R.
+ */
+union bdk_bgxx_spux_br_pmd_ld_rep
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_pmd_ld_rep_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ln3_rep : 16; /**< [ 63: 48](R/W/H) PCS lane 3 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln2_rep : 16; /**< [ 47: 32](R/W/H) PCS lane 2 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln1_rep : 16; /**< [ 31: 16](R/W/H) PCS lane 1 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln0_rep : 16; /**< [ 15: 0](R/W/H) PCS lane 0 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. */
+#else /* Word 0 - Little Endian */
+ uint64_t ln0_rep : 16; /**< [ 15: 0](R/W/H) PCS lane 0 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. */
+ uint64_t ln1_rep : 16; /**< [ 31: 16](R/W/H) PCS lane 1 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln2_rep : 16; /**< [ 47: 32](R/W/H) PCS lane 2 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln3_rep : 16; /**< [ 63: 48](R/W/H) PCS lane 3 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_pmd_ld_rep_s cn; */
+};
+typedef union bdk_bgxx_spux_br_pmd_ld_rep bdk_bgxx_spux_br_pmd_ld_rep_t;
+
+/* Returns the CSR address of BGX(a)_SPU(b)_BR_PMD_LD_REP for the detected
+   chip model; calls __bdk_csr_fatal() when (a,b) is out of range. */
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_LD_REP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_LD_REP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010090ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010090ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010090ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_PMD_LD_REP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_PMD_LD_REP(a,b) bdk_bgxx_spux_br_pmd_ld_rep_t
+#define bustype_BDK_BGXX_SPUX_BR_PMD_LD_REP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_PMD_LD_REP(a,b) "BGXX_SPUX_BR_PMD_LD_REP"
+#define device_bar_BDK_BGXX_SPUX_BR_PMD_LD_REP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_PMD_LD_REP(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_PMD_LD_REP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_pmd_lp_cup
+ *
+ * BGX SPU 40GBASE-R PMD Link Partner Coefficient Update Registers
+ * This register implements 802.3 MDIO register 1.152 for 10GBASE-R (when
+ * BGX()_CMR()_CONFIG[LMAC_TYPE] = 10G_R)
+ * and MDIO registers 1.1100-1.1103 for 40GBASE-R (when
+ * BGX()_CMR()_CONFIG[LMAC_TYPE] = 40G_R). It is automatically cleared at the start of training.
+ * Each field reflects
+ * the contents of the coefficient update field in the lane's most recently received training
+ * frame. This register should not be written when link training is enabled, i.e. when
+ * BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set. The lane fields in this register are indexed by
+ * logical PCS lane ID.
+ *
+ * The lane 0 field (LN0_*) is valid for both 10GBASE-R and 40GBASE-R. The remaining fields
+ * (LN1_*, LN2_*, LN3_*) are only valid for 40GBASE-R.
+ */
+union bdk_bgxx_spux_br_pmd_lp_cup
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_pmd_lp_cup_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ln3_cup : 16; /**< [ 63: 48](R/W/H) PCS lane 3 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln2_cup : 16; /**< [ 47: 32](R/W/H) PCS lane 2 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln1_cup : 16; /**< [ 31: 16](R/W/H) PCS lane 1 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln0_cup : 16; /**< [ 15: 0](R/W/H) PCS lane 0 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. */
+#else /* Word 0 - Little Endian */
+ uint64_t ln0_cup : 16; /**< [ 15: 0](R/W/H) PCS lane 0 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. */
+ uint64_t ln1_cup : 16; /**< [ 31: 16](R/W/H) PCS lane 1 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln2_cup : 16; /**< [ 47: 32](R/W/H) PCS lane 2 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln3_cup : 16; /**< [ 63: 48](R/W/H) PCS lane 3 coefficient update: format defined by BGX_SPU_BR_TRAIN_CUP_S. Not valid for
+ 10GBASE-R. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_pmd_lp_cup_s cn; */
+};
+typedef union bdk_bgxx_spux_br_pmd_lp_cup bdk_bgxx_spux_br_pmd_lp_cup_t;
+
+/* Returns the CSR address of BGX(a)_SPU(b)_BR_PMD_LP_CUP for the detected
+   chip model; calls __bdk_csr_fatal() when (a,b) is out of range. */
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_LP_CUP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_LP_CUP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010078ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010078ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010078ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_PMD_LP_CUP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_PMD_LP_CUP(a,b) bdk_bgxx_spux_br_pmd_lp_cup_t
+#define bustype_BDK_BGXX_SPUX_BR_PMD_LP_CUP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_PMD_LP_CUP(a,b) "BGXX_SPUX_BR_PMD_LP_CUP"
+#define device_bar_BDK_BGXX_SPUX_BR_PMD_LP_CUP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_PMD_LP_CUP(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_PMD_LP_CUP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_pmd_lp_rep
+ *
+ * BGX SPU 40GBASE-R PMD Link Partner Status Report Registers
+ * This register implements 802.3 MDIO register 1.153 for 10GBASE-R (when
+ * BGX()_CMR()_CONFIG[LMAC_TYPE] = 10G_R)
+ * and MDIO registers 1.1200-1.1203 for 40GBASE-R (when
+ * BGX()_CMR()_CONFIG[LMAC_TYPE] = 40G_R). It is automatically cleared at the start of training.
+ * Each field reflects
+ * the contents of the status report field in the associated lane's most recently received
+ * training frame. The lane fields in this register are indexed by logical PCS lane ID.
+ *
+ * The lane
+ * 0 field (LN0_*) is valid for both 10GBASE-R and 40GBASE-R. The remaining fields (LN1_*, LN2_*,
+ * LN3_*) are only valid for 40GBASE-R.
+ */
+union bdk_bgxx_spux_br_pmd_lp_rep
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_pmd_lp_rep_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ln3_rep : 16; /**< [ 63: 48](RO/H) PCS lane 3 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln2_rep : 16; /**< [ 47: 32](RO/H) PCS lane 2 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln1_rep : 16; /**< [ 31: 16](RO/H) PCS lane 1 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln0_rep : 16; /**< [ 15: 0](RO/H) PCS lane 0 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. */
+#else /* Word 0 - Little Endian */
+ uint64_t ln0_rep : 16; /**< [ 15: 0](RO/H) PCS lane 0 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. */
+ uint64_t ln1_rep : 16; /**< [ 31: 16](RO/H) PCS lane 1 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln2_rep : 16; /**< [ 47: 32](RO/H) PCS lane 2 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+ uint64_t ln3_rep : 16; /**< [ 63: 48](RO/H) PCS lane 3 status report: format defined by BGX_SPU_BR_TRAIN_REP_S. Not valid for
+ 10GBASE-R. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_pmd_lp_rep_s cn; */
+};
+typedef union bdk_bgxx_spux_br_pmd_lp_rep bdk_bgxx_spux_br_pmd_lp_rep_t;
+
+/* Returns the CSR address of BGX(a)_SPU(b)_BR_PMD_LP_REP for the detected
+   chip model; calls __bdk_csr_fatal() when (a,b) is out of range. */
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_LP_REP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_LP_REP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010080ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010080ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010080ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_PMD_LP_REP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_PMD_LP_REP(a,b) bdk_bgxx_spux_br_pmd_lp_rep_t
+#define bustype_BDK_BGXX_SPUX_BR_PMD_LP_REP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_PMD_LP_REP(a,b) "BGXX_SPUX_BR_PMD_LP_REP"
+#define device_bar_BDK_BGXX_SPUX_BR_PMD_LP_REP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_PMD_LP_REP(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_PMD_LP_REP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_pmd_status
+ *
+ * BGX SPU 40GBASE-R PMD Status Registers
+ * The lane fields in this register are indexed by logical PCS lane ID. The lane 0 field (LN0_*)
+ * is valid for both 10GBASE-R and 40GBASE-R. The remaining fields (LN1_*, LN2_*, LN3_*) are only
+ * valid for 40GBASE-R.
+ */
+union bdk_bgxx_spux_br_pmd_status
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_pmd_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ln3_train_status : 4; /**< [ 15: 12](RO/H) PCS lane 3 link training status. Format defined by BGX_SPU_BR_LANE_TRAIN_STATUS_S. Not
+ valid for 10GBASE-R. */
+ uint64_t ln2_train_status : 4; /**< [ 11: 8](RO/H) PCS lane 2 link training status. Format defined by BGX_SPU_BR_LANE_TRAIN_STATUS_S. Not
+ valid for 10GBASE-R. */
+ uint64_t ln1_train_status : 4; /**< [ 7: 4](RO/H) PCS lane 1 link training status. Format defined by BGX_SPU_BR_LANE_TRAIN_STATUS_S. Not
+ valid for 10GBASE-R. */
+ uint64_t ln0_train_status : 4; /**< [ 3: 0](RO/H) PCS lane 0 link training status. Format defined by BGX_SPU_BR_LANE_TRAIN_STATUS_S. */
+#else /* Word 0 - Little Endian */
+ uint64_t ln0_train_status : 4; /**< [ 3: 0](RO/H) PCS lane 0 link training status. Format defined by BGX_SPU_BR_LANE_TRAIN_STATUS_S. */
+ uint64_t ln1_train_status : 4; /**< [ 7: 4](RO/H) PCS lane 1 link training status. Format defined by BGX_SPU_BR_LANE_TRAIN_STATUS_S. Not
+ valid for 10GBASE-R. */
+ uint64_t ln2_train_status : 4; /**< [ 11: 8](RO/H) PCS lane 2 link training status. Format defined by BGX_SPU_BR_LANE_TRAIN_STATUS_S. Not
+ valid for 10GBASE-R. */
+ uint64_t ln3_train_status : 4; /**< [ 15: 12](RO/H) PCS lane 3 link training status. Format defined by BGX_SPU_BR_LANE_TRAIN_STATUS_S. Not
+ valid for 10GBASE-R. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_pmd_status_s cn; */
+};
+typedef union bdk_bgxx_spux_br_pmd_status bdk_bgxx_spux_br_pmd_status_t;
+
+/* Returns the CSR address of BGX(a)_SPU(b)_BR_PMD_STATUS for the detected
+   chip model; calls __bdk_csr_fatal() when (a,b) is out of range. */
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_PMD_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010070ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+ return 0x87e0e0010070ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x87e0e0010070ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("BGXX_SPUX_BR_PMD_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_PMD_STATUS(a,b) bdk_bgxx_spux_br_pmd_status_t
+#define bustype_BDK_BGXX_SPUX_BR_PMD_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_PMD_STATUS(a,b) "BGXX_SPUX_BR_PMD_STATUS"
+#define device_bar_BDK_BGXX_SPUX_BR_PMD_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_PMD_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_PMD_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_status1
+ *
+ * BGX SPU BASE-R Status 1 Registers
+ */
+union bdk_bgxx_spux_br_status1
+{
+ uint64_t u;
+ struct bdk_bgxx_spux_br_status1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t rcv_lnk : 1; /**< [ 12: 12](RO/H) BASE-R receive link status.
+ 0 = BASE-R PCS receive-link down.
+ 1 = BASE-R PCS receive-link up.
+
+ This bit is a reflection of the PCS_status variable defined in IEEE 802.3 sections
+ 49.2.14.1 and 82.3.1. */
+ uint64_t reserved_4_11 : 8;
+ uint64_t prbs9 : 1; /**< [ 3: 3](RO) 10GBASE-R PRBS9 pattern testing ability. Always 0; PRBS9 pattern testing is not supported. */
+ uint64_t prbs31 : 1; /**< [ 2: 2](RO) 10GBASE-R PRBS31 pattern testing ability. Always 0; PRBS31 pattern testing is not supported. */
+ uint64_t hi_ber : 1; /**< [ 1: 1](RO/H) BASE-R PCS high bit-error rate.
+ 0 = 64/66 bit receiver is detecting a bit-error rate of \< 10.4.
+ 1 = 64/66 bit receiver is detecting a bit-error rate of \>= 10.4.
+
+ This bit is a direct reflection of the state of the HI_BER variable in the 64 B/66 B state
+ diagram and is defined in IEEE 802.3 sections 49.2.13.2.2 and 82.2.18.2.2. */
+ uint64_t blk_lock : 1; /**< [ 0: 0](RO/H) BASE-R PCS block lock.
+ 0 = No block lock.
+ 1 = 64/66 bit receiver for BASE-R has block lock.
+
+ This bit is a direct reflection of the state of the BLOCK_LOCK variable in the 64 B/66 B
+ state diagram and is defined in IEEE 802.3 sections 49.2.13.2.2 and 82.2.18.2.2.
+ For a multilane logical PCS (i.e. 40GBASE-R), this bit indicates that the receiver has
+ both block lock and alignment for all lanes and is identical to
+ BGX()_SPU()_BR_ALGN_STATUS[ALIGND]. */
+#else /* Word 0 - Little Endian */
+ uint64_t blk_lock : 1; /**< [ 0: 0](RO/H) BASE-R PCS block lock.
+ 0 = No block lock.
+ 1 = 64/66 bit receiver for BASE-R has block lock.
+
+ This bit is a direct reflection of the state of the BLOCK_LOCK variable in the 64 B/66 B
+ state diagram and is defined in IEEE 802.3 sections 49.2.13.2.2 and 82.2.18.2.2.
+ For a multilane logical PCS (i.e. 40GBASE-R), this bit indicates that the receiver has
+ both block lock and alignment for all lanes and is identical to
+ BGX()_SPU()_BR_ALGN_STATUS[ALIGND]. */
+ uint64_t hi_ber : 1; /**< [ 1: 1](RO/H) BASE-R PCS high bit-error rate.
+ 0 = 64/66 bit receiver is detecting a bit-error rate of \< 10.4.
+ 1 = 64/66 bit receiver is detecting a bit-error rate of \>= 10.4.
+
+ This bit is a direct reflection of the state of the HI_BER variable in the 64 B/66 B state
+ diagram and is defined in IEEE 802.3 sections 49.2.13.2.2 and 82.2.18.2.2. */
+ uint64_t prbs31 : 1; /**< [ 2: 2](RO) 10GBASE-R PRBS31 pattern testing ability. Always 0; PRBS31 pattern testing is not supported. */
+ uint64_t prbs9 : 1; /**< [ 3: 3](RO) 10GBASE-R PRBS9 pattern testing ability. Always 0; PRBS9 pattern testing is not supported. */
+ uint64_t reserved_4_11 : 8;
+ uint64_t rcv_lnk : 1; /**< [ 12: 12](RO/H) BASE-R receive link status.
+ 0 = BASE-R PCS receive-link down.
+ 1 = BASE-R PCS receive-link up.
+
+ This bit is a reflection of the PCS_status variable defined in IEEE 802.3 sections
+ 49.2.14.1 and 82.3.1. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_bgxx_spux_br_status1_s cn; */
+};
+typedef union bdk_bgxx_spux_br_status1 bdk_bgxx_spux_br_status1_t;
+
+static inline uint64_t BDK_BGXX_SPUX_BR_STATUS1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_STATUS1(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_BR_STATUS1. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e0010030ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e0010030ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_BR_STATUS1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_STATUS1(a,b) bdk_bgxx_spux_br_status1_t
+#define bustype_BDK_BGXX_SPUX_BR_STATUS1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_STATUS1(a,b) "BGXX_SPUX_BR_STATUS1"
+#define device_bar_BDK_BGXX_SPUX_BR_STATUS1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_STATUS1(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_STATUS1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_status2
+ *
+ * BGX SPU BASE-R Status 2 Registers
+ * This register implements a combination of the following IEEE 802.3 registers:
+ * * BASE-R PCS status 2 (MDIO address 3.33).
+ * * BASE-R BER high-order counter (MDIO address 3.44).
+ * * Errored-blocks high-order counter (MDIO address 3.45).
+ *
+ * Note that the relative locations of some fields have been moved from IEEE 802.3 in order to
+ * make the register layout more software friendly: the BER counter high-order and low-order bits
+ * from sections 3.44 and 3.33 have been combined into the contiguous, 22-bit [BER_CNT] field;
+ * likewise, the errored-blocks counter high-order and low-order bits from section 3.45 have been
+ * combined into the contiguous, 22-bit [ERR_BLKS] field.
+ */
+union bdk_bgxx_spux_br_status2
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_br_status2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t err_blks              : 22; /**< [ 61: 40](RC/W/H) Errored-blocks counter. This is the BASE-R errored-blocks counter as defined by the
+                                                                 errored_block_count variable specified in IEEE 802.3 sections 49.2.14.2 and 82.2.18.2.4.
+                                                                 It
+                                                                 increments by one on each block for which the BASE-R receive state machine, specified in Std
+                                                                 802.3 diagrams 49-15 and 82-15, enters the RX_E state.
+                                                                 Back-to-back blocks in the RX_E state are counted as transitions from RX_E to RX_E and
+                                                                 keep incrementing the counter. The counter is reset to all zeros after this register is read
+                                                                 by software.
+
+                                                                 The reset operation takes precedence over the increment operation: if the register is read
+                                                                 on the same clock cycle as an increment operation, the counter is reset to all zeros and the
+                                                                 increment operation is lost.
+
+                                                                 This field is writable for test purposes, rather than read-only as specified in IEEE
+                                                                 802.3. */
+        uint64_t reserved_38_39        : 2;
+        uint64_t ber_cnt               : 22; /**< [ 37: 16](RC/W/H) Bit-error-rate counter. This is the BASE-R BER counter as defined by the BER_COUNT
+                                                                 variable in IEEE 802.3 sections 49.2.14.2 and 82.2.18.2.4. The counter is reset to all zeros
+                                                                 after this register is read by software, and is held at all ones in case of overflow.
+                                                                 The reset operation takes precedence over the increment operation: if the register is read
+                                                                 on the same clock cycle an increment operation, the counter is reset to all zeros and the
+                                                                 increment operation is lost.
+
+                                                                 This field is writable for test purposes, rather than read-only as specified in IEEE
+                                                                 802.3. */
+        uint64_t latched_lock          : 1;  /**< [ 15: 15](R/W1S/H) Latched-block lock.
+                                                                 0 = No block.
+                                                                 1 = 64/66 bit receiver for BASE-R has block lock.
+
+                                                                 This is a latching-low version of BGX()_SPU()_BR_STATUS1[BLK_LOCK]; it stays clear
+                                                                 until a write-1-to-set by software. */
+        uint64_t latched_ber           : 1;  /**< [ 14: 14](R/W1C/H) Latched-high bit-error rate.
+                                                                 0 = Not a high BER.
+                                                                 1 = 64/66 bit receiver is detecting a high BER.
+
+                                                                 This is a latching-high version of BGX()_SPU()_BR_STATUS1[HI_BER]; it stays set until
+                                                                 a write-1-to-clear by software. */
+        uint64_t reserved_0_13         : 14;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_13         : 14;
+        uint64_t latched_ber           : 1;  /**< [ 14: 14](R/W1C/H) Latched-high bit-error rate.
+                                                                 0 = Not a high BER.
+                                                                 1 = 64/66 bit receiver is detecting a high BER.
+
+                                                                 This is a latching-high version of BGX()_SPU()_BR_STATUS1[HI_BER]; it stays set until
+                                                                 a write-1-to-clear by software. */
+        uint64_t latched_lock          : 1;  /**< [ 15: 15](R/W1S/H) Latched-block lock.
+                                                                 0 = No block.
+                                                                 1 = 64/66 bit receiver for BASE-R has block lock.
+
+                                                                 This is a latching-low version of BGX()_SPU()_BR_STATUS1[BLK_LOCK]; it stays clear
+                                                                 until a write-1-to-set by software. */
+        uint64_t ber_cnt               : 22; /**< [ 37: 16](RC/W/H) Bit-error-rate counter. This is the BASE-R BER counter as defined by the BER_COUNT
+                                                                 variable in IEEE 802.3 sections 49.2.14.2 and 82.2.18.2.4. The counter is reset to all zeros
+                                                                 after this register is read by software, and is held at all ones in case of overflow.
+                                                                 The reset operation takes precedence over the increment operation: if the register is read
+                                                                 on the same clock cycle an increment operation, the counter is reset to all zeros and the
+                                                                 increment operation is lost.
+
+                                                                 This field is writable for test purposes, rather than read-only as specified in IEEE
+                                                                 802.3. */
+        uint64_t reserved_38_39        : 2;
+        uint64_t err_blks              : 22; /**< [ 61: 40](RC/W/H) Errored-blocks counter. This is the BASE-R errored-blocks counter as defined by the
+                                                                 errored_block_count variable specified in IEEE 802.3 sections 49.2.14.2 and 82.2.18.2.4.
+                                                                 It
+                                                                 increments by one on each block for which the BASE-R receive state machine, specified in Std
+                                                                 802.3 diagrams 49-15 and 82-15, enters the RX_E state.
+                                                                 Back-to-back blocks in the RX_E state are counted as transitions from RX_E to RX_E and
+                                                                 keep incrementing the counter. The counter is reset to all zeros after this register is read
+                                                                 by software.
+
+                                                                 The reset operation takes precedence over the increment operation: if the register is read
+                                                                 on the same clock cycle as an increment operation, the counter is reset to all zeros and the
+                                                                 increment operation is lost.
+
+                                                                 This field is writable for test purposes, rather than read-only as specified in IEEE
+                                                                 802.3. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_br_status2_s cn; */
+};
+typedef union bdk_bgxx_spux_br_status2 bdk_bgxx_spux_br_status2_t;
+
+static inline uint64_t BDK_BGXX_SPUX_BR_STATUS2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_STATUS2(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_BR_STATUS2. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e0010038ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e0010038ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_BR_STATUS2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_STATUS2(a,b) bdk_bgxx_spux_br_status2_t
+#define bustype_BDK_BGXX_SPUX_BR_STATUS2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_STATUS2(a,b) "BGXX_SPUX_BR_STATUS2"
+#define device_bar_BDK_BGXX_SPUX_BR_STATUS2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_STATUS2(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_STATUS2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_tp_control
+ *
+ * BGX SPU BASE-R Test-Pattern Control Registers
+ * Refer to the test pattern methodology described in 802.3 sections 49.2.8 and 82.2.10.
+ */
+union bdk_bgxx_spux_br_tp_control
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_br_tp_control_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t scramble_tp           : 1;  /**< [  7:  7](R/W) Select scrambled idle test pattern. This bit selects the transmit test pattern used when
+                                                                 [TX_TP_EN] is set:
+                                                                 0 = Square wave test pattern.
+                                                                 1 = Scrambled idle test pattern. */
+        uint64_t prbs9_tx              : 1;  /**< [  6:  6](RO) 10GBASE-R PRBS9 TP transmit enable. Always 0; PRBS9 pattern testing is not supported. */
+        uint64_t prbs31_rx             : 1;  /**< [  5:  5](RO) 10GBASE-R PRBS31 TP receive enable. Always 0; PRBS31 pattern testing is not supported. */
+        uint64_t prbs31_tx             : 1;  /**< [  4:  4](RO) 10GBASE-R PRBS31 TP transmit enable. Always 0; PRBS31 pattern is not supported. */
+        uint64_t tx_tp_en              : 1;  /**< [  3:  3](R/W) Transmit-test-pattern enable. */
+        uint64_t rx_tp_en              : 1;  /**< [  2:  2](R/W) Receive-test-pattern enable. The only supported receive test pattern is the scrambled idle
+                                                                 test pattern. Setting this bit enables checking of that receive pattern. */
+        uint64_t tp_sel                : 1;  /**< [  1:  1](RO/H) Square/PRBS test pattern select. Always 1 to select square wave test pattern; PRBS test
+                                                                 patterns are not supported. */
+        uint64_t dp_sel                : 1;  /**< [  0:  0](RO) Data pattern select. Always 0; PRBS test patterns are not supported. */
+#else /* Word 0 - Little Endian */
+        uint64_t dp_sel                : 1;  /**< [  0:  0](RO) Data pattern select. Always 0; PRBS test patterns are not supported. */
+        uint64_t tp_sel                : 1;  /**< [  1:  1](RO/H) Square/PRBS test pattern select. Always 1 to select square wave test pattern; PRBS test
+                                                                 patterns are not supported. */
+        uint64_t rx_tp_en              : 1;  /**< [  2:  2](R/W) Receive-test-pattern enable. The only supported receive test pattern is the scrambled idle
+                                                                 test pattern. Setting this bit enables checking of that receive pattern. */
+        uint64_t tx_tp_en              : 1;  /**< [  3:  3](R/W) Transmit-test-pattern enable. */
+        uint64_t prbs31_tx             : 1;  /**< [  4:  4](RO) 10GBASE-R PRBS31 TP transmit enable. Always 0; PRBS31 pattern is not supported. */
+        uint64_t prbs31_rx             : 1;  /**< [  5:  5](RO) 10GBASE-R PRBS31 TP receive enable. Always 0; PRBS31 pattern testing is not supported. */
+        uint64_t prbs9_tx              : 1;  /**< [  6:  6](RO) 10GBASE-R PRBS9 TP transmit enable. Always 0; PRBS9 pattern testing is not supported. */
+        uint64_t scramble_tp           : 1;  /**< [  7:  7](R/W) Select scrambled idle test pattern. This bit selects the transmit test pattern used when
+                                                                 [TX_TP_EN] is set:
+                                                                 0 = Square wave test pattern.
+                                                                 1 = Scrambled idle test pattern. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_br_tp_control_s cn; */
+};
+typedef union bdk_bgxx_spux_br_tp_control bdk_bgxx_spux_br_tp_control_t;
+
+static inline uint64_t BDK_BGXX_SPUX_BR_TP_CONTROL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_TP_CONTROL(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_BR_TP_CONTROL. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e0010040ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e0010040ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_BR_TP_CONTROL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_TP_CONTROL(a,b) bdk_bgxx_spux_br_tp_control_t
+#define bustype_BDK_BGXX_SPUX_BR_TP_CONTROL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_TP_CONTROL(a,b) "BGXX_SPUX_BR_TP_CONTROL"
+#define device_bar_BDK_BGXX_SPUX_BR_TP_CONTROL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_TP_CONTROL(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_TP_CONTROL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_br_tp_err_cnt
+ *
+ * BGX SPU BASE-R Test-Pattern Error-Count Registers
+ * This register provides the BASE-R PCS test-pattern error counter.
+ */
+union bdk_bgxx_spux_br_tp_err_cnt
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_br_tp_err_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t err_cnt               : 16; /**< [ 15:  0](RC/W/H) Error counter. This 16-bit counter contains the number of errors received during a pattern
+                                                                 test. These bits are reset to all zeros when this register is read by software, and they are
+                                                                 held at all ones in the case of overflow.
+
+                                                                 The test pattern methodology is described in IEEE 802.3, Sections 49.2.12 and 82.2.10.
+                                                                 This
+                                                                 counter counts either block errors or bit errors dependent on the test mode (see Section
+                                                                 49.2.12). The reset operation takes precedence over the increment operation; if the
+                                                                 register is read on the same clock cycle as an increment operation, the counter is reset
+                                                                 to all zeros and the increment operation is lost. This field is writable for test purposes,
+                                                                 rather than read-only as specified in IEEE 802.3. */
+#else /* Word 0 - Little Endian */
+        uint64_t err_cnt               : 16; /**< [ 15:  0](RC/W/H) Error counter. This 16-bit counter contains the number of errors received during a pattern
+                                                                 test. These bits are reset to all zeros when this register is read by software, and they are
+                                                                 held at all ones in the case of overflow.
+
+                                                                 The test pattern methodology is described in IEEE 802.3, Sections 49.2.12 and 82.2.10.
+                                                                 This
+                                                                 counter counts either block errors or bit errors dependent on the test mode (see Section
+                                                                 49.2.12). The reset operation takes precedence over the increment operation; if the
+                                                                 register is read on the same clock cycle as an increment operation, the counter is reset
+                                                                 to all zeros and the increment operation is lost. This field is writable for test purposes,
+                                                                 rather than read-only as specified in IEEE 802.3. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_br_tp_err_cnt_s cn; */
+};
+typedef union bdk_bgxx_spux_br_tp_err_cnt bdk_bgxx_spux_br_tp_err_cnt_t;
+
+static inline uint64_t BDK_BGXX_SPUX_BR_TP_ERR_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BR_TP_ERR_CNT(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_BR_TP_ERR_CNT. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e0010048ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e0010048ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_BR_TP_ERR_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BR_TP_ERR_CNT(a,b) bdk_bgxx_spux_br_tp_err_cnt_t
+#define bustype_BDK_BGXX_SPUX_BR_TP_ERR_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BR_TP_ERR_CNT(a,b) "BGXX_SPUX_BR_TP_ERR_CNT"
+#define device_bar_BDK_BGXX_SPUX_BR_TP_ERR_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BR_TP_ERR_CNT(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BR_TP_ERR_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_bx_status
+ *
+ * BGX SPU BASE-X Status Registers
+ */
+union bdk_bgxx_spux_bx_status
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_bx_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_13_63        : 51;
+        uint64_t alignd                : 1;  /**< [ 12: 12](RO/H) 10GBASE-X lane-alignment status.
+                                                                 0 = receive lanes not aligned.
+                                                                 1 = receive lanes aligned. */
+        uint64_t pattst                : 1;  /**< [ 11: 11](RO) Pattern-testing ability. Always 0; 10GBASE-X pattern is testing not supported. */
+        uint64_t reserved_4_10         : 7;
+        uint64_t lsync                 : 4;  /**< [  3:  0](RO/H) Lane synchronization. BASE-X lane synchronization status for PCS lanes 3-0. Each bit is
+                                                                 set when the associated lane is code-group synchronized, and clear otherwise. If the PCS
+                                                                 type is RXAUI (i.e. the associated BGX()_CMR()_CONFIG[LMAC_TYPE] = RXAUI), then
+                                                                 only lanes 1-0 are valid. */
+#else /* Word 0 - Little Endian */
+        uint64_t lsync                 : 4;  /**< [  3:  0](RO/H) Lane synchronization. BASE-X lane synchronization status for PCS lanes 3-0. Each bit is
+                                                                 set when the associated lane is code-group synchronized, and clear otherwise. If the PCS
+                                                                 type is RXAUI (i.e. the associated BGX()_CMR()_CONFIG[LMAC_TYPE] = RXAUI), then
+                                                                 only lanes 1-0 are valid. */
+        uint64_t reserved_4_10         : 7;
+        uint64_t pattst                : 1;  /**< [ 11: 11](RO) Pattern-testing ability. Always 0; 10GBASE-X pattern is testing not supported. */
+        uint64_t alignd                : 1;  /**< [ 12: 12](RO/H) 10GBASE-X lane-alignment status.
+                                                                 0 = receive lanes not aligned.
+                                                                 1 = receive lanes aligned. */
+        uint64_t reserved_13_63        : 51;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_bx_status_s cn; */
+};
+typedef union bdk_bgxx_spux_bx_status bdk_bgxx_spux_bx_status_t;
+
+static inline uint64_t BDK_BGXX_SPUX_BX_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_BX_STATUS(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_BX_STATUS. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e0010028ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e0010028ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_BX_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_BX_STATUS(a,b) bdk_bgxx_spux_bx_status_t
+#define bustype_BDK_BGXX_SPUX_BX_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_BX_STATUS(a,b) "BGXX_SPUX_BX_STATUS"
+#define device_bar_BDK_BGXX_SPUX_BX_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_BX_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_BX_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_control1
+ *
+ * BGX SPU Control 1 Registers
+ */
+union bdk_bgxx_spux_control1
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_control1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t reset                 : 1;  /**< [ 15: 15](R/W1S/H) Reset. Setting this bit or BGX()_SPU()_AN_CONTROL[AN_RESET] to 1 causes the
+                                                                 following to happen:
+                                                                 * Resets the logical PCS (LPCS)
+                                                                 * Sets the IEEE 802.3 PCS, FEC and AN registers for the LPCS to their default states
+                                                                 * Resets the associated SerDes lanes.
+
+                                                                 It takes up to 32 coprocessor-clock cycles to reset the LPCS, after which RESET is
+                                                                 automatically cleared. */
+        uint64_t loopbck               : 1;  /**< [ 14: 14](R/W) TX-to-RX loopback enable. When set, transmit data for each SerDes lane is looped back as
+                                                                 receive data. */
+        uint64_t spdsel1               : 1;  /**< [ 13: 13](RO/H) Speed select 1: always 1. */
+        uint64_t reserved_12           : 1;
+        uint64_t lo_pwr                : 1;  /**< [ 11: 11](R/W) Low power enable. When set, the LPCS is disabled (overriding the associated
+                                                                 BGX()_CMR()_CONFIG[ENABLE]), and the SerDes lanes associated with the LPCS are
+                                                                 reset. */
+        uint64_t reserved_7_10         : 4;
+        uint64_t spdsel0               : 1;  /**< [  6:  6](RO/H) Speed select 0: always 1. */
+        uint64_t spd                   : 4;  /**< [  5:  2](RO/H) '"Speed selection.
+                                                                 Note that this is a read-only field rather than read/write as
+                                                                 specified in 802.3.
+                                                                 The LPCS speed is instead configured by the associated
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE]. The read values returned by this field are as
+                                                                 follows:
+
+                                                                 \<pre\>
+                                                                 LMAC_TYPE     Speed    SPD Read Value   Comment
+                                                                 ------------------------------------------------------
+                                                                 XAUI          10G/20G  0x0              20G if DXAUI
+                                                                 RXAUI         10G      0x0
+                                                                 10G_R         10G      0x0
+                                                                 40G_R         40G      0x3
+                                                                 Other         -        X
+                                                                 \</pre\>' */
+        uint64_t reserved_0_1          : 2;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_1          : 2;
+        uint64_t spd                   : 4;  /**< [  5:  2](RO/H) '"Speed selection.
+                                                                 Note that this is a read-only field rather than read/write as
+                                                                 specified in 802.3.
+                                                                 The LPCS speed is instead configured by the associated
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE]. The read values returned by this field are as
+                                                                 follows:
+
+                                                                 \<pre\>
+                                                                 LMAC_TYPE     Speed    SPD Read Value   Comment
+                                                                 ------------------------------------------------------
+                                                                 XAUI          10G/20G  0x0              20G if DXAUI
+                                                                 RXAUI         10G      0x0
+                                                                 10G_R         10G      0x0
+                                                                 40G_R         40G      0x3
+                                                                 Other         -        X
+                                                                 \</pre\>' */
+        uint64_t spdsel0               : 1;  /**< [  6:  6](RO/H) Speed select 0: always 1. */
+        uint64_t reserved_7_10         : 4;
+        uint64_t lo_pwr                : 1;  /**< [ 11: 11](R/W) Low power enable. When set, the LPCS is disabled (overriding the associated
+                                                                 BGX()_CMR()_CONFIG[ENABLE]), and the SerDes lanes associated with the LPCS are
+                                                                 reset. */
+        uint64_t reserved_12           : 1;
+        uint64_t spdsel1               : 1;  /**< [ 13: 13](RO/H) Speed select 1: always 1. */
+        uint64_t loopbck               : 1;  /**< [ 14: 14](R/W) TX-to-RX loopback enable. When set, transmit data for each SerDes lane is looped back as
+                                                                 receive data. */
+        uint64_t reset                 : 1;  /**< [ 15: 15](R/W1S/H) Reset. Setting this bit or BGX()_SPU()_AN_CONTROL[AN_RESET] to 1 causes the
+                                                                 following to happen:
+                                                                 * Resets the logical PCS (LPCS)
+                                                                 * Sets the IEEE 802.3 PCS, FEC and AN registers for the LPCS to their default states
+                                                                 * Resets the associated SerDes lanes.
+
+                                                                 It takes up to 32 coprocessor-clock cycles to reset the LPCS, after which RESET is
+                                                                 automatically cleared. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_control1_s cn; */
+};
+typedef union bdk_bgxx_spux_control1 bdk_bgxx_spux_control1_t;
+
+static inline uint64_t BDK_BGXX_SPUX_CONTROL1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_CONTROL1(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_CONTROL1. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e0010000ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e0010000ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_CONTROL1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_CONTROL1(a,b) bdk_bgxx_spux_control1_t
+#define bustype_BDK_BGXX_SPUX_CONTROL1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_CONTROL1(a,b) "BGXX_SPUX_CONTROL1"
+#define device_bar_BDK_BGXX_SPUX_CONTROL1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_CONTROL1(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_CONTROL1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_control2
+ *
+ * BGX SPU Control 2 Registers
+ */
+union bdk_bgxx_spux_control2
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_control2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_3_63         : 61;
+        uint64_t pcs_type              : 3;  /**< [  2:  0](RO/H) PCS type selection.
+                                                                 Note that this is a read-only field rather than read/write as
+                                                                 specified in 802.3.
+                                                                 The LPCS speed is instead configured by the associated
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE]. The read values returned by this field are as
+                                                                 follows:
+
+                                                                 \<pre\>
+                                                                           [PCS_TYPE]
+                                                                 LMAC_TYPE Read Value     Comment
+                                                                 -------------------------------------------------
+                                                                 XAUI      0x1            10GBASE-X PCS type
+                                                                 RXAUI     0x1            10GBASE-X PCS type
+                                                                 10G_R     0x0            10GBASE-R PCS type
+                                                                 40G_R     0x4            40GBASE-R PCS type
+                                                                 Other     Undefined      Reserved
+                                                                 \</pre\> */
+#else /* Word 0 - Little Endian */
+        uint64_t pcs_type              : 3;  /**< [  2:  0](RO/H) PCS type selection.
+                                                                 Note that this is a read-only field rather than read/write as
+                                                                 specified in 802.3.
+                                                                 The LPCS speed is instead configured by the associated
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE]. The read values returned by this field are as
+                                                                 follows:
+
+                                                                 \<pre\>
+                                                                           [PCS_TYPE]
+                                                                 LMAC_TYPE Read Value     Comment
+                                                                 -------------------------------------------------
+                                                                 XAUI      0x1            10GBASE-X PCS type
+                                                                 RXAUI     0x1            10GBASE-X PCS type
+                                                                 10G_R     0x0            10GBASE-R PCS type
+                                                                 40G_R     0x4            40GBASE-R PCS type
+                                                                 Other     Undefined      Reserved
+                                                                 \</pre\> */
+        uint64_t reserved_3_63         : 61;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_control2_s cn; */
+};
+typedef union bdk_bgxx_spux_control2 bdk_bgxx_spux_control2_t;
+
+static inline uint64_t BDK_BGXX_SPUX_CONTROL2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_CONTROL2(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_CONTROL2. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e0010018ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e0010018ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_CONTROL2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_CONTROL2(a,b) bdk_bgxx_spux_control2_t
+#define bustype_BDK_BGXX_SPUX_CONTROL2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_CONTROL2(a,b) "BGXX_SPUX_CONTROL2"
+#define device_bar_BDK_BGXX_SPUX_CONTROL2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_CONTROL2(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_CONTROL2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_fec_abil
+ *
+ * BGX SPU Forward Error Correction Ability Registers
+ */
+union bdk_bgxx_spux_fec_abil
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_fec_abil_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_2_63         : 62;
+        uint64_t err_abil              : 1;  /**< [  1:  1](RO/H) BASE-R FEC error-indication ability. Always 1 when the LPCS type is BASE-R,
+                                                                 i.e. BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x3 or 0x4. Always 0 otherwise. */
+        uint64_t fec_abil              : 1;  /**< [  0:  0](RO/H) BASE-R FEC ability. Always 1 when the LPCS type is BASE-R,
+                                                                 i.e. BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x3 or 0x4. Always 0 otherwise. */
+#else /* Word 0 - Little Endian */
+        uint64_t fec_abil              : 1;  /**< [  0:  0](RO/H) BASE-R FEC ability. Always 1 when the LPCS type is BASE-R,
+                                                                 i.e. BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x3 or 0x4. Always 0 otherwise. */
+        uint64_t err_abil              : 1;  /**< [  1:  1](RO/H) BASE-R FEC error-indication ability. Always 1 when the LPCS type is BASE-R,
+                                                                 i.e. BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x3 or 0x4. Always 0 otherwise. */
+        uint64_t reserved_2_63         : 62;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_fec_abil_s cn; */
+};
+typedef union bdk_bgxx_spux_fec_abil bdk_bgxx_spux_fec_abil_t;
+
+static inline uint64_t BDK_BGXX_SPUX_FEC_ABIL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_FEC_ABIL(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_FEC_ABIL. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e0010098ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e0010098ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_FEC_ABIL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_FEC_ABIL(a,b) bdk_bgxx_spux_fec_abil_t
+#define bustype_BDK_BGXX_SPUX_FEC_ABIL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_FEC_ABIL(a,b) "BGXX_SPUX_FEC_ABIL"
+#define device_bar_BDK_BGXX_SPUX_FEC_ABIL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_FEC_ABIL(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_FEC_ABIL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_fec_control
+ *
+ * BGX SPU Forward Error Correction Control Registers
+ */
+union bdk_bgxx_spux_fec_control
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_fec_control_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_2_63         : 62;
+        uint64_t err_en                : 1;  /**< [  1:  1](R/W) BASE-R FEC error-indication enable. This bit corresponds to FEC_Enable_Error_to_PCS
+                                                                 variable for BASE-R as defined in 802.3 Clause 74. When FEC is enabled ([FEC_EN] is set)
+                                                                 and this bit is set, the FEC decoder on the receive side signals an
+                                                                 uncorrectable FEC error to the BASE-R decoder by driving a value of 2'b11 on the sync bits
+                                                                 for some of the 32 64/66 bit blocks belonging to the uncorrectable FEC block. See
+                                                                 802.3-2008/802.3ba-2010 section 74.7.4.5.1 for more details. */
+        uint64_t fec_en                : 1;  /**< [  0:  0](R/W) BASE-R FEC enable. When this bit is set and the LPCS type is BASE-R
+                                                                 (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x4), forward error correction is enabled. FEC is
+                                                                 disabled otherwise. Forward error correction is defined in IEEE Std
+                                                                 802.3-2008/802.3ba-2010 Clause 74. */
+#else /* Word 0 - Little Endian */
+        uint64_t fec_en                : 1;  /**< [  0:  0](R/W) BASE-R FEC enable. When this bit is set and the LPCS type is BASE-R
+                                                                 (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x4), forward error correction is enabled. FEC is
+                                                                 disabled otherwise. Forward error correction is defined in IEEE Std
+                                                                 802.3-2008/802.3ba-2010 Clause 74. */
+        uint64_t err_en                : 1;  /**< [  1:  1](R/W) BASE-R FEC error-indication enable. This bit corresponds to FEC_Enable_Error_to_PCS
+                                                                 variable for BASE-R as defined in 802.3 Clause 74. When FEC is enabled ([FEC_EN] is set)
+                                                                 and this bit is set, the FEC decoder on the receive side signals an
+                                                                 uncorrectable FEC error to the BASE-R decoder by driving a value of 2'b11 on the sync bits
+                                                                 for some of the 32 64/66 bit blocks belonging to the uncorrectable FEC block. See
+                                                                 802.3-2008/802.3ba-2010 section 74.7.4.5.1 for more details. */
+        uint64_t reserved_2_63         : 62;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_fec_control_s cn; */
+};
+typedef union bdk_bgxx_spux_fec_control bdk_bgxx_spux_fec_control_t;
+
+static inline uint64_t BDK_BGXX_SPUX_FEC_CONTROL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_FEC_CONTROL(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of BGX(a)_SPU(b)_FEC_CONTROL. CN81XX and CN88XX
+       expose two BGX blocks (a<=1) and share the same address formula, so
+       their model checks are folded into one branch; CN83XX exposes four
+       blocks (a<=3). All parts have four LMACs per block (b<=3). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return 0x87e0e00100a0ll + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return 0x87e0e00100a0ll + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    /* Unknown model or out-of-range index: report and fail. */
+    __bdk_csr_fatal("BGXX_SPUX_FEC_CONTROL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_FEC_CONTROL(a,b) bdk_bgxx_spux_fec_control_t
+#define bustype_BDK_BGXX_SPUX_FEC_CONTROL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_FEC_CONTROL(a,b) "BGXX_SPUX_FEC_CONTROL"
+#define device_bar_BDK_BGXX_SPUX_FEC_CONTROL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_FEC_CONTROL(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_FEC_CONTROL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_fec_corr_blks01
+ *
+ * BGX SPU FEC Corrected-Blocks Counters 0/1 Registers
+ * This register is valid only when the LPCS type is BASE-R
+ * (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x3 or 0x4). The FEC corrected-block counters are
+ * defined in IEEE 802.3 section 74.8.4.1. Each corrected-blocks counter increments by 1 for a
+ * corrected FEC block, i.e. an FEC block that has been received with invalid parity on the
+ * associated PCS lane and has been corrected by the FEC decoder. The counter is reset to all 0s
+ * when the register is read, and held at all 1s in case of overflow.
+ *
+ * The reset operation takes precedence over the increment operation; if the register is read on
+ * the same clock cycle as an increment operation, the counter is reset to all 0s and the
+ * increment operation is lost. The counters are writable for test purposes, rather than read-
+ * only as specified in IEEE 802.3.
+ */
+union bdk_bgxx_spux_fec_corr_blks01
+{
+    /* [u] is the raw 64-bit register image; [s] is the bit-field view of the same word.
+       Field order is endian-dependent and fixed by hardware -- do not reorder. */
+    uint64_t u;
+    struct bdk_bgxx_spux_fec_corr_blks01_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t ln1_corr_blks         : 32; /**< [ 63: 32](RC/W/H) PCS Lane 1 FEC corrected blocks.
+                                                                 * For 10GBASE-R, reserved.
+                                                                 * For 40GBASE-R, correspond to the IEEE 802.3 FEC_corrected_blocks_counter_1 variable
+                                                                 (registers 1.302-1.303). */
+        uint64_t ln0_corr_blks         : 32; /**< [ 31:  0](RC/W/H) PCS Lane 0 FEC corrected blocks.
+                                                                 * For 10GBASE-R, corresponds to the IEEE 802.3 FEC_corrected_blocks_counter variable
+                                                                 (registers 1.172-1.173).
+                                                                 * For 40GBASE-R, correspond to the IEEE 802.3 FEC_corrected_blocks_counter_0 variable
+                                                                 (registers 1.300-1.301). */
+#else /* Word 0 - Little Endian */
+        uint64_t ln0_corr_blks         : 32; /**< [ 31:  0](RC/W/H) PCS Lane 0 FEC corrected blocks.
+                                                                 * For 10GBASE-R, corresponds to the IEEE 802.3 FEC_corrected_blocks_counter variable
+                                                                 (registers 1.172-1.173).
+                                                                 * For 40GBASE-R, correspond to the IEEE 802.3 FEC_corrected_blocks_counter_0 variable
+                                                                 (registers 1.300-1.301). */
+        uint64_t ln1_corr_blks         : 32; /**< [ 63: 32](RC/W/H) PCS Lane 1 FEC corrected blocks.
+                                                                 * For 10GBASE-R, reserved.
+                                                                 * For 40GBASE-R, correspond to the IEEE 802.3 FEC_corrected_blocks_counter_1 variable
+                                                                 (registers 1.302-1.303). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_fec_corr_blks01_s cn; */
+};
+typedef union bdk_bgxx_spux_fec_corr_blks01 bdk_bgxx_spux_fec_corr_blks01_t;
+
+static inline uint64_t BDK_BGXX_SPUX_FEC_CORR_BLKS01(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_FEC_CORR_BLKS01(unsigned long a, unsigned long b)
+{
+    /* Fixed base; BGX block stride is 16MB, LMAC stride is 1MB. */
+    const uint64_t base = 0x87e0e00100a8ll;
+
+    if (b <= 3)
+    {
+        /* CN81XX/CN88XX expose two BGX blocks; CN83XX exposes four. */
+        if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return base + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if ((a <= 3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return base + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_SPUX_FEC_CORR_BLKS01", 2, a, b, 0, 0);
+}
+
+/* Companion lookup macros for BGXX_SPUX_FEC_CORR_BLKS01 (type, bus type,
+   printable name, BAR, bus number, diagnostic argument list). */
+#define typedef_BDK_BGXX_SPUX_FEC_CORR_BLKS01(a,b) bdk_bgxx_spux_fec_corr_blks01_t
+#define bustype_BDK_BGXX_SPUX_FEC_CORR_BLKS01(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_FEC_CORR_BLKS01(a,b) "BGXX_SPUX_FEC_CORR_BLKS01"
+#define device_bar_BDK_BGXX_SPUX_FEC_CORR_BLKS01(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_FEC_CORR_BLKS01(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_FEC_CORR_BLKS01(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_fec_corr_blks23
+ *
+ * BGX SPU FEC Corrected-Blocks Counters 2/3 Registers
+ * This register is valid only when the LPCS type is 40GBASE-R
+ * (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x4). The FEC corrected-block counters are defined in
+ * IEEE 802.3 section 74.8.4.1. Each corrected-blocks counter increments by 1 for a corrected FEC
+ * block, i.e. an FEC block that has been received with invalid parity on the associated PCS lane
+ * and has been corrected by the FEC decoder. The counter is reset to all 0s when the register is
+ * read, and held at all 1s in case of overflow.
+ *
+ * The reset operation takes precedence over the increment operation; if the register is read on
+ * the same clock cycle as an increment operation, the counter is reset to all 0s and the
+ * increment operation is lost. The counters are writable for test purposes, rather than read-
+ * only as specified in IEEE 802.3.
+ */
+union bdk_bgxx_spux_fec_corr_blks23
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_fec_corr_blks23_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t ln3_corr_blks         : 32; /**< [ 63: 32](RC/W/H) PCS Lane 3 FEC corrected blocks. Correspond to the IEEE 802.3
+                                                                 FEC_corrected_blocks_counter_3 variable (registers 1.306-1.307). */
+        uint64_t ln2_corr_blks         : 32; /**< [ 31:  0](RC/W/H) PCS Lane 2 FEC corrected blocks. Correspond to the IEEE 802.3
+                                                                 FEC_corrected_blocks_counter_2 variable (registers 1.304-1.305). */
+#else /* Word 0 - Little Endian */
+        uint64_t ln2_corr_blks         : 32; /**< [ 31:  0](RC/W/H) PCS Lane 2 FEC corrected blocks. Correspond to the IEEE 802.3
+                                                                 FEC_corrected_blocks_counter_2 variable (registers 1.304-1.305). */
+        uint64_t ln3_corr_blks         : 32; /**< [ 63: 32](RC/W/H) PCS Lane 3 FEC corrected blocks. Correspond to the IEEE 802.3
+                                                                 FEC_corrected_blocks_counter_3 variable (registers 1.306-1.307). */
+#endif /* Word 0 - End */
+    } s;
+    /* No chip-specific layout overrides are generated for this register. */
+    /* struct bdk_bgxx_spux_fec_corr_blks23_s cn; */
+};
+typedef union bdk_bgxx_spux_fec_corr_blks23 bdk_bgxx_spux_fec_corr_blks23_t;
+
+static inline uint64_t BDK_BGXX_SPUX_FEC_CORR_BLKS23(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_FEC_CORR_BLKS23(unsigned long a, unsigned long b)
+{
+    /* Fixed base; BGX block stride is 16MB, LMAC stride is 1MB. */
+    const uint64_t base = 0x87e0e00100b0ll;
+
+    if (b <= 3)
+    {
+        /* CN81XX/CN88XX expose two BGX blocks; CN83XX exposes four. */
+        if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return base + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if ((a <= 3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return base + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_SPUX_FEC_CORR_BLKS23", 2, a, b, 0, 0);
+}
+
+/* Companion lookup macros for BGXX_SPUX_FEC_CORR_BLKS23 (type, bus type,
+   printable name, BAR, bus number, diagnostic argument list). */
+#define typedef_BDK_BGXX_SPUX_FEC_CORR_BLKS23(a,b) bdk_bgxx_spux_fec_corr_blks23_t
+#define bustype_BDK_BGXX_SPUX_FEC_CORR_BLKS23(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_FEC_CORR_BLKS23(a,b) "BGXX_SPUX_FEC_CORR_BLKS23"
+#define device_bar_BDK_BGXX_SPUX_FEC_CORR_BLKS23(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_FEC_CORR_BLKS23(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_FEC_CORR_BLKS23(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_fec_uncorr_blks01
+ *
+ * BGX SPU FEC Uncorrected-Blocks Counters 0/1 Registers
+ * This register is valid only when the LPCS type is BASE-R
+ * (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x3 or 0x4). The FEC corrected-block counters are
+ * defined in IEEE 802.3 section 74.8.4.2. Each uncorrected-blocks counter increments by 1 for an
+ * uncorrected FEC block, i.e. an FEC block that has been received with invalid parity on the
+ * associated PCS lane and has not been corrected by the FEC decoder. The counter is reset to all
+ * 0s when the register is read, and held at all 1s in case of overflow.
+ *
+ * The reset operation takes precedence over the increment operation; if the register is read on
+ * the same clock cycle as an increment operation, the counter is reset to all 0s and the
+ * increment operation is lost. The counters are writable for test purposes, rather than read-
+ * only as specified in IEEE 802.3.
+ */
+union bdk_bgxx_spux_fec_uncorr_blks01
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_fec_uncorr_blks01_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t ln1_uncorr_blks       : 32; /**< [ 63: 32](RC/W/H) PCS Lane 1 FEC uncorrected blocks.
+                                                                 * For 10GBASE-R, reserved.
+                                                                 * For 40GBASE-R, corresponds to the IEEE 802.3 FEC_uncorrected_blocks_counter_1 variable
+                                                                 (registers 1.702-1.703). */
+        uint64_t ln0_uncorr_blks       : 32; /**< [ 31:  0](RC/W/H) PCS Lane 0 FEC uncorrected blocks.
+                                                                 * For 10GBASE-R, corresponds to the IEEE 802.3 FEC_uncorrected_blocks_counter variable
+                                                                 (registers 1.174-1.175).
+                                                                 * For 40GBASE-R, correspond to the IEEE 802.3 FEC_uncorrected_blocks_counter_0 variable
+                                                                 (registers 1.700-1.701). */
+#else /* Word 0 - Little Endian */
+        uint64_t ln0_uncorr_blks       : 32; /**< [ 31:  0](RC/W/H) PCS Lane 0 FEC uncorrected blocks.
+                                                                 * For 10GBASE-R, corresponds to the IEEE 802.3 FEC_uncorrected_blocks_counter variable
+                                                                 (registers 1.174-1.175).
+                                                                 * For 40GBASE-R, correspond to the IEEE 802.3 FEC_uncorrected_blocks_counter_0 variable
+                                                                 (registers 1.700-1.701). */
+        uint64_t ln1_uncorr_blks       : 32; /**< [ 63: 32](RC/W/H) PCS Lane 1 FEC uncorrected blocks.
+                                                                 * For 10GBASE-R, reserved.
+                                                                 * For 40GBASE-R, corresponds to the IEEE 802.3 FEC_uncorrected_blocks_counter_1 variable
+                                                                 (registers 1.702-1.703). */
+#endif /* Word 0 - End */
+    } s;
+    /* No chip-specific layout overrides are generated for this register. */
+    /* struct bdk_bgxx_spux_fec_uncorr_blks01_s cn; */
+};
+typedef union bdk_bgxx_spux_fec_uncorr_blks01 bdk_bgxx_spux_fec_uncorr_blks01_t;
+
+static inline uint64_t BDK_BGXX_SPUX_FEC_UNCORR_BLKS01(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_FEC_UNCORR_BLKS01(unsigned long a, unsigned long b)
+{
+    /* Fixed base; BGX block stride is 16MB, LMAC stride is 1MB. */
+    const uint64_t base = 0x87e0e00100b8ll;
+
+    if (b <= 3)
+    {
+        /* CN81XX/CN88XX expose two BGX blocks; CN83XX exposes four. */
+        if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return base + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if ((a <= 3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return base + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_SPUX_FEC_UNCORR_BLKS01", 2, a, b, 0, 0);
+}
+
+/* Companion lookup macros for BGXX_SPUX_FEC_UNCORR_BLKS01 (type, bus type,
+   printable name, BAR, bus number, diagnostic argument list). */
+#define typedef_BDK_BGXX_SPUX_FEC_UNCORR_BLKS01(a,b) bdk_bgxx_spux_fec_uncorr_blks01_t
+#define bustype_BDK_BGXX_SPUX_FEC_UNCORR_BLKS01(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_FEC_UNCORR_BLKS01(a,b) "BGXX_SPUX_FEC_UNCORR_BLKS01"
+#define device_bar_BDK_BGXX_SPUX_FEC_UNCORR_BLKS01(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_FEC_UNCORR_BLKS01(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_FEC_UNCORR_BLKS01(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_fec_uncorr_blks23
+ *
+ * BGX SPU FEC Uncorrected-Blocks Counters 2/3 Registers
+ * This register is valid only when the LPCS type is 40GBASE-R
+ * (BGX()_CMR()_CONFIG[LMAC_TYPE] = 0x4). The FEC uncorrected-block counters are defined
+ * in IEEE 802.3 section 74.8.4.2. Each corrected-blocks counter increments by 1 for an
+ * uncorrected FEC block, i.e. an FEC block that has been received with invalid parity on the
+ * associated PCS lane and has not been corrected by the FEC decoder. The counter is reset to all
+ * 0s when the register is read, and held at all 1s in case of overflow.
+ *
+ * The reset operation takes precedence over the increment operation; if the register is read on
+ * the same clock cycle as an increment operation, the counter is reset to all 0s and the
+ * increment operation is lost. The counters are writable for test purposes, rather than read-
+ * only as specified in IEEE 802.3.
+ */
+union bdk_bgxx_spux_fec_uncorr_blks23
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_fec_uncorr_blks23_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t ln3_uncorr_blks       : 32; /**< [ 63: 32](RC/W/H) PCS Lane 3 FEC uncorrected blocks. Corresponds to the IEEE 802.3
+                                                                 FEC_uncorrected_blocks_counter_3 variable (registers 1.706-1.707). */
+        uint64_t ln2_uncorr_blks       : 32; /**< [ 31:  0](RC/W/H) PCS Lane 2 FEC uncorrected blocks. Corresponds to the IEEE 802.3
+                                                                 FEC_uncorrected_blocks_counter_2 variable (registers 1.704-1.705). */
+#else /* Word 0 - Little Endian */
+        uint64_t ln2_uncorr_blks       : 32; /**< [ 31:  0](RC/W/H) PCS Lane 2 FEC uncorrected blocks. Corresponds to the IEEE 802.3
+                                                                 FEC_uncorrected_blocks_counter_2 variable (registers 1.704-1.705). */
+        uint64_t ln3_uncorr_blks       : 32; /**< [ 63: 32](RC/W/H) PCS Lane 3 FEC uncorrected blocks. Corresponds to the IEEE 802.3
+                                                                 FEC_uncorrected_blocks_counter_3 variable (registers 1.706-1.707). */
+#endif /* Word 0 - End */
+    } s;
+    /* No chip-specific layout overrides are generated for this register. */
+    /* struct bdk_bgxx_spux_fec_uncorr_blks23_s cn; */
+};
+typedef union bdk_bgxx_spux_fec_uncorr_blks23 bdk_bgxx_spux_fec_uncorr_blks23_t;
+
+static inline uint64_t BDK_BGXX_SPUX_FEC_UNCORR_BLKS23(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_FEC_UNCORR_BLKS23(unsigned long a, unsigned long b)
+{
+    /* Fixed base; BGX block stride is 16MB, LMAC stride is 1MB. */
+    const uint64_t base = 0x87e0e00100c0ll;
+
+    if (b <= 3)
+    {
+        /* CN81XX/CN88XX expose two BGX blocks; CN83XX exposes four. */
+        if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return base + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if ((a <= 3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return base + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_SPUX_FEC_UNCORR_BLKS23", 2, a, b, 0, 0);
+}
+
+/* Companion lookup macros for BGXX_SPUX_FEC_UNCORR_BLKS23 (type, bus type,
+   printable name, BAR, bus number, diagnostic argument list). */
+#define typedef_BDK_BGXX_SPUX_FEC_UNCORR_BLKS23(a,b) bdk_bgxx_spux_fec_uncorr_blks23_t
+#define bustype_BDK_BGXX_SPUX_FEC_UNCORR_BLKS23(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_FEC_UNCORR_BLKS23(a,b) "BGXX_SPUX_FEC_UNCORR_BLKS23"
+#define device_bar_BDK_BGXX_SPUX_FEC_UNCORR_BLKS23(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_FEC_UNCORR_BLKS23(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_FEC_UNCORR_BLKS23(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_int
+ *
+ * BGX SPU Interrupt Registers
+ * All bits in this register are R/W1C/H: set by hardware on the described
+ * event and cleared by software writing 1.
+ */
+union bdk_bgxx_spux_int
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_int_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t training_failure      : 1;  /**< [ 14: 14](R/W1C/H) BASE-R PMD training failure. Set when BASE-R PMD link training has failed on the 10GBASE-R
+                                                                 lane or any 40GBASE-R lane. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R and
+                                                                 BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is 1, and never set otherwise. */
+        uint64_t training_done         : 1;  /**< [ 13: 13](R/W1C/H) BASE-R PMD training done. Set when the 10GBASE-R lane or all 40GBASE-R lanes have
+                                                                 successfully completed BASE-R PMD link training. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R and
+                                                                 BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is 1, and never set otherwise. */
+        uint64_t an_complete           : 1;  /**< [ 12: 12](R/W1C/H) Autonegotiation complete. Set when BGX()_SPU()_AN_STATUS[AN_COMPLETE] is set,
+                                                                 indicating that the autonegotiation process has been completed and the link is up and
+                                                                 running using the negotiated highest common denominator (HCD) technology. */
+        uint64_t an_link_good          : 1;  /**< [ 11: 11](R/W1C/H) Autonegotiation link good. Set when the an_link_good variable is set as defined in
+                                                                 802.3-2008 Figure 73-11, indicating that autonegotiation has completed. */
+        uint64_t an_page_rx            : 1;  /**< [ 10: 10](R/W1C/H) Autonegotiation page received. This bit is set along with
+                                                                 BGX()_SPU()_AN_STATUS[PAGE_RX] when a new page has been received and stored in
+                                                                 BGX()_SPU()_AN_LP_BASE or BGX()_SPU()_AN_LP_XNP. */
+        uint64_t fec_uncorr            : 1;  /**< [  9:  9](R/W1C/H) Uncorrectable FEC error. Set when an FEC block with an uncorrectable error is received on
+                                                                 the 10GBASE-R lane or any 40GBASE-R lane. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R, and never set otherwise. */
+        uint64_t fec_corr              : 1;  /**< [  8:  8](R/W1C/H) Correctable FEC error. Set when an FEC block with a correctable error is received on the
+                                                                 10GBASE-R lane or any 40GBASE-R lane. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R, and never set otherwise. */
+        uint64_t bip_err               : 1;  /**< [  7:  7](R/W1C/H) 40GBASE-R bit interleaved parity error. Set when a BIP error is detected on any lane.
+                                                                 Valid if the LPCS type selected by BGX()_CMR()_CONFIG[LMAC_TYPE] is 40GBASE-R, and
+                                                                 never set otherwise. */
+        uint64_t dbg_sync              : 1;  /**< [  6:  6](R/W1C/H) Sync failure debug. This interrupt is provided for link problem debugging help. It is set
+                                                                 as follows based on the LPCS type selected by BGX()_CMR()_CONFIG[LMAC_TYPE], and
+                                                                 whether FEC is enabled or disabled by BGX()_SPU()_FEC_CONTROL[FEC_EN]:
+                                                                 * XAUI or RXAUI: Set when any lane's PCS synchronization state transitions from
+                                                                 SYNC_ACQUIRED_1 to SYNC_ACQUIRED_2 (see 802.3-2008 Figure 48-7).
+                                                                 * 10GBASE-R or 40GBASE-R with FEC disabled: Set when sh_invalid_cnt increments to 1 while
+                                                                 BLOCK_LOCK is 1 (see 802.3-2008 Figure 49-12 and 802.3ba-2010 Figure 82-20).
+                                                                 * 10GBASE-R or 40GBASE-R with FEC enabled: Set when parity_invalid_cnt increments to 1
+                                                                 while fec_block_lock is 1 (see 802.3-2008 Figure 74-8). */
+        uint64_t algnlos               : 1;  /**< [  5:  5](R/W1C/H) Loss of lane alignment. Set when lane-to-lane alignment is lost. This is only valid if the
+                                                                 logical PCS is a multilane type (i.e. XAUI, RXAUI or 40GBASE-R is selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE]), and is never set otherwise. */
+        uint64_t synlos                : 1;  /**< [  4:  4](R/W1C/H) Loss of lane sync. Lane code-group or block synchronization is lost on one or more lanes
+                                                                 associated with the LMAC/LPCS. Set as follows based on the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE], and whether FEC is enabled or disabled by
+                                                                 BGX()_SPU()_FEC_CONTROL[FEC_EN]:
+                                                                 * XAUI or RXAUI: Set when any lane's PCS synchronization state transitions to LOSS_OF_SYNC
+                                                                 (see 802.3-2008 Figure 48-7)
+                                                                 * 10GBASE-R or 40GBASE-R with FEC disabled: set when the BLOCK_LOCK variable is cleared on
+                                                                 the 10G lane or any 40G lane (see 802.3-2008 Figure 49-12 and 802.3ba-2010 Figure 82-20).
+                                                                 * 10GBASE-R or 40GBASE-R with FEC enabled: set when the fec_block_lock variable is cleared
+                                                                 on the 10G lane or any 40G lane (see 802.3-2008 Figure 74-8). */
+        uint64_t bitlckls              : 1;  /**< [  3:  3](R/W1C/H) Bit lock lost on one or more lanes associated with the LMAC/LPCS. */
+        uint64_t err_blk               : 1;  /**< [  2:  2](R/W1C/H) Errored block received. Set when an errored BASE-R block is received as described for
+                                                                 BGX()_SPU()_BR_STATUS2[ERR_BLKS]. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R, and never set otherwise. */
+        uint64_t rx_link_down          : 1;  /**< [  1:  1](R/W1C/H) Set when the receive link goes down, which is the same condition that sets
+                                                                 BGX()_SPU()_STATUS2[RCVFLT]. */
+        uint64_t rx_link_up            : 1;  /**< [  0:  0](R/W1C/H) Set when the receive link comes up, which is the same condition that allows the setting of
+                                                                 BGX()_SPU()_STATUS1[RCV_LNK]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_link_up            : 1;  /**< [  0:  0](R/W1C/H) Set when the receive link comes up, which is the same condition that allows the setting of
+                                                                 BGX()_SPU()_STATUS1[RCV_LNK]. */
+        uint64_t rx_link_down          : 1;  /**< [  1:  1](R/W1C/H) Set when the receive link goes down, which is the same condition that sets
+                                                                 BGX()_SPU()_STATUS2[RCVFLT]. */
+        uint64_t err_blk               : 1;  /**< [  2:  2](R/W1C/H) Errored block received. Set when an errored BASE-R block is received as described for
+                                                                 BGX()_SPU()_BR_STATUS2[ERR_BLKS]. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R, and never set otherwise. */
+        uint64_t bitlckls              : 1;  /**< [  3:  3](R/W1C/H) Bit lock lost on one or more lanes associated with the LMAC/LPCS. */
+        uint64_t synlos                : 1;  /**< [  4:  4](R/W1C/H) Loss of lane sync. Lane code-group or block synchronization is lost on one or more lanes
+                                                                 associated with the LMAC/LPCS. Set as follows based on the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE], and whether FEC is enabled or disabled by
+                                                                 BGX()_SPU()_FEC_CONTROL[FEC_EN]:
+                                                                 * XAUI or RXAUI: Set when any lane's PCS synchronization state transitions to LOSS_OF_SYNC
+                                                                 (see 802.3-2008 Figure 48-7)
+                                                                 * 10GBASE-R or 40GBASE-R with FEC disabled: set when the BLOCK_LOCK variable is cleared on
+                                                                 the 10G lane or any 40G lane (see 802.3-2008 Figure 49-12 and 802.3ba-2010 Figure 82-20).
+                                                                 * 10GBASE-R or 40GBASE-R with FEC enabled: set when the fec_block_lock variable is cleared
+                                                                 on the 10G lane or any 40G lane (see 802.3-2008 Figure 74-8). */
+        uint64_t algnlos               : 1;  /**< [  5:  5](R/W1C/H) Loss of lane alignment. Set when lane-to-lane alignment is lost. This is only valid if the
+                                                                 logical PCS is a multilane type (i.e. XAUI, RXAUI or 40GBASE-R is selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE]), and is never set otherwise. */
+        uint64_t dbg_sync              : 1;  /**< [  6:  6](R/W1C/H) Sync failure debug. This interrupt is provided for link problem debugging help. It is set
+                                                                 as follows based on the LPCS type selected by BGX()_CMR()_CONFIG[LMAC_TYPE], and
+                                                                 whether FEC is enabled or disabled by BGX()_SPU()_FEC_CONTROL[FEC_EN]:
+                                                                 * XAUI or RXAUI: Set when any lane's PCS synchronization state transitions from
+                                                                 SYNC_ACQUIRED_1 to SYNC_ACQUIRED_2 (see 802.3-2008 Figure 48-7).
+                                                                 * 10GBASE-R or 40GBASE-R with FEC disabled: Set when sh_invalid_cnt increments to 1 while
+                                                                 BLOCK_LOCK is 1 (see 802.3-2008 Figure 49-12 and 802.3ba-2010 Figure 82-20).
+                                                                 * 10GBASE-R or 40GBASE-R with FEC enabled: Set when parity_invalid_cnt increments to 1
+                                                                 while fec_block_lock is 1 (see 802.3-2008 Figure 74-8). */
+        uint64_t bip_err               : 1;  /**< [  7:  7](R/W1C/H) 40GBASE-R bit interleaved parity error. Set when a BIP error is detected on any lane.
+                                                                 Valid if the LPCS type selected by BGX()_CMR()_CONFIG[LMAC_TYPE] is 40GBASE-R, and
+                                                                 never set otherwise. */
+        uint64_t fec_corr              : 1;  /**< [  8:  8](R/W1C/H) Correctable FEC error. Set when an FEC block with a correctable error is received on the
+                                                                 10GBASE-R lane or any 40GBASE-R lane. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R, and never set otherwise. */
+        uint64_t fec_uncorr            : 1;  /**< [  9:  9](R/W1C/H) Uncorrectable FEC error. Set when an FEC block with an uncorrectable error is received on
+                                                                 the 10GBASE-R lane or any 40GBASE-R lane. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R, and never set otherwise. */
+        uint64_t an_page_rx            : 1;  /**< [ 10: 10](R/W1C/H) Autonegotiation page received. This bit is set along with
+                                                                 BGX()_SPU()_AN_STATUS[PAGE_RX] when a new page has been received and stored in
+                                                                 BGX()_SPU()_AN_LP_BASE or BGX()_SPU()_AN_LP_XNP. */
+        uint64_t an_link_good          : 1;  /**< [ 11: 11](R/W1C/H) Autonegotiation link good. Set when the an_link_good variable is set as defined in
+                                                                 802.3-2008 Figure 73-11, indicating that autonegotiation has completed. */
+        uint64_t an_complete           : 1;  /**< [ 12: 12](R/W1C/H) Autonegotiation complete. Set when BGX()_SPU()_AN_STATUS[AN_COMPLETE] is set,
+                                                                 indicating that the autonegotiation process has been completed and the link is up and
+                                                                 running using the negotiated highest common denominator (HCD) technology. */
+        uint64_t training_done         : 1;  /**< [ 13: 13](R/W1C/H) BASE-R PMD training done. Set when the 10GBASE-R lane or all 40GBASE-R lanes have
+                                                                 successfully completed BASE-R PMD link training. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R and
+                                                                 BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is 1, and never set otherwise. */
+        uint64_t training_failure      : 1;  /**< [ 14: 14](R/W1C/H) BASE-R PMD training failure. Set when BASE-R PMD link training has failed on the 10GBASE-R
+                                                                 lane or any 40GBASE-R lane. Valid if the LPCS type selected by
+                                                                 BGX()_CMR()_CONFIG[LMAC_TYPE] is 10GBASE-R or 40GBASE-R and
+                                                                 BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is 1, and never set otherwise. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* No chip-specific layout overrides are generated for this register. */
+    /* struct bdk_bgxx_spux_int_s cn; */
+};
+typedef union bdk_bgxx_spux_int bdk_bgxx_spux_int_t;
+
+static inline uint64_t BDK_BGXX_SPUX_INT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_INT(unsigned long a, unsigned long b)
+{
+    /* Fixed base; BGX block stride is 16MB, LMAC stride is 1MB. */
+    const uint64_t base = 0x87e0e0010220ll;
+
+    if (b <= 3)
+    {
+        /* CN81XX/CN88XX expose two BGX blocks; CN83XX exposes four. */
+        if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return base + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if ((a <= 3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return base + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_SPUX_INT", 2, a, b, 0, 0);
+}
+
+/* Companion lookup macros for BGXX_SPUX_INT (type, bus type, printable name,
+   BAR, bus number, diagnostic argument list). */
+#define typedef_BDK_BGXX_SPUX_INT(a,b) bdk_bgxx_spux_int_t
+#define bustype_BDK_BGXX_SPUX_INT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_INT(a,b) "BGXX_SPUX_INT"
+#define device_bar_BDK_BGXX_SPUX_INT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_INT(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_INT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_int_ena_w1c
+ *
+ * BGX SPU Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_bgxx_spux_int_ena_w1c
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_int_ena_w1c_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t training_failure      : 1;  /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t training_done         : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t an_complete           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t an_link_good          : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_page_rx            : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t fec_uncorr            : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t fec_corr              : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t bip_err               : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t dbg_sync              : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t algnlos               : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t synlos                : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t bitlckls              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t err_blk               : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t rx_link_down          : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t rx_link_up            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[RX_LINK_UP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_link_up            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[RX_LINK_UP]. */
+        uint64_t rx_link_down          : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t err_blk               : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t bitlckls              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t synlos                : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t algnlos               : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t dbg_sync              : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t bip_err               : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t fec_corr              : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t fec_uncorr            : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t an_page_rx            : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t an_link_good          : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_complete           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t training_done         : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t training_failure      : 1;  /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_int_ena_w1c_s cn81xx; */
+    /* struct bdk_bgxx_spux_int_ena_w1c_s cn88xx; */
+    /* The CN83XX view below differs from _s only in the BGX index range cited
+       in the field comments (BGX(0..3) instead of BGX(0..1)); the bit layout
+       is identical. */
+    struct bdk_bgxx_spux_int_ena_w1c_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t training_failure      : 1;  /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t training_done         : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t an_complete           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t an_link_good          : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_page_rx            : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t fec_uncorr            : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t fec_corr              : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t bip_err               : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t dbg_sync              : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t algnlos               : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t synlos                : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t bitlckls              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t err_blk               : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t rx_link_down          : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t rx_link_up            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[RX_LINK_UP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_link_up            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[RX_LINK_UP]. */
+        uint64_t rx_link_down          : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t err_blk               : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t bitlckls              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t synlos                : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t algnlos               : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t dbg_sync              : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t bip_err               : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t fec_corr              : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t fec_uncorr            : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t an_page_rx            : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t an_link_good          : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_complete           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t training_done         : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t training_failure      : 1;  /**< [ 14: 14](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_spux_int_ena_w1c bdk_bgxx_spux_int_ena_w1c_t;
+
+static inline uint64_t BDK_BGXX_SPUX_INT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_INT_ENA_W1C(unsigned long a, unsigned long b)
+{
+    /* Fixed base; BGX block stride is 16MB, LMAC stride is 1MB. */
+    const uint64_t base = 0x87e0e0010230ll;
+
+    if (b <= 3)
+    {
+        /* CN81XX/CN88XX expose two BGX blocks; CN83XX exposes four. */
+        if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)))
+            return base + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+        if ((a <= 3) && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+            return base + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    }
+    __bdk_csr_fatal("BGXX_SPUX_INT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_INT_ENA_W1C(a,b) bdk_bgxx_spux_int_ena_w1c_t
+#define bustype_BDK_BGXX_SPUX_INT_ENA_W1C(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_INT_ENA_W1C(a,b) "BGXX_SPUX_INT_ENA_W1C"
+#define device_bar_BDK_BGXX_SPUX_INT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_INT_ENA_W1C(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_INT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_int_ena_w1s
+ *
+ * BGX SPU Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_bgxx_spux_int_ena_w1s
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_int_ena_w1s_s
+    {
+    /* Bit-fields are declared in opposite order per endianness so both
+       layouts map each field onto the same physical bit positions. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t training_failure : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t training_done : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t an_complete : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t an_link_good : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_page_rx : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t fec_uncorr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t fec_corr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t bip_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t dbg_sync : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t algnlos : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t synlos : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t bitlckls : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t err_blk : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t rx_link_down : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t rx_link_up : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[RX_LINK_UP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_link_up : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[RX_LINK_UP]. */
+        uint64_t rx_link_down : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t err_blk : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t bitlckls : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t synlos : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t algnlos : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t dbg_sync : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t bip_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t fec_corr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t fec_uncorr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t an_page_rx : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t an_link_good : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_complete : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t training_done : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t training_failure : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_int_ena_w1s_s cn81xx; */
+    /* struct bdk_bgxx_spux_int_ena_w1s_s cn88xx; */
+    /* CN83XX variant: identical bit layout to the generic struct; only the
+       field descriptions differ (BGX index range 0..3 instead of 0..1,
+       because CN83XX has four BGX blocks). */
+    struct bdk_bgxx_spux_int_ena_w1s_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t training_failure : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t training_done : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t an_complete : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t an_link_good : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_page_rx : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t fec_uncorr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t fec_corr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t bip_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t dbg_sync : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t algnlos : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t synlos : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t bitlckls : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t err_blk : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t rx_link_down : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t rx_link_up : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[RX_LINK_UP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_link_up : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[RX_LINK_UP]. */
+        uint64_t rx_link_down : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t err_blk : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t bitlckls : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t synlos : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t algnlos : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t dbg_sync : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t bip_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t fec_corr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t fec_uncorr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t an_page_rx : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t an_link_good : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_complete : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t training_done : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t training_failure : 1; /**< [ 14: 14](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_spux_int_ena_w1s bdk_bgxx_spux_int_ena_w1s_t;
+
+/*
+ * Compute the physical CSR address of BGX(a)_SPU(b)_INT_ENA_W1S (base
+ * 0x87e0e0010238, BGX stride 0x1000000, LMAC stride 0x100000).
+ * CN81XX/CN88XX have BGX units 0..1, CN83XX has 0..3; out-of-range indices
+ * fall through to __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_BGXX_SPUX_INT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_INT_ENA_W1S(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010238ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0010238ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010238ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_SPUX_INT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_INT_ENA_W1S(a,b) bdk_bgxx_spux_int_ena_w1s_t
+#define bustype_BDK_BGXX_SPUX_INT_ENA_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_INT_ENA_W1S(a,b) "BGXX_SPUX_INT_ENA_W1S"
+#define device_bar_BDK_BGXX_SPUX_INT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_INT_ENA_W1S(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_INT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_int_w1s
+ *
+ * BGX SPU Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_bgxx_spux_int_w1s
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_int_w1s_s
+    {
+    /* Bit-fields are declared in opposite order per endianness so both
+       layouts map each field onto the same physical bit positions. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t training_failure : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t training_done : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t an_complete : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t an_link_good : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_page_rx : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t fec_uncorr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t fec_corr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t bip_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t dbg_sync : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t algnlos : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t synlos : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t bitlckls : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t err_blk : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t rx_link_down : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t rx_link_up : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[RX_LINK_UP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_link_up : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[RX_LINK_UP]. */
+        uint64_t rx_link_down : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t err_blk : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t bitlckls : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t synlos : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t algnlos : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t dbg_sync : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t bip_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t fec_corr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t fec_uncorr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t an_page_rx : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t an_link_good : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_complete : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t training_done : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t training_failure : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..1)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_int_w1s_s cn81xx; */
+    /* struct bdk_bgxx_spux_int_w1s_s cn88xx; */
+    /* CN83XX variant: identical bit layout to the generic struct; only the
+       field descriptions differ (BGX index range 0..3 instead of 0..1,
+       because CN83XX has four BGX blocks). */
+    struct bdk_bgxx_spux_int_w1s_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t training_failure : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t training_done : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t an_complete : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t an_link_good : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_page_rx : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t fec_uncorr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t fec_corr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t bip_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t dbg_sync : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t algnlos : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t synlos : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t bitlckls : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t err_blk : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t rx_link_down : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t rx_link_up : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[RX_LINK_UP]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_link_up : 1; /**< [ 0: 0](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[RX_LINK_UP]. */
+        uint64_t rx_link_down : 1; /**< [ 1: 1](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[RX_LINK_DOWN]. */
+        uint64_t err_blk : 1; /**< [ 2: 2](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[ERR_BLK]. */
+        uint64_t bitlckls : 1; /**< [ 3: 3](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[BITLCKLS]. */
+        uint64_t synlos : 1; /**< [ 4: 4](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[SYNLOS]. */
+        uint64_t algnlos : 1; /**< [ 5: 5](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[ALGNLOS]. */
+        uint64_t dbg_sync : 1; /**< [ 6: 6](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[DBG_SYNC]. */
+        uint64_t bip_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[BIP_ERR]. */
+        uint64_t fec_corr : 1; /**< [ 8: 8](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[FEC_CORR]. */
+        uint64_t fec_uncorr : 1; /**< [ 9: 9](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[FEC_UNCORR]. */
+        uint64_t an_page_rx : 1; /**< [ 10: 10](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[AN_PAGE_RX]. */
+        uint64_t an_link_good : 1; /**< [ 11: 11](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[AN_LINK_GOOD]. */
+        uint64_t an_complete : 1; /**< [ 12: 12](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[AN_COMPLETE]. */
+        uint64_t training_done : 1; /**< [ 13: 13](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[TRAINING_DONE]. */
+        uint64_t training_failure : 1; /**< [ 14: 14](R/W1S/H) Reads or sets BGX(0..3)_SPU(0..3)_INT[TRAINING_FAILURE]. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_spux_int_w1s bdk_bgxx_spux_int_w1s_t;
+
+/*
+ * Compute the physical CSR address of BGX(a)_SPU(b)_INT_W1S (base
+ * 0x87e0e0010228, BGX stride 0x1000000, LMAC stride 0x100000).
+ * CN81XX/CN88XX have BGX units 0..1, CN83XX has 0..3; out-of-range indices
+ * fall through to __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_BGXX_SPUX_INT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_INT_W1S(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010228ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0010228ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010228ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_SPUX_INT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_INT_W1S(a,b) bdk_bgxx_spux_int_w1s_t
+#define bustype_BDK_BGXX_SPUX_INT_W1S(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_INT_W1S(a,b) "BGXX_SPUX_INT_W1S"
+#define device_bar_BDK_BGXX_SPUX_INT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_INT_W1S(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_INT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_lpcs_states
+ *
+ * BGX SPU BASE-X Transmit/Receive States Registers
+ * All fields are read-only hardware state (RO/H) exposing the deskew,
+ * BASE-X receive and BASE-R receive state machines.
+ */
+union bdk_bgxx_spux_lpcs_states
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_lpcs_states_s
+    {
+    /* Bit-fields are declared in opposite order per endianness so both
+       layouts map each field onto the same physical bit positions. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t br_rx_sm : 3; /**< [ 14: 12](RO/H) BASE-R receive state machine state */
+        uint64_t reserved_10_11 : 2;
+        uint64_t bx_rx_sm : 2; /**< [ 9: 8](RO/H) BASE-X receive state machine state */
+        uint64_t deskew_am_found : 4; /**< [ 7: 4](RO/H) 40GBASE-R deskew state machine alignment marker found flag per logical PCS lane ID. */
+        uint64_t reserved_3 : 1;
+        uint64_t deskew_sm : 3; /**< [ 2: 0](RO/H) BASE-X and 40GBASE-R deskew state machine state */
+#else /* Word 0 - Little Endian */
+        uint64_t deskew_sm : 3; /**< [ 2: 0](RO/H) BASE-X and 40GBASE-R deskew state machine state */
+        uint64_t reserved_3 : 1;
+        uint64_t deskew_am_found : 4; /**< [ 7: 4](RO/H) 40GBASE-R deskew state machine alignment marker found flag per logical PCS lane ID. */
+        uint64_t bx_rx_sm : 2; /**< [ 9: 8](RO/H) BASE-X receive state machine state */
+        uint64_t reserved_10_11 : 2;
+        uint64_t br_rx_sm : 3; /**< [ 14: 12](RO/H) BASE-R receive state machine state */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_lpcs_states_s cn; */
+};
+typedef union bdk_bgxx_spux_lpcs_states bdk_bgxx_spux_lpcs_states_t;
+
+/*
+ * Compute the physical CSR address of BGX(a)_SPU(b)_LPCS_STATES (base
+ * 0x87e0e0010208, BGX stride 0x1000000, LMAC stride 0x100000).
+ * CN81XX/CN88XX have BGX units 0..1, CN83XX has 0..3; out-of-range indices
+ * fall through to __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_BGXX_SPUX_LPCS_STATES(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_LPCS_STATES(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010208ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0010208ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010208ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_SPUX_LPCS_STATES", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_LPCS_STATES(a,b) bdk_bgxx_spux_lpcs_states_t
+#define bustype_BDK_BGXX_SPUX_LPCS_STATES(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_LPCS_STATES(a,b) "BGXX_SPUX_LPCS_STATES"
+#define device_bar_BDK_BGXX_SPUX_LPCS_STATES(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_LPCS_STATES(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_LPCS_STATES(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_misc_control
+ *
+ * BGX SPU Miscellaneous Control Registers
+ * "* RX logical PCS lane polarity vector \<3:0\> = [XOR_RXPLRT]\<3:0\> ^ {4{[RXPLRT]}}.
+ * * TX logical PCS lane polarity vector \<3:0\> = [XOR_TXPLRT]\<3:0\> ^ {4{[TXPLRT]}}.
+ *
+ * In short, keep [RXPLRT] and [TXPLRT] cleared, and use [XOR_RXPLRT] and [XOR_TXPLRT] fields to
+ * define
+ * the polarity per logical PCS lane. Only bit 0 of vector is used for 10GBASE-R, and only bits
+ * 1:0 of vector are used for RXAUI."
+ */
+union bdk_bgxx_spux_misc_control
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_misc_control_s
+    {
+    /* Bit-fields are declared in opposite order per endianness so both
+       layouts map each field onto the same physical bit positions. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_13_63 : 51;
+        uint64_t rx_packet_dis : 1; /**< [ 12: 12](R/W) Receive packet disable. Software can set or clear this bit at any time to gracefully
+                                                                 disable or re-enable packet reception by the LPCS. If this bit is set while a packet is
+                                                                 being received, the packet is completed and all subsequent received packets are discarded
+                                                                 by the LPCS. Similarly, if this bit is cleared while a received packet is being discarded,
+                                                                 packet reception resumes after the current packet is fully discarded. When set for a
+                                                                 40GBASE-R or 10GBASE-R LMAC/LPCS type (selected by BGX()_CMR()_CONFIG[LMAC_TYPE]),
+                                                                 received errors and faults will be ignored while receive packets are discarded; idles will
+                                                                 be sent to the MAC layer (SMU) and the errored blocks counter
+                                                                 (BGX()_SPU()_BR_STATUS2[ERR_BLKS]) will not increment. */
+        uint64_t skip_after_term : 1; /**< [ 11: 11](R/W) Enable sending of idle skip after terminate. This bit is meaningful when the logical PCS
+                                                                 type is XAUI or RXAUI (selected by BGX()_CMR()_CONFIG[LMAC_TYPE]), and has no
+                                                                 effect otherwise. When set, the LMAC/LPCS transmits more idle skip columns for clock
+                                                                 compensation. Typically set in HiGig/HiGig2 modes; clear otherwise. This field can be set
+                                                                 to ensure sufficient density of XAUI idle skip (||R||) columns with a small transmit
+                                                                 inter-frame gap (IFG) in order to allow the link partner's receiver to delete ||R||
+                                                                 columns as needed for clock rate compensation. It is usually set when the LMAC's transmit
+                                                                 IFG is set to eight bytes in HiGig/HiGig2 modes (i.e. BGX()_SMU()_TX_IFG[IFG1] +
+                                                                 BGX()_SMU()_TX_IFG[IFG2] = 8), and should be cleared when the transmit IFG is
+                                                                 greater than eight bytes. When this bit is set, the SPU will send an ||R|| column after a
+                                                                 ||T0|| column (terminate in lane 0) if no ||R|| was sent in the previous IFG. This is a
+                                                                 minor deviation from the functionality specified in 802.3-2008 Figure 48-6 (PCS transmit
+                                                                 source state diagram), whereby the state will transition directly from SEND_DATA to
+                                                                 SEND_RANDOM_R after ||T0|| if no ||R|| was transmitted in the previous IFG. Sending ||R||
+                                                                 after ||T0|| only (and not ||T1||, |T2|| or ||T3||) ensures that the check_end function at
+                                                                 the receiving end, as defined in 802.3-2008 sub-clause 48.2.6.1.4, does not detect an
+                                                                 error due to this functional change. When this bit is clear, the LMAC will fully conform
+                                                                 to the functionality specified in Figure 48-6. */
+        uint64_t intlv_rdisp : 1; /**< [ 10: 10](R/W) RXAUI interleaved running disparity. This bit is meaningful when the logical PCS type is
+                                                                 RXAUI (BGX()_CMR()_CONFIG[LMAC_TYPE] = RXAUI), and has no effect otherwise. It
+                                                                 selects which disparity calculation to use when combining or splitting the RXAUI lanes, as
+                                                                 follows:
+
+                                                                 _ 0 = Common running disparity. Common running disparity is computed for even and odd
+                                                                 code-
+                                                                 groups of an RXAUI lane, i.e. interleave lanes before PCS layer as described in the Dune
+                                                                 Networks/Broadcom RXAUI v2.1 specification. This obeys 6.25GHz SerDes disparity.
+
+                                                                 _ 1 = Interleaved running disparity: Running disparity is computed separately for even and
+                                                                 odd code-groups of an RXAUI lane, i.e. interleave lanes after PCS layer as described in
+                                                                 the Marvell RXAUI Interface specification. This does not obey 6.25GHz SerDes disparity. */
+        uint64_t xor_rxplrt : 4; /**< [ 9: 6](R/W) RX polarity control per logical PCS lane */
+        uint64_t xor_txplrt : 4; /**< [ 5: 2](R/W) TX polarity control per logical PCS lane */
+        uint64_t rxplrt : 1; /**< [ 1: 1](R/W) Receive polarity. 1 = inverted polarity. 0 = normal polarity. */
+        uint64_t txplrt : 1; /**< [ 0: 0](R/W) Transmit polarity. 1 = inverted polarity. 0 = normal polarity. */
+#else /* Word 0 - Little Endian */
+        uint64_t txplrt : 1; /**< [ 0: 0](R/W) Transmit polarity. 1 = inverted polarity. 0 = normal polarity. */
+        uint64_t rxplrt : 1; /**< [ 1: 1](R/W) Receive polarity. 1 = inverted polarity. 0 = normal polarity. */
+        uint64_t xor_txplrt : 4; /**< [ 5: 2](R/W) TX polarity control per logical PCS lane */
+        uint64_t xor_rxplrt : 4; /**< [ 9: 6](R/W) RX polarity control per logical PCS lane */
+        uint64_t intlv_rdisp : 1; /**< [ 10: 10](R/W) RXAUI interleaved running disparity. This bit is meaningful when the logical PCS type is
+                                                                 RXAUI (BGX()_CMR()_CONFIG[LMAC_TYPE] = RXAUI), and has no effect otherwise. It
+                                                                 selects which disparity calculation to use when combining or splitting the RXAUI lanes, as
+                                                                 follows:
+
+                                                                 _ 0 = Common running disparity. Common running disparity is computed for even and odd
+                                                                 code-
+                                                                 groups of an RXAUI lane, i.e. interleave lanes before PCS layer as described in the Dune
+                                                                 Networks/Broadcom RXAUI v2.1 specification. This obeys 6.25GHz SerDes disparity.
+
+                                                                 _ 1 = Interleaved running disparity: Running disparity is computed separately for even and
+                                                                 odd code-groups of an RXAUI lane, i.e. interleave lanes after PCS layer as described in
+                                                                 the Marvell RXAUI Interface specification. This does not obey 6.25GHz SerDes disparity. */
+        uint64_t skip_after_term : 1; /**< [ 11: 11](R/W) Enable sending of idle skip after terminate. This bit is meaningful when the logical PCS
+                                                                 type is XAUI or RXAUI (selected by BGX()_CMR()_CONFIG[LMAC_TYPE]), and has no
+                                                                 effect otherwise. When set, the LMAC/LPCS transmits more idle skip columns for clock
+                                                                 compensation. Typically set in HiGig/HiGig2 modes; clear otherwise. This field can be set
+                                                                 to ensure sufficient density of XAUI idle skip (||R||) columns with a small transmit
+                                                                 inter-frame gap (IFG) in order to allow the link partner's receiver to delete ||R||
+                                                                 columns as needed for clock rate compensation. It is usually set when the LMAC's transmit
+                                                                 IFG is set to eight bytes in HiGig/HiGig2 modes (i.e. BGX()_SMU()_TX_IFG[IFG1] +
+                                                                 BGX()_SMU()_TX_IFG[IFG2] = 8), and should be cleared when the transmit IFG is
+                                                                 greater than eight bytes. When this bit is set, the SPU will send an ||R|| column after a
+                                                                 ||T0|| column (terminate in lane 0) if no ||R|| was sent in the previous IFG. This is a
+                                                                 minor deviation from the functionality specified in 802.3-2008 Figure 48-6 (PCS transmit
+                                                                 source state diagram), whereby the state will transition directly from SEND_DATA to
+                                                                 SEND_RANDOM_R after ||T0|| if no ||R|| was transmitted in the previous IFG. Sending ||R||
+                                                                 after ||T0|| only (and not ||T1||, |T2|| or ||T3||) ensures that the check_end function at
+                                                                 the receiving end, as defined in 802.3-2008 sub-clause 48.2.6.1.4, does not detect an
+                                                                 error due to this functional change. When this bit is clear, the LMAC will fully conform
+                                                                 to the functionality specified in Figure 48-6. */
+        uint64_t rx_packet_dis : 1; /**< [ 12: 12](R/W) Receive packet disable. Software can set or clear this bit at any time to gracefully
+                                                                 disable or re-enable packet reception by the LPCS. If this bit is set while a packet is
+                                                                 being received, the packet is completed and all subsequent received packets are discarded
+                                                                 by the LPCS. Similarly, if this bit is cleared while a received packet is being discarded,
+                                                                 packet reception resumes after the current packet is fully discarded. When set for a
+                                                                 40GBASE-R or 10GBASE-R LMAC/LPCS type (selected by BGX()_CMR()_CONFIG[LMAC_TYPE]),
+                                                                 received errors and faults will be ignored while receive packets are discarded; idles will
+                                                                 be sent to the MAC layer (SMU) and the errored blocks counter
+                                                                 (BGX()_SPU()_BR_STATUS2[ERR_BLKS]) will not increment. */
+        uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_misc_control_s cn; */
+};
+typedef union bdk_bgxx_spux_misc_control bdk_bgxx_spux_misc_control_t;
+
+/*
+ * Compute the physical CSR address of BGX(a)_SPU(b)_MISC_CONTROL (base
+ * 0x87e0e0010218, BGX stride 0x1000000, LMAC stride 0x100000).
+ * CN81XX/CN88XX have BGX units 0..1, CN83XX has 0..3; out-of-range indices
+ * fall through to __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_BGXX_SPUX_MISC_CONTROL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_MISC_CONTROL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010218ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0010218ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010218ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_SPUX_MISC_CONTROL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_MISC_CONTROL(a,b) bdk_bgxx_spux_misc_control_t
+#define bustype_BDK_BGXX_SPUX_MISC_CONTROL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_MISC_CONTROL(a,b) "BGXX_SPUX_MISC_CONTROL"
+#define device_bar_BDK_BGXX_SPUX_MISC_CONTROL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_MISC_CONTROL(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_MISC_CONTROL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_spd_abil
+ *
+ * BGX SPU PCS Speed Ability Registers
+ * Capability bits are hard-wired per the field descriptions: [TENGB] and
+ * [FORTYGB] always read 1, [TENPASST] and [HUNDREDGB] always read 0.
+ */
+union bdk_bgxx_spux_spd_abil
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_spd_abil_s
+    {
+    /* Bit-fields are declared in opposite order per endianness so both
+       layouts map each field onto the same physical bit positions. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63 : 60;
+        uint64_t hundredgb : 1; /**< [ 3: 3](RO) 100G capable. Always 0. */
+        uint64_t fortygb : 1; /**< [ 2: 2](RO/H) 40G capable. Always 1. */
+        uint64_t tenpasst : 1; /**< [ 1: 1](RO) 10PASS-TS/2BASE-TL capable. Always 0. */
+        uint64_t tengb : 1; /**< [ 0: 0](RO/H) 10G capable. Always 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t tengb : 1; /**< [ 0: 0](RO/H) 10G capable. Always 1. */
+        uint64_t tenpasst : 1; /**< [ 1: 1](RO) 10PASS-TS/2BASE-TL capable. Always 0. */
+        uint64_t fortygb : 1; /**< [ 2: 2](RO/H) 40G capable. Always 1. */
+        uint64_t hundredgb : 1; /**< [ 3: 3](RO) 100G capable. Always 0. */
+        uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_spd_abil_s cn; */
+};
+typedef union bdk_bgxx_spux_spd_abil bdk_bgxx_spux_spd_abil_t;
+
+/*
+ * Compute the physical CSR address of BGX(a)_SPU(b)_SPD_ABIL (base
+ * 0x87e0e0010010, BGX stride 0x1000000, LMAC stride 0x100000).
+ * CN81XX/CN88XX have BGX units 0..1, CN83XX has 0..3; out-of-range indices
+ * fall through to __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_BGXX_SPUX_SPD_ABIL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_SPD_ABIL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010010ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0010010ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010010ll + 0x1000000ll * ((a) & 0x1) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_SPUX_SPD_ABIL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPUX_SPD_ABIL(a,b) bdk_bgxx_spux_spd_abil_t
+#define bustype_BDK_BGXX_SPUX_SPD_ABIL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_SPD_ABIL(a,b) "BGXX_SPUX_SPD_ABIL"
+#define device_bar_BDK_BGXX_SPUX_SPD_ABIL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_SPD_ABIL(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_SPD_ABIL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_status1
+ *
+ * BGX SPU Status 1 Registers
+ * [FLT] is the logical OR of BGX()_SPU()_STATUS2[XMTFLT,RCVFLT];
+ * [RCV_LNK] is a latching-low link-status bit (write 1 to set).
+ */
+union bdk_bgxx_spux_status1
+{
+    uint64_t u;
+    struct bdk_bgxx_spux_status1_s
+    {
+    /* Bit-fields are declared in opposite order per endianness so both
+       layouts map each field onto the same physical bit positions. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t flt : 1; /**< [ 7: 7](RO/H) Fault condition detected.
+                                                                 This bit is a logical OR of BGX()_SPU()_STATUS2[XMTFLT, RCVFLT]. */
+        uint64_t reserved_3_6 : 4;
+        uint64_t rcv_lnk : 1; /**< [ 2: 2](R/W1S/H) PCS receive link status:
+                                                                 0 = receive link down.
+                                                                 1 = receive link up.
+
+                                                                 This is a latching-low bit; it stays clear until a write-1-to-set by software.
+
+                                                                 For a BASE-X logical PCS type (in the associated BGX()_CMR()_CONFIG[LMAC_TYPE] =
+                                                                 XAUI or RXAUI), this is a latching-low version of BGX()_SPU()_BX_STATUS[ALIGND].
+
+                                                                 For a BASE-R logical PCS type (in the associated BGX()_CMR()_CONFIG[LMAC_TYPE] =
+                                                                 10G_R or 40G_R), this is a latching-low version of
+                                                                 BGX()_SPU()_BR_STATUS1[RCV_LNK]. */
+        uint64_t lpable : 1; /**< [ 1: 1](RO) Low-power ability. Always returns 1 to indicate that the LPCS supports low-power mode. */
+        uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0 : 1;
+        uint64_t lpable : 1; /**< [ 1: 1](RO) Low-power ability. Always returns 1 to indicate that the LPCS supports low-power mode. */
+        uint64_t rcv_lnk : 1; /**< [ 2: 2](R/W1S/H) PCS receive link status:
+                                                                 0 = receive link down.
+                                                                 1 = receive link up.
+
+                                                                 This is a latching-low bit; it stays clear until a write-1-to-set by software.
+
+                                                                 For a BASE-X logical PCS type (in the associated BGX()_CMR()_CONFIG[LMAC_TYPE] =
+                                                                 XAUI or RXAUI), this is a latching-low version of BGX()_SPU()_BX_STATUS[ALIGND].
+
+                                                                 For a BASE-R logical PCS type (in the associated BGX()_CMR()_CONFIG[LMAC_TYPE] =
+                                                                 10G_R or 40G_R), this is a latching-low version of
+                                                                 BGX()_SPU()_BR_STATUS1[RCV_LNK]. */
+        uint64_t reserved_3_6 : 4;
+        uint64_t flt : 1; /**< [ 7: 7](RO/H) Fault condition detected.
+                                                                 This bit is a logical OR of BGX()_SPU()_STATUS2[XMTFLT, RCVFLT]. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_status1_s cn; */
+};
+typedef union bdk_bgxx_spux_status1 bdk_bgxx_spux_status1_t;
+
+static inline uint64_t BDK_BGXX_SPUX_STATUS1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_STATUS1(unsigned long a, unsigned long b)
+{
+    /* CSR address: fixed base, 16 MB stride per BGX block (a), 1 MB per LMAC (b). */
+    const uint64_t base = 0x87e0e0010008ll;
+    /* CN81XX and CN88XX expose BGX0..1 and produce identical addresses; CN83XX exposes BGX0..3. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return base + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return base + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    __bdk_csr_fatal("BGXX_SPUX_STATUS1", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_SPUX_STATUS1(a,b) bdk_bgxx_spux_status1_t
+#define bustype_BDK_BGXX_SPUX_STATUS1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_STATUS1(a,b) "BGXX_SPUX_STATUS1"
+#define device_bar_BDK_BGXX_SPUX_STATUS1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_STATUS1(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_STATUS1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu#_status2
+ *
+ * BGX SPU Status 2 Registers
+ */
+union bdk_bgxx_spux_status2
+{
+    uint64_t u; /* Entire register accessed as one 64-bit value. */
+    struct bdk_bgxx_spux_status2_s
+    {
+    /* The same fields are declared twice, once per byte order; the preprocessor keeps one layout. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63 : 48;
+        uint64_t dev : 2; /**< [ 15: 14](RO/H) Device present. Always returns 0x2 to indicate a device is present at this address. */
+        uint64_t reserved_12_13 : 2;
+        uint64_t xmtflt : 1; /**< [ 11: 11](RO/H) Transmit fault. Always returns 0. */
+        uint64_t rcvflt : 1; /**< [ 10: 10](R/W1C/H) Receive fault: 1 = receive fault. 0 = no receive fault. Latching high bit; stays set until
+                               a write-1-to-clear by software. */
+        uint64_t reserved_6_9 : 4;
+        uint64_t hundredgb_r : 1; /**< [ 5: 5](RO) 100GBASE-R capable. Always 0. */
+        uint64_t fortygb_r : 1; /**< [ 4: 4](RO/H) 40GBASE-R capable. Always 1. */
+        uint64_t tengb_t : 1; /**< [ 3: 3](RO) 10GBASE-T capable. Always 0. */
+        uint64_t tengb_w : 1; /**< [ 2: 2](RO) 10GBASE-W capable. Always 0. */
+        uint64_t tengb_x : 1; /**< [ 1: 1](RO/H) 10GBASE-X capable. Always 1. */
+        uint64_t tengb_r : 1; /**< [ 0: 0](RO/H) 10GBASE-R capable. Always 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t tengb_r : 1; /**< [ 0: 0](RO/H) 10GBASE-R capable. Always 1. */
+        uint64_t tengb_x : 1; /**< [ 1: 1](RO/H) 10GBASE-X capable. Always 1. */
+        uint64_t tengb_w : 1; /**< [ 2: 2](RO) 10GBASE-W capable. Always 0. */
+        uint64_t tengb_t : 1; /**< [ 3: 3](RO) 10GBASE-T capable. Always 0. */
+        uint64_t fortygb_r : 1; /**< [ 4: 4](RO/H) 40GBASE-R capable. Always 1. */
+        uint64_t hundredgb_r : 1; /**< [ 5: 5](RO) 100GBASE-R capable. Always 0. */
+        uint64_t reserved_6_9 : 4;
+        uint64_t rcvflt : 1; /**< [ 10: 10](R/W1C/H) Receive fault: 1 = receive fault. 0 = no receive fault. Latching high bit; stays set until
+                               a write-1-to-clear by software. */
+        uint64_t xmtflt : 1; /**< [ 11: 11](RO/H) Transmit fault. Always returns 0. */
+        uint64_t reserved_12_13 : 2;
+        uint64_t dev : 2; /**< [ 15: 14](RO/H) Device present. Always returns 0x2 to indicate a device is present at this address. */
+        uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spux_status2_s cn; */
+};
+typedef union bdk_bgxx_spux_status2 bdk_bgxx_spux_status2_t;
+
+static inline uint64_t BDK_BGXX_SPUX_STATUS2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPUX_STATUS2(unsigned long a, unsigned long b)
+{
+    /* CSR address: fixed base, 16 MB stride per BGX block (a), 1 MB per LMAC (b). */
+    const uint64_t base = 0x87e0e0010020ll;
+    /* CN81XX and CN88XX expose BGX0..1 and produce identical addresses; CN83XX exposes BGX0..3. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1) && (b<=3))
+        return base + 0x1000000ll * (a & 0x1) + 0x100000ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3) && (b<=3))
+        return base + 0x1000000ll * (a & 0x3) + 0x100000ll * (b & 0x3);
+    __bdk_csr_fatal("BGXX_SPUX_STATUS2", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_SPUX_STATUS2(a,b) bdk_bgxx_spux_status2_t
+#define bustype_BDK_BGXX_SPUX_STATUS2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPUX_STATUS2(a,b) "BGXX_SPUX_STATUS2"
+#define device_bar_BDK_BGXX_SPUX_STATUS2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPUX_STATUS2(a,b) (a)
+#define arguments_BDK_BGXX_SPUX_STATUS2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_bist_status
+ *
+ * BGX SPU BIST Status Registers
+ * This register provides memory BIST status from the SPU receive buffer lane FIFOs.
+ */
+union bdk_bgxx_spu_bist_status
+{
+    uint64_t u; /* Entire register accessed as one 64-bit value. */
+    struct bdk_bgxx_spu_bist_status_s
+    {
+    /* The same fields are declared twice, once per byte order; the preprocessor keeps one layout. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63 : 60;
+        uint64_t rx_buf_bist_status : 4; /**< [ 3: 0](RO/H) SPU receive buffer BIST status for lanes 3-0. One bit per SerDes lane, set to indicate
+                               BIST
+                               failure for the associated receive buffer lane FIFO. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_bist_status : 4; /**< [ 3: 0](RO/H) SPU receive buffer BIST status for lanes 3-0. One bit per SerDes lane, set to indicate
+                               BIST
+                               failure for the associated receive buffer lane FIFO. */
+        uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_bist_status_s cn; */
+};
+typedef union bdk_bgxx_spu_bist_status bdk_bgxx_spu_bist_status_t;
+
+static inline uint64_t BDK_BGXX_SPU_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_BIST_STATUS(unsigned long a)
+{
+    /* Per-BGX register: fixed base plus a 16 MB stride per BGX block (a). */
+    const uint64_t base = 0x87e0e0010330ll;
+    /* CN81XX and CN88XX expose BGX0..1 and produce identical addresses; CN83XX exposes BGX0..3. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1))
+        return base + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return base + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_SPU_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_SPU_BIST_STATUS(a) bdk_bgxx_spu_bist_status_t
+#define bustype_BDK_BGXX_SPU_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_BIST_STATUS(a) "BGXX_SPU_BIST_STATUS"
+#define device_bar_BDK_BGXX_SPU_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_BIST_STATUS(a) (a)
+#define arguments_BDK_BGXX_SPU_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_dbg_control
+ *
+ * BGX SPU Debug Control Registers
+ */
+union bdk_bgxx_spu_dbg_control
+{
+    uint64_t u; /* Entire register accessed as one 64-bit value. */
+    struct bdk_bgxx_spu_dbg_control_s
+    {
+    /* The same fields are declared twice, once per byte order; the preprocessor keeps one layout. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_56_63 : 8;
+        uint64_t ms_clk_period : 12; /**< [ 55: 44](R/W) Millisecond clock period. Specifies the number of microsecond clock ticks per millisecond,
+                               minus one. The default value of 999 (0x3E7) should be used during normal operation; other
+                               values may be used for test/debug purposes. */
+        uint64_t us_clk_period : 12; /**< [ 43: 32](R/W) Microsecond clock period. Specifies the number of SCLK cycles per microseconds, minus one.
+                               For example, if SCLK runs at 1.3 GHz, the number of SCLK cycles per microsecond is 1,300
+                               so the value of this field should be 1,299 (0x513). This is used by the BASE-R BER monitor
+                               timers. */
+        uint64_t reserved_31 : 1;
+        uint64_t br_ber_mon_dis : 1; /**< [ 30: 30](R/W) BASE-R bit error rate monitor disable. This bit should be clear for normal operation.
+                               Setting it disables the BASE-R BER monitor state machine defined in 802.3-2008 Figure
+                               49-13 for 10GBASE-R and 802.3ba-2010 Figure 82-13 for 40GBASE-R. */
+        uint64_t an_nonce_match_dis : 1; /**< [ 29: 29](R/W) Autonegotiation nonce match disable. This bit should be clear for normal operation.
+                               Setting it disables Nonce Match check by forcing nonce_match variable to 0 in the
+                               autonegotiation arbitration state diagram, as defined in 802.3-2008 Figure 73-11. This bit
+                               can
+                               be set by software for test purposes, e.g. for running autonegotiation in loopback mode. */
+        uint64_t timestamp_norm_dis : 1; /**< [ 28: 28](R/W) 40GBASE-R RX timestamp normalization disable. This bit controls the generation of the
+                               receive SOP timestamp passed to the SMU sub-block for a 40GBASE-R LMAC/LPCS. When this bit
+                               is clear, SPU normalizes the receive SOP timestamp in order to compensate for lane-to-lane
+                               skew on a 40GBASE-R link, as described below. When this bit is set, timestamp
+                               normalization is disabled and SPU directly passes the captured SOP timestamp values to
+                               SMU.
+
+                               In 40GBASE-R mode, a packet's SOP block can be transferred on any of the LMAC's lanes. In
+                               the presence of lane-to-lane skew, the SOP delay from transmit (by the link partner) to
+                               receive by SPU varies depending on which lane is used by the SOP block. This variation
+                               reduces the accuracy of the received SOP timestamp relative to when it was transmitted by
+                               the link partner.
+
+                               SPU captures the timestamp of the alignment marker received on each SerDes lane during
+                               align/skew detection; the captured value can be read from the SerDes lane's
+                               BGX()_SPU_SDS()_SKEW_STATUS[SKEW_STATUS] field (BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP]
+                               sub-field). If
+                               alignment markers are transmitted at about the same time on all lanes by the link partner,
+                               then the difference between the BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP] values for a pair
+                               of lanes represents the
+                               approximate skew between those lanes.
+
+                               SPU uses the 40GBASE-R LMAC's programmed PCS lane 0 as a reference and computes the
+                               BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP] delta of every other lane relative to PCS lane 0.
+                               When normalization is
+                               enabled, SPU adjusts the timestamp of a received SOP by subtracting the receiving lane's
+                               BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP] delta from the captured timestamp value. The
+                               adjusted/normalized timestamp
+                               value is then passed to SMU along with the SOP.
+
+                               Software can determine the actual maximum skew of a 40GBASE-R link by examining the
+                               BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP] values in the BGX()_SPU_SDS()_SKEW_STATUS
+                               registers, and decide if
+                               timestamp normalization should be enabled or disabled to improve PTP accuracy.
+                               Normalization improves accuracy for larger skew values but reduces the accuracy (due to
+                               timestamp measurement errors) for small skew values. */
+        uint64_t rx_buf_flip_synd : 8; /**< [ 27: 20](R/W) Flip SPU receive buffer FIFO ECC bits. Two bits per SerDes lane; used to inject single-bit
+                               and
+                               double-bit errors into the ECC field on writes to the associated SPU receive buffer lane
+                               FIFO, as
+                               follows:
+                               0x0 = Normal operation.
+                               0x1 = SBE on ECC bit 0.
+                               0x2 = SBE on ECC bit 1.
+                               0x3 = DBE on ECC bits 1:0. */
+        uint64_t br_pmd_train_soft_en : 1; /**< [ 19: 19](R/W) Enable BASE-R PMD software controlled link training. This bit configures the operation
+                               mode for BASE-R link training for all LMACs and lanes. When this bit is set along with
+                               BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] for a given LMAC, the BASE-R link training
+                               protocol for that LMAC is executed under software control, whereby the contents the
+                               BGX()_SPU()_BR_PMD_LD_CUP and BGX()_SPU()_BR_PMD_LD_REP registers are
+                               updated by software. When this bit is clear and
+                               BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set, the link training protocol is fully
+                               automated in hardware, whereby the contents BGX()_SPU()_BR_PMD_LD_CUP and
+                               BGX()_SPU()_BR_PMD_LD_REP registers are automatically updated by hardware. */
+        uint64_t an_arb_link_chk_en : 1; /**< [ 18: 18](R/W) Enable link status checking by autonegotiation arbitration state machine. When
+                               autonegotiation is enabled (BGX()_SPU()_AN_CONTROL[AN_EN] is set), this bit controls
+                               the behavior of the autonegotiation arbitration state machine when it reaches the AN GOOD
+                               CHECK state after DME pages are successfully exchanged, as defined in Figure 73-11 in
+                               802.3-2008.
+
+                               When this bit is set and the negotiated highest common denominator (HCD) technology
+                               matches BGX()_CMR()_CONFIG[LMAC_TYPE], the autonegotiation arbitration SM
+                               performs the actions defined for the AN GOOD CHECK state in Figure 73-11, i.e. run the
+                               link_fail_inhibit timer and eventually transition to the AN GOOD or TRANSMIT DISABLE
+                               state.
+
+                               When this bit is clear or the HCD technology does not match BGX()_CMR()_CONFIG[LMAC_TYPE],
+                               the AN arbitration
+                               SM stays in the AN GOOD CHECK state, with the expectation that software will perform the
+                               appropriate actions to complete the autonegotiation protocol, as follows:
+
+                               * If this bit is clear and the HCD technology matches BGX()_CMR()_CONFIG[LMAC_TYPE], clear
+                               BGX()_SPU()_AN_CONTROL[AN_EN].
+
+                               * Otherwise, disable the LPCS by clearing the BGX()_CMR()_CONFIG[ENABLE], clear
+                               BGX()_SPU()_AN_CONTROL[AN_EN], reconfigure the LPCS with the correct
+                               BGX()_CMR()_CONFIG[LMAC_TYPE],
+                               and re-enable the LPCS by setting BGX()_CMR()_CONFIG[ENABLE].
+
+                               In both cases, software should implement the link_fail_inhibit timer and verify the link
+                               status as specified for the AN GOOD CHECK state. */
+        uint64_t rx_buf_cor_dis : 1; /**< [ 17: 17](R/W) When set, disables ECC correction on all SPU receive buffer FIFOs. */
+        uint64_t scramble_dis : 1; /**< [ 16: 16](R/W) BASE-R scrambler/descrambler disable. Setting this bit to 1 disables the BASE-R scrambler
+                               & descrambler functions and FEC PN-2112 scrambler & descrambler functions for debug
+                               purposes. */
+        uint64_t reserved_15 : 1;
+        uint64_t marker_rxp : 15; /**< [ 14: 0](R/W) BASE-R alignment marker receive period. For a multilane BASE-R logical PCS (i.e.
+                               40GBASE-R), this field specifies the expected alignment marker receive period per lane,
+                               i.e. the expected number of received 66b non-marker blocks between consecutive markers on
+                               the same lane. The default value corresponds to a period of 16363 blocks (exclusive) as
+                               specified in 802.3ba-2010. Must be greater than 64. */
+#else /* Word 0 - Little Endian */
+        uint64_t marker_rxp : 15; /**< [ 14: 0](R/W) BASE-R alignment marker receive period. For a multilane BASE-R logical PCS (i.e.
+                               40GBASE-R), this field specifies the expected alignment marker receive period per lane,
+                               i.e. the expected number of received 66b non-marker blocks between consecutive markers on
+                               the same lane. The default value corresponds to a period of 16363 blocks (exclusive) as
+                               specified in 802.3ba-2010. Must be greater than 64. */
+        uint64_t reserved_15 : 1;
+        uint64_t scramble_dis : 1; /**< [ 16: 16](R/W) BASE-R scrambler/descrambler disable. Setting this bit to 1 disables the BASE-R scrambler
+                               & descrambler functions and FEC PN-2112 scrambler & descrambler functions for debug
+                               purposes. */
+        uint64_t rx_buf_cor_dis : 1; /**< [ 17: 17](R/W) When set, disables ECC correction on all SPU receive buffer FIFOs. */
+        uint64_t an_arb_link_chk_en : 1; /**< [ 18: 18](R/W) Enable link status checking by autonegotiation arbitration state machine. When
+                               autonegotiation is enabled (BGX()_SPU()_AN_CONTROL[AN_EN] is set), this bit controls
+                               the behavior of the autonegotiation arbitration state machine when it reaches the AN GOOD
+                               CHECK state after DME pages are successfully exchanged, as defined in Figure 73-11 in
+                               802.3-2008.
+
+                               When this bit is set and the negotiated highest common denominator (HCD) technology
+                               matches BGX()_CMR()_CONFIG[LMAC_TYPE], the autonegotiation arbitration SM
+                               performs the actions defined for the AN GOOD CHECK state in Figure 73-11, i.e. run the
+                               link_fail_inhibit timer and eventually transition to the AN GOOD or TRANSMIT DISABLE
+                               state.
+
+                               When this bit is clear or the HCD technology does not match BGX()_CMR()_CONFIG[LMAC_TYPE],
+                               the AN arbitration
+                               SM stays in the AN GOOD CHECK state, with the expectation that software will perform the
+                               appropriate actions to complete the autonegotiation protocol, as follows:
+
+                               * If this bit is clear and the HCD technology matches BGX()_CMR()_CONFIG[LMAC_TYPE], clear
+                               BGX()_SPU()_AN_CONTROL[AN_EN].
+
+                               * Otherwise, disable the LPCS by clearing the BGX()_CMR()_CONFIG[ENABLE], clear
+                               BGX()_SPU()_AN_CONTROL[AN_EN], reconfigure the LPCS with the correct
+                               BGX()_CMR()_CONFIG[LMAC_TYPE],
+                               and re-enable the LPCS by setting BGX()_CMR()_CONFIG[ENABLE].
+
+                               In both cases, software should implement the link_fail_inhibit timer and verify the link
+                               status as specified for the AN GOOD CHECK state. */
+        uint64_t br_pmd_train_soft_en : 1; /**< [ 19: 19](R/W) Enable BASE-R PMD software controlled link training. This bit configures the operation
+                               mode for BASE-R link training for all LMACs and lanes. When this bit is set along with
+                               BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] for a given LMAC, the BASE-R link training
+                               protocol for that LMAC is executed under software control, whereby the contents the
+                               BGX()_SPU()_BR_PMD_LD_CUP and BGX()_SPU()_BR_PMD_LD_REP registers are
+                               updated by software. When this bit is clear and
+                               BGX()_SPU()_BR_PMD_CONTROL[TRAIN_EN] is set, the link training protocol is fully
+                               automated in hardware, whereby the contents BGX()_SPU()_BR_PMD_LD_CUP and
+                               BGX()_SPU()_BR_PMD_LD_REP registers are automatically updated by hardware. */
+        uint64_t rx_buf_flip_synd : 8; /**< [ 27: 20](R/W) Flip SPU receive buffer FIFO ECC bits. Two bits per SerDes lane; used to inject single-bit
+                               and
+                               double-bit errors into the ECC field on writes to the associated SPU receive buffer lane
+                               FIFO, as
+                               follows:
+                               0x0 = Normal operation.
+                               0x1 = SBE on ECC bit 0.
+                               0x2 = SBE on ECC bit 1.
+                               0x3 = DBE on ECC bits 1:0. */
+        uint64_t timestamp_norm_dis : 1; /**< [ 28: 28](R/W) 40GBASE-R RX timestamp normalization disable. This bit controls the generation of the
+                               receive SOP timestamp passed to the SMU sub-block for a 40GBASE-R LMAC/LPCS. When this bit
+                               is clear, SPU normalizes the receive SOP timestamp in order to compensate for lane-to-lane
+                               skew on a 40GBASE-R link, as described below. When this bit is set, timestamp
+                               normalization is disabled and SPU directly passes the captured SOP timestamp values to
+                               SMU.
+
+                               In 40GBASE-R mode, a packet's SOP block can be transferred on any of the LMAC's lanes. In
+                               the presence of lane-to-lane skew, the SOP delay from transmit (by the link partner) to
+                               receive by SPU varies depending on which lane is used by the SOP block. This variation
+                               reduces the accuracy of the received SOP timestamp relative to when it was transmitted by
+                               the link partner.
+
+                               SPU captures the timestamp of the alignment marker received on each SerDes lane during
+                               align/skew detection; the captured value can be read from the SerDes lane's
+                               BGX()_SPU_SDS()_SKEW_STATUS[SKEW_STATUS] field (BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP]
+                               sub-field). If
+                               alignment markers are transmitted at about the same time on all lanes by the link partner,
+                               then the difference between the BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP] values for a pair
+                               of lanes represents the
+                               approximate skew between those lanes.
+
+                               SPU uses the 40GBASE-R LMAC's programmed PCS lane 0 as a reference and computes the
+                               BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP] delta of every other lane relative to PCS lane 0.
+                               When normalization is
+                               enabled, SPU adjusts the timestamp of a received SOP by subtracting the receiving lane's
+                               BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP] delta from the captured timestamp value. The
+                               adjusted/normalized timestamp
+                               value is then passed to SMU along with the SOP.
+
+                               Software can determine the actual maximum skew of a 40GBASE-R link by examining the
+                               BGX_SPU_SDS_SKEW_STATUS_S[AM_TIMESTAMP] values in the BGX()_SPU_SDS()_SKEW_STATUS
+                               registers, and decide if
+                               timestamp normalization should be enabled or disabled to improve PTP accuracy.
+                               Normalization improves accuracy for larger skew values but reduces the accuracy (due to
+                               timestamp measurement errors) for small skew values. */
+        uint64_t an_nonce_match_dis : 1; /**< [ 29: 29](R/W) Autonegotiation nonce match disable. This bit should be clear for normal operation.
+                               Setting it disables Nonce Match check by forcing nonce_match variable to 0 in the
+                               autonegotiation arbitration state diagram, as defined in 802.3-2008 Figure 73-11. This bit
+                               can
+                               be set by software for test purposes, e.g. for running autonegotiation in loopback mode. */
+        uint64_t br_ber_mon_dis : 1; /**< [ 30: 30](R/W) BASE-R bit error rate monitor disable. This bit should be clear for normal operation.
+                               Setting it disables the BASE-R BER monitor state machine defined in 802.3-2008 Figure
+                               49-13 for 10GBASE-R and 802.3ba-2010 Figure 82-13 for 40GBASE-R. */
+        uint64_t reserved_31 : 1;
+        uint64_t us_clk_period : 12; /**< [ 43: 32](R/W) Microsecond clock period. Specifies the number of SCLK cycles per microseconds, minus one.
+                               For example, if SCLK runs at 1.3 GHz, the number of SCLK cycles per microsecond is 1,300
+                               so the value of this field should be 1,299 (0x513). This is used by the BASE-R BER monitor
+                               timers. */
+        uint64_t ms_clk_period : 12; /**< [ 55: 44](R/W) Millisecond clock period. Specifies the number of microsecond clock ticks per millisecond,
+                               minus one. The default value of 999 (0x3E7) should be used during normal operation; other
+                               values may be used for test/debug purposes. */
+        uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_dbg_control_s cn; */
+};
+typedef union bdk_bgxx_spu_dbg_control bdk_bgxx_spu_dbg_control_t;
+
+static inline uint64_t BDK_BGXX_SPU_DBG_CONTROL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_DBG_CONTROL(unsigned long a)
+{
+    /* Per-BGX register: fixed base plus a 16 MB stride per BGX block (a). */
+    const uint64_t base = 0x87e0e0010300ll;
+    /* CN81XX and CN88XX expose BGX0..1 and produce identical addresses; CN83XX exposes BGX0..3. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1))
+        return base + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return base + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_SPU_DBG_CONTROL", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_SPU_DBG_CONTROL(a) bdk_bgxx_spu_dbg_control_t
+#define bustype_BDK_BGXX_SPU_DBG_CONTROL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_DBG_CONTROL(a) "BGXX_SPU_DBG_CONTROL"
+#define device_bar_BDK_BGXX_SPU_DBG_CONTROL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_DBG_CONTROL(a) (a)
+#define arguments_BDK_BGXX_SPU_DBG_CONTROL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_mem_int
+ *
+ * BGX SPU Memory Interrupt Registers
+ */
+union bdk_bgxx_spu_mem_int
+{
+    uint64_t u; /* Entire register accessed as one 64-bit value. */
+    struct bdk_bgxx_spu_mem_int_s
+    {
+    /* The same fields are declared twice, once per byte order; the preprocessor keeps one layout. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1C/H) SPU receive buffer single-bit error for lanes 3-0. One bit per physical SerDes lane. Each
+                               bit is set when the associated receive buffer lane FIFO detects a single-bit ECC error. */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1C/H) SPU receive buffer double-bit error for lanes 3-0. One bit per physical SerDes lane. Each
+                               bit is set when the associated receive buffer lane FIFO detects a double-bit ECC error. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1C/H) SPU receive buffer double-bit error for lanes 3-0. One bit per physical SerDes lane. Each
+                               bit is set when the associated receive buffer lane FIFO detects a double-bit ECC error. */
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1C/H) SPU receive buffer single-bit error for lanes 3-0. One bit per physical SerDes lane. Each
+                               bit is set when the associated receive buffer lane FIFO detects a single-bit ECC error. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_mem_int_s cn; */
+};
+typedef union bdk_bgxx_spu_mem_int bdk_bgxx_spu_mem_int_t;
+
+static inline uint64_t BDK_BGXX_SPU_MEM_INT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_MEM_INT(unsigned long a)
+{
+    /* Per-BGX register: fixed base plus a 16 MB stride per BGX block (a). */
+    const uint64_t base = 0x87e0e0010310ll;
+    /* CN81XX and CN88XX expose BGX0..1 and produce identical addresses; CN83XX exposes BGX0..3. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1))
+        return base + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return base + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_SPU_MEM_INT", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_SPU_MEM_INT(a) bdk_bgxx_spu_mem_int_t
+#define bustype_BDK_BGXX_SPU_MEM_INT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_MEM_INT(a) "BGXX_SPU_MEM_INT"
+#define device_bar_BDK_BGXX_SPU_MEM_INT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_MEM_INT(a) (a)
+#define arguments_BDK_BGXX_SPU_MEM_INT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_mem_int_ena_w1c
+ *
+ * BGX SPU Memory Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_bgxx_spu_mem_int_ena_w1c
+{
+    uint64_t u; /* Entire register accessed as one 64-bit value. */
+    struct bdk_bgxx_spu_mem_int_ena_w1c_s
+    {
+    /* The same fields are declared twice, once per byte order; the preprocessor keeps one layout. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU_MEM_INT[RX_BUF_DBE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU_MEM_INT[RX_BUF_DBE]. */
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1C/H) Reads or clears enable for BGX(0..1)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_mem_int_ena_w1c_s cn81xx; */
+    /* struct bdk_bgxx_spu_mem_int_ena_w1c_s cn88xx; */
+    /* CN83XX variant differs only in comment text (BGX(0..3) vs BGX(0..1)); the field layout is identical. */
+    struct bdk_bgxx_spu_mem_int_ena_w1c_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU_MEM_INT[RX_BUF_DBE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU_MEM_INT[RX_BUF_DBE]. */
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1C/H) Reads or clears enable for BGX(0..3)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_spu_mem_int_ena_w1c bdk_bgxx_spu_mem_int_ena_w1c_t;
+
+static inline uint64_t BDK_BGXX_SPU_MEM_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_MEM_INT_ENA_W1C(unsigned long a)
+{
+    /* Per-BGX register: fixed base plus a 16 MB stride per BGX block (a). */
+    const uint64_t base = 0x87e0e0010320ll;
+    /* CN81XX and CN88XX expose BGX0..1 and produce identical addresses; CN83XX exposes BGX0..3. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1))
+        return base + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return base + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_SPU_MEM_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_SPU_MEM_INT_ENA_W1C(a) bdk_bgxx_spu_mem_int_ena_w1c_t
+#define bustype_BDK_BGXX_SPU_MEM_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_MEM_INT_ENA_W1C(a) "BGXX_SPU_MEM_INT_ENA_W1C"
+#define device_bar_BDK_BGXX_SPU_MEM_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_MEM_INT_ENA_W1C(a) (a)
+#define arguments_BDK_BGXX_SPU_MEM_INT_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_mem_int_ena_w1s
+ *
+ * BGX SPU Memory Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_bgxx_spu_mem_int_ena_w1s
+{
+    uint64_t u; /* Entire register accessed as one 64-bit value. */
+    struct bdk_bgxx_spu_mem_int_ena_w1s_s
+    {
+    /* The same fields are declared twice, once per byte order; the preprocessor keeps one layout. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU_MEM_INT[RX_BUF_DBE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU_MEM_INT[RX_BUF_DBE]. */
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1S/H) Reads or sets enable for BGX(0..1)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_mem_int_ena_w1s_s cn81xx; */
+    /* struct bdk_bgxx_spu_mem_int_ena_w1s_s cn88xx; */
+    /* CN83XX variant differs only in comment text (BGX(0..3) vs BGX(0..1)); the field layout is identical. */
+    struct bdk_bgxx_spu_mem_int_ena_w1s_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU_MEM_INT[RX_BUF_DBE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU_MEM_INT[RX_BUF_DBE]. */
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1S/H) Reads or sets enable for BGX(0..3)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_spu_mem_int_ena_w1s bdk_bgxx_spu_mem_int_ena_w1s_t;
+
+static inline uint64_t BDK_BGXX_SPU_MEM_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_MEM_INT_ENA_W1S(unsigned long a)
+{
+    /* Per-BGX register: fixed base plus a 16 MB stride per BGX block (a). */
+    const uint64_t base = 0x87e0e0010328ll;
+    /* CN81XX and CN88XX expose BGX0..1 and produce identical addresses; CN83XX exposes BGX0..3. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1))
+        return base + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return base + 0x1000000ll * (a & 0x3);
+    __bdk_csr_fatal("BGXX_SPU_MEM_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro family. */
+#define typedef_BDK_BGXX_SPU_MEM_INT_ENA_W1S(a) bdk_bgxx_spu_mem_int_ena_w1s_t
+#define bustype_BDK_BGXX_SPU_MEM_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_MEM_INT_ENA_W1S(a) "BGXX_SPU_MEM_INT_ENA_W1S"
+#define device_bar_BDK_BGXX_SPU_MEM_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_MEM_INT_ENA_W1S(a) (a)
+#define arguments_BDK_BGXX_SPU_MEM_INT_ENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_mem_int_w1s
+ *
+ * BGX SPU Memory Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_bgxx_spu_mem_int_w1s
+{
+    uint64_t u; /* Entire register accessed as one 64-bit value. */
+    struct bdk_bgxx_spu_mem_int_w1s_s
+    {
+    /* The same fields are declared twice, once per byte order; the preprocessor keeps one layout. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1S/H) Reads or sets BGX(0..1)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1S/H) Reads or sets BGX(0..1)_SPU_MEM_INT[RX_BUF_DBE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1S/H) Reads or sets BGX(0..1)_SPU_MEM_INT[RX_BUF_DBE]. */
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1S/H) Reads or sets BGX(0..1)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_mem_int_w1s_s cn81xx; */
+    /* struct bdk_bgxx_spu_mem_int_w1s_s cn88xx; */
+    /* CN83XX variant differs only in comment text (BGX(0..3) vs BGX(0..1)); the field layout is identical. */
+    struct bdk_bgxx_spu_mem_int_w1s_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1S/H) Reads or sets BGX(0..3)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1S/H) Reads or sets BGX(0..3)_SPU_MEM_INT[RX_BUF_DBE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_dbe : 4; /**< [ 3: 0](R/W1S/H) Reads or sets BGX(0..3)_SPU_MEM_INT[RX_BUF_DBE]. */
+        uint64_t rx_buf_sbe : 4; /**< [ 7: 4](R/W1S/H) Reads or sets BGX(0..3)_SPU_MEM_INT[RX_BUF_SBE]. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_bgxx_spu_mem_int_w1s bdk_bgxx_spu_mem_int_w1s_t;
+
+static inline uint64_t BDK_BGXX_SPU_MEM_INT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_MEM_INT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0e0010318ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0e0010318ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0e0010318ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("BGXX_SPU_MEM_INT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_BGXX_SPU_MEM_INT_W1S(a) bdk_bgxx_spu_mem_int_w1s_t
+#define bustype_BDK_BGXX_SPU_MEM_INT_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_MEM_INT_W1S(a) "BGXX_SPU_MEM_INT_W1S"
+#define device_bar_BDK_BGXX_SPU_MEM_INT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_MEM_INT_W1S(a) (a)
+#define arguments_BDK_BGXX_SPU_MEM_INT_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_mem_status
+ *
+ * BGX SPU Memory Status Registers
+ * This register provides memory ECC status from the SPU receive buffer lane FIFOs.
+ */
+union bdk_bgxx_spu_mem_status
+{
+    uint64_t u;
+    /* NOTE(review): bitfield layout must match the hardware register
+       bit-for-bit; member order is reversed between the endian builds. */
+    struct bdk_bgxx_spu_mem_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63        : 32;
+        uint64_t rx_buf_ecc_synd       : 32; /**< [ 31:  0](RO/H) SPU receive buffer ECC syndromes for lanes 3-0. 8-bit syndrome sub-field per SerDes lane.
+                                                                 Each
+                                                                 8-bit sub-field contains the syndrome of the latest single-bit or double-bit ECC error
+                                                                 detected by the associated receive buffer lane FIFO, i.e. it is loaded when the
+                                                                 corresponding
+                                                                 BGX()_SPU_MEM_INT[RX_BUF_SBE] or BGX()_SPU_MEM_INT[RX_BUF_DBE] bit is set. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_buf_ecc_synd       : 32; /**< [ 31:  0](RO/H) SPU receive buffer ECC syndromes for lanes 3-0. 8-bit syndrome sub-field per SerDes lane.
+                                                                 Each
+                                                                 8-bit sub-field contains the syndrome of the latest single-bit or double-bit ECC error
+                                                                 detected by the associated receive buffer lane FIFO, i.e. it is loaded when the
+                                                                 corresponding
+                                                                 BGX()_SPU_MEM_INT[RX_BUF_SBE] or BGX()_SPU_MEM_INT[RX_BUF_DBE] bit is set. */
+        uint64_t reserved_32_63        : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_mem_status_s cn; */
+};
+typedef union bdk_bgxx_spu_mem_status bdk_bgxx_spu_mem_status_t;
+
+/* Physical CSR address of BGX(a)_SPU_MEM_STATUS; 16 MB stride per BGX
+   block, range-checked per model, fatal on an invalid index. */
+static inline uint64_t BDK_BGXX_SPU_MEM_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_MEM_STATUS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0e0010308ll + 0x1000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x87e0e0010308ll + 0x1000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0e0010308ll + 0x1000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("BGXX_SPU_MEM_STATUS", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* helpers. */
+#define typedef_BDK_BGXX_SPU_MEM_STATUS(a) bdk_bgxx_spu_mem_status_t
+#define bustype_BDK_BGXX_SPU_MEM_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_MEM_STATUS(a) "BGXX_SPU_MEM_STATUS"
+#define device_bar_BDK_BGXX_SPU_MEM_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_MEM_STATUS(a) (a)
+#define arguments_BDK_BGXX_SPU_MEM_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_sds#_skew_status
+ *
+ * BGX SPU SerDes Lane Skew Status Registers
+ * This register provides SerDes lane skew status. One register per physical SerDes lane.
+ */
+union bdk_bgxx_spu_sdsx_skew_status
+{
+    uint64_t u;
+    /* NOTE(review): bitfield layout must match the hardware register
+       bit-for-bit; member order is reversed between the endian builds. */
+    struct bdk_bgxx_spu_sdsx_skew_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63        : 32;
+        uint64_t skew_status           : 32; /**< [ 31:  0](RO/H) Format defined by BGX_SPU_SDS_SKEW_STATUS_S. */
+#else /* Word 0 - Little Endian */
+        uint64_t skew_status           : 32; /**< [ 31:  0](RO/H) Format defined by BGX_SPU_SDS_SKEW_STATUS_S. */
+        uint64_t reserved_32_63        : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_sdsx_skew_status_s cn; */
+};
+typedef union bdk_bgxx_spu_sdsx_skew_status bdk_bgxx_spu_sdsx_skew_status_t;
+
+/* Physical CSR address of BGX(a)_SPU_SDS(b)_SKEW_STATUS.  'a' selects the
+   BGX block (16 MB stride), 'b' the SerDes lane (8-byte stride, 4 lanes on
+   every model); fatal on an invalid index pair. */
+static inline uint64_t BDK_BGXX_SPU_SDSX_SKEW_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_SDSX_SKEW_STATUS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010340ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0010340ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010340ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_SPU_SDSX_SKEW_STATUS", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* helpers. */
+#define typedef_BDK_BGXX_SPU_SDSX_SKEW_STATUS(a,b) bdk_bgxx_spu_sdsx_skew_status_t
+#define bustype_BDK_BGXX_SPU_SDSX_SKEW_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_SDSX_SKEW_STATUS(a,b) "BGXX_SPU_SDSX_SKEW_STATUS"
+#define device_bar_BDK_BGXX_SPU_SDSX_SKEW_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_SDSX_SKEW_STATUS(a,b) (a)
+#define arguments_BDK_BGXX_SPU_SDSX_SKEW_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) bgx#_spu_sds#_states
+ *
+ * BGX SPU SerDes States Registers
+ * This register provides SerDes lane states. One register per physical SerDes lane.
+ */
+union bdk_bgxx_spu_sdsx_states
+{
+    uint64_t u;
+    /* NOTE(review): bitfield layout must match the hardware register
+       bit-for-bit; member order is reversed between the endian builds so
+       each field lands on the same physical bits either way. */
+    struct bdk_bgxx_spu_sdsx_states_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_52_63        : 12;
+        uint64_t am_lock_invld_cnt     : 2;  /**< [ 51: 50](RO/H) 40GBASE-R alignment marker lock state machine invalid AM counter. */
+        uint64_t am_lock_sm            : 2;  /**< [ 49: 48](RO/H) 40GBASE-R alignment marker lock state machine state. */
+        uint64_t reserved_45_47        : 3;
+        uint64_t train_sm              : 3;  /**< [ 44: 42](RO/H) Link training state machine state. */
+        uint64_t train_code_viol       : 1;  /**< [ 41: 41](RO/H) Link training code violation in received control channel. */
+        uint64_t train_frame_lock      : 1;  /**< [ 40: 40](RO/H) Link training frame lock status. */
+        uint64_t train_lock_found_1st_marker : 1;/**< [ 39: 39](RO/H) Link training lock state machine found first marker flag. */
+        uint64_t train_lock_bad_markers : 3; /**< [ 38: 36](RO/H) Link training lock state machine bad markers counter. */
+        uint64_t reserved_35           : 1;
+        uint64_t an_arb_sm             : 3;  /**< [ 34: 32](RO/H) Autonegotiation arbitration state machine state. */
+        uint64_t an_rx_sm              : 2;  /**< [ 31: 30](RO/H) Autonegotiation receive state machine state. */
+        uint64_t reserved_29           : 1;
+        uint64_t fec_block_sync        : 1;  /**< [ 28: 28](RO/H) FEC block sync status. */
+        uint64_t fec_sync_cnt          : 4;  /**< [ 27: 24](RO/H) FEC block sync state machine good/bad parity block counter. */
+        uint64_t reserved_23           : 1;
+        uint64_t br_sh_invld_cnt       : 7;  /**< [ 22: 16](RO/H) BASE-R lock state machine invalid sync header counter. */
+        uint64_t br_block_lock         : 1;  /**< [ 15: 15](RO/H) BASE-R block lock status. */
+        uint64_t br_sh_cnt             : 11; /**< [ 14:  4](RO/H) BASE-R lock state machine sync header counter */
+        uint64_t bx_sync_sm            : 4;  /**< [  3:  0](RO/H) BASE-X PCS synchronization state machine state */
+#else /* Word 0 - Little Endian */
+        uint64_t bx_sync_sm            : 4;  /**< [  3:  0](RO/H) BASE-X PCS synchronization state machine state */
+        uint64_t br_sh_cnt             : 11; /**< [ 14:  4](RO/H) BASE-R lock state machine sync header counter */
+        uint64_t br_block_lock         : 1;  /**< [ 15: 15](RO/H) BASE-R block lock status. */
+        uint64_t br_sh_invld_cnt       : 7;  /**< [ 22: 16](RO/H) BASE-R lock state machine invalid sync header counter. */
+        uint64_t reserved_23           : 1;
+        uint64_t fec_sync_cnt          : 4;  /**< [ 27: 24](RO/H) FEC block sync state machine good/bad parity block counter. */
+        uint64_t fec_block_sync        : 1;  /**< [ 28: 28](RO/H) FEC block sync status. */
+        uint64_t reserved_29           : 1;
+        uint64_t an_rx_sm              : 2;  /**< [ 31: 30](RO/H) Autonegotiation receive state machine state. */
+        uint64_t an_arb_sm             : 3;  /**< [ 34: 32](RO/H) Autonegotiation arbitration state machine state. */
+        uint64_t reserved_35           : 1;
+        uint64_t train_lock_bad_markers : 3; /**< [ 38: 36](RO/H) Link training lock state machine bad markers counter. */
+        uint64_t train_lock_found_1st_marker : 1;/**< [ 39: 39](RO/H) Link training lock state machine found first marker flag. */
+        uint64_t train_frame_lock      : 1;  /**< [ 40: 40](RO/H) Link training frame lock status. */
+        uint64_t train_code_viol       : 1;  /**< [ 41: 41](RO/H) Link training code violation in received control channel. */
+        uint64_t train_sm              : 3;  /**< [ 44: 42](RO/H) Link training state machine state. */
+        uint64_t reserved_45_47        : 3;
+        uint64_t am_lock_sm            : 2;  /**< [ 49: 48](RO/H) 40GBASE-R alignment marker lock state machine state. */
+        uint64_t am_lock_invld_cnt     : 2;  /**< [ 51: 50](RO/H) 40GBASE-R alignment marker lock state machine invalid AM counter. */
+        uint64_t reserved_52_63        : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_bgxx_spu_sdsx_states_s cn; */
+};
+typedef union bdk_bgxx_spu_sdsx_states bdk_bgxx_spu_sdsx_states_t;
+
+/* Physical CSR address of BGX(a)_SPU_SDS(b)_STATES.  'a' selects the BGX
+   block (16 MB stride), 'b' the SerDes lane (8-byte stride, 4 lanes on
+   every model); fatal on an invalid index pair. */
+static inline uint64_t BDK_BGXX_SPU_SDSX_STATES(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_BGXX_SPU_SDSX_STATES(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010360ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=3)))
+        return 0x87e0e0010360ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0e0010360ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
+    __bdk_csr_fatal("BGXX_SPU_SDSX_STATES", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* helpers. */
+#define typedef_BDK_BGXX_SPU_SDSX_STATES(a,b) bdk_bgxx_spu_sdsx_states_t
+#define bustype_BDK_BGXX_SPU_SDSX_STATES(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_BGXX_SPU_SDSX_STATES(a,b) "BGXX_SPU_SDSX_STATES"
+#define device_bar_BDK_BGXX_SPU_SDSX_STATES(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_BGXX_SPU_SDSX_STATES(a,b) (a)
+#define arguments_BDK_BGXX_SPU_SDSX_STATES(a,b) (a),(b),-1,-1
+
+#endif /* __BDK_CSRS_BGX_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-dap.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-dap.h
new file mode 100644
index 0000000000..99e0574021
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-dap.h
@@ -0,0 +1,1010 @@
+#ifndef __BDK_CSRS_DAP_H__
+#define __BDK_CSRS_DAP_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium DAP.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration dap_bar_e
+ *
+ * DAP Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+/* Base physical addresses and sizes of the DAP PF BARs.  BAR2 differs
+   between chip generations: it moved (0x87a080000000 -> 0x87a000000000)
+   and grew (32 MB -> 4 GB) from CN8XXX to CN9XXX, hence the _CN8/_CN9
+   variants. */
+#define BDK_DAP_BAR_E_DAP_PF_BAR0 (0x87e002000000ll)
+#define BDK_DAP_BAR_E_DAP_PF_BAR0_SIZE 0x100000ull
+#define BDK_DAP_BAR_E_DAP_PF_BAR2_CN8 (0x87a080000000ll)
+#define BDK_DAP_BAR_E_DAP_PF_BAR2_CN8_SIZE 0x2000000ull
+#define BDK_DAP_BAR_E_DAP_PF_BAR2_CN9 (0x87a000000000ll)
+#define BDK_DAP_BAR_E_DAP_PF_BAR2_CN9_SIZE 0x100000000ull
+
+/**
+ * Register (RSL) dap_const
+ *
+ * DAP Constant Register
+ * This register contains constant for software discovery.
+ */
+union bdk_dap_const
+{
+    uint64_t u;
+    /* NOTE(review): all 64 bits are reserved in this generator revision;
+       no discovery fields are defined yet. */
+    struct bdk_dap_const_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_0_63         : 64;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_63         : 64;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dap_const_s cn; */
+};
+typedef union bdk_dap_const bdk_dap_const_t;
+
+#define BDK_DAP_CONST BDK_DAP_CONST_FUNC()
+/* DAP_CONST exists only on CN9XXX parts; accessing it on any other model
+   reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DAP_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_CONST_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x87e002000130ll;
+    __bdk_csr_fatal("DAP_CONST", 0, 0, 0, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* helpers. */
+#define typedef_BDK_DAP_CONST bdk_dap_const_t
+#define bustype_BDK_DAP_CONST BDK_CSR_TYPE_RSL
+#define basename_BDK_DAP_CONST "DAP_CONST"
+#define device_bar_BDK_DAP_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_CONST 0
+#define arguments_BDK_DAP_CONST -1,-1,-1,-1
+
+/**
+ * Register (RSL) dap_eco
+ *
+ * INTERNAL: DAP ECO Register
+ */
+union bdk_dap_eco
+{
+    uint64_t u;
+    /* NOTE(review): bitfield layout must match the hardware register
+       bit-for-bit; member order is reversed between the endian builds. */
+    struct bdk_dap_eco_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63        : 32;
+        uint64_t eco_ro                : 16; /**< [ 31: 16](RO) Reserved for ECO usage. */
+        uint64_t eco_rw                : 16; /**< [ 15:  0](R/W) Reserved for ECO usage. */
+#else /* Word 0 - Little Endian */
+        uint64_t eco_rw                : 16; /**< [ 15:  0](R/W) Reserved for ECO usage. */
+        uint64_t eco_ro                : 16; /**< [ 31: 16](RO) Reserved for ECO usage. */
+        uint64_t reserved_32_63        : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dap_eco_s cn; */
+};
+typedef union bdk_dap_eco bdk_dap_eco_t;
+
+#define BDK_DAP_ECO BDK_DAP_ECO_FUNC()
+/* Every supported model maps DAP_ECO at the same address; the generator
+   still emits one branch per model, with a fatal error for unknown parts. */
+static inline uint64_t BDK_DAP_ECO_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_ECO_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return 0x87e002000120ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return 0x87e002000120ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+        return 0x87e002000120ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x87e002000120ll;
+    __bdk_csr_fatal("DAP_ECO", 0, 0, 0, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* helpers. */
+#define typedef_BDK_DAP_ECO bdk_dap_eco_t
+#define bustype_BDK_DAP_ECO BDK_CSR_TYPE_RSL
+#define basename_BDK_DAP_ECO "DAP_ECO"
+#define device_bar_BDK_DAP_ECO 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_ECO 0
+#define arguments_BDK_DAP_ECO -1,-1,-1,-1
+
+/**
+ * Register (RSL32b) dap_hwpoll_cnt
+ *
+ * DAP Hardware Poll Counter Register
+ */
+union bdk_dap_hwpoll_cnt
+{
+    uint32_t u;
+    /* NOTE(review): 32-bit register (RSL32b); layout must match hardware
+       bit-for-bit, with member order reversed between endian builds. */
+    struct bdk_dap_hwpoll_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t poll_dis              : 1;  /**< [ 31: 31](R/W) Disable hardware polling. For diagnostic use only. */
+        uint32_t reserved_16_30        : 15;
+        uint32_t count                 : 16; /**< [ 15:  0](R/W) Number of coprocessor-clocks between DAP bus poll intervals.
+                                                                 With the approximate transaction delay of 256 cycles, the default
+                                                                 results in a poll approximately every 2048 cycles.
+                                                                 Must not be zero. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint32_t count                 : 16; /**< [ 15:  0](R/W) Number of coprocessor-clocks between DAP bus poll intervals.
+                                                                 With the approximate transaction delay of 256 cycles, the default
+                                                                 results in a poll approximately every 2048 cycles.
+                                                                 Must not be zero. For diagnostic use only. */
+        uint32_t reserved_16_30        : 15;
+        uint32_t poll_dis              : 1;  /**< [ 31: 31](R/W) Disable hardware polling. For diagnostic use only. */
+#endif /* Word 0 - End */
+    } s;
+    /* NOTE(review): CN88XX pass 1 has no POLL_DIS bit -- bit 31 is
+       reserved in that variant. */
+    struct bdk_dap_hwpoll_cnt_cn88xxp1
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31           : 1;
+        uint32_t reserved_16_30        : 15;
+        uint32_t count                 : 16; /**< [ 15:  0](R/W) Number of coprocessor-clocks between DAP bus poll intervals.
+                                                                 With the approximate transaction delay of 256 cycles, the default
+                                                                 results in a poll approximately every 2048 cycles.
+                                                                 Must not be zero. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint32_t count                 : 16; /**< [ 15:  0](R/W) Number of coprocessor-clocks between DAP bus poll intervals.
+                                                                 With the approximate transaction delay of 256 cycles, the default
+                                                                 results in a poll approximately every 2048 cycles.
+                                                                 Must not be zero. For diagnostic use only. */
+        uint32_t reserved_16_30        : 15;
+        uint32_t reserved_31           : 1;
+#endif /* Word 0 - End */
+    } cn88xxp1;
+    /* struct bdk_dap_hwpoll_cnt_s cn9; */
+    /* struct bdk_dap_hwpoll_cnt_s cn81xx; */
+    /* struct bdk_dap_hwpoll_cnt_s cn83xx; */
+    /* struct bdk_dap_hwpoll_cnt_s cn88xxp2; */
+};
+typedef union bdk_dap_hwpoll_cnt bdk_dap_hwpoll_cnt_t;
+
+#define BDK_DAP_HWPOLL_CNT BDK_DAP_HWPOLL_CNT_FUNC()
+/* Fixed CSR address; the generator emitted no per-model range check for
+   this register (it is at the same address on every supported part). */
+static inline uint64_t BDK_DAP_HWPOLL_CNT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_HWPOLL_CNT_FUNC(void)
+{
+    return 0x87e002000114ll;
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* helpers. */
+#define typedef_BDK_DAP_HWPOLL_CNT bdk_dap_hwpoll_cnt_t
+#define bustype_BDK_DAP_HWPOLL_CNT BDK_CSR_TYPE_RSL32b
+#define basename_BDK_DAP_HWPOLL_CNT "DAP_HWPOLL_CNT"
+#define device_bar_BDK_DAP_HWPOLL_CNT 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_HWPOLL_CNT 0
+#define arguments_BDK_DAP_HWPOLL_CNT -1,-1,-1,-1
+
+/**
+ * Register (RSL32b) dap_imp_dar
+ *
+ * DAP Debug Authentication Register
+ * This register controls the device enables and secure/nonsecure access permissions.
+ */
+union bdk_dap_imp_dar
+{
+ uint32_t u;
+ struct bdk_dap_imp_dar_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t distracefeature : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ Passed to trace unit, but not presently used.
+ 0 = Future trace feature enabled.
+ 1 = Future trace feature disabled. */
+ uint32_t distrace : 1; /**< [ 28: 28](R/W) Disable trace unit discovery.
+ 0 = Trace unit is discoverable by software.
+ 1 = Trace unit is hidden. */
+ uint32_t reserved_11_27 : 17;
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure non invasive debug enable.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable non secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted-mode resets to 0, else 1. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable non secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure non invasive debug enable.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t reserved_11_27 : 17;
+ uint32_t distrace : 1; /**< [ 28: 28](R/W) Disable trace unit discovery.
+ 0 = Trace unit is discoverable by software.
+ 1 = Trace unit is hidden. */
+ uint32_t distracefeature : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ Passed to trace unit, but not presently used.
+ 0 = Future trace feature enabled.
+ 1 = Future trace feature disabled. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_dap_imp_dar_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_11_31 : 21;
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure non invasive debug enable.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable non secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted-mode resets to 0, else 1. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable non secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure non invasive debug enable.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t reserved_11_31 : 21;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_dap_imp_dar_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t distracefeature : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ Passed to trace unit, but not presently used.
+ 0 = Future trace feature enabled.
+ 1 = Future trace feature disabled. */
+ uint32_t distrace : 1; /**< [ 28: 28](RO) Trace unit is always discoverable in CNXXXX.
+ 0 = Trace unit is discoverable by software.
+ 1 = Trace unit is hidden.
+
+ In CNXXXX, always discoverable. */
+ uint32_t reserved_11_27 : 17;
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure noninvasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable nonsecure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted mode resets to zero, else one. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable nonsecure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure noninvasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t reserved_11_27 : 17;
+ uint32_t distrace : 1; /**< [ 28: 28](RO) Trace unit is always discoverable in CNXXXX.
+ 0 = Trace unit is discoverable by software.
+ 1 = Trace unit is hidden.
+
+ In CNXXXX, always discoverable. */
+ uint32_t distracefeature : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ Passed to trace unit, but not presently used.
+ 0 = Future trace feature enabled.
+ 1 = Future trace feature disabled. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_dap_imp_dar_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t distracefeature : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ Passed to trace unit, but not presently used.
+ 0 = Future trace feature enabled.
+ 1 = Future trace feature disabled. */
+ uint32_t distrace : 1; /**< [ 28: 28](R/W) Disable trace unit discovery.
+ 0 = Trace unit is discoverable by software.
+ 1 = Trace unit is hidden. */
+ uint32_t reserved_11_27 : 17;
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure noninvasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable nonsecure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted mode resets to zero, else one. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable nonsecure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure noninvasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t reserved_11_27 : 17;
+ uint32_t distrace : 1; /**< [ 28: 28](R/W) Disable trace unit discovery.
+ 0 = Trace unit is discoverable by software.
+ 1 = Trace unit is hidden. */
+ uint32_t distracefeature : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ Passed to trace unit, but not presently used.
+ 0 = Future trace feature enabled.
+ 1 = Future trace feature disabled. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_dap_imp_dar_cn81xx cn83xx; */
+ struct bdk_dap_imp_dar_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t distracefeature : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ Passed to trace unit, but not presently used.
+ 0 = Future trace feature enabled.
+ 1 = Future trace feature disabled. */
+ uint32_t distrace : 1; /**< [ 28: 28](R/W) Disable trace unit discovery.
+ 0 = Trace unit is discoverable by software.
+ 1 = Trace unit is hidden. */
+ uint32_t reserved_11_27 : 17;
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure noninvasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable nonsecure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted mode resets to zero, else one. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbgen : 1; /**< [ 0: 0](R/W) Set this bit to enable debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t niden : 1; /**< [ 1: 1](R/W) Set this bit to enable nonsecure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spiden : 1; /**< [ 2: 2](R/W) Set this bit to enable secure invasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t spniden : 1; /**< [ 3: 3](R/W) Set this bit to enable secure noninvasive debug enable.
+
+ When in trusted mode resets to zero, else one. */
+ uint32_t dabdeviceen : 1; /**< [ 4: 4](R/W) Set this bit to use ARM-AP inside DAP for DAB serial bus accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t deviceen : 1; /**< [ 5: 5](R/W) Set this bit to use CVM-AP inside DAP for CNXXXX addressing accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t reserved_6_8 : 3;
+ uint32_t caben : 1; /**< [ 9: 9](R/W) Enable CAB accesses from NCB and RSL devices.
+ 0 = Return fault for all CAB accesses.
+ 1 = Enable all CAB accesses.
+
+ When in trusted-mode resets to 0, else 1. */
+ uint32_t cabnsen : 1; /**< [ 10: 10](R/W) Enable nonsecure CAB accesses from NCB and RSL devices.
+ 0 = Return fault on nonsecure CAB accesses.
+ 1 = Enable nonsecure CAB accesses. */
+ uint32_t reserved_11_27 : 17;
+ uint32_t distrace : 1; /**< [ 28: 28](R/W) Disable trace unit discovery.
+ 0 = Trace unit is discoverable by software.
+ 1 = Trace unit is hidden. */
+ uint32_t distracefeature : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ Passed to trace unit, but not presently used.
+ 0 = Future trace feature enabled.
+ 1 = Future trace feature disabled. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_dap_imp_dar bdk_dap_imp_dar_t;
+
+#define BDK_DAP_IMP_DAR BDK_DAP_IMP_DAR_FUNC()
+static inline uint64_t BDK_DAP_IMP_DAR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_IMP_DAR_FUNC(void)
+{
+ return 0x87e002000100ll;
+}
+
+#define typedef_BDK_DAP_IMP_DAR bdk_dap_imp_dar_t
+#define bustype_BDK_DAP_IMP_DAR BDK_CSR_TYPE_RSL32b
+#define basename_BDK_DAP_IMP_DAR "DAP_IMP_DAR"
+#define device_bar_BDK_DAP_IMP_DAR 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_IMP_DAR 0
+#define arguments_BDK_DAP_IMP_DAR -1,-1,-1,-1
+
+/**
+ * Register (RSL32b) dap_owb_to
+ *
+ * DAP One-Wire-Bus Timeout Register
+ * This register configures the one-wire bus.
+ */
+union bdk_dap_owb_to
+{
+ uint32_t u;
+ struct bdk_dap_owb_to_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t to_dis : 1; /**< [ 31: 31](R/W) Disable timeout mechanism. */
+ uint32_t reserved_16_30 : 15;
+ uint32_t tovalue : 16; /**< [ 15: 0](R/W) Timeout value. If an OWB transaction is longer than this number
+ of coprocessor-clock cycles, it will timeout. */
+#else /* Word 0 - Little Endian */
+ uint32_t tovalue : 16; /**< [ 15: 0](R/W) Timeout value. If an OWB transaction is longer than this number
+ of coprocessor-clock cycles, it will timeout. */
+ uint32_t reserved_16_30 : 15;
+ uint32_t to_dis : 1; /**< [ 31: 31](R/W) Disable timeout mechanism. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_dap_owb_to_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t reserved_16_30 : 15;
+ uint32_t tovalue : 16; /**< [ 15: 0](R/W) Timeout value. If an OWB transaction is longer than this number
+ of coprocessor-clock cycles, it will timeout. */
+#else /* Word 0 - Little Endian */
+ uint32_t tovalue : 16; /**< [ 15: 0](R/W) Timeout value. If an OWB transaction is longer than this number
+ of coprocessor-clock cycles, it will timeout. */
+ uint32_t reserved_16_30 : 15;
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_dap_owb_to_s cn9; */
+ /* struct bdk_dap_owb_to_s cn81xx; */
+ /* struct bdk_dap_owb_to_s cn83xx; */
+ /* struct bdk_dap_owb_to_s cn88xxp2; */
+};
+typedef union bdk_dap_owb_to bdk_dap_owb_to_t;
+
+#define BDK_DAP_OWB_TO BDK_DAP_OWB_TO_FUNC()
+static inline uint64_t BDK_DAP_OWB_TO_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_OWB_TO_FUNC(void)
+{
+ return 0x87e002000110ll;
+}
+
+#define typedef_BDK_DAP_OWB_TO bdk_dap_owb_to_t
+#define bustype_BDK_DAP_OWB_TO BDK_CSR_TYPE_RSL32b
+#define basename_BDK_DAP_OWB_TO "DAP_OWB_TO"
+#define device_bar_BDK_DAP_OWB_TO 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_OWB_TO 0
+#define arguments_BDK_DAP_OWB_TO -1,-1,-1,-1
+
+/**
+ * Register (RSL) dap_owb_to_status
+ *
+ * DAP One Wire Bus Timeout Status Register
+ * This register reports error status.
+ */
+union bdk_dap_owb_to_status
+{
+ uint64_t u;
+ struct bdk_dap_owb_to_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t time_out : 1; /**< [ 0: 0](R/W1C/H) This bit will be set if there is timeout in one wire bus activity. */
+#else /* Word 0 - Little Endian */
+ uint64_t time_out : 1; /**< [ 0: 0](R/W1C/H) This bit will be set if there is timeout in one wire bus activity. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dap_owb_to_status_s cn; */
+};
+typedef union bdk_dap_owb_to_status bdk_dap_owb_to_status_t;
+
+#define BDK_DAP_OWB_TO_STATUS BDK_DAP_OWB_TO_STATUS_FUNC()
+static inline uint64_t BDK_DAP_OWB_TO_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_OWB_TO_STATUS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e002000138ll;
+ __bdk_csr_fatal("DAP_OWB_TO_STATUS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DAP_OWB_TO_STATUS bdk_dap_owb_to_status_t
+#define bustype_BDK_DAP_OWB_TO_STATUS BDK_CSR_TYPE_RSL
+#define basename_BDK_DAP_OWB_TO_STATUS "DAP_OWB_TO_STATUS"
+#define device_bar_BDK_DAP_OWB_TO_STATUS 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_OWB_TO_STATUS 0
+#define arguments_BDK_DAP_OWB_TO_STATUS -1,-1,-1,-1
+
+/**
+ * Register (RSL) dap_rst_on_warm
+ *
+ * DAP Reset On Warm Reset Register
+ */
+union bdk_dap_rst_on_warm
+{
+ uint64_t u;
+ struct bdk_dap_rst_on_warm_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t rst_on_warm : 1; /**< [ 0: 0](R/W1S) Always reset DAR register.
+ Once set this bit cannot be cleared until the next cold reset.
+ [RST_ON_WARM] is set to one when trusted-mode changes from zero to one (i.e. a
+ non-trusted boot is followed by a trusted boot). */
+#else /* Word 0 - Little Endian */
+ uint64_t rst_on_warm : 1; /**< [ 0: 0](R/W1S) Always reset DAR register.
+ Once set this bit cannot be cleared until the next cold reset.
+ [RST_ON_WARM] is set to one when trusted-mode changes from zero to one (i.e. a
+ non-trusted boot is followed by a trusted boot). */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dap_rst_on_warm_s cn; */
+};
+typedef union bdk_dap_rst_on_warm bdk_dap_rst_on_warm_t;
+
+#define BDK_DAP_RST_ON_WARM BDK_DAP_RST_ON_WARM_FUNC()
+static inline uint64_t BDK_DAP_RST_ON_WARM_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_RST_ON_WARM_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e002000128ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e002000128ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x87e002000128ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e002000128ll;
+ __bdk_csr_fatal("DAP_RST_ON_WARM", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DAP_RST_ON_WARM bdk_dap_rst_on_warm_t
+#define bustype_BDK_DAP_RST_ON_WARM BDK_CSR_TYPE_RSL
+#define basename_BDK_DAP_RST_ON_WARM "DAP_RST_ON_WARM"
+#define device_bar_BDK_DAP_RST_ON_WARM 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_RST_ON_WARM 0
+#define arguments_BDK_DAP_RST_ON_WARM -1,-1,-1,-1
+
+/**
+ * Register (RSL) dap_scratch
+ *
+ * INTERNAL: DAP Scratch Register
+ *
+ * This register is a scratch register for software use.
+ */
+union bdk_dap_scratch
+{
+ uint64_t u;
+ struct bdk_dap_scratch_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Scratch data, not used by hardware. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Scratch data, not used by hardware. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dap_scratch_s cn; */
+};
+typedef union bdk_dap_scratch bdk_dap_scratch_t;
+
+#define BDK_DAP_SCRATCH BDK_DAP_SCRATCH_FUNC()
+static inline uint64_t BDK_DAP_SCRATCH_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_SCRATCH_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e002000118ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e002000118ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0x87e002000118ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e002000118ll;
+ __bdk_csr_fatal("DAP_SCRATCH", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DAP_SCRATCH bdk_dap_scratch_t
+#define bustype_BDK_DAP_SCRATCH BDK_CSR_TYPE_RSL
+#define basename_BDK_DAP_SCRATCH "DAP_SCRATCH"
+#define device_bar_BDK_DAP_SCRATCH 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_SCRATCH 0
+#define arguments_BDK_DAP_SCRATCH -1,-1,-1,-1
+
+/**
+ * Register (RSL32b) dap_sraaddr
+ *
+ * DAP RSL Devices Broadcast Write Polling Register
+ * This register controls broadcast write or polling to the cores.
+ */
+union bdk_dap_sraaddr
+{
+ uint32_t u;
+ struct bdk_dap_sraaddr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t cabdabsel : 1; /**< [ 28: 28](R/W) CAB or DAB bus access selection for polling/broadcast write.
+ 0 = Polling/broadcast write is for DAB bus, bits \<20:5\> is the address offset.
+ 1 = Polling/broadcast write is for CAB bus, bits \<19:5\> is the register number. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t regnum : 16; /**< [ 20: 5](R/W) If [CABDABSEL]=1, then \<19:5\> is the register number with these bit definitions:
+ \<19\>: Op0[0].
+ \<18:16\>: Op1.
+ \<15:12\>: CRn.
+ \<11:8\>: CRm.
+ \<7:5\>: Op.
+
+ If [CABDABSEL]=0, then [REGNUM] is the register offset. */
+ uint32_t reserved_2_4 : 3;
+ uint32_t errstatus : 1; /**< [ 1: 1](RAZ) Currently reserved. */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Busy indicator if the broadcast write or polling still in progress.
+ 0 = Idle.
+ 1 = Broadcast write or polling still in progress. */
+#else /* Word 0 - Little Endian */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Busy indicator if the broadcast write or polling still in progress.
+ 0 = Idle.
+ 1 = Broadcast write or polling still in progress. */
+ uint32_t errstatus : 1; /**< [ 1: 1](RAZ) Currently reserved. */
+ uint32_t reserved_2_4 : 3;
+ uint32_t regnum : 16; /**< [ 20: 5](R/W) If [CABDABSEL]=1, then \<19:5\> is the register number with these bit definitions:
+ \<19\>: Op0[0].
+ \<18:16\>: Op1.
+ \<15:12\>: CRn.
+ \<11:8\>: CRm.
+ \<7:5\>: Op.
+
+ If [CABDABSEL]=0, then [REGNUM] is the register offset. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t cabdabsel : 1; /**< [ 28: 28](R/W) CAB or DAB bus access selection for polling/broadcast write.
+ 0 = Polling/broadcast write is for DAB bus, bits \<20:5\> is the address offset.
+ 1 = Polling/broadcast write is for CAB bus, bits \<19:5\> is the register number. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dap_sraaddr_s cn88xxp1; */
+ struct bdk_dap_sraaddr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t cabdabsel : 1; /**< [ 28: 28](R/W) CAB or DAB bus access selection for polling/broadcast write.
+ 0 = Polling/broadcast write is for DAB bus, bits \<20:5\> is the address offset.
+ 1 = Polling/broadcast write is for CAB bus, bits \<19:5\> is the register number. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t regnum : 16; /**< [ 20: 5](R/W) If [CABDABSEL]=1, then \<19:5\> is the register number with these bit definitions:
+ \<19\>: Op0[0].
+ \<18:16\>: Op1.
+ \<15:12\>: CRn.
+ \<11:8\>: CRm.
+ \<7:5\>: Op.
+
+ If [CABDABSEL]=0, then [REGNUM] is the register offset. */
+ uint32_t reserved_2_4 : 3;
+ uint32_t errstatus : 1; /**< [ 1: 1](RAZ) Reserved. */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Busy indicator if the broadcast write or polling still in progress.
+ 0 = Idle.
+ 1 = Broadcast write or polling still in progress. */
+#else /* Word 0 - Little Endian */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Busy indicator if the broadcast write or polling still in progress.
+ 0 = Idle.
+ 1 = Broadcast write or polling still in progress. */
+ uint32_t errstatus : 1; /**< [ 1: 1](RAZ) Reserved. */
+ uint32_t reserved_2_4 : 3;
+ uint32_t regnum : 16; /**< [ 20: 5](R/W) If [CABDABSEL]=1, then \<19:5\> is the register number with these bit definitions:
+ \<19\>: Op0[0].
+ \<18:16\>: Op1.
+ \<15:12\>: CRn.
+ \<11:8\>: CRm.
+ \<7:5\>: Op.
+
+ If [CABDABSEL]=0, then [REGNUM] is the register offset. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t cabdabsel : 1; /**< [ 28: 28](R/W) CAB or DAB bus access selection for polling/broadcast write.
+ 0 = Polling/broadcast write is for DAB bus, bits \<20:5\> is the address offset.
+ 1 = Polling/broadcast write is for CAB bus, bits \<19:5\> is the register number. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_dap_sraaddr_cn9 cn81xx; */
+ /* struct bdk_dap_sraaddr_cn9 cn83xx; */
+ struct bdk_dap_sraaddr_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t cabdabsel : 1; /**< [ 28: 28](R/W) CAB or DAB bus access selection for polling/broadcast write.
+ 0 = Polling/broadcast write is for DAB bus, bits \<20:5\> is the address offset.
+ 1 = Polling/broadcast write is for CAB bus, bits \<19:5\> is the register number. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t regnum : 16; /**< [ 20: 5](R/W) If [CABDABSEL]=1, then \<19:5\> is the register number with these bit definitions:
+ \<19\>: Op0[0].
+ \<18:16\>: Op1
+ \<15:12\>: CRn.
+ \<11:8\>: CRm.
+ \<7:5\>: Op.
+
+ If [CABDABSEL]=0, then [REGNUM] is the register offset. */
+ uint32_t reserved_2_4 : 3;
+ uint32_t errstatus : 1; /**< [ 1: 1](RAZ) Reserved. */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Busy indicator if the broadcast write or polling still in progress.
+ 0 = Idle.
+ 1 = Broadcast write or polling still in progress. */
+#else /* Word 0 - Little Endian */
+ uint32_t busy : 1; /**< [ 0: 0](RO/H) Busy indicator if the broadcast write or polling still in progress.
+ 0 = Idle.
+ 1 = Broadcast write or polling still in progress. */
+ uint32_t errstatus : 1; /**< [ 1: 1](RAZ) Reserved. */
+ uint32_t reserved_2_4 : 3;
+ uint32_t regnum : 16; /**< [ 20: 5](R/W) If [CABDABSEL]=1, then \<19:5\> is the register number with these bit definitions:
+ \<19\>: Op0[0].
+ \<18:16\>: Op1
+ \<15:12\>: CRn.
+ \<11:8\>: CRm.
+ \<7:5\>: Op.
+
+ If [CABDABSEL]=0, then [REGNUM] is the register offset. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t cabdabsel : 1; /**< [ 28: 28](R/W) CAB or DAB bus access selection for polling/broadcast write.
+ 0 = Polling/broadcast write is for DAB bus, bits \<20:5\> is the address offset.
+ 1 = Polling/broadcast write is for CAB bus, bits \<19:5\> is the register number. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_dap_sraaddr bdk_dap_sraaddr_t;
+
+#define BDK_DAP_SRAADDR BDK_DAP_SRAADDR_FUNC()
+static inline uint64_t BDK_DAP_SRAADDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_SRAADDR_FUNC(void)
+{
+ return 0x87e002000104ll;
+}
+
+#define typedef_BDK_DAP_SRAADDR bdk_dap_sraaddr_t
+#define bustype_BDK_DAP_SRAADDR BDK_CSR_TYPE_RSL32b
+#define basename_BDK_DAP_SRAADDR "DAP_SRAADDR"
+#define device_bar_BDK_DAP_SRAADDR 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_SRAADDR 0
+#define arguments_BDK_DAP_SRAADDR -1,-1,-1,-1
+
+/**
+ * Register (RSL) dap_sradata
+ *
+ * DAP Broadcast Write Data Register
+ * Data register for broadcast writes and polling from the cores.
+ */
+union bdk_dap_sradata
+{
+ uint64_t u;
+ struct bdk_dap_sradata_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dap_sradata_s cn; */
+};
+typedef union bdk_dap_sradata bdk_dap_sradata_t;
+
+#define BDK_DAP_SRADATA BDK_DAP_SRADATA_FUNC()
+static inline uint64_t BDK_DAP_SRADATA_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DAP_SRADATA_FUNC(void)
+{
+ return 0x87e002000108ll;
+}
+
+#define typedef_BDK_DAP_SRADATA bdk_dap_sradata_t
+#define bustype_BDK_DAP_SRADATA BDK_CSR_TYPE_RSL
+#define basename_BDK_DAP_SRADATA "DAP_SRADATA"
+#define device_bar_BDK_DAP_SRADATA 0x0 /* PF_BAR0 */
+#define busnum_BDK_DAP_SRADATA 0
+#define arguments_BDK_DAP_SRADATA -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_DAP_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-dtx.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-dtx.h
new file mode 100644
index 0000000000..07bc044941
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-dtx.h
@@ -0,0 +1,12470 @@
+#ifndef __BDK_CSRS_DTX_H__
+#define __BDK_CSRS_DTX_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium DTX.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Register (RSL) dtx_bch_bcst_rsp
+ *
+ * DTX BCH Control Register
+ */
+union bdk_dtx_bch_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_bch_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_bch_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_bch_bcst_rsp bdk_dtx_bch_bcst_rsp_t;
+
+#define BDK_DTX_BCH_BCST_RSP BDK_DTX_BCH_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_BCH_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BCH_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0feb88080ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feb88080ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feb88080ll;
+ __bdk_csr_fatal("DTX_BCH_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BCH_BCST_RSP bdk_dtx_bch_bcst_rsp_t
+#define bustype_BDK_DTX_BCH_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BCH_BCST_RSP "DTX_BCH_BCST_RSP"
+#define busnum_BDK_DTX_BCH_BCST_RSP 0
+#define arguments_BDK_DTX_BCH_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bch_ctl
+ *
+ * DTX BCH Control Register
+ */
+union bdk_dtx_bch_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_bch_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_bch_ctl_s cn; */
+};
+typedef union bdk_dtx_bch_ctl bdk_dtx_bch_ctl_t;
+
+#define BDK_DTX_BCH_CTL BDK_DTX_BCH_CTL_FUNC()
+static inline uint64_t BDK_DTX_BCH_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BCH_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0feb88060ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feb88060ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feb88060ll;
+ __bdk_csr_fatal("DTX_BCH_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BCH_CTL bdk_dtx_bch_ctl_t
+#define bustype_BDK_DTX_BCH_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BCH_CTL "DTX_BCH_CTL"
+#define busnum_BDK_DTX_BCH_CTL 0
+#define arguments_BDK_DTX_BCH_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bch_dat#
+ *
+ * DTX BCH Raw Data Register
+ */
+union bdk_dtx_bch_datx
+{
+ uint64_t u;
+ struct bdk_dtx_bch_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_bch_datx_s cn; */
+};
+typedef union bdk_dtx_bch_datx bdk_dtx_bch_datx_t;
+
+static inline uint64_t BDK_DTX_BCH_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BCH_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0feb88040ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb88040ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb88040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_BCH_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BCH_DATX(a) bdk_dtx_bch_datx_t
+#define bustype_BDK_DTX_BCH_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BCH_DATX(a) "DTX_BCH_DATX"
+#define busnum_BDK_DTX_BCH_DATX(a) (a)
+#define arguments_BDK_DTX_BCH_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bch_ena#
+ *
+ * DTX BCH Data Enable Register
+ */
+union bdk_dtx_bch_enax
+{
+ uint64_t u;
+ struct bdk_dtx_bch_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_bch_enax_s cn; */
+};
+typedef union bdk_dtx_bch_enax bdk_dtx_bch_enax_t;
+
+static inline uint64_t BDK_DTX_BCH_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BCH_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0feb88020ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb88020ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb88020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_BCH_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BCH_ENAX(a) bdk_dtx_bch_enax_t
+#define bustype_BDK_DTX_BCH_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BCH_ENAX(a) "DTX_BCH_ENAX"
+#define busnum_BDK_DTX_BCH_ENAX(a) (a)
+#define arguments_BDK_DTX_BCH_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bch_sel#
+ *
+ * DTX BCH Select Register
+ */
+union bdk_dtx_bch_selx
+{
+ uint64_t u;
+ struct bdk_dtx_bch_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_bch_selx_s cn; */
+};
+typedef union bdk_dtx_bch_selx bdk_dtx_bch_selx_t;
+
+static inline uint64_t BDK_DTX_BCH_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BCH_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0feb88000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb88000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb88000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_BCH_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BCH_SELX(a) bdk_dtx_bch_selx_t
+#define bustype_BDK_DTX_BCH_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BCH_SELX(a) "DTX_BCH_SELX"
+#define busnum_BDK_DTX_BCH_SELX(a) (a)
+#define arguments_BDK_DTX_BCH_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bgx#_bcst_rsp
+ *
+ * DTX BGX Control Register
+ */
+union bdk_dtx_bgxx_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_bgxx_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63 : 63;
+        uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bgxx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_bgxx_bcst_rsp bdk_dtx_bgxx_bcst_rsp_t;
+
+/* Physical address of DTX_BGX(a)_BCST_RSP; valid BGX index range depends on
+   the chip model (invalid indexes fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BGXX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BGXX_BCST_RSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0fe700080ll + 0x8000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x87e0fe700080ll + 0x8000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0fe700080ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_BGXX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BGXX_BCST_RSP(a) bdk_dtx_bgxx_bcst_rsp_t
+#define bustype_BDK_DTX_BGXX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BGXX_BCST_RSP(a) "DTX_BGXX_BCST_RSP"
+#define busnum_BDK_DTX_BGXX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_BGXX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bgx#_ctl
+ *
+ * DTX BGX Control Register
+ */
+union bdk_dtx_bgxx_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_bgxx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63 : 59;
+        uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3 : 2;
+        uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                 For diagnostic use only. */
+        uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                 For diagnostic use only. */
+        uint64_t reserved_2_3 : 2;
+        uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bgxx_ctl_s cn; */
+};
+typedef union bdk_dtx_bgxx_ctl bdk_dtx_bgxx_ctl_t;
+
+/* Physical address of DTX_BGX(a)_CTL; valid BGX index range depends on
+   the chip model (invalid indexes fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BGXX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BGXX_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0fe700060ll + 0x8000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x87e0fe700060ll + 0x8000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0fe700060ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_BGXX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BGXX_CTL(a) bdk_dtx_bgxx_ctl_t
+#define bustype_BDK_DTX_BGXX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BGXX_CTL(a) "DTX_BGXX_CTL"
+#define busnum_BDK_DTX_BGXX_CTL(a) (a)
+#define arguments_BDK_DTX_BGXX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bgx#_dat#
+ *
+ * DTX BGX Raw Data Register
+ */
+union bdk_dtx_bgxx_datx
+{
+    uint64_t u;
+    struct bdk_dtx_bgxx_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bgxx_datx_s cn; */
+};
+typedef union bdk_dtx_bgxx_datx bdk_dtx_bgxx_datx_t;
+
+/* Physical address of DTX_BGX(a)_DAT(b); valid index ranges depend on
+   the chip model (invalid combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BGXX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BGXX_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe700040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe700040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe700040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_BGXX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_BGXX_DATX(a,b) bdk_dtx_bgxx_datx_t
+#define bustype_BDK_DTX_BGXX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BGXX_DATX(a,b) "DTX_BGXX_DATX"
+#define busnum_BDK_DTX_BGXX_DATX(a,b) (a)
+#define arguments_BDK_DTX_BGXX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_bgx#_ena#
+ *
+ * DTX BGX Data Enable Register
+ */
+union bdk_dtx_bgxx_enax
+{
+    uint64_t u;
+    struct bdk_dtx_bgxx_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bgxx_enax_s cn; */
+};
+typedef union bdk_dtx_bgxx_enax bdk_dtx_bgxx_enax_t;
+
+/* Physical address of DTX_BGX(a)_ENA(b); valid index ranges depend on
+   the chip model (invalid combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BGXX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BGXX_ENAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe700020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe700020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe700020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_BGXX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_BGXX_ENAX(a,b) bdk_dtx_bgxx_enax_t
+#define bustype_BDK_DTX_BGXX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BGXX_ENAX(a,b) "DTX_BGXX_ENAX"
+#define busnum_BDK_DTX_BGXX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_BGXX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_bgx#_sel#
+ *
+ * DTX BGX Select Register
+ */
+union bdk_dtx_bgxx_selx
+{
+    uint64_t u;
+    struct bdk_dtx_bgxx_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63 : 40;
+        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bgxx_selx_s cn; */
+};
+typedef union bdk_dtx_bgxx_selx bdk_dtx_bgxx_selx_t;
+
+/* Physical address of DTX_BGX(a)_SEL(b); valid index ranges depend on
+   the chip model (invalid combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BGXX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BGXX_SELX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe700000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe700000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe700000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_BGXX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_BGXX_SELX(a,b) bdk_dtx_bgxx_selx_t
+#define bustype_BDK_DTX_BGXX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BGXX_SELX(a,b) "DTX_BGXX_SELX"
+#define busnum_BDK_DTX_BGXX_SELX(a,b) (a)
+#define arguments_BDK_DTX_BGXX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_broadcast_ctl
+ *
+ * DTX BROADCAST Control Register
+ */
+union bdk_dtx_broadcast_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_broadcast_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63 : 59;
+        uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3 : 2;
+        uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                 For diagnostic use only. */
+        uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                 For diagnostic use only. */
+        uint64_t reserved_2_3 : 2;
+        uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_broadcast_ctl_s cn; */
+};
+typedef union bdk_dtx_broadcast_ctl bdk_dtx_broadcast_ctl_t;
+
+/* DTX_BROADCAST_CTL is a single (non-indexed) register at a fixed address,
+   hence the argument-less _FUNC form with no model check. */
+#define BDK_DTX_BROADCAST_CTL BDK_DTX_BROADCAST_CTL_FUNC()
+static inline uint64_t BDK_DTX_BROADCAST_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BROADCAST_CTL_FUNC(void)
+{
+    return 0x87e0fe7f0060ll;
+}
+
+#define typedef_BDK_DTX_BROADCAST_CTL bdk_dtx_broadcast_ctl_t
+#define bustype_BDK_DTX_BROADCAST_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BROADCAST_CTL "DTX_BROADCAST_CTL"
+#define busnum_BDK_DTX_BROADCAST_CTL 0
+#define arguments_BDK_DTX_BROADCAST_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_broadcast_ena#
+ *
+ * DTX BROADCAST Data Enable Register
+ */
+union bdk_dtx_broadcast_enax
+{
+    uint64_t u;
+    struct bdk_dtx_broadcast_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_broadcast_enax_s cn; */
+};
+typedef union bdk_dtx_broadcast_enax bdk_dtx_broadcast_enax_t;
+
+/* Physical address of DTX_BROADCAST_ENA(a); index range 0..1 on all models
+   (no model check — invalid indexes fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BROADCAST_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BROADCAST_ENAX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe7f0020ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_BROADCAST_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BROADCAST_ENAX(a) bdk_dtx_broadcast_enax_t
+#define bustype_BDK_DTX_BROADCAST_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BROADCAST_ENAX(a) "DTX_BROADCAST_ENAX"
+#define busnum_BDK_DTX_BROADCAST_ENAX(a) (a)
+#define arguments_BDK_DTX_BROADCAST_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_broadcast_sel#
+ *
+ * DTX BROADCAST Select Register
+ */
+union bdk_dtx_broadcast_selx
+{
+    uint64_t u;
+    struct bdk_dtx_broadcast_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63 : 40;
+        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_broadcast_selx_s cn; */
+};
+typedef union bdk_dtx_broadcast_selx bdk_dtx_broadcast_selx_t;
+
+/* Physical address of DTX_BROADCAST_SEL(a); index range 0..1 on all models
+   (no model check — invalid indexes fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BROADCAST_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BROADCAST_SELX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe7f0000ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_BROADCAST_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BROADCAST_SELX(a) bdk_dtx_broadcast_selx_t
+#define bustype_BDK_DTX_BROADCAST_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BROADCAST_SELX(a) "DTX_BROADCAST_SELX"
+#define busnum_BDK_DTX_BROADCAST_SELX(a) (a)
+#define arguments_BDK_DTX_BROADCAST_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bts_bcst_rsp
+ *
+ * DTX BTS Control Register
+ */
+union bdk_dtx_bts_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_bts_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63 : 63;
+        uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bts_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_bts_bcst_rsp bdk_dtx_bts_bcst_rsp_t;
+
+/* DTX_BTS_BCST_RSP exists only on CN9XXX parts; other models fault via
+   __bdk_csr_fatal(). */
+#define BDK_DTX_BTS_BCST_RSP BDK_DTX_BTS_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_BTS_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BTS_BCST_RSP_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x87e0fe090080ll;
+    __bdk_csr_fatal("DTX_BTS_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BTS_BCST_RSP bdk_dtx_bts_bcst_rsp_t
+#define bustype_BDK_DTX_BTS_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BTS_BCST_RSP "DTX_BTS_BCST_RSP"
+#define busnum_BDK_DTX_BTS_BCST_RSP 0
+#define arguments_BDK_DTX_BTS_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bts_ctl
+ *
+ * DTX BTS Control Register
+ */
+union bdk_dtx_bts_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_bts_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63 : 59;
+        uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3 : 2;
+        uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                 For diagnostic use only. */
+        uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                 For diagnostic use only. */
+        uint64_t reserved_2_3 : 2;
+        uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bts_ctl_s cn; */
+};
+typedef union bdk_dtx_bts_ctl bdk_dtx_bts_ctl_t;
+
+/* DTX_BTS_CTL exists only on CN9XXX parts; other models fault via
+   __bdk_csr_fatal(). */
+#define BDK_DTX_BTS_CTL BDK_DTX_BTS_CTL_FUNC()
+static inline uint64_t BDK_DTX_BTS_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BTS_CTL_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x87e0fe090060ll;
+    __bdk_csr_fatal("DTX_BTS_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BTS_CTL bdk_dtx_bts_ctl_t
+#define bustype_BDK_DTX_BTS_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BTS_CTL "DTX_BTS_CTL"
+#define busnum_BDK_DTX_BTS_CTL 0
+#define arguments_BDK_DTX_BTS_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bts_dat#
+ *
+ * DTX BTS Raw Data Register
+ */
+union bdk_dtx_bts_datx
+{
+    uint64_t u;
+    struct bdk_dtx_bts_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bts_datx_s cn; */
+};
+typedef union bdk_dtx_bts_datx bdk_dtx_bts_datx_t;
+
+/* Physical address of DTX_BTS_DAT(a); CN9XXX only, index 0..1 (invalid
+   combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BTS_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BTS_DATX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe090040ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_BTS_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BTS_DATX(a) bdk_dtx_bts_datx_t
+#define bustype_BDK_DTX_BTS_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BTS_DATX(a) "DTX_BTS_DATX"
+#define busnum_BDK_DTX_BTS_DATX(a) (a)
+#define arguments_BDK_DTX_BTS_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bts_ena#
+ *
+ * DTX BTS Data Enable Register
+ */
+union bdk_dtx_bts_enax
+{
+    uint64_t u;
+    struct bdk_dtx_bts_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bts_enax_s cn; */
+};
+typedef union bdk_dtx_bts_enax bdk_dtx_bts_enax_t;
+
+/* Physical address of DTX_BTS_ENA(a); CN9XXX only, index 0..1 (invalid
+   combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BTS_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BTS_ENAX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe090020ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_BTS_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BTS_ENAX(a) bdk_dtx_bts_enax_t
+#define bustype_BDK_DTX_BTS_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BTS_ENAX(a) "DTX_BTS_ENAX"
+#define busnum_BDK_DTX_BTS_ENAX(a) (a)
+#define arguments_BDK_DTX_BTS_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_bts_sel#
+ *
+ * DTX BTS Select Register
+ */
+union bdk_dtx_bts_selx
+{
+    uint64_t u;
+    struct bdk_dtx_bts_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63 : 40;
+        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_bts_selx_s cn; */
+};
+typedef union bdk_dtx_bts_selx bdk_dtx_bts_selx_t;
+
+/* Physical address of DTX_BTS_SEL(a); CN9XXX only, index 0..1 (invalid
+   combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_BTS_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_BTS_SELX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe090000ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_BTS_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_BTS_SELX(a) bdk_dtx_bts_selx_t
+#define bustype_BDK_DTX_BTS_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_BTS_SELX(a) "DTX_BTS_SELX"
+#define busnum_BDK_DTX_BTS_SELX(a) (a)
+#define arguments_BDK_DTX_BTS_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_msw_bcst_rsp
+ *
+ * DTX CCU_MSW Control Register
+ */
+union bdk_dtx_ccux_msw_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_ccux_msw_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63 : 63;
+        uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_ccux_msw_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ccux_msw_bcst_rsp bdk_dtx_ccux_msw_bcst_rsp_t;
+
+/* Physical address of DTX_CCU(a)_MSW_BCST_RSP; CN9XXX only, index 0..3
+   (invalid combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_CCUX_MSW_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_MSW_BCST_RSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x87e0fec80080ll + 0x8000ll * ((a) & 0x3);
+    __bdk_csr_fatal("DTX_CCUX_MSW_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CCUX_MSW_BCST_RSP(a) bdk_dtx_ccux_msw_bcst_rsp_t
+#define bustype_BDK_DTX_CCUX_MSW_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_MSW_BCST_RSP(a) "DTX_CCUX_MSW_BCST_RSP"
+#define busnum_BDK_DTX_CCUX_MSW_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_CCUX_MSW_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_msw_ctl
+ *
+ * DTX CCU_MSW Control Register
+ */
+union bdk_dtx_ccux_msw_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_ccux_msw_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63 : 59;
+        uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3 : 2;
+        uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                 For diagnostic use only. */
+        uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                 For diagnostic use only. */
+        uint64_t reserved_2_3 : 2;
+        uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_ccux_msw_ctl_s cn; */
+};
+typedef union bdk_dtx_ccux_msw_ctl bdk_dtx_ccux_msw_ctl_t;
+
+/* Physical address of DTX_CCU(a)_MSW_CTL; CN9XXX only, index 0..3
+   (invalid combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_CCUX_MSW_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_MSW_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x87e0fec80060ll + 0x8000ll * ((a) & 0x3);
+    __bdk_csr_fatal("DTX_CCUX_MSW_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CCUX_MSW_CTL(a) bdk_dtx_ccux_msw_ctl_t
+#define bustype_BDK_DTX_CCUX_MSW_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_MSW_CTL(a) "DTX_CCUX_MSW_CTL"
+#define busnum_BDK_DTX_CCUX_MSW_CTL(a) (a)
+#define arguments_BDK_DTX_CCUX_MSW_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_msw_dat#
+ *
+ * DTX CCU_MSW Raw Data Register
+ */
+union bdk_dtx_ccux_msw_datx
+{
+    uint64_t u;
+    struct bdk_dtx_ccux_msw_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_ccux_msw_datx_s cn; */
+};
+typedef union bdk_dtx_ccux_msw_datx bdk_dtx_ccux_msw_datx_t;
+
+/* Physical address of DTX_CCU(a)_MSW_DAT(b); CN9XXX only, a=0..3, b=0..1
+   (invalid combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_CCUX_MSW_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_MSW_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+        return 0x87e0fec80040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_CCUX_MSW_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_CCUX_MSW_DATX(a,b) bdk_dtx_ccux_msw_datx_t
+#define bustype_BDK_DTX_CCUX_MSW_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_MSW_DATX(a,b) "DTX_CCUX_MSW_DATX"
+#define busnum_BDK_DTX_CCUX_MSW_DATX(a,b) (a)
+#define arguments_BDK_DTX_CCUX_MSW_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_msw_ena#
+ *
+ * DTX CCU_MSW Data Enable Register
+ */
+union bdk_dtx_ccux_msw_enax
+{
+    uint64_t u;
+    struct bdk_dtx_ccux_msw_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63 : 28;
+        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_ccux_msw_enax_s cn; */
+};
+typedef union bdk_dtx_ccux_msw_enax bdk_dtx_ccux_msw_enax_t;
+
+/* Physical address of DTX_CCU(a)_MSW_ENA(b); CN9XXX only, a=0..3, b=0..1
+   (invalid combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_CCUX_MSW_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_MSW_ENAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+        return 0x87e0fec80020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_CCUX_MSW_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_CCUX_MSW_ENAX(a,b) bdk_dtx_ccux_msw_enax_t
+#define bustype_BDK_DTX_CCUX_MSW_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_MSW_ENAX(a,b) "DTX_CCUX_MSW_ENAX"
+#define busnum_BDK_DTX_CCUX_MSW_ENAX(a,b) (a)
+#define arguments_BDK_DTX_CCUX_MSW_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_msw_sel#
+ *
+ * DTX CCU_MSW Select Register
+ */
+union bdk_dtx_ccux_msw_selx
+{
+    uint64_t u;
+    struct bdk_dtx_ccux_msw_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63 : 40;
+        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_ccux_msw_selx_s cn; */
+};
+typedef union bdk_dtx_ccux_msw_selx bdk_dtx_ccux_msw_selx_t;
+
+/* Physical address of DTX_CCU(a)_MSW_SEL(b); CN9XXX only, a=0..3, b=0..1
+   (invalid combinations fault via __bdk_csr_fatal()). */
+static inline uint64_t BDK_DTX_CCUX_MSW_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_MSW_SELX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+        return 0x87e0fec80000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_CCUX_MSW_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_CCUX_MSW_SELX(a,b) bdk_dtx_ccux_msw_selx_t
+#define bustype_BDK_DTX_CCUX_MSW_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_MSW_SELX(a,b) "DTX_CCUX_MSW_SELX"
+#define busnum_BDK_DTX_CCUX_MSW_SELX(a,b) (a)
+#define arguments_BDK_DTX_CCUX_MSW_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_pic_bcst_rsp
+ *
+ * DTX CCU_PIC Control Register
+ */
+union bdk_dtx_ccux_pic_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_pic_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_pic_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ccux_pic_bcst_rsp bdk_dtx_ccux_pic_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_CCUX_PIC_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_PIC_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x87e0fe280080ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_CCUX_PIC_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CCUX_PIC_BCST_RSP(a) bdk_dtx_ccux_pic_bcst_rsp_t
+#define bustype_BDK_DTX_CCUX_PIC_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_PIC_BCST_RSP(a) "DTX_CCUX_PIC_BCST_RSP"
+#define busnum_BDK_DTX_CCUX_PIC_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_CCUX_PIC_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_pic_ctl
+ *
+ * DTX CCU_PIC Control Register
+ */
+union bdk_dtx_ccux_pic_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_pic_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_pic_ctl_s cn; */
+};
+typedef union bdk_dtx_ccux_pic_ctl bdk_dtx_ccux_pic_ctl_t;
+
+static inline uint64_t BDK_DTX_CCUX_PIC_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_PIC_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x87e0fe280060ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_CCUX_PIC_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CCUX_PIC_CTL(a) bdk_dtx_ccux_pic_ctl_t
+#define bustype_BDK_DTX_CCUX_PIC_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_PIC_CTL(a) "DTX_CCUX_PIC_CTL"
+#define busnum_BDK_DTX_CCUX_PIC_CTL(a) (a)
+#define arguments_BDK_DTX_CCUX_PIC_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_pic_dat#
+ *
+ * DTX CCU_PIC Raw Data Register
+ */
+union bdk_dtx_ccux_pic_datx
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_pic_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_pic_datx_s cn; */
+};
+typedef union bdk_dtx_ccux_pic_datx bdk_dtx_ccux_pic_datx_t;
+
+/* Compute the CSR physical address of DTX_CCU(a)_PIC_DAT(b); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CCUX_PIC_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_PIC_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe280040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CCUX_PIC_DATX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CCUX_PIC_DATX(a,b) bdk_dtx_ccux_pic_datx_t
+#define bustype_BDK_DTX_CCUX_PIC_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_PIC_DATX(a,b) "DTX_CCUX_PIC_DATX"
+#define busnum_BDK_DTX_CCUX_PIC_DATX(a,b) (a)
+#define arguments_BDK_DTX_CCUX_PIC_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_pic_ena#
+ *
+ * DTX CCU_PIC Data Enable Register
+ */
+union bdk_dtx_ccux_pic_enax
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_pic_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_pic_enax_s cn; */
+};
+typedef union bdk_dtx_ccux_pic_enax bdk_dtx_ccux_pic_enax_t;
+
+/* Compute the CSR physical address of DTX_CCU(a)_PIC_ENA(b); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CCUX_PIC_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_PIC_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe280020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CCUX_PIC_ENAX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CCUX_PIC_ENAX(a,b) bdk_dtx_ccux_pic_enax_t
+#define bustype_BDK_DTX_CCUX_PIC_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_PIC_ENAX(a,b) "DTX_CCUX_PIC_ENAX"
+#define busnum_BDK_DTX_CCUX_PIC_ENAX(a,b) (a)
+#define arguments_BDK_DTX_CCUX_PIC_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_pic_sel#
+ *
+ * DTX CCU_PIC Select Register
+ */
+union bdk_dtx_ccux_pic_selx
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_pic_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_pic_selx_s cn; */
+};
+typedef union bdk_dtx_ccux_pic_selx bdk_dtx_ccux_pic_selx_t;
+
+/* Compute the CSR physical address of DTX_CCU(a)_PIC_SEL(b); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CCUX_PIC_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_PIC_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe280000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CCUX_PIC_SELX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CCUX_PIC_SELX(a,b) bdk_dtx_ccux_pic_selx_t
+#define bustype_BDK_DTX_CCUX_PIC_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_PIC_SELX(a,b) "DTX_CCUX_PIC_SELX"
+#define busnum_BDK_DTX_CCUX_PIC_SELX(a,b) (a)
+#define arguments_BDK_DTX_CCUX_PIC_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_tad#_bcst_rsp
+ *
+ * DTX CCU_TAD Control Register
+ */
+union bdk_dtx_ccux_tadx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_tadx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_tadx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ccux_tadx_bcst_rsp bdk_dtx_ccux_tadx_bcst_rsp_t;
+
+/* Compute the CSR physical address of DTX_CCU(a)_TAD(b)_BCST_RSP; calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CCUX_TADX_BCST_RSP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_TADX_BCST_RSP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+ return 0x87e0fed00080ll + 0x10000ll * ((a) & 0x3) + 0x8000ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CCUX_TADX_BCST_RSP", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CCUX_TADX_BCST_RSP(a,b) bdk_dtx_ccux_tadx_bcst_rsp_t
+#define bustype_BDK_DTX_CCUX_TADX_BCST_RSP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_TADX_BCST_RSP(a,b) "DTX_CCUX_TADX_BCST_RSP"
+#define busnum_BDK_DTX_CCUX_TADX_BCST_RSP(a,b) (a)
+#define arguments_BDK_DTX_CCUX_TADX_BCST_RSP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_tad#_ctl
+ *
+ * DTX CCU_TAD Control Register
+ */
+union bdk_dtx_ccux_tadx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_tadx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_tadx_ctl_s cn; */
+};
+typedef union bdk_dtx_ccux_tadx_ctl bdk_dtx_ccux_tadx_ctl_t;
+
+/* Compute the CSR physical address of DTX_CCU(a)_TAD(b)_CTL; calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CCUX_TADX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_TADX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+ return 0x87e0fed00060ll + 0x10000ll * ((a) & 0x3) + 0x8000ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CCUX_TADX_CTL", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CCUX_TADX_CTL(a,b) bdk_dtx_ccux_tadx_ctl_t
+#define bustype_BDK_DTX_CCUX_TADX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_TADX_CTL(a,b) "DTX_CCUX_TADX_CTL"
+#define busnum_BDK_DTX_CCUX_TADX_CTL(a,b) (a)
+#define arguments_BDK_DTX_CCUX_TADX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ccu#_tad#_dat#
+ *
+ * DTX CCU_TAD Raw Data Register
+ */
+union bdk_dtx_ccux_tadx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_tadx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_tadx_datx_s cn; */
+};
+typedef union bdk_dtx_ccux_tadx_datx bdk_dtx_ccux_tadx_datx_t;
+
+/* Compute the CSR physical address of DTX_CCU(a)_TAD(b)_DAT(c); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CCUX_TADX_DATX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_TADX_DATX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1) && (c<=1)))
+ return 0x87e0fed00040ll + 0x10000ll * ((a) & 0x3) + 0x8000ll * ((b) & 0x1) + 8ll * ((c) & 0x1);
+ __bdk_csr_fatal("DTX_CCUX_TADX_DATX", 3, a, b, c, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CCUX_TADX_DATX(a,b,c) bdk_dtx_ccux_tadx_datx_t
+#define bustype_BDK_DTX_CCUX_TADX_DATX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_TADX_DATX(a,b,c) "DTX_CCUX_TADX_DATX"
+#define busnum_BDK_DTX_CCUX_TADX_DATX(a,b,c) (a)
+#define arguments_BDK_DTX_CCUX_TADX_DATX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) dtx_ccu#_tad#_ena#
+ *
+ * DTX CCU_TAD Data Enable Register
+ */
+union bdk_dtx_ccux_tadx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_tadx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_tadx_enax_s cn; */
+};
+typedef union bdk_dtx_ccux_tadx_enax bdk_dtx_ccux_tadx_enax_t;
+
+/* Compute the CSR physical address of DTX_CCU(a)_TAD(b)_ENA(c); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CCUX_TADX_ENAX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_TADX_ENAX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1) && (c<=1)))
+ return 0x87e0fed00020ll + 0x10000ll * ((a) & 0x3) + 0x8000ll * ((b) & 0x1) + 8ll * ((c) & 0x1);
+ __bdk_csr_fatal("DTX_CCUX_TADX_ENAX", 3, a, b, c, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CCUX_TADX_ENAX(a,b,c) bdk_dtx_ccux_tadx_enax_t
+#define bustype_BDK_DTX_CCUX_TADX_ENAX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_TADX_ENAX(a,b,c) "DTX_CCUX_TADX_ENAX"
+#define busnum_BDK_DTX_CCUX_TADX_ENAX(a,b,c) (a)
+#define arguments_BDK_DTX_CCUX_TADX_ENAX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) dtx_ccu#_tad#_sel#
+ *
+ * DTX CCU_TAD Select Register
+ */
+union bdk_dtx_ccux_tadx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_ccux_tadx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ccux_tadx_selx_s cn; */
+};
+typedef union bdk_dtx_ccux_tadx_selx bdk_dtx_ccux_tadx_selx_t;
+
+/* Compute the CSR physical address of DTX_CCU(a)_TAD(b)_SEL(c); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CCUX_TADX_SELX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CCUX_TADX_SELX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1) && (c<=1)))
+ return 0x87e0fed00000ll + 0x10000ll * ((a) & 0x3) + 0x8000ll * ((b) & 0x1) + 8ll * ((c) & 0x1);
+ __bdk_csr_fatal("DTX_CCUX_TADX_SELX", 3, a, b, c, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CCUX_TADX_SELX(a,b,c) bdk_dtx_ccux_tadx_selx_t
+#define bustype_BDK_DTX_CCUX_TADX_SELX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CCUX_TADX_SELX(a,b,c) "DTX_CCUX_TADX_SELX"
+#define busnum_BDK_DTX_CCUX_TADX_SELX(a,b,c) (a)
+#define arguments_BDK_DTX_CCUX_TADX_SELX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) dtx_cde_bcst_rsp
+ *
+ * INTERNAL: DTX CDE Control Register
+ */
+union bdk_dtx_cde_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_cde_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cde_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_cde_bcst_rsp bdk_dtx_cde_bcst_rsp_t;
+
+#define BDK_DTX_CDE_BCST_RSP BDK_DTX_CDE_BCST_RSP_FUNC()
+/* Compute the CSR physical address of DTX_CDE_BCST_RSP (same address on
+   CN81XX and CN83XX); calls __bdk_csr_fatal() on any other model. */
+static inline uint64_t BDK_DTX_CDE_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CDE_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0fe7f8080ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0fe7f8080ll;
+ __bdk_csr_fatal("DTX_CDE_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CDE_BCST_RSP bdk_dtx_cde_bcst_rsp_t
+#define bustype_BDK_DTX_CDE_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CDE_BCST_RSP "DTX_CDE_BCST_RSP"
+#define busnum_BDK_DTX_CDE_BCST_RSP 0
+#define arguments_BDK_DTX_CDE_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cde_ctl
+ *
+ * INTERNAL: DTX CDE Control Register
+ */
+union bdk_dtx_cde_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_cde_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cde_ctl_s cn; */
+};
+typedef union bdk_dtx_cde_ctl bdk_dtx_cde_ctl_t;
+
+#define BDK_DTX_CDE_CTL BDK_DTX_CDE_CTL_FUNC()
+/* Compute the CSR physical address of DTX_CDE_CTL (same address on
+   CN81XX and CN83XX); calls __bdk_csr_fatal() on any other model. */
+static inline uint64_t BDK_DTX_CDE_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CDE_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0fe7f8060ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0fe7f8060ll;
+ __bdk_csr_fatal("DTX_CDE_CTL", 0, 0, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CDE_CTL bdk_dtx_cde_ctl_t
+#define bustype_BDK_DTX_CDE_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CDE_CTL "DTX_CDE_CTL"
+#define busnum_BDK_DTX_CDE_CTL 0
+#define arguments_BDK_DTX_CDE_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cde_dat#
+ *
+ * INTERNAL: DTX CDE Raw Data Register
+ */
+union bdk_dtx_cde_datx
+{
+ uint64_t u;
+ struct bdk_dtx_cde_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cde_datx_s cn; */
+};
+typedef union bdk_dtx_cde_datx bdk_dtx_cde_datx_t;
+
+/* Compute the CSR physical address of DTX_CDE_DAT(a) on CN81XX/CN83XX;
+   calls __bdk_csr_fatal() on a bad index or any other model. */
+static inline uint64_t BDK_DTX_CDE_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CDE_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0fe7f8040ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe7f8040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CDE_DATX", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CDE_DATX(a) bdk_dtx_cde_datx_t
+#define bustype_BDK_DTX_CDE_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CDE_DATX(a) "DTX_CDE_DATX"
+#define busnum_BDK_DTX_CDE_DATX(a) (a)
+#define arguments_BDK_DTX_CDE_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cde_ena#
+ *
+ * INTERNAL: DTX CDE Data Enable Register
+ */
+union bdk_dtx_cde_enax
+{
+ uint64_t u;
+ struct bdk_dtx_cde_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cde_enax_s cn; */
+};
+typedef union bdk_dtx_cde_enax bdk_dtx_cde_enax_t;
+
+/* Compute the CSR physical address of DTX_CDE_ENA(a) on CN81XX/CN83XX;
+   calls __bdk_csr_fatal() on a bad index or any other model. */
+static inline uint64_t BDK_DTX_CDE_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CDE_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0fe7f8020ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe7f8020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CDE_ENAX", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CDE_ENAX(a) bdk_dtx_cde_enax_t
+#define bustype_BDK_DTX_CDE_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CDE_ENAX(a) "DTX_CDE_ENAX"
+#define busnum_BDK_DTX_CDE_ENAX(a) (a)
+#define arguments_BDK_DTX_CDE_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cde_sel#
+ *
+ * INTERNAL: DTX CDE Select Register
+ */
+union bdk_dtx_cde_selx
+{
+ uint64_t u;
+ struct bdk_dtx_cde_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cde_selx_s cn; */
+};
+typedef union bdk_dtx_cde_selx bdk_dtx_cde_selx_t;
+
+/* Compute the CSR physical address of DTX_CDE_SEL(a) on CN81XX/CN83XX;
+   calls __bdk_csr_fatal() on a bad index or any other model. */
+static inline uint64_t BDK_DTX_CDE_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CDE_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0fe7f8000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe7f8000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CDE_SELX", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CDE_SELX(a) bdk_dtx_cde_selx_t
+#define bustype_BDK_DTX_CDE_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CDE_SELX(a) "DTX_CDE_SELX"
+#define busnum_BDK_DTX_CDE_SELX(a) (a)
+#define arguments_BDK_DTX_CDE_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cgx#_bcst_rsp
+ *
+ * DTX CGX Control Register
+ */
+union bdk_dtx_cgxx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_cgxx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cgxx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_cgxx_bcst_rsp bdk_dtx_cgxx_bcst_rsp_t;
+
+/* Compute the CSR physical address of DTX_CGX(a)_BCST_RSP; calls
+   __bdk_csr_fatal() when the index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CGXX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CGXX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0fe700080ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_CGXX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CGXX_BCST_RSP(a) bdk_dtx_cgxx_bcst_rsp_t
+#define bustype_BDK_DTX_CGXX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CGXX_BCST_RSP(a) "DTX_CGXX_BCST_RSP"
+#define busnum_BDK_DTX_CGXX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_CGXX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cgx#_ctl
+ *
+ * DTX CGX Control Register
+ */
+union bdk_dtx_cgxx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_cgxx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cgxx_ctl_s cn; */
+};
+typedef union bdk_dtx_cgxx_ctl bdk_dtx_cgxx_ctl_t;
+
+/* Compute the CSR physical address of DTX_CGX(a)_CTL; calls
+   __bdk_csr_fatal() when the index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CGXX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CGXX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0fe700060ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_CGXX_CTL", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CGXX_CTL(a) bdk_dtx_cgxx_ctl_t
+#define bustype_BDK_DTX_CGXX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CGXX_CTL(a) "DTX_CGXX_CTL"
+#define busnum_BDK_DTX_CGXX_CTL(a) (a)
+#define arguments_BDK_DTX_CGXX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cgx#_dat#
+ *
+ * DTX CGX Raw Data Register
+ */
+union bdk_dtx_cgxx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_cgxx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cgxx_datx_s cn; */
+};
+typedef union bdk_dtx_cgxx_datx bdk_dtx_cgxx_datx_t;
+
+/* Compute the CSR physical address of DTX_CGX(a)_DAT(b); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CGXX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CGXX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe700040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CGXX_DATX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CGXX_DATX(a,b) bdk_dtx_cgxx_datx_t
+#define bustype_BDK_DTX_CGXX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CGXX_DATX(a,b) "DTX_CGXX_DATX"
+#define busnum_BDK_DTX_CGXX_DATX(a,b) (a)
+#define arguments_BDK_DTX_CGXX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_cgx#_ena#
+ *
+ * DTX CGX Data Enable Register
+ */
+union bdk_dtx_cgxx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_cgxx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cgxx_enax_s cn; */
+};
+typedef union bdk_dtx_cgxx_enax bdk_dtx_cgxx_enax_t;
+
+/* Compute the CSR physical address of DTX_CGX(a)_ENA(b); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CGXX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CGXX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe700020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CGXX_ENAX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CGXX_ENAX(a,b) bdk_dtx_cgxx_enax_t
+#define bustype_BDK_DTX_CGXX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CGXX_ENAX(a,b) "DTX_CGXX_ENAX"
+#define busnum_BDK_DTX_CGXX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_CGXX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_cgx#_sel#
+ *
+ * DTX CGX Select Register
+ */
+union bdk_dtx_cgxx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_cgxx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cgxx_selx_s cn; */
+};
+typedef union bdk_dtx_cgxx_selx bdk_dtx_cgxx_selx_t;
+
+/* Compute the CSR physical address of DTX_CGX(a)_SEL(b); calls
+   __bdk_csr_fatal() when an index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CGXX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CGXX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe700000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CGXX_SELX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CGXX_SELX(a,b) bdk_dtx_cgxx_selx_t
+#define bustype_BDK_DTX_CGXX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CGXX_SELX(a,b) "DTX_CGXX_SELX"
+#define busnum_BDK_DTX_CGXX_SELX(a,b) (a)
+#define arguments_BDK_DTX_CGXX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_cpc_bcst_rsp
+ *
+ * DTX CPC Control Register
+ */
+union bdk_dtx_cpc_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_cpc_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpc_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_cpc_bcst_rsp bdk_dtx_cpc_bcst_rsp_t;
+
+#define BDK_DTX_CPC_BCST_RSP BDK_DTX_CPC_BCST_RSP_FUNC()
+/* Compute the CSR physical address of DTX_CPC_BCST_RSP; calls
+   __bdk_csr_fatal() when the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CPC_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPC_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feb68080ll;
+ __bdk_csr_fatal("DTX_CPC_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CPC_BCST_RSP bdk_dtx_cpc_bcst_rsp_t
+#define bustype_BDK_DTX_CPC_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPC_BCST_RSP "DTX_CPC_BCST_RSP"
+#define busnum_BDK_DTX_CPC_BCST_RSP 0
+#define arguments_BDK_DTX_CPC_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpc_ctl
+ *
+ * DTX CPC Control Register
+ */
+union bdk_dtx_cpc_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_cpc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpc_ctl_s cn; */
+};
+typedef union bdk_dtx_cpc_ctl bdk_dtx_cpc_ctl_t;
+
+#define BDK_DTX_CPC_CTL BDK_DTX_CPC_CTL_FUNC()
+/* Compute the CSR physical address of DTX_CPC_CTL; calls
+   __bdk_csr_fatal() when the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CPC_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPC_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feb68060ll;
+ __bdk_csr_fatal("DTX_CPC_CTL", 0, 0, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CPC_CTL bdk_dtx_cpc_ctl_t
+#define bustype_BDK_DTX_CPC_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPC_CTL "DTX_CPC_CTL"
+#define busnum_BDK_DTX_CPC_CTL 0
+#define arguments_BDK_DTX_CPC_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpc_dat#
+ *
+ * DTX CPC Raw Data Register
+ */
+union bdk_dtx_cpc_datx
+{
+ uint64_t u;
+ struct bdk_dtx_cpc_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpc_datx_s cn; */
+};
+typedef union bdk_dtx_cpc_datx bdk_dtx_cpc_datx_t;
+
+/* Compute the CSR physical address of DTX_CPC_DAT(a); calls
+   __bdk_csr_fatal() when the index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CPC_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPC_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb68040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPC_DATX", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CPC_DATX(a) bdk_dtx_cpc_datx_t
+#define bustype_BDK_DTX_CPC_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPC_DATX(a) "DTX_CPC_DATX"
+#define busnum_BDK_DTX_CPC_DATX(a) (a)
+#define arguments_BDK_DTX_CPC_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpc_ena#
+ *
+ * DTX CPC Data Enable Register
+ */
+union bdk_dtx_cpc_enax
+{
+ uint64_t u;
+ struct bdk_dtx_cpc_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpc_enax_s cn; */
+};
+typedef union bdk_dtx_cpc_enax bdk_dtx_cpc_enax_t;
+
+/* Compute the CSR physical address of DTX_CPC_ENA(a); calls
+   __bdk_csr_fatal() when the index is out of range or the model is not CN9XXX. */
+static inline uint64_t BDK_DTX_CPC_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPC_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb68020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPC_ENAX", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_DTX_CPC_ENAX(a) bdk_dtx_cpc_enax_t
+#define bustype_BDK_DTX_CPC_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPC_ENAX(a) "DTX_CPC_ENAX"
+#define busnum_BDK_DTX_CPC_ENAX(a) (a)
+#define arguments_BDK_DTX_CPC_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpc_sel#
+ *
+ * DTX CPC Select Register
+ */
+union bdk_dtx_cpc_selx
+{
+ uint64_t u;
+ struct bdk_dtx_cpc_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpc_selx_s cn; */
+};
+typedef union bdk_dtx_cpc_selx bdk_dtx_cpc_selx_t;
+
+static inline uint64_t BDK_DTX_CPC_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPC_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb68000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPC_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPC_SELX(a) bdk_dtx_cpc_selx_t
+#define bustype_BDK_DTX_CPC_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPC_SELX(a) "DTX_CPC_SELX"
+#define busnum_BDK_DTX_CPC_SELX(a) (a)
+#define arguments_BDK_DTX_CPC_SELX(a) (a),-1,-1,-1
+
+/* NOTE(review): DTX registers for the two CPT instances on CN83XX. The first
+ * index 'a' selects the CPT instance (stride 0x8000); DAT/ENA/SEL also take a
+ * second index 'b' selecting the low/high 36-bit debug-bus word (stride 8).
+ * Out-of-range or wrong-model calls route to __bdk_csr_fatal, which is
+ * presumably noreturn — TODO confirm. */
+/**
+ * Register (RSL) dtx_cpt#_bcst_rsp
+ *
+ * DTX CPT Control Register
+ */
+union bdk_dtx_cptx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_cptx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cptx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_cptx_bcst_rsp bdk_dtx_cptx_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_CPTX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPTX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb90080ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPTX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPTX_BCST_RSP(a) bdk_dtx_cptx_bcst_rsp_t
+#define bustype_BDK_DTX_CPTX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPTX_BCST_RSP(a) "DTX_CPTX_BCST_RSP"
+#define busnum_BDK_DTX_CPTX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_CPTX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt#_ctl
+ *
+ * DTX CPT Control Register
+ */
+union bdk_dtx_cptx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_cptx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cptx_ctl_s cn; */
+};
+typedef union bdk_dtx_cptx_ctl bdk_dtx_cptx_ctl_t;
+
+static inline uint64_t BDK_DTX_CPTX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPTX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb90060ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPTX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPTX_CTL(a) bdk_dtx_cptx_ctl_t
+#define bustype_BDK_DTX_CPTX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPTX_CTL(a) "DTX_CPTX_CTL"
+#define busnum_BDK_DTX_CPTX_CTL(a) (a)
+#define arguments_BDK_DTX_CPTX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt#_dat#
+ *
+ * DTX CPT Raw Data Register
+ */
+union bdk_dtx_cptx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_cptx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cptx_datx_s cn; */
+};
+typedef union bdk_dtx_cptx_datx bdk_dtx_cptx_datx_t;
+
+static inline uint64_t BDK_DTX_CPTX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPTX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb90040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CPTX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPTX_DATX(a,b) bdk_dtx_cptx_datx_t
+#define bustype_BDK_DTX_CPTX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPTX_DATX(a,b) "DTX_CPTX_DATX"
+#define busnum_BDK_DTX_CPTX_DATX(a,b) (a)
+#define arguments_BDK_DTX_CPTX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_cpt#_ena#
+ *
+ * DTX CPT Data Enable Register
+ */
+union bdk_dtx_cptx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_cptx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cptx_enax_s cn; */
+};
+typedef union bdk_dtx_cptx_enax bdk_dtx_cptx_enax_t;
+
+static inline uint64_t BDK_DTX_CPTX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPTX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb90020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CPTX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPTX_ENAX(a,b) bdk_dtx_cptx_enax_t
+#define bustype_BDK_DTX_CPTX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPTX_ENAX(a,b) "DTX_CPTX_ENAX"
+#define busnum_BDK_DTX_CPTX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_CPTX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_cpt#_sel#
+ *
+ * DTX CPT Select Register
+ */
+union bdk_dtx_cptx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_cptx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cptx_selx_s cn; */
+};
+typedef union bdk_dtx_cptx_selx bdk_dtx_cptx_selx_t;
+
+static inline uint64_t BDK_DTX_CPTX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPTX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb90000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_CPTX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPTX_SELX(a,b) bdk_dtx_cptx_selx_t
+#define bustype_BDK_DTX_CPTX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPTX_SELX(a,b) "DTX_CPTX_SELX"
+#define busnum_BDK_DTX_CPTX_SELX(a,b) (a)
+#define arguments_BDK_DTX_CPTX_SELX(a,b) (a),(b),-1,-1
+
+/* NOTE(review): DTX registers for the single CPT instance on CN81XX — same
+ * layout and base addresses as the CN83XX dtx_cpt#_* variants above but with
+ * no instance index/stride. Calls on other models route to __bdk_csr_fatal,
+ * which is presumably noreturn — TODO confirm. */
+/**
+ * Register (RSL) dtx_cpt0_bcst_rsp
+ *
+ * DTX CPT0 Control Register
+ */
+union bdk_dtx_cpt0_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_cpt0_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt0_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_cpt0_bcst_rsp bdk_dtx_cpt0_bcst_rsp_t;
+
+#define BDK_DTX_CPT0_BCST_RSP BDK_DTX_CPT0_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_CPT0_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT0_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0feb90080ll;
+ __bdk_csr_fatal("DTX_CPT0_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT0_BCST_RSP bdk_dtx_cpt0_bcst_rsp_t
+#define bustype_BDK_DTX_CPT0_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT0_BCST_RSP "DTX_CPT0_BCST_RSP"
+#define busnum_BDK_DTX_CPT0_BCST_RSP 0
+#define arguments_BDK_DTX_CPT0_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt0_ctl
+ *
+ * DTX CPT0 Control Register
+ */
+union bdk_dtx_cpt0_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_cpt0_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt0_ctl_s cn; */
+};
+typedef union bdk_dtx_cpt0_ctl bdk_dtx_cpt0_ctl_t;
+
+#define BDK_DTX_CPT0_CTL BDK_DTX_CPT0_CTL_FUNC()
+static inline uint64_t BDK_DTX_CPT0_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT0_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0feb90060ll;
+ __bdk_csr_fatal("DTX_CPT0_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT0_CTL bdk_dtx_cpt0_ctl_t
+#define bustype_BDK_DTX_CPT0_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT0_CTL "DTX_CPT0_CTL"
+#define busnum_BDK_DTX_CPT0_CTL 0
+#define arguments_BDK_DTX_CPT0_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt0_dat#
+ *
+ * DTX CPT0 Raw Data Register
+ */
+union bdk_dtx_cpt0_datx
+{
+ uint64_t u;
+ struct bdk_dtx_cpt0_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt0_datx_s cn; */
+};
+typedef union bdk_dtx_cpt0_datx bdk_dtx_cpt0_datx_t;
+
+static inline uint64_t BDK_DTX_CPT0_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT0_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0feb90040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPT0_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT0_DATX(a) bdk_dtx_cpt0_datx_t
+#define bustype_BDK_DTX_CPT0_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT0_DATX(a) "DTX_CPT0_DATX"
+#define busnum_BDK_DTX_CPT0_DATX(a) (a)
+#define arguments_BDK_DTX_CPT0_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt0_ena#
+ *
+ * DTX CPT0 Data Enable Register
+ */
+union bdk_dtx_cpt0_enax
+{
+ uint64_t u;
+ struct bdk_dtx_cpt0_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt0_enax_s cn; */
+};
+typedef union bdk_dtx_cpt0_enax bdk_dtx_cpt0_enax_t;
+
+static inline uint64_t BDK_DTX_CPT0_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT0_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0feb90020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPT0_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT0_ENAX(a) bdk_dtx_cpt0_enax_t
+#define bustype_BDK_DTX_CPT0_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT0_ENAX(a) "DTX_CPT0_ENAX"
+#define busnum_BDK_DTX_CPT0_ENAX(a) (a)
+#define arguments_BDK_DTX_CPT0_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt0_sel#
+ *
+ * DTX CPT0 Select Register
+ */
+union bdk_dtx_cpt0_selx
+{
+ uint64_t u;
+ struct bdk_dtx_cpt0_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt0_selx_s cn; */
+};
+typedef union bdk_dtx_cpt0_selx bdk_dtx_cpt0_selx_t;
+
+static inline uint64_t BDK_DTX_CPT0_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT0_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0feb90000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPT0_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT0_SELX(a) bdk_dtx_cpt0_selx_t
+#define bustype_BDK_DTX_CPT0_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT0_SELX(a) "DTX_CPT0_SELX"
+#define busnum_BDK_DTX_CPT0_SELX(a) (a)
+#define arguments_BDK_DTX_CPT0_SELX(a) (a),-1,-1,-1
+
+/* NOTE(review): DTX CPT registers for CN9XXX — same register layout and base
+ * addresses as the CPT0/CPTX variants above, gated on CAVIUM_CN9XXX instead.
+ * __bdk_csr_fatal is presumably noreturn — TODO confirm; otherwise the
+ * non-void accessors would fall off the end (UB). */
+/**
+ * Register (RSL) dtx_cpt_bcst_rsp
+ *
+ * DTX CPT Control Register
+ */
+union bdk_dtx_cpt_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_cpt_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_cpt_bcst_rsp bdk_dtx_cpt_bcst_rsp_t;
+
+#define BDK_DTX_CPT_BCST_RSP BDK_DTX_CPT_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_CPT_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feb90080ll;
+ __bdk_csr_fatal("DTX_CPT_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT_BCST_RSP bdk_dtx_cpt_bcst_rsp_t
+#define bustype_BDK_DTX_CPT_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT_BCST_RSP "DTX_CPT_BCST_RSP"
+#define busnum_BDK_DTX_CPT_BCST_RSP 0
+#define arguments_BDK_DTX_CPT_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt_ctl
+ *
+ * DTX CPT Control Register
+ */
+union bdk_dtx_cpt_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_cpt_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt_ctl_s cn; */
+};
+typedef union bdk_dtx_cpt_ctl bdk_dtx_cpt_ctl_t;
+
+#define BDK_DTX_CPT_CTL BDK_DTX_CPT_CTL_FUNC()
+static inline uint64_t BDK_DTX_CPT_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feb90060ll;
+ __bdk_csr_fatal("DTX_CPT_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT_CTL bdk_dtx_cpt_ctl_t
+#define bustype_BDK_DTX_CPT_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT_CTL "DTX_CPT_CTL"
+#define busnum_BDK_DTX_CPT_CTL 0
+#define arguments_BDK_DTX_CPT_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt_dat#
+ *
+ * DTX CPT Raw Data Register
+ */
+union bdk_dtx_cpt_datx
+{
+ uint64_t u;
+ struct bdk_dtx_cpt_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt_datx_s cn; */
+};
+typedef union bdk_dtx_cpt_datx bdk_dtx_cpt_datx_t;
+
+static inline uint64_t BDK_DTX_CPT_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb90040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPT_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT_DATX(a) bdk_dtx_cpt_datx_t
+#define bustype_BDK_DTX_CPT_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT_DATX(a) "DTX_CPT_DATX"
+#define busnum_BDK_DTX_CPT_DATX(a) (a)
+#define arguments_BDK_DTX_CPT_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt_ena#
+ *
+ * DTX CPT Data Enable Register
+ */
+union bdk_dtx_cpt_enax
+{
+ uint64_t u;
+ struct bdk_dtx_cpt_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt_enax_s cn; */
+};
+typedef union bdk_dtx_cpt_enax bdk_dtx_cpt_enax_t;
+
+static inline uint64_t BDK_DTX_CPT_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb90020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPT_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT_ENAX(a) bdk_dtx_cpt_enax_t
+#define bustype_BDK_DTX_CPT_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT_ENAX(a) "DTX_CPT_ENAX"
+#define busnum_BDK_DTX_CPT_ENAX(a) (a)
+#define arguments_BDK_DTX_CPT_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_cpt_sel#
+ *
+ * DTX CPT Select Register
+ */
+union bdk_dtx_cpt_selx
+{
+ uint64_t u;
+ struct bdk_dtx_cpt_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_cpt_selx_s cn; */
+};
+typedef union bdk_dtx_cpt_selx bdk_dtx_cpt_selx_t;
+
+static inline uint64_t BDK_DTX_CPT_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_CPT_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb90000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_CPT_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_CPT_SELX(a) bdk_dtx_cpt_selx_t
+#define bustype_BDK_DTX_CPT_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_CPT_SELX(a) "DTX_CPT_SELX"
+#define busnum_BDK_DTX_CPT_SELX(a) (a)
+#define arguments_BDK_DTX_CPT_SELX(a) (a),-1,-1,-1
+
+/* NOTE(review): DTX DAP registers. Unlike the other groups in this file these
+ * accessors have no CAVIUM_IS_MODEL() guard (register exists on all supported
+ * models), so the _FUNC() variants cannot fault; only the indexed accessors
+ * can, on an out-of-range index. */
+/**
+ * Register (RSL) dtx_dap_bcst_rsp
+ *
+ * DTX DAP Control Register
+ */
+union bdk_dtx_dap_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_dap_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_dap_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_dap_bcst_rsp bdk_dtx_dap_bcst_rsp_t;
+
+#define BDK_DTX_DAP_BCST_RSP BDK_DTX_DAP_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_DAP_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DAP_BCST_RSP_FUNC(void)
+{
+ return 0x87e0fe010080ll;
+}
+
+#define typedef_BDK_DTX_DAP_BCST_RSP bdk_dtx_dap_bcst_rsp_t
+#define bustype_BDK_DTX_DAP_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DAP_BCST_RSP "DTX_DAP_BCST_RSP"
+#define busnum_BDK_DTX_DAP_BCST_RSP 0
+#define arguments_BDK_DTX_DAP_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_dap_ctl
+ *
+ * DTX DAP Control Register
+ */
+union bdk_dtx_dap_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_dap_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_dap_ctl_s cn; */
+};
+typedef union bdk_dtx_dap_ctl bdk_dtx_dap_ctl_t;
+
+#define BDK_DTX_DAP_CTL BDK_DTX_DAP_CTL_FUNC()
+static inline uint64_t BDK_DTX_DAP_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DAP_CTL_FUNC(void)
+{
+ return 0x87e0fe010060ll;
+}
+
+#define typedef_BDK_DTX_DAP_CTL bdk_dtx_dap_ctl_t
+#define bustype_BDK_DTX_DAP_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DAP_CTL "DTX_DAP_CTL"
+#define busnum_BDK_DTX_DAP_CTL 0
+#define arguments_BDK_DTX_DAP_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_dap_dat#
+ *
+ * DTX DAP Raw Data Register
+ */
+union bdk_dtx_dap_datx
+{
+ uint64_t u;
+ struct bdk_dtx_dap_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_dap_datx_s cn; */
+};
+typedef union bdk_dtx_dap_datx bdk_dtx_dap_datx_t;
+
+static inline uint64_t BDK_DTX_DAP_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DAP_DATX(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e0fe010040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_DAP_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_DAP_DATX(a) bdk_dtx_dap_datx_t
+#define bustype_BDK_DTX_DAP_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DAP_DATX(a) "DTX_DAP_DATX"
+#define busnum_BDK_DTX_DAP_DATX(a) (a)
+#define arguments_BDK_DTX_DAP_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_dap_ena#
+ *
+ * DTX DAP Data Enable Register
+ */
+union bdk_dtx_dap_enax
+{
+ uint64_t u;
+ struct bdk_dtx_dap_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_dap_enax_s cn; */
+};
+typedef union bdk_dtx_dap_enax bdk_dtx_dap_enax_t;
+
+static inline uint64_t BDK_DTX_DAP_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DAP_ENAX(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e0fe010020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_DAP_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_DAP_ENAX(a) bdk_dtx_dap_enax_t
+#define bustype_BDK_DTX_DAP_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DAP_ENAX(a) "DTX_DAP_ENAX"
+#define busnum_BDK_DTX_DAP_ENAX(a) (a)
+#define arguments_BDK_DTX_DAP_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_dap_sel#
+ *
+ * DTX DAP Select Register
+ */
+union bdk_dtx_dap_selx
+{
+ uint64_t u;
+ struct bdk_dtx_dap_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_dap_selx_s cn; */
+};
+typedef union bdk_dtx_dap_selx bdk_dtx_dap_selx_t;
+
+static inline uint64_t BDK_DTX_DAP_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DAP_SELX(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e0fe010000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_DAP_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_DAP_SELX(a) bdk_dtx_dap_selx_t
+#define bustype_BDK_DTX_DAP_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DAP_SELX(a) "DTX_DAP_SELX"
+#define busnum_BDK_DTX_DAP_SELX(a) (a)
+#define arguments_BDK_DTX_DAP_SELX(a) (a),-1,-1,-1
+
+/* NOTE(review): DTX DDF broadcast-response register, CN83XX only; calls on
+ * other models route to __bdk_csr_fatal (presumably noreturn — TODO confirm). */
+/**
+ * Register (RSL) dtx_ddf_bcst_rsp
+ *
+ * DTX DDF Control Register
+ */
+union bdk_dtx_ddf_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_ddf_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ddf_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ddf_bcst_rsp bdk_dtx_ddf_bcst_rsp_t;
+
+#define BDK_DTX_DDF_BCST_RSP BDK_DTX_DDF_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_DDF_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DDF_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0fe848080ll;
+ __bdk_csr_fatal("DTX_DDF_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_DDF_BCST_RSP bdk_dtx_ddf_bcst_rsp_t
+#define bustype_BDK_DTX_DDF_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DDF_BCST_RSP "DTX_DDF_BCST_RSP"
+#define busnum_BDK_DTX_DDF_BCST_RSP 0
+#define arguments_BDK_DTX_DDF_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ddf_ctl
+ *
+ * DTX DDF Control Register
+ */
+union bdk_dtx_ddf_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_ddf_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ddf_ctl_s cn; */
+};
+typedef union bdk_dtx_ddf_ctl bdk_dtx_ddf_ctl_t;
+
+#define BDK_DTX_DDF_CTL BDK_DTX_DDF_CTL_FUNC()
+static inline uint64_t BDK_DTX_DDF_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DDF_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0fe848060ll;
+ __bdk_csr_fatal("DTX_DDF_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_DDF_CTL bdk_dtx_ddf_ctl_t
+#define bustype_BDK_DTX_DDF_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DDF_CTL "DTX_DDF_CTL"
+#define busnum_BDK_DTX_DDF_CTL 0
+#define arguments_BDK_DTX_DDF_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ddf_dat#
+ *
+ * DTX DDF Raw Data Register
+ */
+union bdk_dtx_ddf_datx
+{
+ uint64_t u;
+ struct bdk_dtx_ddf_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ddf_datx_s cn; */
+};
+typedef union bdk_dtx_ddf_datx bdk_dtx_ddf_datx_t;
+
+static inline uint64_t BDK_DTX_DDF_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DDF_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe848040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_DDF_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_DDF_DATX(a) bdk_dtx_ddf_datx_t
+#define bustype_BDK_DTX_DDF_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DDF_DATX(a) "DTX_DDF_DATX"
+#define busnum_BDK_DTX_DDF_DATX(a) (a)
+#define arguments_BDK_DTX_DDF_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ddf_ena#
+ *
+ * DTX DDF Data Enable Register
+ */
+union bdk_dtx_ddf_enax
+{
+ uint64_t u;
+ struct bdk_dtx_ddf_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ddf_enax_s cn; */
+};
+typedef union bdk_dtx_ddf_enax bdk_dtx_ddf_enax_t;
+
+static inline uint64_t BDK_DTX_DDF_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DDF_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe848020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_DDF_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_DDF_ENAX(a) bdk_dtx_ddf_enax_t
+#define bustype_BDK_DTX_DDF_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DDF_ENAX(a) "DTX_DDF_ENAX"
+#define busnum_BDK_DTX_DDF_ENAX(a) (a)
+#define arguments_BDK_DTX_DDF_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ddf_sel#
+ *
+ * DTX DDF Select Register
+ */
+union bdk_dtx_ddf_selx
+{
+ uint64_t u;
+ struct bdk_dtx_ddf_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ddf_selx_s cn; */
+};
+typedef union bdk_dtx_ddf_selx bdk_dtx_ddf_selx_t;
+
+static inline uint64_t BDK_DTX_DDF_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_DDF_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe848000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_DDF_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_DDF_SELX(a) bdk_dtx_ddf_selx_t
+#define bustype_BDK_DTX_DDF_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_DDF_SELX(a) "DTX_DDF_SELX"
+#define busnum_BDK_DTX_DDF_SELX(a) (a)
+#define arguments_BDK_DTX_DDF_SELX(a) (a),-1,-1,-1
+
/*
 * DTX CSR definitions for the DFA block (CN88XX only -- see the model
 * checks in the accessors below). Standard generated pattern: BCST_RSP,
 * CTL, DAT(0..1), ENA(0..1) and SEL(0..1) registers.
 *
 * NOTE(review): accessors fall through into __bdk_csr_fatal(), which is
 * presumably noreturn -- TODO confirm against bdk-csr.h.
 */

/**
 * Register (RSL) dtx_dfa_bcst_rsp
 *
 * DTX DFA Control Register
 */
union bdk_dtx_dfa_bcst_rsp
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dfa_bcst_rsp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dfa_bcst_rsp_s cn; */
};
typedef union bdk_dtx_dfa_bcst_rsp bdk_dtx_dfa_bcst_rsp_t;

#define BDK_DTX_DFA_BCST_RSP BDK_DTX_DFA_BCST_RSP_FUNC()
static inline uint64_t BDK_DTX_DFA_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DFA_BCST_RSP_FUNC(void)
{
    /* Register only exists on CN88XX parts. */
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
        return 0x87e0fea30080ll;
    __bdk_csr_fatal("DTX_DFA_BCST_RSP", 0, 0, 0, 0, 0);
}

#define typedef_BDK_DTX_DFA_BCST_RSP bdk_dtx_dfa_bcst_rsp_t
#define bustype_BDK_DTX_DFA_BCST_RSP BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DFA_BCST_RSP "DTX_DFA_BCST_RSP"
#define busnum_BDK_DTX_DFA_BCST_RSP 0
#define arguments_BDK_DTX_DFA_BCST_RSP -1,-1,-1,-1

/**
 * Register (RSL) dtx_dfa_ctl
 *
 * DTX DFA Control Register
 */
union bdk_dtx_dfa_ctl
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dfa_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_2_3 : 2;
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                                 For diagnostic use only. */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
#else /* Word 0 - Little Endian */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                                 For diagnostic use only. */
        uint64_t reserved_2_3 : 2;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dfa_ctl_s cn; */
};
typedef union bdk_dtx_dfa_ctl bdk_dtx_dfa_ctl_t;

#define BDK_DTX_DFA_CTL BDK_DTX_DFA_CTL_FUNC()
static inline uint64_t BDK_DTX_DFA_CTL_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DFA_CTL_FUNC(void)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
        return 0x87e0fea30060ll;
    __bdk_csr_fatal("DTX_DFA_CTL", 0, 0, 0, 0, 0);
}

#define typedef_BDK_DTX_DFA_CTL bdk_dtx_dfa_ctl_t
#define bustype_BDK_DTX_DFA_CTL BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DFA_CTL "DTX_DFA_CTL"
#define busnum_BDK_DTX_DFA_CTL 0
#define arguments_BDK_DTX_DFA_CTL -1,-1,-1,-1

/**
 * Register (RSL) dtx_dfa_dat#
 *
 * DTX DFA Raw Data Register
 */
union bdk_dtx_dfa_datx
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dfa_datx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t raw : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
#else /* Word 0 - Little Endian */
        uint64_t raw : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dfa_datx_s cn; */
};
typedef union bdk_dtx_dfa_datx bdk_dtx_dfa_datx_t;

static inline uint64_t BDK_DTX_DFA_DATX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DFA_DATX(unsigned long a)
{
    /* Valid instances on CN88XX: DTX_DFA_DAT(0..1). */
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
        return 0x87e0fea30040ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_DFA_DATX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_DFA_DATX(a) bdk_dtx_dfa_datx_t
#define bustype_BDK_DTX_DFA_DATX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DFA_DATX(a) "DTX_DFA_DATX"
#define busnum_BDK_DTX_DFA_DATX(a) (a)
#define arguments_BDK_DTX_DFA_DATX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_dfa_ena#
 *
 * DTX DFA Data Enable Register
 */
union bdk_dtx_dfa_enax
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dfa_enax_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t ena : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                                                 buses. Normally only one block will drive each bit. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                                                 buses. Normally only one block will drive each bit. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dfa_enax_s cn; */
};
typedef union bdk_dtx_dfa_enax bdk_dtx_dfa_enax_t;

static inline uint64_t BDK_DTX_DFA_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DFA_ENAX(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
        return 0x87e0fea30020ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_DFA_ENAX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_DFA_ENAX(a) bdk_dtx_dfa_enax_t
#define bustype_BDK_DTX_DFA_ENAX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DFA_ENAX(a) "DTX_DFA_ENAX"
#define busnum_BDK_DTX_DFA_ENAX(a) (a)
#define arguments_BDK_DTX_DFA_ENAX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_dfa_sel#
 *
 * DTX DFA Select Register
 */
union bdk_dtx_dfa_selx
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dfa_selx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63 : 40;
        uint64_t value : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
#else /* Word 0 - Little Endian */
        uint64_t value : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
        uint64_t reserved_24_63 : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dfa_selx_s cn; */
};
typedef union bdk_dtx_dfa_selx bdk_dtx_dfa_selx_t;

static inline uint64_t BDK_DTX_DFA_SELX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DFA_SELX(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
        return 0x87e0fea30000ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_DFA_SELX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_DFA_SELX(a) bdk_dtx_dfa_selx_t
#define bustype_BDK_DTX_DFA_SELX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DFA_SELX(a) "DTX_DFA_SELX"
#define busnum_BDK_DTX_DFA_SELX(a) (a)
#define arguments_BDK_DTX_DFA_SELX(a) (a),-1,-1,-1
+
/*
 * DTX CSR definitions for the DPI block. Present on CN83XX and CN9XXX
 * parts (same addresses on both -- the accessors check each model
 * separately because the generator emits one branch per model family).
 * Standard generated pattern: BCST_RSP, CTL, DAT(0..1), ENA(0..1) and
 * SEL(0..1) registers.
 *
 * NOTE(review): accessors fall through into __bdk_csr_fatal(), which is
 * presumably noreturn -- TODO confirm against bdk-csr.h.
 */

/**
 * Register (RSL) dtx_dpi_bcst_rsp
 *
 * DTX DPI Control Register
 */
union bdk_dtx_dpi_bcst_rsp
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dpi_bcst_rsp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dpi_bcst_rsp_s cn; */
};
typedef union bdk_dtx_dpi_bcst_rsp bdk_dtx_dpi_bcst_rsp_t;

#define BDK_DTX_DPI_BCST_RSP BDK_DTX_DPI_BCST_RSP_FUNC()
static inline uint64_t BDK_DTX_DPI_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DPI_BCST_RSP_FUNC(void)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
        return 0x87e0feb70080ll;
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
        return 0x87e0feb70080ll;
    __bdk_csr_fatal("DTX_DPI_BCST_RSP", 0, 0, 0, 0, 0);
}

#define typedef_BDK_DTX_DPI_BCST_RSP bdk_dtx_dpi_bcst_rsp_t
#define bustype_BDK_DTX_DPI_BCST_RSP BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DPI_BCST_RSP "DTX_DPI_BCST_RSP"
#define busnum_BDK_DTX_DPI_BCST_RSP 0
#define arguments_BDK_DTX_DPI_BCST_RSP -1,-1,-1,-1

/**
 * Register (RSL) dtx_dpi_ctl
 *
 * DTX DPI Control Register
 */
union bdk_dtx_dpi_ctl
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dpi_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_2_3 : 2;
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                                 For diagnostic use only. */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
#else /* Word 0 - Little Endian */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                                 For diagnostic use only. */
        uint64_t reserved_2_3 : 2;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dpi_ctl_s cn; */
};
typedef union bdk_dtx_dpi_ctl bdk_dtx_dpi_ctl_t;

#define BDK_DTX_DPI_CTL BDK_DTX_DPI_CTL_FUNC()
static inline uint64_t BDK_DTX_DPI_CTL_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DPI_CTL_FUNC(void)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
        return 0x87e0feb70060ll;
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
        return 0x87e0feb70060ll;
    __bdk_csr_fatal("DTX_DPI_CTL", 0, 0, 0, 0, 0);
}

#define typedef_BDK_DTX_DPI_CTL bdk_dtx_dpi_ctl_t
#define bustype_BDK_DTX_DPI_CTL BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DPI_CTL "DTX_DPI_CTL"
#define busnum_BDK_DTX_DPI_CTL 0
#define arguments_BDK_DTX_DPI_CTL -1,-1,-1,-1

/**
 * Register (RSL) dtx_dpi_dat#
 *
 * DTX DPI Raw Data Register
 */
union bdk_dtx_dpi_datx
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dpi_datx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t raw : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
#else /* Word 0 - Little Endian */
        uint64_t raw : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dpi_datx_s cn; */
};
typedef union bdk_dtx_dpi_datx bdk_dtx_dpi_datx_t;

static inline uint64_t BDK_DTX_DPI_DATX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DPI_DATX(unsigned long a)
{
    /* Valid instances: DTX_DPI_DAT(0..1) on CN83XX and CN9XXX. */
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
        return 0x87e0feb70040ll + 8ll * ((a) & 0x1);
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
        return 0x87e0feb70040ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_DPI_DATX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_DPI_DATX(a) bdk_dtx_dpi_datx_t
#define bustype_BDK_DTX_DPI_DATX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DPI_DATX(a) "DTX_DPI_DATX"
#define busnum_BDK_DTX_DPI_DATX(a) (a)
#define arguments_BDK_DTX_DPI_DATX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_dpi_ena#
 *
 * DTX DPI Data Enable Register
 */
union bdk_dtx_dpi_enax
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dpi_enax_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t ena : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                                                 buses. Normally only one block will drive each bit. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                                                 buses. Normally only one block will drive each bit. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dpi_enax_s cn; */
};
typedef union bdk_dtx_dpi_enax bdk_dtx_dpi_enax_t;

static inline uint64_t BDK_DTX_DPI_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DPI_ENAX(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
        return 0x87e0feb70020ll + 8ll * ((a) & 0x1);
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
        return 0x87e0feb70020ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_DPI_ENAX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_DPI_ENAX(a) bdk_dtx_dpi_enax_t
#define bustype_BDK_DTX_DPI_ENAX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DPI_ENAX(a) "DTX_DPI_ENAX"
#define busnum_BDK_DTX_DPI_ENAX(a) (a)
#define arguments_BDK_DTX_DPI_ENAX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_dpi_sel#
 *
 * DTX DPI Select Register
 */
union bdk_dtx_dpi_selx
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_dpi_selx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63 : 40;
        uint64_t value : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
#else /* Word 0 - Little Endian */
        uint64_t value : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
        uint64_t reserved_24_63 : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_dpi_selx_s cn; */
};
typedef union bdk_dtx_dpi_selx bdk_dtx_dpi_selx_t;

static inline uint64_t BDK_DTX_DPI_SELX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_DPI_SELX(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
        return 0x87e0feb70000ll + 8ll * ((a) & 0x1);
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
        return 0x87e0feb70000ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_DPI_SELX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_DPI_SELX(a) bdk_dtx_dpi_selx_t
#define bustype_BDK_DTX_DPI_SELX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_DPI_SELX(a) "DTX_DPI_SELX"
#define busnum_BDK_DTX_DPI_SELX(a) (a)
#define arguments_BDK_DTX_DPI_SELX(a) (a),-1,-1,-1
+
/*
 * DTX CSR definitions for the FPA block (CN83XX only -- see the model
 * checks in the accessors below). Standard generated pattern: BCST_RSP,
 * CTL, DAT(0..1), ENA(0..1) and SEL(0..1) registers.
 *
 * NOTE(review): accessors fall through into __bdk_csr_fatal(), which is
 * presumably noreturn -- TODO confirm against bdk-csr.h.
 */

/**
 * Register (RSL) dtx_fpa_bcst_rsp
 *
 * DTX FPA Control Register
 */
union bdk_dtx_fpa_bcst_rsp
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_fpa_bcst_rsp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_fpa_bcst_rsp_s cn; */
};
typedef union bdk_dtx_fpa_bcst_rsp bdk_dtx_fpa_bcst_rsp_t;

#define BDK_DTX_FPA_BCST_RSP BDK_DTX_FPA_BCST_RSP_FUNC()
static inline uint64_t BDK_DTX_FPA_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_FPA_BCST_RSP_FUNC(void)
{
    /* Register only exists on CN83XX parts. */
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
        return 0x87e0fe940080ll;
    __bdk_csr_fatal("DTX_FPA_BCST_RSP", 0, 0, 0, 0, 0);
}

#define typedef_BDK_DTX_FPA_BCST_RSP bdk_dtx_fpa_bcst_rsp_t
#define bustype_BDK_DTX_FPA_BCST_RSP BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_FPA_BCST_RSP "DTX_FPA_BCST_RSP"
#define busnum_BDK_DTX_FPA_BCST_RSP 0
#define arguments_BDK_DTX_FPA_BCST_RSP -1,-1,-1,-1

/**
 * Register (RSL) dtx_fpa_ctl
 *
 * DTX FPA Control Register
 */
union bdk_dtx_fpa_ctl
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_fpa_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_2_3 : 2;
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                                 For diagnostic use only. */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
#else /* Word 0 - Little Endian */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                                 For diagnostic use only. */
        uint64_t reserved_2_3 : 2;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_fpa_ctl_s cn; */
};
typedef union bdk_dtx_fpa_ctl bdk_dtx_fpa_ctl_t;

#define BDK_DTX_FPA_CTL BDK_DTX_FPA_CTL_FUNC()
static inline uint64_t BDK_DTX_FPA_CTL_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_FPA_CTL_FUNC(void)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
        return 0x87e0fe940060ll;
    __bdk_csr_fatal("DTX_FPA_CTL", 0, 0, 0, 0, 0);
}

#define typedef_BDK_DTX_FPA_CTL bdk_dtx_fpa_ctl_t
#define bustype_BDK_DTX_FPA_CTL BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_FPA_CTL "DTX_FPA_CTL"
#define busnum_BDK_DTX_FPA_CTL 0
#define arguments_BDK_DTX_FPA_CTL -1,-1,-1,-1

/**
 * Register (RSL) dtx_fpa_dat#
 *
 * DTX FPA Raw Data Register
 */
union bdk_dtx_fpa_datx
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_fpa_datx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t raw : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
#else /* Word 0 - Little Endian */
        uint64_t raw : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_fpa_datx_s cn; */
};
typedef union bdk_dtx_fpa_datx bdk_dtx_fpa_datx_t;

static inline uint64_t BDK_DTX_FPA_DATX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_FPA_DATX(unsigned long a)
{
    /* Valid instances on CN83XX: DTX_FPA_DAT(0..1). */
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
        return 0x87e0fe940040ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_FPA_DATX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_FPA_DATX(a) bdk_dtx_fpa_datx_t
#define bustype_BDK_DTX_FPA_DATX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_FPA_DATX(a) "DTX_FPA_DATX"
#define busnum_BDK_DTX_FPA_DATX(a) (a)
#define arguments_BDK_DTX_FPA_DATX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_fpa_ena#
 *
 * DTX FPA Data Enable Register
 */
union bdk_dtx_fpa_enax
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_fpa_enax_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t ena : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                                                 buses. Normally only one block will drive each bit. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                                                 buses. Normally only one block will drive each bit. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_fpa_enax_s cn; */
};
typedef union bdk_dtx_fpa_enax bdk_dtx_fpa_enax_t;

static inline uint64_t BDK_DTX_FPA_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_FPA_ENAX(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
        return 0x87e0fe940020ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_FPA_ENAX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_FPA_ENAX(a) bdk_dtx_fpa_enax_t
#define bustype_BDK_DTX_FPA_ENAX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_FPA_ENAX(a) "DTX_FPA_ENAX"
#define busnum_BDK_DTX_FPA_ENAX(a) (a)
#define arguments_BDK_DTX_FPA_ENAX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_fpa_sel#
 *
 * DTX FPA Select Register
 */
union bdk_dtx_fpa_selx
{
    uint64_t u;                 /* Raw 64-bit view of the register. */
    struct bdk_dtx_fpa_selx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63 : 40;
        uint64_t value : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
#else /* Word 0 - Little Endian */
        uint64_t value : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
        uint64_t reserved_24_63 : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_fpa_selx_s cn; */
};
typedef union bdk_dtx_fpa_selx bdk_dtx_fpa_selx_t;

static inline uint64_t BDK_DTX_FPA_SELX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_FPA_SELX(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
        return 0x87e0fe940000ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_FPA_SELX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_FPA_SELX(a) bdk_dtx_fpa_selx_t
#define bustype_BDK_DTX_FPA_SELX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_FPA_SELX(a) "DTX_FPA_SELX"
#define busnum_BDK_DTX_FPA_SELX(a) (a)
#define arguments_BDK_DTX_FPA_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gic_bcst_rsp
+ *
+ * DTX GIC Control Register
+ */
+union bdk_dtx_gic_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_gic_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_gic_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_gic_bcst_rsp bdk_dtx_gic_bcst_rsp_t;
+
+#define BDK_DTX_GIC_BCST_RSP BDK_DTX_GIC_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_GIC_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GIC_BCST_RSP_FUNC(void)
+{
+ return 0x87e0fe808080ll;
+}
+
+#define typedef_BDK_DTX_GIC_BCST_RSP bdk_dtx_gic_bcst_rsp_t
+#define bustype_BDK_DTX_GIC_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GIC_BCST_RSP "DTX_GIC_BCST_RSP"
+#define busnum_BDK_DTX_GIC_BCST_RSP 0
+#define arguments_BDK_DTX_GIC_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gic_ctl
+ *
+ * DTX GIC Control Register
+ */
+union bdk_dtx_gic_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_gic_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_gic_ctl_s cn; */
+};
+typedef union bdk_dtx_gic_ctl bdk_dtx_gic_ctl_t;
+
+#define BDK_DTX_GIC_CTL BDK_DTX_GIC_CTL_FUNC()
+static inline uint64_t BDK_DTX_GIC_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GIC_CTL_FUNC(void)
+{
+ return 0x87e0fe808060ll;
+}
+
+#define typedef_BDK_DTX_GIC_CTL bdk_dtx_gic_ctl_t
+#define bustype_BDK_DTX_GIC_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GIC_CTL "DTX_GIC_CTL"
+#define busnum_BDK_DTX_GIC_CTL 0
+#define arguments_BDK_DTX_GIC_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gic_dat#
+ *
+ * DTX GIC Raw Data Register
+ */
+union bdk_dtx_gic_datx
+{
+    uint64_t u;
+    struct bdk_dtx_gic_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gic_datx_s cn; */
+};
+typedef union bdk_dtx_gic_datx bdk_dtx_gic_datx_t;
+
+/* Address helper: DTX_GIC_DAT(a) for a in 0..1, stride 8 bytes; any other
+ * index faults via __bdk_csr_fatal(). The "& 0x1" mask is a generator
+ * artifact -- the range check above it is authoritative. */
+static inline uint64_t BDK_DTX_GIC_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GIC_DATX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe808040ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_GIC_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GIC_DATX(a) bdk_dtx_gic_datx_t
+#define bustype_BDK_DTX_GIC_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GIC_DATX(a) "DTX_GIC_DATX"
+#define busnum_BDK_DTX_GIC_DATX(a) (a)
+#define arguments_BDK_DTX_GIC_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gic_ena#
+ *
+ * DTX GIC Data Enable Register
+ */
+union bdk_dtx_gic_enax
+{
+    uint64_t u;
+    struct bdk_dtx_gic_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gic_enax_s cn; */
+};
+typedef union bdk_dtx_gic_enax bdk_dtx_gic_enax_t;
+
+/* Address helper: DTX_GIC_ENA(a) for a in 0..1, stride 8 bytes;
+ * out-of-range indices fault via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GIC_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GIC_ENAX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe808020ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_GIC_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GIC_ENAX(a) bdk_dtx_gic_enax_t
+#define bustype_BDK_DTX_GIC_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GIC_ENAX(a) "DTX_GIC_ENAX"
+#define busnum_BDK_DTX_GIC_ENAX(a) (a)
+#define arguments_BDK_DTX_GIC_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gic_sel#
+ *
+ * DTX GIC Select Register
+ */
+union bdk_dtx_gic_selx
+{
+    uint64_t u;
+    struct bdk_dtx_gic_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gic_selx_s cn; */
+};
+typedef union bdk_dtx_gic_selx bdk_dtx_gic_selx_t;
+
+/* Address helper: DTX_GIC_SEL(a) for a in 0..1, stride 8 bytes;
+ * out-of-range indices fault via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GIC_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GIC_SELX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe808000ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_GIC_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GIC_SELX(a) bdk_dtx_gic_selx_t
+#define bustype_BDK_DTX_GIC_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GIC_SELX(a) "DTX_GIC_SELX"
+#define busnum_BDK_DTX_GIC_SELX(a) (a)
+#define arguments_BDK_DTX_GIC_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gser#_bcst_rsp
+ *
+ * DTX GSER Control Register
+ */
+union bdk_dtx_gserx_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_gserx_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gserx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_gserx_bcst_rsp bdk_dtx_gserx_bcst_rsp_t;
+
+/* Address helper: DTX_GSER(a)_BCST_RSP, stride 0x8000 per GSER instance.
+ * Valid index range is model-specific (CN81XX: 0..3, CN83XX: 0..6,
+ * CN88XX: 0..13); anything else faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GSERX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GSERX_BCST_RSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x87e0fe480080ll + 0x8000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+        return 0x87e0fe480080ll + 0x8000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+        return 0x87e0fe480080ll + 0x8000ll * ((a) & 0xf);
+    __bdk_csr_fatal("DTX_GSERX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GSERX_BCST_RSP(a) bdk_dtx_gserx_bcst_rsp_t
+#define bustype_BDK_DTX_GSERX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GSERX_BCST_RSP(a) "DTX_GSERX_BCST_RSP"
+#define busnum_BDK_DTX_GSERX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_GSERX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gser#_ctl
+ *
+ * DTX GSER Control Register
+ */
+union bdk_dtx_gserx_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_gserx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gserx_ctl_s cn; */
+};
+typedef union bdk_dtx_gserx_ctl bdk_dtx_gserx_ctl_t;
+
+/* Address helper: DTX_GSER(a)_CTL, stride 0x8000 per GSER instance.
+ * Valid index range is model-specific (CN81XX: 0..3, CN83XX: 0..6,
+ * CN88XX: 0..13); anything else faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GSERX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GSERX_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x87e0fe480060ll + 0x8000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+        return 0x87e0fe480060ll + 0x8000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+        return 0x87e0fe480060ll + 0x8000ll * ((a) & 0xf);
+    __bdk_csr_fatal("DTX_GSERX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GSERX_CTL(a) bdk_dtx_gserx_ctl_t
+#define bustype_BDK_DTX_GSERX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GSERX_CTL(a) "DTX_GSERX_CTL"
+#define busnum_BDK_DTX_GSERX_CTL(a) (a)
+#define arguments_BDK_DTX_GSERX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gser#_dat#
+ *
+ * DTX GSER Raw Data Register
+ */
+union bdk_dtx_gserx_datx
+{
+    uint64_t u;
+    struct bdk_dtx_gserx_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gserx_datx_s cn; */
+};
+typedef union bdk_dtx_gserx_datx bdk_dtx_gserx_datx_t;
+
+/* Address helper: DTX_GSER(a)_DAT(b); 0x8000 stride per GSER instance,
+ * 8-byte stride per data word (b in 0..1). GSER index range is
+ * model-specific; invalid (model, a, b) faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GSERX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GSERX_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe480040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+        return 0x87e0fe480040ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+        return 0x87e0fe480040ll + 0x8000ll * ((a) & 0xf) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_GSERX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_GSERX_DATX(a,b) bdk_dtx_gserx_datx_t
+#define bustype_BDK_DTX_GSERX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GSERX_DATX(a,b) "DTX_GSERX_DATX"
+#define busnum_BDK_DTX_GSERX_DATX(a,b) (a)
+#define arguments_BDK_DTX_GSERX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_gser#_ena#
+ *
+ * DTX GSER Data Enable Register
+ */
+union bdk_dtx_gserx_enax
+{
+    uint64_t u;
+    struct bdk_dtx_gserx_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gserx_enax_s cn; */
+};
+typedef union bdk_dtx_gserx_enax bdk_dtx_gserx_enax_t;
+
+/* Address helper: DTX_GSER(a)_ENA(b); 0x8000 stride per GSER instance,
+ * 8-byte stride per enable word (b in 0..1). GSER index range is
+ * model-specific; invalid (model, a, b) faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GSERX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GSERX_ENAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe480020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+        return 0x87e0fe480020ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+        return 0x87e0fe480020ll + 0x8000ll * ((a) & 0xf) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_GSERX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_GSERX_ENAX(a,b) bdk_dtx_gserx_enax_t
+#define bustype_BDK_DTX_GSERX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GSERX_ENAX(a,b) "DTX_GSERX_ENAX"
+#define busnum_BDK_DTX_GSERX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_GSERX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_gser#_sel#
+ *
+ * DTX GSER Select Register
+ */
+union bdk_dtx_gserx_selx
+{
+    uint64_t u;
+    struct bdk_dtx_gserx_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gserx_selx_s cn; */
+};
+typedef union bdk_dtx_gserx_selx bdk_dtx_gserx_selx_t;
+
+/* Address helper: DTX_GSER(a)_SEL(b); 0x8000 stride per GSER instance,
+ * 8-byte stride per select word (b in 0..1). GSER index range is
+ * model-specific; invalid (model, a, b) faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GSERX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GSERX_SELX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe480000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+        return 0x87e0fe480000ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+        return 0x87e0fe480000ll + 0x8000ll * ((a) & 0xf) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_GSERX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_GSERX_SELX(a,b) bdk_dtx_gserx_selx_t
+#define bustype_BDK_DTX_GSERX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GSERX_SELX(a,b) "DTX_GSERX_SELX"
+#define busnum_BDK_DTX_GSERX_SELX(a,b) (a)
+#define arguments_BDK_DTX_GSERX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_gti_bcst_rsp
+ *
+ * DTX GTI Control Register
+ */
+union bdk_dtx_gti_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_gti_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gti_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_gti_bcst_rsp bdk_dtx_gti_bcst_rsp_t;
+
+/* Address helper: DTX_GTI_BCST_RSP exists only on CN9XXX parts; on any
+ * other model the call faults via __bdk_csr_fatal(). */
+#define BDK_DTX_GTI_BCST_RSP BDK_DTX_GTI_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_GTI_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GTI_BCST_RSP_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x87e0fea20080ll;
+    __bdk_csr_fatal("DTX_GTI_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GTI_BCST_RSP bdk_dtx_gti_bcst_rsp_t
+#define bustype_BDK_DTX_GTI_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GTI_BCST_RSP "DTX_GTI_BCST_RSP"
+#define busnum_BDK_DTX_GTI_BCST_RSP 0
+#define arguments_BDK_DTX_GTI_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gti_ctl
+ *
+ * DTX GTI Control Register
+ */
+union bdk_dtx_gti_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_gti_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gti_ctl_s cn; */
+};
+typedef union bdk_dtx_gti_ctl bdk_dtx_gti_ctl_t;
+
+/* Address helper: DTX_GTI_CTL exists only on CN9XXX parts; on any other
+ * model the call faults via __bdk_csr_fatal(). */
+#define BDK_DTX_GTI_CTL BDK_DTX_GTI_CTL_FUNC()
+static inline uint64_t BDK_DTX_GTI_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GTI_CTL_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x87e0fea20060ll;
+    __bdk_csr_fatal("DTX_GTI_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GTI_CTL bdk_dtx_gti_ctl_t
+#define bustype_BDK_DTX_GTI_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GTI_CTL "DTX_GTI_CTL"
+#define busnum_BDK_DTX_GTI_CTL 0
+#define arguments_BDK_DTX_GTI_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gti_dat#
+ *
+ * DTX GTI Raw Data Register
+ */
+union bdk_dtx_gti_datx
+{
+    uint64_t u;
+    struct bdk_dtx_gti_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gti_datx_s cn; */
+};
+typedef union bdk_dtx_gti_datx bdk_dtx_gti_datx_t;
+
+/* Address helper: DTX_GTI_DAT(a), CN9XXX only, a in 0..1, stride 8 bytes;
+ * otherwise faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GTI_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GTI_DATX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fea20040ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_GTI_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GTI_DATX(a) bdk_dtx_gti_datx_t
+#define bustype_BDK_DTX_GTI_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GTI_DATX(a) "DTX_GTI_DATX"
+#define busnum_BDK_DTX_GTI_DATX(a) (a)
+#define arguments_BDK_DTX_GTI_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gti_ena#
+ *
+ * DTX GTI Data Enable Register
+ */
+union bdk_dtx_gti_enax
+{
+    uint64_t u;
+    struct bdk_dtx_gti_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gti_enax_s cn; */
+};
+typedef union bdk_dtx_gti_enax bdk_dtx_gti_enax_t;
+
+/* Address helper: DTX_GTI_ENA(a), CN9XXX only, a in 0..1, stride 8 bytes;
+ * otherwise faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GTI_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GTI_ENAX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fea20020ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_GTI_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GTI_ENAX(a) bdk_dtx_gti_enax_t
+#define bustype_BDK_DTX_GTI_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GTI_ENAX(a) "DTX_GTI_ENAX"
+#define busnum_BDK_DTX_GTI_ENAX(a) (a)
+#define arguments_BDK_DTX_GTI_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_gti_sel#
+ *
+ * DTX GTI Select Register
+ */
+union bdk_dtx_gti_selx
+{
+    uint64_t u;
+    struct bdk_dtx_gti_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23:  0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_gti_selx_s cn; */
+};
+typedef union bdk_dtx_gti_selx bdk_dtx_gti_selx_t;
+
+/* Address helper: DTX_GTI_SEL(a), CN9XXX only, a in 0..1, stride 8 bytes;
+ * otherwise faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_GTI_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_GTI_SELX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fea20000ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_GTI_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_GTI_SELX(a) bdk_dtx_gti_selx_t
+#define bustype_BDK_DTX_GTI_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_GTI_SELX(a) "DTX_GTI_SELX"
+#define busnum_BDK_DTX_GTI_SELX(a) (a)
+#define arguments_BDK_DTX_GTI_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_bcst_rsp
+ *
+ * DTX IOBN Control Register
+ */
+union bdk_dtx_iobnx_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_iobnx_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_iobnx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_iobnx_bcst_rsp bdk_dtx_iobnx_bcst_rsp_t;
+
+/* Address helper: DTX_IOBN(a)_BCST_RSP, stride 0x8000 per IOBN.
+ * CN81XX has one IOBN (a==0); CN83XX/CN88XX/CN9XXX have two (a<=1).
+ * Invalid combinations fault via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_IOBNX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_BCST_RSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return 0x87e0fe780080ll + 0x8000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x87e0fe780080ll + 0x8000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0fe780080ll + 0x8000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe780080ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_IOBNX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_BCST_RSP(a) bdk_dtx_iobnx_bcst_rsp_t
+#define bustype_BDK_DTX_IOBNX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_BCST_RSP(a) "DTX_IOBNX_BCST_RSP"
+#define busnum_BDK_DTX_IOBNX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_IOBNX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_ctl
+ *
+ * DTX IOBN Control Register
+ */
+union bdk_dtx_iobnx_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_iobnx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_iobnx_ctl_s cn; */
+};
+typedef union bdk_dtx_iobnx_ctl bdk_dtx_iobnx_ctl_t;
+
+/* Address helper: DTX_IOBN(a)_CTL, stride 0x8000 per IOBN.
+ * CN81XX has one IOBN (a==0); other supported models have two (a<=1).
+ * Invalid combinations fault via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_IOBNX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return 0x87e0fe780060ll + 0x8000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x87e0fe780060ll + 0x8000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0fe780060ll + 0x8000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe780060ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_IOBNX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_CTL(a) bdk_dtx_iobnx_ctl_t
+#define bustype_BDK_DTX_IOBNX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_CTL(a) "DTX_IOBNX_CTL"
+#define busnum_BDK_DTX_IOBNX_CTL(a) (a)
+#define arguments_BDK_DTX_IOBNX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_dat#
+ *
+ * DTX IOBN Raw Data Register
+ */
+union bdk_dtx_iobnx_datx
+{
+    uint64_t u;
+    struct bdk_dtx_iobnx_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_iobnx_datx_s cn; */
+};
+typedef union bdk_dtx_iobnx_datx bdk_dtx_iobnx_datx_t;
+
+/* Address helper: DTX_IOBN(a)_DAT(b); 0x8000 stride per IOBN, 8-byte
+ * stride per data word (b in 0..1). IOBN count is model-specific;
+ * invalid (model, a, b) faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_IOBNX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+        return 0x87e0fe780040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe780040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe780040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe780040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_IOBNX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_DATX(a,b) bdk_dtx_iobnx_datx_t
+#define bustype_BDK_DTX_IOBNX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_DATX(a,b) "DTX_IOBNX_DATX"
+#define busnum_BDK_DTX_IOBNX_DATX(a,b) (a)
+#define arguments_BDK_DTX_IOBNX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_ena#
+ *
+ * DTX IOBN Data Enable Register
+ */
+union bdk_dtx_iobnx_enax
+{
+    uint64_t u;
+    struct bdk_dtx_iobnx_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35:  0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_iobnx_enax_s cn; */
+};
+typedef union bdk_dtx_iobnx_enax bdk_dtx_iobnx_enax_t;
+
+/* Address helper: DTX_IOBN(a)_ENA(b); 0x8000 stride per IOBN, 8-byte
+ * stride per enable word (b in 0..1). IOBN count is model-specific;
+ * invalid (model, a, b) faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_IOBNX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_ENAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+        return 0x87e0fe780020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe780020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe780020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe780020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_IOBNX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_ENAX(a,b) bdk_dtx_iobnx_enax_t
+#define bustype_BDK_DTX_IOBNX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_ENAX(a,b) "DTX_IOBNX_ENAX"
+#define busnum_BDK_DTX_IOBNX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_IOBNX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_inrx_bcst_rsp
+ *
+ * DTX IOBN_INRX Control Register
+ */
+union bdk_dtx_iobnx_inrx_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_iobnx_inrx_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_iobnx_inrx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_iobnx_inrx_bcst_rsp bdk_dtx_iobnx_inrx_bcst_rsp_t;
+
+/* Address helper: DTX_IOBN(a)_INRX_BCST_RSP, CN9XXX only, a in 0..1,
+ * stride 0x8000 per IOBN; otherwise faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_IOBNX_INRX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_INRX_BCST_RSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe7a0080ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_IOBNX_INRX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_INRX_BCST_RSP(a) bdk_dtx_iobnx_inrx_bcst_rsp_t
+#define bustype_BDK_DTX_IOBNX_INRX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_INRX_BCST_RSP(a) "DTX_IOBNX_INRX_BCST_RSP"
+#define busnum_BDK_DTX_IOBNX_INRX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_IOBNX_INRX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_inrx_ctl
+ *
+ * DTX IOBN_INRX Control Register
+ */
+union bdk_dtx_iobnx_inrx_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_iobnx_inrx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_iobnx_inrx_ctl_s cn; */
+};
+typedef union bdk_dtx_iobnx_inrx_ctl bdk_dtx_iobnx_inrx_ctl_t;
+
+/* Address helper: DTX_IOBN(a)_INRX_CTL, CN9XXX only, a in 0..1,
+ * stride 0x8000 per IOBN; otherwise faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_IOBNX_INRX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_INRX_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe7a0060ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_IOBNX_INRX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_INRX_CTL(a) bdk_dtx_iobnx_inrx_ctl_t
+#define bustype_BDK_DTX_IOBNX_INRX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_INRX_CTL(a) "DTX_IOBNX_INRX_CTL"
+#define busnum_BDK_DTX_IOBNX_INRX_CTL(a) (a)
+#define arguments_BDK_DTX_IOBNX_INRX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_inrx_dat#
+ *
+ * DTX IOBN_INRX Raw Data Register
+ */
+union bdk_dtx_iobnx_inrx_datx
+{
+    uint64_t u;
+    struct bdk_dtx_iobnx_inrx_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35:  0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_iobnx_inrx_datx_s cn; */
+};
+typedef union bdk_dtx_iobnx_inrx_datx bdk_dtx_iobnx_inrx_datx_t;
+
+/* Address helper: DTX_IOBN(a)_INRX_DAT(b), CN9XXX only; 0x8000 stride per
+ * IOBN (a in 0..1), 8-byte stride per data word (b in 0..1); otherwise
+ * faults via __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_IOBNX_INRX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_INRX_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe7a0040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_IOBNX_INRX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_INRX_DATX(a,b) bdk_dtx_iobnx_inrx_datx_t
+#define bustype_BDK_DTX_IOBNX_INRX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_INRX_DATX(a,b) "DTX_IOBNX_INRX_DATX"
+#define busnum_BDK_DTX_IOBNX_INRX_DATX(a,b) (a)
+#define arguments_BDK_DTX_IOBNX_INRX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_inrx_ena#
+ *
+ * DTX IOBN_INRX Data Enable Register
+ */
+union bdk_dtx_iobnx_inrx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_iobnx_inrx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_iobnx_inrx_enax_s cn; */
+};
+typedef union bdk_dtx_iobnx_inrx_enax bdk_dtx_iobnx_inrx_enax_t;
+
+static inline uint64_t BDK_DTX_IOBNX_INRX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_INRX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe7a0020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_IOBNX_INRX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_INRX_ENAX(a,b) bdk_dtx_iobnx_inrx_enax_t
+#define bustype_BDK_DTX_IOBNX_INRX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_INRX_ENAX(a,b) "DTX_IOBNX_INRX_ENAX"
+#define busnum_BDK_DTX_IOBNX_INRX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_IOBNX_INRX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_inrx_sel#
+ *
+ * DTX IOBN_INRX Select Register
+ */
+union bdk_dtx_iobnx_inrx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_iobnx_inrx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_iobnx_inrx_selx_s cn; */
+};
+typedef union bdk_dtx_iobnx_inrx_selx bdk_dtx_iobnx_inrx_selx_t;
+
+static inline uint64_t BDK_DTX_IOBNX_INRX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_INRX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe7a0000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_IOBNX_INRX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_INRX_SELX(a,b) bdk_dtx_iobnx_inrx_selx_t
+#define bustype_BDK_DTX_IOBNX_INRX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_INRX_SELX(a,b) "DTX_IOBNX_INRX_SELX"
+#define busnum_BDK_DTX_IOBNX_INRX_SELX(a,b) (a)
+#define arguments_BDK_DTX_IOBNX_INRX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_iobn#_sel#
+ *
+ * DTX IOBN Select Register
+ */
+union bdk_dtx_iobnx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_iobnx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_iobnx_selx_s cn; */
+};
+typedef union bdk_dtx_iobnx_selx bdk_dtx_iobnx_selx_t;
+
+/* Per-model address decode: same base/strides on all four models; only the
+ * valid index range and wrap mask differ. On CN81XX there is a single IOBN,
+ * so "(a) & 0x0" deliberately forces the instance offset to zero. */
+static inline uint64_t BDK_DTX_IOBNX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_IOBNX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe780000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe780000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe780000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe780000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_IOBNX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_IOBNX_SELX(a,b) bdk_dtx_iobnx_selx_t
+#define bustype_BDK_DTX_IOBNX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_IOBNX_SELX(a,b) "DTX_IOBNX_SELX"
+#define busnum_BDK_DTX_IOBNX_SELX(a,b) (a)
+#define arguments_BDK_DTX_IOBNX_SELX(a,b) (a),(b),-1,-1
+
+/* DTX KEY debug-trace register set (CN8XXX only, single instance, no index
+ * stride beyond the 8-byte DAT/ENA/SEL register index). */
+/**
+ * Register (RSL) dtx_key_bcst_rsp
+ *
+ * DTX KEY Control Register
+ */
+union bdk_dtx_key_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_key_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_key_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_key_bcst_rsp bdk_dtx_key_bcst_rsp_t;
+
+#define BDK_DTX_KEY_BCST_RSP BDK_DTX_KEY_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_KEY_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_KEY_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0fe208080ll;
+ __bdk_csr_fatal("DTX_KEY_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_KEY_BCST_RSP bdk_dtx_key_bcst_rsp_t
+#define bustype_BDK_DTX_KEY_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_KEY_BCST_RSP "DTX_KEY_BCST_RSP"
+#define busnum_BDK_DTX_KEY_BCST_RSP 0
+#define arguments_BDK_DTX_KEY_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_key_ctl
+ *
+ * DTX KEY Control Register
+ */
+union bdk_dtx_key_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_key_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_key_ctl_s cn; */
+};
+typedef union bdk_dtx_key_ctl bdk_dtx_key_ctl_t;
+
+#define BDK_DTX_KEY_CTL BDK_DTX_KEY_CTL_FUNC()
+static inline uint64_t BDK_DTX_KEY_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_KEY_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0fe208060ll;
+ __bdk_csr_fatal("DTX_KEY_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_KEY_CTL bdk_dtx_key_ctl_t
+#define bustype_BDK_DTX_KEY_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_KEY_CTL "DTX_KEY_CTL"
+#define busnum_BDK_DTX_KEY_CTL 0
+#define arguments_BDK_DTX_KEY_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_key_dat#
+ *
+ * DTX KEY Raw Data Register
+ */
+union bdk_dtx_key_datx
+{
+ uint64_t u;
+ struct bdk_dtx_key_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_key_datx_s cn; */
+};
+typedef union bdk_dtx_key_datx bdk_dtx_key_datx_t;
+
+static inline uint64_t BDK_DTX_KEY_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_KEY_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=1))
+ return 0x87e0fe208040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_KEY_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_KEY_DATX(a) bdk_dtx_key_datx_t
+#define bustype_BDK_DTX_KEY_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_KEY_DATX(a) "DTX_KEY_DATX"
+#define busnum_BDK_DTX_KEY_DATX(a) (a)
+#define arguments_BDK_DTX_KEY_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_key_ena#
+ *
+ * DTX KEY Data Enable Register
+ */
+union bdk_dtx_key_enax
+{
+ uint64_t u;
+ struct bdk_dtx_key_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_key_enax_s cn; */
+};
+typedef union bdk_dtx_key_enax bdk_dtx_key_enax_t;
+
+static inline uint64_t BDK_DTX_KEY_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_KEY_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=1))
+ return 0x87e0fe208020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_KEY_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_KEY_ENAX(a) bdk_dtx_key_enax_t
+#define bustype_BDK_DTX_KEY_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_KEY_ENAX(a) "DTX_KEY_ENAX"
+#define busnum_BDK_DTX_KEY_ENAX(a) (a)
+#define arguments_BDK_DTX_KEY_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_key_sel#
+ *
+ * DTX KEY Select Register
+ */
+union bdk_dtx_key_selx
+{
+ uint64_t u;
+ struct bdk_dtx_key_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_key_selx_s cn; */
+};
+typedef union bdk_dtx_key_selx bdk_dtx_key_selx_t;
+
+static inline uint64_t BDK_DTX_KEY_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_KEY_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=1))
+ return 0x87e0fe208000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_KEY_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_KEY_SELX(a) bdk_dtx_key_selx_t
+#define bustype_BDK_DTX_KEY_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_KEY_SELX(a) "DTX_KEY_SELX"
+#define busnum_BDK_DTX_KEY_SELX(a) (a)
+#define arguments_BDK_DTX_KEY_SELX(a) (a),-1,-1,-1
+
+/* DTX L2C_CBC debug-trace register set. Address layout: 0x8000-byte stride
+ * per CBC instance [a], 8 bytes per DAT/ENA/SEL index [b]. Valid instance
+ * counts per model: CN81XX 1, CN83XX 2, CN88XX 4; CN81XX uses "(a) & 0x0"
+ * to force the single-instance offset term to zero. */
+/**
+ * Register (RSL) dtx_l2c_cbc#_bcst_rsp
+ *
+ * DTX L2C_CBC Control Register
+ */
+union bdk_dtx_l2c_cbcx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_cbcx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_cbcx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_l2c_cbcx_bcst_rsp bdk_dtx_l2c_cbcx_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_L2C_CBCX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_CBCX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe2c0080ll + 0x8000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe2c0080ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0fe2c0080ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_L2C_CBCX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_CBCX_BCST_RSP(a) bdk_dtx_l2c_cbcx_bcst_rsp_t
+#define bustype_BDK_DTX_L2C_CBCX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_CBCX_BCST_RSP(a) "DTX_L2C_CBCX_BCST_RSP"
+#define busnum_BDK_DTX_L2C_CBCX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_L2C_CBCX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_cbc#_ctl
+ *
+ * DTX L2C_CBC Control Register
+ */
+union bdk_dtx_l2c_cbcx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_cbcx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_cbcx_ctl_s cn; */
+};
+typedef union bdk_dtx_l2c_cbcx_ctl bdk_dtx_l2c_cbcx_ctl_t;
+
+static inline uint64_t BDK_DTX_L2C_CBCX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_CBCX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe2c0060ll + 0x8000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe2c0060ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0fe2c0060ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_L2C_CBCX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_CBCX_CTL(a) bdk_dtx_l2c_cbcx_ctl_t
+#define bustype_BDK_DTX_L2C_CBCX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_CBCX_CTL(a) "DTX_L2C_CBCX_CTL"
+#define busnum_BDK_DTX_L2C_CBCX_CTL(a) (a)
+#define arguments_BDK_DTX_L2C_CBCX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_cbc#_dat#
+ *
+ * DTX L2C_CBC Raw Data Register
+ */
+union bdk_dtx_l2c_cbcx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_cbcx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_cbcx_datx_s cn; */
+};
+typedef union bdk_dtx_l2c_cbcx_datx bdk_dtx_l2c_cbcx_datx_t;
+
+static inline uint64_t BDK_DTX_L2C_CBCX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_CBCX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe2c0040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe2c0040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe2c0040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_CBCX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_CBCX_DATX(a,b) bdk_dtx_l2c_cbcx_datx_t
+#define bustype_BDK_DTX_L2C_CBCX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_CBCX_DATX(a,b) "DTX_L2C_CBCX_DATX"
+#define busnum_BDK_DTX_L2C_CBCX_DATX(a,b) (a)
+#define arguments_BDK_DTX_L2C_CBCX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_cbc#_ena#
+ *
+ * DTX L2C_CBC Data Enable Register
+ */
+union bdk_dtx_l2c_cbcx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_cbcx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_cbcx_enax_s cn; */
+};
+typedef union bdk_dtx_l2c_cbcx_enax bdk_dtx_l2c_cbcx_enax_t;
+
+static inline uint64_t BDK_DTX_L2C_CBCX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_CBCX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe2c0020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe2c0020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe2c0020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_CBCX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_CBCX_ENAX(a,b) bdk_dtx_l2c_cbcx_enax_t
+#define bustype_BDK_DTX_L2C_CBCX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_CBCX_ENAX(a,b) "DTX_L2C_CBCX_ENAX"
+#define busnum_BDK_DTX_L2C_CBCX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_L2C_CBCX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_cbc#_sel#
+ *
+ * DTX L2C_CBC Select Register
+ */
+union bdk_dtx_l2c_cbcx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_cbcx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_cbcx_selx_s cn; */
+};
+typedef union bdk_dtx_l2c_cbcx_selx bdk_dtx_l2c_cbcx_selx_t;
+
+static inline uint64_t BDK_DTX_L2C_CBCX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_CBCX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe2c0000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe2c0000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe2c0000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_CBCX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_CBCX_SELX(a,b) bdk_dtx_l2c_cbcx_selx_t
+#define bustype_BDK_DTX_L2C_CBCX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_CBCX_SELX(a,b) "DTX_L2C_CBCX_SELX"
+#define busnum_BDK_DTX_L2C_CBCX_SELX(a,b) (a)
+#define arguments_BDK_DTX_L2C_CBCX_SELX(a,b) (a),(b),-1,-1
+
+/* DTX L2C_MCI debug-trace register set. Address layout: 0x8000-byte stride
+ * per MCI instance [a], 8 bytes per DAT/ENA/SEL index [b]. Note CN83XX
+ * accepts a <= 2 but wraps with the next power-of-two mask "& 0x3" -- the
+ * generator's convention, not a range check (the range check is the a<=2
+ * guard above it). */
+/**
+ * Register (RSL) dtx_l2c_mci#_bcst_rsp
+ *
+ * DTX L2C_MCI Control Register
+ */
+union bdk_dtx_l2c_mcix_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_mcix_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_mcix_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_l2c_mcix_bcst_rsp bdk_dtx_l2c_mcix_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_L2C_MCIX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_MCIX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe2e0080ll + 0x8000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0fe2e0080ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0fe2e0080ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_L2C_MCIX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_MCIX_BCST_RSP(a) bdk_dtx_l2c_mcix_bcst_rsp_t
+#define bustype_BDK_DTX_L2C_MCIX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_MCIX_BCST_RSP(a) "DTX_L2C_MCIX_BCST_RSP"
+#define busnum_BDK_DTX_L2C_MCIX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_L2C_MCIX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_mci#_ctl
+ *
+ * DTX L2C_MCI Control Register
+ */
+union bdk_dtx_l2c_mcix_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_mcix_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_mcix_ctl_s cn; */
+};
+typedef union bdk_dtx_l2c_mcix_ctl bdk_dtx_l2c_mcix_ctl_t;
+
+static inline uint64_t BDK_DTX_L2C_MCIX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_MCIX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe2e0060ll + 0x8000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=2))
+ return 0x87e0fe2e0060ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0fe2e0060ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_L2C_MCIX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_MCIX_CTL(a) bdk_dtx_l2c_mcix_ctl_t
+#define bustype_BDK_DTX_L2C_MCIX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_MCIX_CTL(a) "DTX_L2C_MCIX_CTL"
+#define busnum_BDK_DTX_L2C_MCIX_CTL(a) (a)
+#define arguments_BDK_DTX_L2C_MCIX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_mci#_dat#
+ *
+ * DTX L2C_MCI Raw Data Register
+ */
+union bdk_dtx_l2c_mcix_datx
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_mcix_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_mcix_datx_s cn; */
+};
+typedef union bdk_dtx_l2c_mcix_datx bdk_dtx_l2c_mcix_datx_t;
+
+static inline uint64_t BDK_DTX_L2C_MCIX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_MCIX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe2e0040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe2e0040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe2e0040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_MCIX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_MCIX_DATX(a,b) bdk_dtx_l2c_mcix_datx_t
+#define bustype_BDK_DTX_L2C_MCIX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_MCIX_DATX(a,b) "DTX_L2C_MCIX_DATX"
+#define busnum_BDK_DTX_L2C_MCIX_DATX(a,b) (a)
+#define arguments_BDK_DTX_L2C_MCIX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_mci#_ena#
+ *
+ * DTX L2C_MCI Data Enable Register
+ */
+union bdk_dtx_l2c_mcix_enax
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_mcix_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_mcix_enax_s cn; */
+};
+typedef union bdk_dtx_l2c_mcix_enax bdk_dtx_l2c_mcix_enax_t;
+
+static inline uint64_t BDK_DTX_L2C_MCIX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_MCIX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe2e0020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe2e0020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe2e0020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_MCIX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_MCIX_ENAX(a,b) bdk_dtx_l2c_mcix_enax_t
+#define bustype_BDK_DTX_L2C_MCIX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_MCIX_ENAX(a,b) "DTX_L2C_MCIX_ENAX"
+#define busnum_BDK_DTX_L2C_MCIX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_L2C_MCIX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_mci#_sel#
+ *
+ * DTX L2C_MCI Select Register
+ */
+union bdk_dtx_l2c_mcix_selx
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_mcix_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_mcix_selx_s cn; */
+};
+typedef union bdk_dtx_l2c_mcix_selx bdk_dtx_l2c_mcix_selx_t;
+
+static inline uint64_t BDK_DTX_L2C_MCIX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_MCIX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe2e0000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe2e0000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe2e0000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_MCIX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_MCIX_SELX(a,b) bdk_dtx_l2c_mcix_selx_t
+#define bustype_BDK_DTX_L2C_MCIX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_MCIX_SELX(a,b) "DTX_L2C_MCIX_SELX"
+#define busnum_BDK_DTX_L2C_MCIX_SELX(a,b) (a)
+#define arguments_BDK_DTX_L2C_MCIX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_tad#_bcst_rsp
+ *
+ * DTX L2C_TAD Control Register
+ */
+union bdk_dtx_l2c_tadx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_tadx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_tadx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_l2c_tadx_bcst_rsp bdk_dtx_l2c_tadx_bcst_rsp_t;
+
+/* 0x8000-byte stride per TAD instance; valid TAD count per model:
+ * CN81XX 1 (offset forced to zero via "& 0x0"), CN83XX 4, CN88XX 8. */
+static inline uint64_t BDK_DTX_L2C_TADX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_TADX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe280080ll + 0x8000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0fe280080ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e0fe280080ll + 0x8000ll * ((a) & 0x7);
+ __bdk_csr_fatal("DTX_L2C_TADX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_TADX_BCST_RSP(a) bdk_dtx_l2c_tadx_bcst_rsp_t
+#define bustype_BDK_DTX_L2C_TADX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_TADX_BCST_RSP(a) "DTX_L2C_TADX_BCST_RSP"
+#define busnum_BDK_DTX_L2C_TADX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_L2C_TADX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_tad#_ctl
+ *
+ * DTX L2C_TAD Control Register
+ */
+union bdk_dtx_l2c_tadx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_tadx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_tadx_ctl_s cn; */
+};
+typedef union bdk_dtx_l2c_tadx_ctl bdk_dtx_l2c_tadx_ctl_t;
+
+static inline uint64_t BDK_DTX_L2C_TADX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_TADX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe280060ll + 0x8000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0fe280060ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=7))
+ return 0x87e0fe280060ll + 0x8000ll * ((a) & 0x7);
+ __bdk_csr_fatal("DTX_L2C_TADX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_TADX_CTL(a) bdk_dtx_l2c_tadx_ctl_t
+#define bustype_BDK_DTX_L2C_TADX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_TADX_CTL(a) "DTX_L2C_TADX_CTL"
+#define busnum_BDK_DTX_L2C_TADX_CTL(a) (a)
+#define arguments_BDK_DTX_L2C_TADX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_tad#_dat#
+ *
+ * DTX L2C_TAD Raw Data Register
+ */
+union bdk_dtx_l2c_tadx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_tadx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_tadx_datx_s cn; */
+};
+typedef union bdk_dtx_l2c_tadx_datx bdk_dtx_l2c_tadx_datx_t;
+
+static inline uint64_t BDK_DTX_L2C_TADX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_TADX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe280040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe280040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=7) && (b<=1)))
+ return 0x87e0fe280040ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_TADX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_TADX_DATX(a,b) bdk_dtx_l2c_tadx_datx_t
+#define bustype_BDK_DTX_L2C_TADX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_TADX_DATX(a,b) "DTX_L2C_TADX_DATX"
+#define busnum_BDK_DTX_L2C_TADX_DATX(a,b) (a)
+#define arguments_BDK_DTX_L2C_TADX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_tad#_ena#
+ *
+ * DTX L2C_TAD Data Enable Register
+ */
+union bdk_dtx_l2c_tadx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_tadx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_tadx_enax_s cn; */
+};
+typedef union bdk_dtx_l2c_tadx_enax bdk_dtx_l2c_tadx_enax_t;
+
+static inline uint64_t BDK_DTX_L2C_TADX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_TADX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe280020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe280020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=7) && (b<=1)))
+ return 0x87e0fe280020ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_TADX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_TADX_ENAX(a,b) bdk_dtx_l2c_tadx_enax_t
+#define bustype_BDK_DTX_L2C_TADX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_TADX_ENAX(a,b) "DTX_L2C_TADX_ENAX"
+#define busnum_BDK_DTX_L2C_TADX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_L2C_TADX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_l2c_tad#_sel#
+ *
+ * DTX L2C_TAD Select Register
+ */
+union bdk_dtx_l2c_tadx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_l2c_tadx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_l2c_tadx_selx_s cn; */
+};
+typedef union bdk_dtx_l2c_tadx_selx bdk_dtx_l2c_tadx_selx_t;
+
+static inline uint64_t BDK_DTX_L2C_TADX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_L2C_TADX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe280000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe280000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=7) && (b<=1)))
+ return 0x87e0fe280000ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_L2C_TADX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_L2C_TADX_SELX(a,b) bdk_dtx_l2c_tadx_selx_t
+#define bustype_BDK_DTX_L2C_TADX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_L2C_TADX_SELX(a,b) "DTX_L2C_TADX_SELX"
+#define busnum_BDK_DTX_L2C_TADX_SELX(a,b) (a)
+#define arguments_BDK_DTX_L2C_TADX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_lbk_bcst_rsp
+ *
+ * DTX LBK Control Register
+ */
+union bdk_dtx_lbk_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_lbk_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lbk_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_lbk_bcst_rsp bdk_dtx_lbk_bcst_rsp_t;
+
+#define BDK_DTX_LBK_BCST_RSP BDK_DTX_LBK_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_LBK_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LBK_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0fe0c0080ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0fe0c0080ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0fe0c0080ll;
+ __bdk_csr_fatal("DTX_LBK_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_LBK_BCST_RSP bdk_dtx_lbk_bcst_rsp_t
+#define bustype_BDK_DTX_LBK_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LBK_BCST_RSP "DTX_LBK_BCST_RSP"
+#define busnum_BDK_DTX_LBK_BCST_RSP 0
+#define arguments_BDK_DTX_LBK_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_lbk_ctl
+ *
+ * DTX LBK Control Register
+ */
+union bdk_dtx_lbk_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_lbk_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lbk_ctl_s cn; */
+};
+typedef union bdk_dtx_lbk_ctl bdk_dtx_lbk_ctl_t;
+
+#define BDK_DTX_LBK_CTL BDK_DTX_LBK_CTL_FUNC()
+static inline uint64_t BDK_DTX_LBK_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LBK_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0fe0c0060ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0fe0c0060ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0fe0c0060ll;
+ __bdk_csr_fatal("DTX_LBK_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_LBK_CTL bdk_dtx_lbk_ctl_t
+#define bustype_BDK_DTX_LBK_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LBK_CTL "DTX_LBK_CTL"
+#define busnum_BDK_DTX_LBK_CTL 0
+#define arguments_BDK_DTX_LBK_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_lbk_dat#
+ *
+ * DTX LBK Raw Data Register
+ */
+union bdk_dtx_lbk_datx
+{
+ uint64_t u;
+ struct bdk_dtx_lbk_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lbk_datx_s cn; */
+};
+typedef union bdk_dtx_lbk_datx bdk_dtx_lbk_datx_t;
+
+static inline uint64_t BDK_DTX_LBK_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LBK_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0fe0c0040ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe0c0040ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe0c0040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_LBK_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_LBK_DATX(a) bdk_dtx_lbk_datx_t
+#define bustype_BDK_DTX_LBK_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LBK_DATX(a) "DTX_LBK_DATX"
+#define busnum_BDK_DTX_LBK_DATX(a) (a)
+#define arguments_BDK_DTX_LBK_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_lbk_ena#
+ *
+ * DTX LBK Data Enable Register
+ */
+union bdk_dtx_lbk_enax
+{
+ uint64_t u;
+ struct bdk_dtx_lbk_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lbk_enax_s cn; */
+};
+typedef union bdk_dtx_lbk_enax bdk_dtx_lbk_enax_t;
+
+static inline uint64_t BDK_DTX_LBK_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LBK_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0fe0c0020ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe0c0020ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe0c0020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_LBK_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_LBK_ENAX(a) bdk_dtx_lbk_enax_t
+#define bustype_BDK_DTX_LBK_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LBK_ENAX(a) "DTX_LBK_ENAX"
+#define busnum_BDK_DTX_LBK_ENAX(a) (a)
+#define arguments_BDK_DTX_LBK_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_lbk_sel#
+ *
+ * DTX LBK Select Register
+ */
+union bdk_dtx_lbk_selx
+{
+ uint64_t u;
+ struct bdk_dtx_lbk_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lbk_selx_s cn; */
+};
+typedef union bdk_dtx_lbk_selx bdk_dtx_lbk_selx_t;
+
+static inline uint64_t BDK_DTX_LBK_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LBK_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0fe0c0000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe0c0000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe0c0000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_LBK_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_LBK_SELX(a) bdk_dtx_lbk_selx_t
+#define bustype_BDK_DTX_LBK_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LBK_SELX(a) "DTX_LBK_SELX"
+#define busnum_BDK_DTX_LBK_SELX(a) (a)
+#define arguments_BDK_DTX_LBK_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_lmc#_bcst_rsp
+ *
+ * DTX LMC Control Register
+ */
+union bdk_dtx_lmcx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_lmcx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lmcx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_lmcx_bcst_rsp bdk_dtx_lmcx_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_LMCX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LMCX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe440080ll + 0x8000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe440080ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0fe440080ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe440080ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_LMCX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_LMCX_BCST_RSP(a) bdk_dtx_lmcx_bcst_rsp_t
+#define bustype_BDK_DTX_LMCX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LMCX_BCST_RSP(a) "DTX_LMCX_BCST_RSP"
+#define busnum_BDK_DTX_LMCX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_LMCX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_lmc#_ctl
+ *
+ * DTX LMC Control Register
+ */
+union bdk_dtx_lmcx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_lmcx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lmcx_ctl_s cn; */
+};
+typedef union bdk_dtx_lmcx_ctl bdk_dtx_lmcx_ctl_t;
+
+static inline uint64_t BDK_DTX_LMCX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LMCX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe440060ll + 0x8000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0fe440060ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e0fe440060ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe440060ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_LMCX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_LMCX_CTL(a) bdk_dtx_lmcx_ctl_t
+#define bustype_BDK_DTX_LMCX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LMCX_CTL(a) "DTX_LMCX_CTL"
+#define busnum_BDK_DTX_LMCX_CTL(a) (a)
+#define arguments_BDK_DTX_LMCX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_lmc#_dat#
+ *
+ * DTX LMC Raw Data Register
+ */
+union bdk_dtx_lmcx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_lmcx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lmcx_datx_s cn; */
+};
+typedef union bdk_dtx_lmcx_datx bdk_dtx_lmcx_datx_t;
+
+static inline uint64_t BDK_DTX_LMCX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LMCX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe440040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe440040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe440040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe440040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_LMCX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_LMCX_DATX(a,b) bdk_dtx_lmcx_datx_t
+#define bustype_BDK_DTX_LMCX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LMCX_DATX(a,b) "DTX_LMCX_DATX"
+#define busnum_BDK_DTX_LMCX_DATX(a,b) (a)
+#define arguments_BDK_DTX_LMCX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_lmc#_ena#
+ *
+ * DTX LMC Data Enable Register
+ */
+union bdk_dtx_lmcx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_lmcx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lmcx_enax_s cn; */
+};
+typedef union bdk_dtx_lmcx_enax bdk_dtx_lmcx_enax_t;
+
+static inline uint64_t BDK_DTX_LMCX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LMCX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe440020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe440020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe440020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe440020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_LMCX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_LMCX_ENAX(a,b) bdk_dtx_lmcx_enax_t
+#define bustype_BDK_DTX_LMCX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LMCX_ENAX(a,b) "DTX_LMCX_ENAX"
+#define busnum_BDK_DTX_LMCX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_LMCX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_lmc#_sel#
+ *
+ * DTX LMC Select Register
+ */
+union bdk_dtx_lmcx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_lmcx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_lmcx_selx_s cn; */
+};
+typedef union bdk_dtx_lmcx_selx bdk_dtx_lmcx_selx_t;
+
+static inline uint64_t BDK_DTX_LMCX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_LMCX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0fe440000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe440000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe440000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe440000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_LMCX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_LMCX_SELX(a,b) bdk_dtx_lmcx_selx_t
+#define bustype_BDK_DTX_LMCX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_LMCX_SELX(a,b) "DTX_LMCX_SELX"
+#define busnum_BDK_DTX_LMCX_SELX(a,b) (a)
+#define arguments_BDK_DTX_LMCX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_mcc#_bcst_rsp
+ *
+ * DTX MCC Control Register
+ */
+union bdk_dtx_mccx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_mccx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_mccx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_mccx_bcst_rsp bdk_dtx_mccx_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_MCCX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_MCCX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe1e0080ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_MCCX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_MCCX_BCST_RSP(a) bdk_dtx_mccx_bcst_rsp_t
+#define bustype_BDK_DTX_MCCX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_MCCX_BCST_RSP(a) "DTX_MCCX_BCST_RSP"
+#define busnum_BDK_DTX_MCCX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_MCCX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_mcc#_ctl
+ *
+ * DTX MCC Control Register
+ */
+union bdk_dtx_mccx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_mccx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_mccx_ctl_s cn; */
+};
+typedef union bdk_dtx_mccx_ctl bdk_dtx_mccx_ctl_t;
+
+static inline uint64_t BDK_DTX_MCCX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_MCCX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe1e0060ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_MCCX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_MCCX_CTL(a) bdk_dtx_mccx_ctl_t
+#define bustype_BDK_DTX_MCCX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_MCCX_CTL(a) "DTX_MCCX_CTL"
+#define busnum_BDK_DTX_MCCX_CTL(a) (a)
+#define arguments_BDK_DTX_MCCX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_mcc#_dat#
+ *
+ * DTX MCC Raw Data Register
+ */
+union bdk_dtx_mccx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_mccx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_mccx_datx_s cn; */
+};
+typedef union bdk_dtx_mccx_datx bdk_dtx_mccx_datx_t;
+
+static inline uint64_t BDK_DTX_MCCX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_MCCX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe1e0040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_MCCX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_MCCX_DATX(a,b) bdk_dtx_mccx_datx_t
+#define bustype_BDK_DTX_MCCX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_MCCX_DATX(a,b) "DTX_MCCX_DATX"
+#define busnum_BDK_DTX_MCCX_DATX(a,b) (a)
+#define arguments_BDK_DTX_MCCX_DATX(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) dtx_mcc#_ena#
 *
 * DTX MCC Data Enable Register
 * Indexed by MCC instance {a} and low/high debug-bus half {b}.
 */
union bdk_dtx_mccx_enax
{
    uint64_t u;
    struct bdk_dtx_mccx_enax_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mccx_enax_s cn; */
};
typedef union bdk_dtx_mccx_enax bdk_dtx_mccx_enax_t;

/* CSR address of DTX_MCC(a)_ENA(b); CN9XXX only, a,b <= 1. Out-of-range
   access reports a fatal CSR error via __bdk_csr_fatal() (presumably
   noreturn -- TODO confirm). */
static inline uint64_t BDK_DTX_MCCX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MCCX_ENAX(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
        return 0x87e0fe1e0020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
    __bdk_csr_fatal("DTX_MCCX_ENAX", 2, a, b, 0, 0);
}

#define typedef_BDK_DTX_MCCX_ENAX(a,b) bdk_dtx_mccx_enax_t
#define bustype_BDK_DTX_MCCX_ENAX(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MCCX_ENAX(a,b) "DTX_MCCX_ENAX"
#define busnum_BDK_DTX_MCCX_ENAX(a,b) (a)
#define arguments_BDK_DTX_MCCX_ENAX(a,b) (a),(b),-1,-1

/**
 * Register (RSL) dtx_mcc#_mci#_bcst_rsp
 *
 * DTX MCC_MCI Control Register
 * Indexed by MCC instance {a} and MCI instance {b}.
 */
union bdk_dtx_mccx_mcix_bcst_rsp
{
    uint64_t u;
    struct bdk_dtx_mccx_mcix_bcst_rsp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mccx_mcix_bcst_rsp_s cn; */
};
typedef union bdk_dtx_mccx_mcix_bcst_rsp bdk_dtx_mccx_mcix_bcst_rsp_t;

/* CSR address of DTX_MCC(a)_MCI(b)_BCST_RSP; CN9XXX only, a,b <= 1. */
static inline uint64_t BDK_DTX_MCCX_MCIX_BCST_RSP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MCCX_MCIX_BCST_RSP(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
        return 0x87e0fec00080ll + 0x18000ll * ((a) & 0x1) + 0x8000ll * ((b) & 0x1);
    __bdk_csr_fatal("DTX_MCCX_MCIX_BCST_RSP", 2, a, b, 0, 0);
}

#define typedef_BDK_DTX_MCCX_MCIX_BCST_RSP(a,b) bdk_dtx_mccx_mcix_bcst_rsp_t
#define bustype_BDK_DTX_MCCX_MCIX_BCST_RSP(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MCCX_MCIX_BCST_RSP(a,b) "DTX_MCCX_MCIX_BCST_RSP"
#define busnum_BDK_DTX_MCCX_MCIX_BCST_RSP(a,b) (a)
#define arguments_BDK_DTX_MCCX_MCIX_BCST_RSP(a,b) (a),(b),-1,-1

/**
 * Register (RSL) dtx_mcc#_mci#_ctl
 *
 * DTX MCC_MCI Control Register
 * Indexed by MCC instance {a} and MCI instance {b}.
 */
union bdk_dtx_mccx_mcix_ctl
{
    uint64_t u;
    struct bdk_dtx_mccx_mcix_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_2_3 : 2;
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                 For diagnostic use only. */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
#else /* Word 0 - Little Endian */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                 For diagnostic use only. */
        uint64_t reserved_2_3 : 2;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mccx_mcix_ctl_s cn; */
};
typedef union bdk_dtx_mccx_mcix_ctl bdk_dtx_mccx_mcix_ctl_t;

/* CSR address of DTX_MCC(a)_MCI(b)_CTL; CN9XXX only, a,b <= 1. */
static inline uint64_t BDK_DTX_MCCX_MCIX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MCCX_MCIX_CTL(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
        return 0x87e0fec00060ll + 0x18000ll * ((a) & 0x1) + 0x8000ll * ((b) & 0x1);
    __bdk_csr_fatal("DTX_MCCX_MCIX_CTL", 2, a, b, 0, 0);
}

#define typedef_BDK_DTX_MCCX_MCIX_CTL(a,b) bdk_dtx_mccx_mcix_ctl_t
#define bustype_BDK_DTX_MCCX_MCIX_CTL(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MCCX_MCIX_CTL(a,b) "DTX_MCCX_MCIX_CTL"
#define busnum_BDK_DTX_MCCX_MCIX_CTL(a,b) (a)
#define arguments_BDK_DTX_MCCX_MCIX_CTL(a,b) (a),(b),-1,-1

/**
 * Register (RSL) dtx_mcc#_mci#_dat#
 *
 * DTX MCC_MCI Raw Data Register
 * Indexed by MCC instance {a}, MCI instance {b}, and debug-bus half {c}.
 */
union bdk_dtx_mccx_mcix_datx
{
    uint64_t u;
    struct bdk_dtx_mccx_mcix_datx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                               ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
#else /* Word 0 - Little Endian */
        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                               ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mccx_mcix_datx_s cn; */
};
typedef union bdk_dtx_mccx_mcix_datx bdk_dtx_mccx_mcix_datx_t;

/* CSR address of DTX_MCC(a)_MCI(b)_DAT(c); CN9XXX only, a,b,c <= 1. */
static inline uint64_t BDK_DTX_MCCX_MCIX_DATX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MCCX_MCIX_DATX(unsigned long a, unsigned long b, unsigned long c)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1) && (c<=1)))
        return 0x87e0fec00040ll + 0x18000ll * ((a) & 0x1) + 0x8000ll * ((b) & 0x1) + 8ll * ((c) & 0x1);
    __bdk_csr_fatal("DTX_MCCX_MCIX_DATX", 3, a, b, c, 0);
}

#define typedef_BDK_DTX_MCCX_MCIX_DATX(a,b,c) bdk_dtx_mccx_mcix_datx_t
#define bustype_BDK_DTX_MCCX_MCIX_DATX(a,b,c) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MCCX_MCIX_DATX(a,b,c) "DTX_MCCX_MCIX_DATX"
#define busnum_BDK_DTX_MCCX_MCIX_DATX(a,b,c) (a)
#define arguments_BDK_DTX_MCCX_MCIX_DATX(a,b,c) (a),(b),(c),-1

/**
 * Register (RSL) dtx_mcc#_mci#_ena#
 *
 * DTX MCC_MCI Data Enable Register
 * Indexed by MCC instance {a}, MCI instance {b}, and debug-bus half {c}.
 */
union bdk_dtx_mccx_mcix_enax
{
    uint64_t u;
    struct bdk_dtx_mccx_mcix_enax_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mccx_mcix_enax_s cn; */
};
typedef union bdk_dtx_mccx_mcix_enax bdk_dtx_mccx_mcix_enax_t;

/* CSR address of DTX_MCC(a)_MCI(b)_ENA(c); CN9XXX only, a,b,c <= 1. */
static inline uint64_t BDK_DTX_MCCX_MCIX_ENAX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MCCX_MCIX_ENAX(unsigned long a, unsigned long b, unsigned long c)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1) && (c<=1)))
        return 0x87e0fec00020ll + 0x18000ll * ((a) & 0x1) + 0x8000ll * ((b) & 0x1) + 8ll * ((c) & 0x1);
    __bdk_csr_fatal("DTX_MCCX_MCIX_ENAX", 3, a, b, c, 0);
}

#define typedef_BDK_DTX_MCCX_MCIX_ENAX(a,b,c) bdk_dtx_mccx_mcix_enax_t
#define bustype_BDK_DTX_MCCX_MCIX_ENAX(a,b,c) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MCCX_MCIX_ENAX(a,b,c) "DTX_MCCX_MCIX_ENAX"
#define busnum_BDK_DTX_MCCX_MCIX_ENAX(a,b,c) (a)
#define arguments_BDK_DTX_MCCX_MCIX_ENAX(a,b,c) (a),(b),(c),-1

/**
 * Register (RSL) dtx_mcc#_mci#_sel#
 *
 * DTX MCC_MCI Select Register
 * Indexed by MCC instance {a}, MCI instance {b}, and debug-bus half {c}.
 */
union bdk_dtx_mccx_mcix_selx
{
    uint64_t u;
    struct bdk_dtx_mccx_mcix_selx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63 : 40;
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
#else /* Word 0 - Little Endian */
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
        uint64_t reserved_24_63 : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mccx_mcix_selx_s cn; */
};
typedef union bdk_dtx_mccx_mcix_selx bdk_dtx_mccx_mcix_selx_t;

/* CSR address of DTX_MCC(a)_MCI(b)_SEL(c); CN9XXX only, a,b,c <= 1. */
static inline uint64_t BDK_DTX_MCCX_MCIX_SELX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MCCX_MCIX_SELX(unsigned long a, unsigned long b, unsigned long c)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1) && (c<=1)))
        return 0x87e0fec00000ll + 0x18000ll * ((a) & 0x1) + 0x8000ll * ((b) & 0x1) + 8ll * ((c) & 0x1);
    __bdk_csr_fatal("DTX_MCCX_MCIX_SELX", 3, a, b, c, 0);
}

#define typedef_BDK_DTX_MCCX_MCIX_SELX(a,b,c) bdk_dtx_mccx_mcix_selx_t
#define bustype_BDK_DTX_MCCX_MCIX_SELX(a,b,c) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MCCX_MCIX_SELX(a,b,c) "DTX_MCCX_MCIX_SELX"
#define busnum_BDK_DTX_MCCX_MCIX_SELX(a,b,c) (a)
#define arguments_BDK_DTX_MCCX_MCIX_SELX(a,b,c) (a),(b),(c),-1

/**
 * Register (RSL) dtx_mcc#_sel#
 *
 * DTX MCC Select Register
 * Indexed by MCC instance {a} and debug-bus half {b}.
 */
union bdk_dtx_mccx_selx
{
    uint64_t u;
    struct bdk_dtx_mccx_selx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63 : 40;
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
#else /* Word 0 - Little Endian */
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
        uint64_t reserved_24_63 : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mccx_selx_s cn; */
};
typedef union bdk_dtx_mccx_selx bdk_dtx_mccx_selx_t;

/* CSR address of DTX_MCC(a)_SEL(b); CN9XXX only, a,b <= 1. */
static inline uint64_t BDK_DTX_MCCX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MCCX_SELX(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
        return 0x87e0fe1e0000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
    __bdk_csr_fatal("DTX_MCCX_SELX", 2, a, b, 0, 0);
}

#define typedef_BDK_DTX_MCCX_SELX(a,b) bdk_dtx_mccx_selx_t
#define bustype_BDK_DTX_MCCX_SELX(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MCCX_SELX(a,b) "DTX_MCCX_SELX"
#define busnum_BDK_DTX_MCCX_SELX(a,b) (a)
#define arguments_BDK_DTX_MCCX_SELX(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) dtx_mio_bcst_rsp
 *
 * DTX MIO Control Register
 * Controls whether this DTX instance answers broadcast CSR accesses.
 */
union bdk_dtx_mio_bcst_rsp
{
    uint64_t u;
    struct bdk_dtx_mio_bcst_rsp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mio_bcst_rsp_s cn; */
};
typedef union bdk_dtx_mio_bcst_rsp bdk_dtx_mio_bcst_rsp_t;

#define BDK_DTX_MIO_BCST_RSP BDK_DTX_MIO_BCST_RSP_FUNC()
static inline uint64_t BDK_DTX_MIO_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
/* Fixed CSR address; no model or index check is required for this register. */
static inline uint64_t BDK_DTX_MIO_BCST_RSP_FUNC(void)
{
    return 0x87e0fe000080ull;
}

#define typedef_BDK_DTX_MIO_BCST_RSP bdk_dtx_mio_bcst_rsp_t
#define bustype_BDK_DTX_MIO_BCST_RSP BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MIO_BCST_RSP "DTX_MIO_BCST_RSP"
#define busnum_BDK_DTX_MIO_BCST_RSP 0
#define arguments_BDK_DTX_MIO_BCST_RSP -1,-1,-1,-1
+
/**
 * Register (RSL) dtx_mio_ctl
 *
 * DTX MIO Control Register
 * Per-block debug-transmit control (clock forcing, echo mode, bus swap).
 */
union bdk_dtx_mio_ctl
{
    uint64_t u;
    struct bdk_dtx_mio_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_2_3 : 2;
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                 For diagnostic use only. */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
#else /* Word 0 - Little Endian */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                 For diagnostic use only. */
        uint64_t reserved_2_3 : 2;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mio_ctl_s cn; */
};
typedef union bdk_dtx_mio_ctl bdk_dtx_mio_ctl_t;

#define BDK_DTX_MIO_CTL BDK_DTX_MIO_CTL_FUNC()
static inline uint64_t BDK_DTX_MIO_CTL_FUNC(void) __attribute__ ((pure, always_inline));
/* Fixed CSR address; no model or index check is required for this register. */
static inline uint64_t BDK_DTX_MIO_CTL_FUNC(void)
{
    return 0x87e0fe000060ull;
}

#define typedef_BDK_DTX_MIO_CTL bdk_dtx_mio_ctl_t
#define bustype_BDK_DTX_MIO_CTL BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MIO_CTL "DTX_MIO_CTL"
#define busnum_BDK_DTX_MIO_CTL 0
#define arguments_BDK_DTX_MIO_CTL -1,-1,-1,-1
+
/**
 * Register (RSL) dtx_mio_dat#
 *
 * DTX MIO Raw Data Register
 * Indexed by low/high debug-bus half {a}.
 */
union bdk_dtx_mio_datx
{
    uint64_t u;
    struct bdk_dtx_mio_datx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                               ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
#else /* Word 0 - Little Endian */
        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                               ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mio_datx_s cn; */
};
typedef union bdk_dtx_mio_datx bdk_dtx_mio_datx_t;

/* CSR address of DTX_MIO_DAT(a); a <= 1 on every model (no model check).
   Out-of-range indices report a fatal CSR error via __bdk_csr_fatal()
   (presumably noreturn -- TODO confirm). */
static inline uint64_t BDK_DTX_MIO_DATX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MIO_DATX(unsigned long a)
{
    if (a<=1)
        return 0x87e0fe000040ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_MIO_DATX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_MIO_DATX(a) bdk_dtx_mio_datx_t
#define bustype_BDK_DTX_MIO_DATX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MIO_DATX(a) "DTX_MIO_DATX"
#define busnum_BDK_DTX_MIO_DATX(a) (a)
#define arguments_BDK_DTX_MIO_DATX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_mio_ena#
 *
 * DTX MIO Data Enable Register
 * Indexed by low/high debug-bus half {a}.
 */
union bdk_dtx_mio_enax
{
    uint64_t u;
    struct bdk_dtx_mio_enax_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mio_enax_s cn; */
};
typedef union bdk_dtx_mio_enax bdk_dtx_mio_enax_t;

/* CSR address of DTX_MIO_ENA(a); a <= 1 on every model. */
static inline uint64_t BDK_DTX_MIO_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MIO_ENAX(unsigned long a)
{
    if (a<=1)
        return 0x87e0fe000020ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_MIO_ENAX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_MIO_ENAX(a) bdk_dtx_mio_enax_t
#define bustype_BDK_DTX_MIO_ENAX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MIO_ENAX(a) "DTX_MIO_ENAX"
#define busnum_BDK_DTX_MIO_ENAX(a) (a)
#define arguments_BDK_DTX_MIO_ENAX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_mio_sel#
 *
 * DTX MIO Select Register
 * Indexed by low/high debug-bus half {a}.
 */
union bdk_dtx_mio_selx
{
    uint64_t u;
    struct bdk_dtx_mio_selx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63 : 40;
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
#else /* Word 0 - Little Endian */
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
        uint64_t reserved_24_63 : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mio_selx_s cn; */
};
typedef union bdk_dtx_mio_selx bdk_dtx_mio_selx_t;

/* CSR address of DTX_MIO_SEL(a); a <= 1 on every model. */
static inline uint64_t BDK_DTX_MIO_SELX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MIO_SELX(unsigned long a)
{
    if (a<=1)
        return 0x87e0fe000000ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_MIO_SELX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_MIO_SELX(a) bdk_dtx_mio_selx_t
#define bustype_BDK_DTX_MIO_SELX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MIO_SELX(a) "DTX_MIO_SELX"
#define busnum_BDK_DTX_MIO_SELX(a) (a)
#define arguments_BDK_DTX_MIO_SELX(a) (a),-1,-1,-1
+
/**
 * Register (RSL) dtx_mpi#_bcst_rsp
 *
 * DTX MPI Control Register
 * Indexed by MPI instance {a}.
 */
union bdk_dtx_mpix_bcst_rsp
{
    uint64_t u;
    struct bdk_dtx_mpix_bcst_rsp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mpix_bcst_rsp_s cn; */
};
typedef union bdk_dtx_mpix_bcst_rsp bdk_dtx_mpix_bcst_rsp_t;

/* CSR address of DTX_MPI(a)_BCST_RSP; exists only on CN9XXX with a <= 1.
   Out-of-range/unsupported access reports a fatal CSR error via
   __bdk_csr_fatal() (presumably noreturn -- TODO confirm). */
static inline uint64_t BDK_DTX_MPIX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MPIX_BCST_RSP(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
        return 0x87e0fe820080ll + 0x8000ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_MPIX_BCST_RSP", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_MPIX_BCST_RSP(a) bdk_dtx_mpix_bcst_rsp_t
#define bustype_BDK_DTX_MPIX_BCST_RSP(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MPIX_BCST_RSP(a) "DTX_MPIX_BCST_RSP"
#define busnum_BDK_DTX_MPIX_BCST_RSP(a) (a)
#define arguments_BDK_DTX_MPIX_BCST_RSP(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_mpi#_ctl
 *
 * DTX MPI Control Register
 * Indexed by MPI instance {a}.
 */
union bdk_dtx_mpix_ctl
{
    uint64_t u;
    struct bdk_dtx_mpix_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_2_3 : 2;
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                 For diagnostic use only. */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
#else /* Word 0 - Little Endian */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                 For diagnostic use only. */
        uint64_t reserved_2_3 : 2;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mpix_ctl_s cn; */
};
typedef union bdk_dtx_mpix_ctl bdk_dtx_mpix_ctl_t;

/* CSR address of DTX_MPI(a)_CTL; CN9XXX only, a <= 1. */
static inline uint64_t BDK_DTX_MPIX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MPIX_CTL(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
        return 0x87e0fe820060ll + 0x8000ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_MPIX_CTL", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_MPIX_CTL(a) bdk_dtx_mpix_ctl_t
#define bustype_BDK_DTX_MPIX_CTL(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MPIX_CTL(a) "DTX_MPIX_CTL"
#define busnum_BDK_DTX_MPIX_CTL(a) (a)
#define arguments_BDK_DTX_MPIX_CTL(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_mpi#_dat#
 *
 * DTX MPI Raw Data Register
 * Indexed by MPI instance {a} and debug-bus half {b}.
 */
union bdk_dtx_mpix_datx
{
    uint64_t u;
    struct bdk_dtx_mpix_datx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                               ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
#else /* Word 0 - Little Endian */
        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                               ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mpix_datx_s cn; */
};
typedef union bdk_dtx_mpix_datx bdk_dtx_mpix_datx_t;

/* CSR address of DTX_MPI(a)_DAT(b); CN9XXX only, a,b <= 1. */
static inline uint64_t BDK_DTX_MPIX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MPIX_DATX(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
        return 0x87e0fe820040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
    __bdk_csr_fatal("DTX_MPIX_DATX", 2, a, b, 0, 0);
}

#define typedef_BDK_DTX_MPIX_DATX(a,b) bdk_dtx_mpix_datx_t
#define bustype_BDK_DTX_MPIX_DATX(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MPIX_DATX(a,b) "DTX_MPIX_DATX"
#define busnum_BDK_DTX_MPIX_DATX(a,b) (a)
#define arguments_BDK_DTX_MPIX_DATX(a,b) (a),(b),-1,-1

/**
 * Register (RSL) dtx_mpi#_ena#
 *
 * DTX MPI Data Enable Register
 * Indexed by MPI instance {a} and debug-bus half {b}.
 */
union bdk_dtx_mpix_enax
{
    uint64_t u;
    struct bdk_dtx_mpix_enax_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mpix_enax_s cn; */
};
typedef union bdk_dtx_mpix_enax bdk_dtx_mpix_enax_t;

/* CSR address of DTX_MPI(a)_ENA(b); CN9XXX only, a,b <= 1. */
static inline uint64_t BDK_DTX_MPIX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MPIX_ENAX(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
        return 0x87e0fe820020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
    __bdk_csr_fatal("DTX_MPIX_ENAX", 2, a, b, 0, 0);
}

#define typedef_BDK_DTX_MPIX_ENAX(a,b) bdk_dtx_mpix_enax_t
#define bustype_BDK_DTX_MPIX_ENAX(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MPIX_ENAX(a,b) "DTX_MPIX_ENAX"
#define busnum_BDK_DTX_MPIX_ENAX(a,b) (a)
#define arguments_BDK_DTX_MPIX_ENAX(a,b) (a),(b),-1,-1

/**
 * Register (RSL) dtx_mpi#_sel#
 *
 * DTX MPI Select Register
 * Indexed by MPI instance {a} and debug-bus half {b}.
 */
union bdk_dtx_mpix_selx
{
    uint64_t u;
    struct bdk_dtx_mpix_selx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63 : 40;
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
#else /* Word 0 - Little Endian */
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
        uint64_t reserved_24_63 : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mpix_selx_s cn; */
};
typedef union bdk_dtx_mpix_selx bdk_dtx_mpix_selx_t;

/* CSR address of DTX_MPI(a)_SEL(b); CN9XXX only, a,b <= 1. */
static inline uint64_t BDK_DTX_MPIX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MPIX_SELX(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
        return 0x87e0fe820000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
    __bdk_csr_fatal("DTX_MPIX_SELX", 2, a, b, 0, 0);
}

#define typedef_BDK_DTX_MPIX_SELX(a,b) bdk_dtx_mpix_selx_t
#define bustype_BDK_DTX_MPIX_SELX(a,b) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MPIX_SELX(a,b) "DTX_MPIX_SELX"
#define busnum_BDK_DTX_MPIX_SELX(a,b) (a)
#define arguments_BDK_DTX_MPIX_SELX(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) dtx_mrml_bcst_rsp
 *
 * DTX MRML Control Register
 * Controls whether this DTX instance answers broadcast CSR accesses.
 */
union bdk_dtx_mrml_bcst_rsp
{
    uint64_t u;
    struct bdk_dtx_mrml_bcst_rsp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mrml_bcst_rsp_s cn; */
};
typedef union bdk_dtx_mrml_bcst_rsp bdk_dtx_mrml_bcst_rsp_t;

#define BDK_DTX_MRML_BCST_RSP BDK_DTX_MRML_BCST_RSP_FUNC()
static inline uint64_t BDK_DTX_MRML_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
/* Fixed CSR address; no model or index check is required for this register. */
static inline uint64_t BDK_DTX_MRML_BCST_RSP_FUNC(void)
{
    return 0x87e0fe7e0080ull;
}

#define typedef_BDK_DTX_MRML_BCST_RSP bdk_dtx_mrml_bcst_rsp_t
#define bustype_BDK_DTX_MRML_BCST_RSP BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MRML_BCST_RSP "DTX_MRML_BCST_RSP"
#define busnum_BDK_DTX_MRML_BCST_RSP 0
#define arguments_BDK_DTX_MRML_BCST_RSP -1,-1,-1,-1
+
/**
 * Register (RSL) dtx_mrml_ctl
 *
 * DTX MRML Control Register
 * Per-block debug-transmit control (clock forcing, echo mode, bus swap).
 */
union bdk_dtx_mrml_ctl
{
    uint64_t u;
    struct bdk_dtx_mrml_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_2_3 : 2;
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                 For diagnostic use only. */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
#else /* Word 0 - Little Endian */
        uint64_t swap : 1; /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
        uint64_t echoen : 1; /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
                                                 For diagnostic use only. */
        uint64_t reserved_2_3 : 2;
        uint64_t active : 1; /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mrml_ctl_s cn; */
};
typedef union bdk_dtx_mrml_ctl bdk_dtx_mrml_ctl_t;

#define BDK_DTX_MRML_CTL BDK_DTX_MRML_CTL_FUNC()
static inline uint64_t BDK_DTX_MRML_CTL_FUNC(void) __attribute__ ((pure, always_inline));
/* Fixed CSR address; no model or index check is required for this register. */
static inline uint64_t BDK_DTX_MRML_CTL_FUNC(void)
{
    return 0x87e0fe7e0060ull;
}

#define typedef_BDK_DTX_MRML_CTL bdk_dtx_mrml_ctl_t
#define bustype_BDK_DTX_MRML_CTL BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MRML_CTL "DTX_MRML_CTL"
#define busnum_BDK_DTX_MRML_CTL 0
#define arguments_BDK_DTX_MRML_CTL -1,-1,-1,-1
+
/**
 * Register (RSL) dtx_mrml_dat#
 *
 * DTX MRML Raw Data Register
 * Indexed by low/high debug-bus half {a}.
 */
union bdk_dtx_mrml_datx
{
    uint64_t u;
    struct bdk_dtx_mrml_datx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                               ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
#else /* Word 0 - Little Endian */
        uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
                                               ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mrml_datx_s cn; */
};
typedef union bdk_dtx_mrml_datx bdk_dtx_mrml_datx_t;

/* CSR address of DTX_MRML_DAT(a); a <= 1 on every model (no model check).
   Out-of-range indices report a fatal CSR error via __bdk_csr_fatal()
   (presumably noreturn -- TODO confirm). */
static inline uint64_t BDK_DTX_MRML_DATX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MRML_DATX(unsigned long a)
{
    if (a<=1)
        return 0x87e0fe7e0040ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_MRML_DATX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_MRML_DATX(a) bdk_dtx_mrml_datx_t
#define bustype_BDK_DTX_MRML_DATX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MRML_DATX(a) "DTX_MRML_DATX"
#define busnum_BDK_DTX_MRML_DATX(a) (a)
#define arguments_BDK_DTX_MRML_DATX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_mrml_ena#
 *
 * DTX MRML Data Enable Register
 * Indexed by low/high debug-bus half {a}.
 */
union bdk_dtx_mrml_enax
{
    uint64_t u;
    struct bdk_dtx_mrml_enax_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_36_63 : 28;
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
                                              buses. Normally only one block will drive each bit. */
        uint64_t reserved_36_63 : 28;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mrml_enax_s cn; */
};
typedef union bdk_dtx_mrml_enax bdk_dtx_mrml_enax_t;

/* CSR address of DTX_MRML_ENA(a); a <= 1 on every model. */
static inline uint64_t BDK_DTX_MRML_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MRML_ENAX(unsigned long a)
{
    if (a<=1)
        return 0x87e0fe7e0020ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_MRML_ENAX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_MRML_ENAX(a) bdk_dtx_mrml_enax_t
#define bustype_BDK_DTX_MRML_ENAX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MRML_ENAX(a) "DTX_MRML_ENAX"
#define busnum_BDK_DTX_MRML_ENAX(a) (a)
#define arguments_BDK_DTX_MRML_ENAX(a) (a),-1,-1,-1

/**
 * Register (RSL) dtx_mrml_sel#
 *
 * DTX MRML Select Register
 * Indexed by low/high debug-bus half {a}.
 */
union bdk_dtx_mrml_selx
{
    uint64_t u;
    struct bdk_dtx_mrml_selx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63 : 40;
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
#else /* Word 0 - Little Endian */
        uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
        uint64_t reserved_24_63 : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_mrml_selx_s cn; */
};
typedef union bdk_dtx_mrml_selx bdk_dtx_mrml_selx_t;

/* CSR address of DTX_MRML_SEL(a); a <= 1 on every model. */
static inline uint64_t BDK_DTX_MRML_SELX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_DTX_MRML_SELX(unsigned long a)
{
    if (a<=1)
        return 0x87e0fe7e0000ll + 8ll * ((a) & 0x1);
    __bdk_csr_fatal("DTX_MRML_SELX", 1, a, 0, 0, 0);
}

#define typedef_BDK_DTX_MRML_SELX(a) bdk_dtx_mrml_selx_t
#define bustype_BDK_DTX_MRML_SELX(a) BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_MRML_SELX(a) "DTX_MRML_SELX"
#define busnum_BDK_DTX_MRML_SELX(a) (a)
#define arguments_BDK_DTX_MRML_SELX(a) (a),-1,-1,-1
+
/**
 * Register (RSL) dtx_ncsi_bcst_rsp
 *
 * DTX NCSI Control Register
 */
union bdk_dtx_ncsi_bcst_rsp
{
    uint64_t u;
    struct bdk_dtx_ncsi_bcst_rsp_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
#else /* Word 0 - Little Endian */
        uint64_t ena : 1; /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_dtx_ncsi_bcst_rsp_s cn; */
};
typedef union bdk_dtx_ncsi_bcst_rsp bdk_dtx_ncsi_bcst_rsp_t;

#define BDK_DTX_NCSI_BCST_RSP BDK_DTX_NCSI_BCST_RSP_FUNC()
static inline uint64_t BDK_DTX_NCSI_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
/* CSR address of DTX_NCSI_BCST_RSP. Note the generated per-model branches
   all return the same address (CN83XX, CN88XX, CN9XXX); the model test only
   gates existence -- unknown models report a fatal CSR error via
   __bdk_csr_fatal() (presumably noreturn -- TODO confirm). */
static inline uint64_t BDK_DTX_NCSI_BCST_RSP_FUNC(void)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
        return 0x87e0fe058080ll;
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
        return 0x87e0fe058080ll;
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
        return 0x87e0fe058080ll;
    __bdk_csr_fatal("DTX_NCSI_BCST_RSP", 0, 0, 0, 0, 0);
}

#define typedef_BDK_DTX_NCSI_BCST_RSP bdk_dtx_ncsi_bcst_rsp_t
#define bustype_BDK_DTX_NCSI_BCST_RSP BDK_CSR_TYPE_RSL
#define basename_BDK_DTX_NCSI_BCST_RSP "DTX_NCSI_BCST_RSP"
#define busnum_BDK_DTX_NCSI_BCST_RSP 0
#define arguments_BDK_DTX_NCSI_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ncsi_ctl
+ *
+ * DTX NCSI Control Register
+ */
+union bdk_dtx_ncsi_ctl
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ncsi_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ncsi_ctl_s cn; */
+};
+typedef union bdk_dtx_ncsi_ctl bdk_dtx_ncsi_ctl_t;
+
+#define BDK_DTX_NCSI_CTL BDK_DTX_NCSI_CTL_FUNC()
+static inline uint64_t BDK_DTX_NCSI_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+/* Physical CSR address for the running chip model. CN83XX, CN88XX and CN9XXX
+   all decode this register at the same address, so the three duplicated
+   per-model branches are collapsed into one test. Unsupported models fall
+   through to __bdk_csr_fatal() (NOTE(review): presumably fatal and
+   non-returning -- confirm; no value is returned on that path). */
+static inline uint64_t BDK_DTX_NCSI_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0fe058060ll;
+ __bdk_csr_fatal("DTX_NCSI_CTL", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NCSI_CTL bdk_dtx_ncsi_ctl_t
+#define bustype_BDK_DTX_NCSI_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NCSI_CTL "DTX_NCSI_CTL"
+#define busnum_BDK_DTX_NCSI_CTL 0
+#define arguments_BDK_DTX_NCSI_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ncsi_dat#
+ *
+ * DTX NCSI Raw Data Register
+ */
+union bdk_dtx_ncsi_datx
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ncsi_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ncsi_datx_s cn; */
+};
+typedef union bdk_dtx_ncsi_datx bdk_dtx_ncsi_datx_t;
+
+static inline uint64_t BDK_DTX_NCSI_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1). CN83XX, CN88XX and CN9XXX
+   all use the same base and stride, so the three duplicated per-model
+   branches are collapsed into one test. Out-of-range [a] or an unsupported
+   model falls through to __bdk_csr_fatal() (NOTE(review): presumably fatal
+   and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NCSI_DATX(unsigned long a)
+{
+ if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+ return 0x87e0fe058040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NCSI_DATX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NCSI_DATX(a) bdk_dtx_ncsi_datx_t
+#define bustype_BDK_DTX_NCSI_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NCSI_DATX(a) "DTX_NCSI_DATX"
+#define busnum_BDK_DTX_NCSI_DATX(a) (a)
+#define arguments_BDK_DTX_NCSI_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ncsi_ena#
+ *
+ * DTX NCSI Data Enable Register
+ */
+union bdk_dtx_ncsi_enax
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ncsi_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ncsi_enax_s cn; */
+};
+typedef union bdk_dtx_ncsi_enax bdk_dtx_ncsi_enax_t;
+
+static inline uint64_t BDK_DTX_NCSI_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1). CN83XX, CN88XX and CN9XXX
+   all use the same base and stride, so the three duplicated per-model
+   branches are collapsed into one test. Out-of-range [a] or an unsupported
+   model falls through to __bdk_csr_fatal() (NOTE(review): presumably fatal
+   and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NCSI_ENAX(unsigned long a)
+{
+ if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+ return 0x87e0fe058020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NCSI_ENAX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NCSI_ENAX(a) bdk_dtx_ncsi_enax_t
+#define bustype_BDK_DTX_NCSI_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NCSI_ENAX(a) "DTX_NCSI_ENAX"
+#define busnum_BDK_DTX_NCSI_ENAX(a) (a)
+#define arguments_BDK_DTX_NCSI_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ncsi_sel#
+ *
+ * DTX NCSI Select Register
+ */
+union bdk_dtx_ncsi_selx
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ncsi_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ncsi_selx_s cn; */
+};
+typedef union bdk_dtx_ncsi_selx bdk_dtx_ncsi_selx_t;
+
+static inline uint64_t BDK_DTX_NCSI_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1). CN83XX, CN88XX and CN9XXX
+   all use the same base and stride, so the three duplicated per-model
+   branches are collapsed into one test. Out-of-range [a] or an unsupported
+   model falls through to __bdk_csr_fatal() (NOTE(review): presumably fatal
+   and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NCSI_SELX(unsigned long a)
+{
+ if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+ return 0x87e0fe058000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NCSI_SELX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NCSI_SELX(a) bdk_dtx_ncsi_selx_t
+#define bustype_BDK_DTX_NCSI_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NCSI_SELX(a) "DTX_NCSI_SELX"
+#define busnum_BDK_DTX_NCSI_SELX(a) (a)
+#define arguments_BDK_DTX_NCSI_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_nic_bcst_rsp
+ *
+ * DTX NIC Control Register
+ */
+union bdk_dtx_nic_bcst_rsp
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_nic_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_nic_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_nic_bcst_rsp bdk_dtx_nic_bcst_rsp_t;
+
+#define BDK_DTX_NIC_BCST_RSP BDK_DTX_NIC_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_NIC_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+/* Physical CSR address for the running chip model (CN8XXX only). Other models
+   fall through to __bdk_csr_fatal() (NOTE(review): presumably fatal and
+   non-returning -- confirm; no value is returned on that path). */
+static inline uint64_t BDK_DTX_NIC_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0fea18080ll;
+ __bdk_csr_fatal("DTX_NIC_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NIC_BCST_RSP bdk_dtx_nic_bcst_rsp_t
+#define bustype_BDK_DTX_NIC_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NIC_BCST_RSP "DTX_NIC_BCST_RSP"
+#define busnum_BDK_DTX_NIC_BCST_RSP 0
+#define arguments_BDK_DTX_NIC_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_nic_ctl
+ *
+ * DTX NIC Control Register
+ */
+union bdk_dtx_nic_ctl
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_nic_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_nic_ctl_s cn; */
+};
+typedef union bdk_dtx_nic_ctl bdk_dtx_nic_ctl_t;
+
+#define BDK_DTX_NIC_CTL BDK_DTX_NIC_CTL_FUNC()
+static inline uint64_t BDK_DTX_NIC_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+/* Physical CSR address for the running chip model (CN8XXX only). Other models
+   fall through to __bdk_csr_fatal() (NOTE(review): presumably fatal and
+   non-returning -- confirm; no value is returned on that path). */
+static inline uint64_t BDK_DTX_NIC_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+ return 0x87e0fea18060ll;
+ __bdk_csr_fatal("DTX_NIC_CTL", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NIC_CTL bdk_dtx_nic_ctl_t
+#define bustype_BDK_DTX_NIC_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NIC_CTL "DTX_NIC_CTL"
+#define busnum_BDK_DTX_NIC_CTL 0
+#define arguments_BDK_DTX_NIC_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_nic_dat#
+ *
+ * DTX NIC Raw Data Register
+ */
+union bdk_dtx_nic_datx
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_nic_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_nic_datx_s cn; */
+};
+typedef union bdk_dtx_nic_datx bdk_dtx_nic_datx_t;
+
+static inline uint64_t BDK_DTX_NIC_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN8XXX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NIC_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=1))
+ return 0x87e0fea18040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NIC_DATX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NIC_DATX(a) bdk_dtx_nic_datx_t
+#define bustype_BDK_DTX_NIC_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NIC_DATX(a) "DTX_NIC_DATX"
+#define busnum_BDK_DTX_NIC_DATX(a) (a)
+#define arguments_BDK_DTX_NIC_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_nic_ena#
+ *
+ * DTX NIC Data Enable Register
+ */
+union bdk_dtx_nic_enax
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_nic_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_nic_enax_s cn; */
+};
+typedef union bdk_dtx_nic_enax bdk_dtx_nic_enax_t;
+
+static inline uint64_t BDK_DTX_NIC_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN8XXX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NIC_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=1))
+ return 0x87e0fea18020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NIC_ENAX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NIC_ENAX(a) bdk_dtx_nic_enax_t
+#define bustype_BDK_DTX_NIC_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NIC_ENAX(a) "DTX_NIC_ENAX"
+#define busnum_BDK_DTX_NIC_ENAX(a) (a)
+#define arguments_BDK_DTX_NIC_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_nic_sel#
+ *
+ * DTX NIC Select Register
+ */
+union bdk_dtx_nic_selx
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_nic_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_nic_selx_s cn; */
+};
+typedef union bdk_dtx_nic_selx bdk_dtx_nic_selx_t;
+
+static inline uint64_t BDK_DTX_NIC_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN8XXX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NIC_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX) && (a<=1))
+ return 0x87e0fea18000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NIC_SELX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NIC_SELX(a) bdk_dtx_nic_selx_t
+#define bustype_BDK_DTX_NIC_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NIC_SELX(a) "DTX_NIC_SELX"
+#define busnum_BDK_DTX_NIC_SELX(a) (a)
+#define arguments_BDK_DTX_NIC_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_npc_bcst_rsp
+ *
+ * DTX NPC Control Register
+ */
+union bdk_dtx_npc_bcst_rsp
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_npc_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_npc_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_npc_bcst_rsp bdk_dtx_npc_bcst_rsp_t;
+
+#define BDK_DTX_NPC_BCST_RSP BDK_DTX_NPC_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_NPC_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+/* Physical CSR address for the running chip model (CN9XXX only). Other models
+   fall through to __bdk_csr_fatal() (NOTE(review): presumably fatal and
+   non-returning -- confirm; no value is returned on that path). */
+static inline uint64_t BDK_DTX_NPC_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0fe2d0080ll;
+ __bdk_csr_fatal("DTX_NPC_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NPC_BCST_RSP bdk_dtx_npc_bcst_rsp_t
+#define bustype_BDK_DTX_NPC_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NPC_BCST_RSP "DTX_NPC_BCST_RSP"
+#define busnum_BDK_DTX_NPC_BCST_RSP 0
+#define arguments_BDK_DTX_NPC_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_npc_ctl
+ *
+ * DTX NPC Control Register
+ */
+union bdk_dtx_npc_ctl
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_npc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_npc_ctl_s cn; */
+};
+typedef union bdk_dtx_npc_ctl bdk_dtx_npc_ctl_t;
+
+#define BDK_DTX_NPC_CTL BDK_DTX_NPC_CTL_FUNC()
+static inline uint64_t BDK_DTX_NPC_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+/* Physical CSR address for the running chip model (CN9XXX only). Other models
+   fall through to __bdk_csr_fatal() (NOTE(review): presumably fatal and
+   non-returning -- confirm; no value is returned on that path). */
+static inline uint64_t BDK_DTX_NPC_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0fe2d0060ll;
+ __bdk_csr_fatal("DTX_NPC_CTL", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NPC_CTL bdk_dtx_npc_ctl_t
+#define bustype_BDK_DTX_NPC_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NPC_CTL "DTX_NPC_CTL"
+#define busnum_BDK_DTX_NPC_CTL 0
+#define arguments_BDK_DTX_NPC_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_npc_dat#
+ *
+ * DTX NPC Raw Data Register
+ */
+union bdk_dtx_npc_datx
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_npc_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_npc_datx_s cn; */
+};
+typedef union bdk_dtx_npc_datx bdk_dtx_npc_datx_t;
+
+static inline uint64_t BDK_DTX_NPC_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN9XXX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NPC_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe2d0040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NPC_DATX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NPC_DATX(a) bdk_dtx_npc_datx_t
+#define bustype_BDK_DTX_NPC_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NPC_DATX(a) "DTX_NPC_DATX"
+#define busnum_BDK_DTX_NPC_DATX(a) (a)
+#define arguments_BDK_DTX_NPC_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_npc_ena#
+ *
+ * DTX NPC Data Enable Register
+ */
+union bdk_dtx_npc_enax
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_npc_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_npc_enax_s cn; */
+};
+typedef union bdk_dtx_npc_enax bdk_dtx_npc_enax_t;
+
+static inline uint64_t BDK_DTX_NPC_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN9XXX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NPC_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe2d0020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NPC_ENAX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NPC_ENAX(a) bdk_dtx_npc_enax_t
+#define bustype_BDK_DTX_NPC_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NPC_ENAX(a) "DTX_NPC_ENAX"
+#define busnum_BDK_DTX_NPC_ENAX(a) (a)
+#define arguments_BDK_DTX_NPC_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_npc_sel#
+ *
+ * DTX NPC Select Register
+ */
+union bdk_dtx_npc_selx
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_npc_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_npc_selx_s cn; */
+};
+typedef union bdk_dtx_npc_selx bdk_dtx_npc_selx_t;
+
+static inline uint64_t BDK_DTX_NPC_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN9XXX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_NPC_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe2d0000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_NPC_SELX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_NPC_SELX(a) bdk_dtx_npc_selx_t
+#define bustype_BDK_DTX_NPC_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_NPC_SELX(a) "DTX_NPC_SELX"
+#define busnum_BDK_DTX_NPC_SELX(a) (a)
+#define arguments_BDK_DTX_NPC_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_bot_bcst_rsp
+ *
+ * INTERNAL: DTX OCX_BOT Control Register
+ */
+union bdk_dtx_ocx_bot_bcst_rsp
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ocx_bot_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_bot_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ocx_bot_bcst_rsp bdk_dtx_ocx_bot_bcst_rsp_t;
+
+#define BDK_DTX_OCX_BOT_BCST_RSP BDK_DTX_OCX_BOT_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_OCX_BOT_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+/* Physical CSR address for the running chip model (CN88XX only). Other models
+   fall through to __bdk_csr_fatal() (NOTE(review): presumably fatal and
+   non-returning -- confirm; no value is returned on that path). */
+static inline uint64_t BDK_DTX_OCX_BOT_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0fe198080ll;
+ __bdk_csr_fatal("DTX_OCX_BOT_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_OCX_BOT_BCST_RSP bdk_dtx_ocx_bot_bcst_rsp_t
+#define bustype_BDK_DTX_OCX_BOT_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_BOT_BCST_RSP "DTX_OCX_BOT_BCST_RSP"
+#define busnum_BDK_DTX_OCX_BOT_BCST_RSP 0
+#define arguments_BDK_DTX_OCX_BOT_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_bot_ctl
+ *
+ * INTERNAL: DTX OCX_BOT Control Register
+ */
+union bdk_dtx_ocx_bot_ctl
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ocx_bot_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_bot_ctl_s cn; */
+};
+typedef union bdk_dtx_ocx_bot_ctl bdk_dtx_ocx_bot_ctl_t;
+
+#define BDK_DTX_OCX_BOT_CTL BDK_DTX_OCX_BOT_CTL_FUNC()
+static inline uint64_t BDK_DTX_OCX_BOT_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+/* Physical CSR address for the running chip model (CN88XX only). Other models
+   fall through to __bdk_csr_fatal() (NOTE(review): presumably fatal and
+   non-returning -- confirm; no value is returned on that path). */
+static inline uint64_t BDK_DTX_OCX_BOT_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0fe198060ll;
+ __bdk_csr_fatal("DTX_OCX_BOT_CTL", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_OCX_BOT_CTL bdk_dtx_ocx_bot_ctl_t
+#define bustype_BDK_DTX_OCX_BOT_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_BOT_CTL "DTX_OCX_BOT_CTL"
+#define busnum_BDK_DTX_OCX_BOT_CTL 0
+#define arguments_BDK_DTX_OCX_BOT_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_bot_dat#
+ *
+ * INTERNAL: DTX OCX_BOT Raw Data Register
+ */
+union bdk_dtx_ocx_bot_datx
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ocx_bot_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_bot_datx_s cn; */
+};
+typedef union bdk_dtx_ocx_bot_datx bdk_dtx_ocx_bot_datx_t;
+
+static inline uint64_t BDK_DTX_OCX_BOT_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN88XX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_OCX_BOT_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fe198040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_BOT_DATX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_OCX_BOT_DATX(a) bdk_dtx_ocx_bot_datx_t
+#define bustype_BDK_DTX_OCX_BOT_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_BOT_DATX(a) "DTX_OCX_BOT_DATX"
+#define busnum_BDK_DTX_OCX_BOT_DATX(a) (a)
+#define arguments_BDK_DTX_OCX_BOT_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_bot_ena#
+ *
+ * INTERNAL: DTX OCX_BOT Data Enable Register
+ */
+union bdk_dtx_ocx_bot_enax
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ocx_bot_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_bot_enax_s cn; */
+};
+typedef union bdk_dtx_ocx_bot_enax bdk_dtx_ocx_bot_enax_t;
+
+static inline uint64_t BDK_DTX_OCX_BOT_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN88XX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_OCX_BOT_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fe198020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_BOT_ENAX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_OCX_BOT_ENAX(a) bdk_dtx_ocx_bot_enax_t
+#define bustype_BDK_DTX_OCX_BOT_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_BOT_ENAX(a) "DTX_OCX_BOT_ENAX"
+#define busnum_BDK_DTX_OCX_BOT_ENAX(a) (a)
+#define arguments_BDK_DTX_OCX_BOT_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_bot_sel#
+ *
+ * INTERNAL: DTX OCX_BOT Select Register
+ */
+union bdk_dtx_ocx_bot_selx
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ocx_bot_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_bot_selx_s cn; */
+};
+typedef union bdk_dtx_ocx_bot_selx bdk_dtx_ocx_bot_selx_t;
+
+static inline uint64_t BDK_DTX_OCX_BOT_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of instance [a] (a <= 1) on CN88XX. Out-of-range [a]
+   or an unsupported model falls through to __bdk_csr_fatal() (NOTE(review):
+   presumably fatal and non-returning -- confirm). */
+static inline uint64_t BDK_DTX_OCX_BOT_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fe198000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_BOT_SELX", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_OCX_BOT_SELX(a) bdk_dtx_ocx_bot_selx_t
+#define bustype_BDK_DTX_OCX_BOT_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_BOT_SELX(a) "DTX_OCX_BOT_SELX"
+#define busnum_BDK_DTX_OCX_BOT_SELX(a) (a)
+#define arguments_BDK_DTX_OCX_BOT_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_lnk#_bcst_rsp
+ *
+ * DTX OCX_LNK Control Register
+ */
+union bdk_dtx_ocx_lnkx_bcst_rsp
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ocx_lnkx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_lnkx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ocx_lnkx_bcst_rsp bdk_dtx_ocx_lnkx_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_OCX_LNKX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of link instance [a] (a <= 2, 0x8000-byte stride) on
+   CN88XX. Out-of-range [a] or an unsupported model falls through to
+   __bdk_csr_fatal() (NOTE(review): presumably fatal and non-returning --
+   confirm). */
+static inline uint64_t BDK_DTX_OCX_LNKX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e0fe180080ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_OCX_LNKX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_OCX_LNKX_BCST_RSP(a) bdk_dtx_ocx_lnkx_bcst_rsp_t
+#define bustype_BDK_DTX_OCX_LNKX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_LNKX_BCST_RSP(a) "DTX_OCX_LNKX_BCST_RSP"
+#define busnum_BDK_DTX_OCX_LNKX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_OCX_LNKX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_lnk#_ctl
+ *
+ * DTX OCX_LNK Control Register
+ */
+union bdk_dtx_ocx_lnkx_ctl
+{
+ uint64_t u; /* Whole register as a single 64-bit value. */
+ struct bdk_dtx_ocx_lnkx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_lnkx_ctl_s cn; */
+};
+typedef union bdk_dtx_ocx_lnkx_ctl bdk_dtx_ocx_lnkx_ctl_t;
+
+static inline uint64_t BDK_DTX_OCX_LNKX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+/* Physical CSR address of link instance [a] (a <= 2, 0x8000-byte stride) on
+   CN88XX. Out-of-range [a] or an unsupported model falls through to
+   __bdk_csr_fatal() (NOTE(review): presumably fatal and non-returning --
+   confirm). */
+static inline uint64_t BDK_DTX_OCX_LNKX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e0fe180060ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_OCX_LNKX_CTL", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macro family. */
+#define typedef_BDK_DTX_OCX_LNKX_CTL(a) bdk_dtx_ocx_lnkx_ctl_t
+#define bustype_BDK_DTX_OCX_LNKX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_LNKX_CTL(a) "DTX_OCX_LNKX_CTL"
+#define busnum_BDK_DTX_OCX_LNKX_CTL(a) (a)
+#define arguments_BDK_DTX_OCX_LNKX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_lnk#_dat#
+ *
+ * DTX OCX_LNK Raw Data Register
+ */
+union bdk_dtx_ocx_lnkx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_lnkx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_lnkx_datx_s cn; */
+};
+typedef union bdk_dtx_ocx_lnkx_datx bdk_dtx_ocx_lnkx_datx_t;
+
+static inline uint64_t BDK_DTX_OCX_LNKX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_LNKX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe180040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_LNKX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_LNKX_DATX(a,b) bdk_dtx_ocx_lnkx_datx_t
+#define bustype_BDK_DTX_OCX_LNKX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_LNKX_DATX(a,b) "DTX_OCX_LNKX_DATX"
+#define busnum_BDK_DTX_OCX_LNKX_DATX(a,b) (a)
+#define arguments_BDK_DTX_OCX_LNKX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_lnk#_ena#
+ *
+ * DTX OCX_LNK Data Enable Register
+ */
+union bdk_dtx_ocx_lnkx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_lnkx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_lnkx_enax_s cn; */
+};
+typedef union bdk_dtx_ocx_lnkx_enax bdk_dtx_ocx_lnkx_enax_t;
+
+static inline uint64_t BDK_DTX_OCX_LNKX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_LNKX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe180020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_LNKX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_LNKX_ENAX(a,b) bdk_dtx_ocx_lnkx_enax_t
+#define bustype_BDK_DTX_OCX_LNKX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_LNKX_ENAX(a,b) "DTX_OCX_LNKX_ENAX"
+#define busnum_BDK_DTX_OCX_LNKX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_OCX_LNKX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_lnk#_sel#
+ *
+ * DTX OCX_LNK Select Register
+ */
+union bdk_dtx_ocx_lnkx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_lnkx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_lnkx_selx_s cn; */
+};
+typedef union bdk_dtx_ocx_lnkx_selx bdk_dtx_ocx_lnkx_selx_t;
+
+static inline uint64_t BDK_DTX_OCX_LNKX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_LNKX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe180000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_LNKX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_LNKX_SELX(a,b) bdk_dtx_ocx_lnkx_selx_t
+#define bustype_BDK_DTX_OCX_LNKX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_LNKX_SELX(a,b) "DTX_OCX_LNKX_SELX"
+#define busnum_BDK_DTX_OCX_LNKX_SELX(a,b) (a)
+#define arguments_BDK_DTX_OCX_LNKX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_ole#_bcst_rsp
+ *
+ * DTX OCX_OLE Control Register
+ */
+union bdk_dtx_ocx_olex_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_olex_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_olex_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ocx_olex_bcst_rsp bdk_dtx_ocx_olex_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_OCX_OLEX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_OLEX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e0fe1a0080ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_OCX_OLEX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_OLEX_BCST_RSP(a) bdk_dtx_ocx_olex_bcst_rsp_t
+#define bustype_BDK_DTX_OCX_OLEX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_OLEX_BCST_RSP(a) "DTX_OCX_OLEX_BCST_RSP"
+#define busnum_BDK_DTX_OCX_OLEX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_OCX_OLEX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_ole#_ctl
+ *
+ * DTX OCX_OLE Control Register
+ */
+union bdk_dtx_ocx_olex_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_olex_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_olex_ctl_s cn; */
+};
+typedef union bdk_dtx_ocx_olex_ctl bdk_dtx_ocx_olex_ctl_t;
+
+static inline uint64_t BDK_DTX_OCX_OLEX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_OLEX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=2))
+ return 0x87e0fe1a0060ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_OCX_OLEX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_OLEX_CTL(a) bdk_dtx_ocx_olex_ctl_t
+#define bustype_BDK_DTX_OCX_OLEX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_OLEX_CTL(a) "DTX_OCX_OLEX_CTL"
+#define busnum_BDK_DTX_OCX_OLEX_CTL(a) (a)
+#define arguments_BDK_DTX_OCX_OLEX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_ole#_dat#
+ *
+ * DTX OCX_OLE Raw Data Register
+ */
+union bdk_dtx_ocx_olex_datx
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_olex_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_olex_datx_s cn; */
+};
+typedef union bdk_dtx_ocx_olex_datx bdk_dtx_ocx_olex_datx_t;
+
+static inline uint64_t BDK_DTX_OCX_OLEX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_OLEX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe1a0040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_OLEX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_OLEX_DATX(a,b) bdk_dtx_ocx_olex_datx_t
+#define bustype_BDK_DTX_OCX_OLEX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_OLEX_DATX(a,b) "DTX_OCX_OLEX_DATX"
+#define busnum_BDK_DTX_OCX_OLEX_DATX(a,b) (a)
+#define arguments_BDK_DTX_OCX_OLEX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_ole#_ena#
+ *
+ * DTX OCX_OLE Data Enable Register
+ */
+union bdk_dtx_ocx_olex_enax
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_olex_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_olex_enax_s cn; */
+};
+typedef union bdk_dtx_ocx_olex_enax bdk_dtx_ocx_olex_enax_t;
+
+static inline uint64_t BDK_DTX_OCX_OLEX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_OLEX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe1a0020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_OLEX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_OLEX_ENAX(a,b) bdk_dtx_ocx_olex_enax_t
+#define bustype_BDK_DTX_OCX_OLEX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_OLEX_ENAX(a,b) "DTX_OCX_OLEX_ENAX"
+#define busnum_BDK_DTX_OCX_OLEX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_OCX_OLEX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_ole#_sel#
+ *
+ * DTX OCX_OLE Select Register
+ */
+union bdk_dtx_ocx_olex_selx
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_olex_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_olex_selx_s cn; */
+};
+typedef union bdk_dtx_ocx_olex_selx bdk_dtx_ocx_olex_selx_t;
+
+static inline uint64_t BDK_DTX_OCX_OLEX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_OLEX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe1a0000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_OLEX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_OLEX_SELX(a,b) bdk_dtx_ocx_olex_selx_t
+#define bustype_BDK_DTX_OCX_OLEX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_OLEX_SELX(a,b) "DTX_OCX_OLEX_SELX"
+#define busnum_BDK_DTX_OCX_OLEX_SELX(a,b) (a)
+#define arguments_BDK_DTX_OCX_OLEX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_top_bcst_rsp
+ *
+ * DTX OCX_TOP Control Register
+ */
+union bdk_dtx_ocx_top_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_top_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_top_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ocx_top_bcst_rsp bdk_dtx_ocx_top_bcst_rsp_t;
+
+#define BDK_DTX_OCX_TOP_BCST_RSP BDK_DTX_OCX_TOP_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_OCX_TOP_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_TOP_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0fe088080ll;
+ __bdk_csr_fatal("DTX_OCX_TOP_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_TOP_BCST_RSP bdk_dtx_ocx_top_bcst_rsp_t
+#define bustype_BDK_DTX_OCX_TOP_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_TOP_BCST_RSP "DTX_OCX_TOP_BCST_RSP"
+#define busnum_BDK_DTX_OCX_TOP_BCST_RSP 0
+#define arguments_BDK_DTX_OCX_TOP_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_top_ctl
+ *
+ * DTX OCX_TOP Control Register
+ */
+union bdk_dtx_ocx_top_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_top_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_top_ctl_s cn; */
+};
+typedef union bdk_dtx_ocx_top_ctl bdk_dtx_ocx_top_ctl_t;
+
+#define BDK_DTX_OCX_TOP_CTL BDK_DTX_OCX_TOP_CTL_FUNC()
+static inline uint64_t BDK_DTX_OCX_TOP_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_TOP_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0fe088060ll;
+ __bdk_csr_fatal("DTX_OCX_TOP_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_TOP_CTL bdk_dtx_ocx_top_ctl_t
+#define bustype_BDK_DTX_OCX_TOP_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_TOP_CTL "DTX_OCX_TOP_CTL"
+#define busnum_BDK_DTX_OCX_TOP_CTL 0
+#define arguments_BDK_DTX_OCX_TOP_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_top_dat#
+ *
+ * DTX OCX_TOP Raw Data Register
+ */
+union bdk_dtx_ocx_top_datx
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_top_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_top_datx_s cn; */
+};
+typedef union bdk_dtx_ocx_top_datx bdk_dtx_ocx_top_datx_t;
+
+static inline uint64_t BDK_DTX_OCX_TOP_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_TOP_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fe088040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_TOP_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_TOP_DATX(a) bdk_dtx_ocx_top_datx_t
+#define bustype_BDK_DTX_OCX_TOP_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_TOP_DATX(a) "DTX_OCX_TOP_DATX"
+#define busnum_BDK_DTX_OCX_TOP_DATX(a) (a)
+#define arguments_BDK_DTX_OCX_TOP_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_top_ena#
+ *
+ * DTX OCX_TOP Data Enable Register
+ */
+union bdk_dtx_ocx_top_enax
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_top_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_top_enax_s cn; */
+};
+typedef union bdk_dtx_ocx_top_enax bdk_dtx_ocx_top_enax_t;
+
+static inline uint64_t BDK_DTX_OCX_TOP_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_TOP_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fe088020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_TOP_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_TOP_ENAX(a) bdk_dtx_ocx_top_enax_t
+#define bustype_BDK_DTX_OCX_TOP_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_TOP_ENAX(a) "DTX_OCX_TOP_ENAX"
+#define busnum_BDK_DTX_OCX_TOP_ENAX(a) (a)
+#define arguments_BDK_DTX_OCX_TOP_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ocx_top_sel#
+ *
+ * DTX OCX_TOP Select Register
+ */
+union bdk_dtx_ocx_top_selx
+{
+ uint64_t u;
+ struct bdk_dtx_ocx_top_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ocx_top_selx_s cn; */
+};
+typedef union bdk_dtx_ocx_top_selx bdk_dtx_ocx_top_selx_t;
+
+static inline uint64_t BDK_DTX_OCX_TOP_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_OCX_TOP_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fe088000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_OCX_TOP_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_OCX_TOP_SELX(a) bdk_dtx_ocx_top_selx_t
+#define bustype_BDK_DTX_OCX_TOP_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_OCX_TOP_SELX(a) "DTX_OCX_TOP_SELX"
+#define busnum_BDK_DTX_OCX_TOP_SELX(a) (a)
+#define arguments_BDK_DTX_OCX_TOP_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pem#_bcst_rsp
+ *
+ * DTX PEM Control Register
+ */
+union bdk_dtx_pemx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_pemx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pemx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_pemx_bcst_rsp bdk_dtx_pemx_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_PEMX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PEMX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0fe600080ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0fe600080ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0fe600080ll + 0x8000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0fef00080ll + 0x8000ll * ((a) & 0x7);
+ __bdk_csr_fatal("DTX_PEMX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PEMX_BCST_RSP(a) bdk_dtx_pemx_bcst_rsp_t
+#define bustype_BDK_DTX_PEMX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PEMX_BCST_RSP(a) "DTX_PEMX_BCST_RSP"
+#define busnum_BDK_DTX_PEMX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_PEMX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pem#_ctl
+ *
+ * DTX PEM Control Register
+ */
+union bdk_dtx_pemx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_pemx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pemx_ctl_s cn; */
+};
+typedef union bdk_dtx_pemx_ctl bdk_dtx_pemx_ctl_t;
+
+static inline uint64_t BDK_DTX_PEMX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PEMX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x87e0fe600060ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0fe600060ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x87e0fe600060ll + 0x8000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0fef00060ll + 0x8000ll * ((a) & 0x7);
+ __bdk_csr_fatal("DTX_PEMX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PEMX_CTL(a) bdk_dtx_pemx_ctl_t
+#define bustype_BDK_DTX_PEMX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PEMX_CTL(a) "DTX_PEMX_CTL"
+#define busnum_BDK_DTX_PEMX_CTL(a) (a)
+#define arguments_BDK_DTX_PEMX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pem#_dat#
+ *
+ * DTX PEM Raw Data Register
+ */
+union bdk_dtx_pemx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_pemx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pemx_datx_s cn; */
+};
+typedef union bdk_dtx_pemx_datx bdk_dtx_pemx_datx_t;
+
+static inline uint64_t BDK_DTX_PEMX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PEMX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe600040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe600040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=5) && (b<=1)))
+ return 0x87e0fe600040ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=1)))
+ return 0x87e0fef00040ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_PEMX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_PEMX_DATX(a,b) bdk_dtx_pemx_datx_t
+#define bustype_BDK_DTX_PEMX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PEMX_DATX(a,b) "DTX_PEMX_DATX"
+#define busnum_BDK_DTX_PEMX_DATX(a,b) (a)
+#define arguments_BDK_DTX_PEMX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_pem#_ena#
+ *
+ * DTX PEM Data Enable Register
+ */
+union bdk_dtx_pemx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_pemx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pemx_enax_s cn; */
+};
+typedef union bdk_dtx_pemx_enax bdk_dtx_pemx_enax_t;
+
+static inline uint64_t BDK_DTX_PEMX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PEMX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe600020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe600020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=5) && (b<=1)))
+ return 0x87e0fe600020ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=1)))
+ return 0x87e0fef00020ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_PEMX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_PEMX_ENAX(a,b) bdk_dtx_pemx_enax_t
+#define bustype_BDK_DTX_PEMX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PEMX_ENAX(a,b) "DTX_PEMX_ENAX"
+#define busnum_BDK_DTX_PEMX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_PEMX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_pem#_sel#
+ *
+ * DTX PEM Select Register
+ */
+union bdk_dtx_pemx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_pemx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pemx_selx_s cn; */
+};
+typedef union bdk_dtx_pemx_selx bdk_dtx_pemx_selx_t;
+
+static inline uint64_t BDK_DTX_PEMX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PEMX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe600000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe600000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=5) && (b<=1)))
+ return 0x87e0fe600000ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=1)))
+ return 0x87e0fef00000ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_PEMX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_PEMX_SELX(a,b) bdk_dtx_pemx_selx_t
+#define bustype_BDK_DTX_PEMX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PEMX_SELX(a,b) "DTX_PEMX_SELX"
+#define busnum_BDK_DTX_PEMX_SELX(a,b) (a)
+#define arguments_BDK_DTX_PEMX_SELX(a,b) (a),(b),-1,-1
+
+/* NOTE(review): The definitions below follow the BDK's generated CSR
+ * pattern: a union giving big-/little-endian bitfield views of one
+ * 64-bit register, a model-gated address function, and the
+ * typedef_/bustype_/basename_/busnum_/arguments_ macros consumed by the
+ * BDK_CSR_* accessors. Presumably machine-generated -- prefer
+ * regenerating over hand-editing. */
+/**
+ * Register (RSL) dtx_pki_bcst_rsp
+ *
+ * DTX PKI Control Register
+ */
+union bdk_dtx_pki_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_pki_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pki_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_pki_bcst_rsp bdk_dtx_pki_bcst_rsp_t;
+
+#define BDK_DTX_PKI_BCST_RSP BDK_DTX_PKI_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_PKI_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKI_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feb60080ll;
+ /* __bdk_csr_fatal() is reached only on unsupported models; it is
+ * presumably noreturn, since this non-void function would otherwise
+ * fall off the end -- TODO confirm in bdk-csr.h. */
+ __bdk_csr_fatal("DTX_PKI_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKI_BCST_RSP bdk_dtx_pki_bcst_rsp_t
+#define bustype_BDK_DTX_PKI_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKI_BCST_RSP "DTX_PKI_BCST_RSP"
+#define busnum_BDK_DTX_PKI_BCST_RSP 0
+#define arguments_BDK_DTX_PKI_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pki_ctl
+ *
+ * DTX PKI Control Register
+ */
+union bdk_dtx_pki_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_pki_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pki_ctl_s cn; */
+};
+typedef union bdk_dtx_pki_ctl bdk_dtx_pki_ctl_t;
+
+#define BDK_DTX_PKI_CTL BDK_DTX_PKI_CTL_FUNC()
+static inline uint64_t BDK_DTX_PKI_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKI_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feb60060ll;
+ __bdk_csr_fatal("DTX_PKI_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKI_CTL bdk_dtx_pki_ctl_t
+#define bustype_BDK_DTX_PKI_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKI_CTL "DTX_PKI_CTL"
+#define busnum_BDK_DTX_PKI_CTL 0
+#define arguments_BDK_DTX_PKI_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pki_dat#
+ *
+ * DTX PKI Raw Data Register
+ */
+union bdk_dtx_pki_datx
+{
+ uint64_t u;
+ struct bdk_dtx_pki_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pki_datx_s cn; */
+};
+typedef union bdk_dtx_pki_datx bdk_dtx_pki_datx_t;
+
+static inline uint64_t BDK_DTX_PKI_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKI_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb60040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PKI_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKI_DATX(a) bdk_dtx_pki_datx_t
+#define bustype_BDK_DTX_PKI_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKI_DATX(a) "DTX_PKI_DATX"
+#define busnum_BDK_DTX_PKI_DATX(a) (a)
+#define arguments_BDK_DTX_PKI_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pki_ena#
+ *
+ * DTX PKI Data Enable Register
+ */
+union bdk_dtx_pki_enax
+{
+ uint64_t u;
+ struct bdk_dtx_pki_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pki_enax_s cn; */
+};
+typedef union bdk_dtx_pki_enax bdk_dtx_pki_enax_t;
+
+static inline uint64_t BDK_DTX_PKI_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKI_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb60020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PKI_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKI_ENAX(a) bdk_dtx_pki_enax_t
+#define bustype_BDK_DTX_PKI_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKI_ENAX(a) "DTX_PKI_ENAX"
+#define busnum_BDK_DTX_PKI_ENAX(a) (a)
+#define arguments_BDK_DTX_PKI_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pki_sel#
+ *
+ * DTX PKI Select Register
+ */
+union bdk_dtx_pki_selx
+{
+ uint64_t u;
+ struct bdk_dtx_pki_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pki_selx_s cn; */
+};
+typedef union bdk_dtx_pki_selx bdk_dtx_pki_selx_t;
+
+static inline uint64_t BDK_DTX_PKI_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKI_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb60000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PKI_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKI_SELX(a) bdk_dtx_pki_selx_t
+#define bustype_BDK_DTX_PKI_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKI_SELX(a) "DTX_PKI_SELX"
+#define busnum_BDK_DTX_PKI_SELX(a) (a)
+#define arguments_BDK_DTX_PKI_SELX(a) (a),-1,-1,-1
+
+/* NOTE(review): DTX debug-transmitter CSRs for the PKO block, following
+ * the BDK's generated pattern (endian-mirrored bitfield union, model-gated
+ * address function, accessor macros for BDK_CSR_*). Presumably
+ * machine-generated -- prefer regenerating over hand-editing. */
+/**
+ * Register (RSL) dtx_pko_bcst_rsp
+ *
+ * DTX PKO Control Register
+ */
+union bdk_dtx_pko_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_pko_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pko_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_pko_bcst_rsp bdk_dtx_pko_bcst_rsp_t;
+
+#define BDK_DTX_PKO_BCST_RSP BDK_DTX_PKO_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_PKO_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKO_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feaa0080ll;
+ __bdk_csr_fatal("DTX_PKO_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKO_BCST_RSP bdk_dtx_pko_bcst_rsp_t
+#define bustype_BDK_DTX_PKO_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKO_BCST_RSP "DTX_PKO_BCST_RSP"
+#define busnum_BDK_DTX_PKO_BCST_RSP 0
+#define arguments_BDK_DTX_PKO_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pko_ctl
+ *
+ * DTX PKO Control Register
+ */
+union bdk_dtx_pko_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_pko_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pko_ctl_s cn; */
+};
+typedef union bdk_dtx_pko_ctl bdk_dtx_pko_ctl_t;
+
+#define BDK_DTX_PKO_CTL BDK_DTX_PKO_CTL_FUNC()
+static inline uint64_t BDK_DTX_PKO_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKO_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feaa0060ll;
+ __bdk_csr_fatal("DTX_PKO_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKO_CTL bdk_dtx_pko_ctl_t
+#define bustype_BDK_DTX_PKO_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKO_CTL "DTX_PKO_CTL"
+#define busnum_BDK_DTX_PKO_CTL 0
+#define arguments_BDK_DTX_PKO_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pko_dat#
+ *
+ * DTX PKO Raw Data Register
+ */
+union bdk_dtx_pko_datx
+{
+ uint64_t u;
+ struct bdk_dtx_pko_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pko_datx_s cn; */
+};
+typedef union bdk_dtx_pko_datx bdk_dtx_pko_datx_t;
+
+static inline uint64_t BDK_DTX_PKO_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKO_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feaa0040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PKO_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKO_DATX(a) bdk_dtx_pko_datx_t
+#define bustype_BDK_DTX_PKO_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKO_DATX(a) "DTX_PKO_DATX"
+#define busnum_BDK_DTX_PKO_DATX(a) (a)
+#define arguments_BDK_DTX_PKO_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pko_ena#
+ *
+ * DTX PKO Data Enable Register
+ */
+union bdk_dtx_pko_enax
+{
+ uint64_t u;
+ struct bdk_dtx_pko_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pko_enax_s cn; */
+};
+typedef union bdk_dtx_pko_enax bdk_dtx_pko_enax_t;
+
+static inline uint64_t BDK_DTX_PKO_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKO_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feaa0020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PKO_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKO_ENAX(a) bdk_dtx_pko_enax_t
+#define bustype_BDK_DTX_PKO_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKO_ENAX(a) "DTX_PKO_ENAX"
+#define busnum_BDK_DTX_PKO_ENAX(a) (a)
+#define arguments_BDK_DTX_PKO_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_pko_sel#
+ *
+ * DTX PKO Select Register
+ */
+union bdk_dtx_pko_selx
+{
+ uint64_t u;
+ struct bdk_dtx_pko_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_pko_selx_s cn; */
+};
+typedef union bdk_dtx_pko_selx bdk_dtx_pko_selx_t;
+
+static inline uint64_t BDK_DTX_PKO_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PKO_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feaa0000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PKO_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PKO_SELX(a) bdk_dtx_pko_selx_t
+#define bustype_BDK_DTX_PKO_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PKO_SELX(a) "DTX_PKO_SELX"
+#define busnum_BDK_DTX_PKO_SELX(a) (a)
+#define arguments_BDK_DTX_PKO_SELX(a) (a),-1,-1,-1
+
+/* NOTE(review): DTX debug-transmitter CSRs for the PTP block. Unlike the
+ * PKI/PKO/RAD sets in this file, these are gated on CAVIUM_CN9XXX only.
+ * Same generated pattern (endian-mirrored bitfield union, model-gated
+ * address function, accessor macros for BDK_CSR_*); prefer regenerating
+ * over hand-editing. */
+/**
+ * Register (RSL) dtx_ptp_bcst_rsp
+ *
+ * DTX PTP Control Register
+ */
+union bdk_dtx_ptp_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_ptp_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ptp_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_ptp_bcst_rsp bdk_dtx_ptp_bcst_rsp_t;
+
+#define BDK_DTX_PTP_BCST_RSP BDK_DTX_PTP_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_PTP_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PTP_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0fe838080ll;
+ __bdk_csr_fatal("DTX_PTP_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PTP_BCST_RSP bdk_dtx_ptp_bcst_rsp_t
+#define bustype_BDK_DTX_PTP_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PTP_BCST_RSP "DTX_PTP_BCST_RSP"
+#define busnum_BDK_DTX_PTP_BCST_RSP 0
+#define arguments_BDK_DTX_PTP_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ptp_ctl
+ *
+ * DTX PTP Control Register
+ */
+union bdk_dtx_ptp_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_ptp_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ptp_ctl_s cn; */
+};
+typedef union bdk_dtx_ptp_ctl bdk_dtx_ptp_ctl_t;
+
+#define BDK_DTX_PTP_CTL BDK_DTX_PTP_CTL_FUNC()
+static inline uint64_t BDK_DTX_PTP_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PTP_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0fe838060ll;
+ __bdk_csr_fatal("DTX_PTP_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PTP_CTL bdk_dtx_ptp_ctl_t
+#define bustype_BDK_DTX_PTP_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PTP_CTL "DTX_PTP_CTL"
+#define busnum_BDK_DTX_PTP_CTL 0
+#define arguments_BDK_DTX_PTP_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ptp_dat#
+ *
+ * DTX PTP Raw Data Register
+ */
+union bdk_dtx_ptp_datx
+{
+ uint64_t u;
+ struct bdk_dtx_ptp_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ptp_datx_s cn; */
+};
+typedef union bdk_dtx_ptp_datx bdk_dtx_ptp_datx_t;
+
+static inline uint64_t BDK_DTX_PTP_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PTP_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe838040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PTP_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PTP_DATX(a) bdk_dtx_ptp_datx_t
+#define bustype_BDK_DTX_PTP_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PTP_DATX(a) "DTX_PTP_DATX"
+#define busnum_BDK_DTX_PTP_DATX(a) (a)
+#define arguments_BDK_DTX_PTP_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ptp_ena#
+ *
+ * DTX PTP Data Enable Register
+ */
+union bdk_dtx_ptp_enax
+{
+ uint64_t u;
+ struct bdk_dtx_ptp_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ptp_enax_s cn; */
+};
+typedef union bdk_dtx_ptp_enax bdk_dtx_ptp_enax_t;
+
+static inline uint64_t BDK_DTX_PTP_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PTP_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe838020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PTP_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PTP_ENAX(a) bdk_dtx_ptp_enax_t
+#define bustype_BDK_DTX_PTP_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PTP_ENAX(a) "DTX_PTP_ENAX"
+#define busnum_BDK_DTX_PTP_ENAX(a) (a)
+#define arguments_BDK_DTX_PTP_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_ptp_sel#
+ *
+ * DTX PTP Select Register
+ */
+union bdk_dtx_ptp_selx
+{
+ uint64_t u;
+ struct bdk_dtx_ptp_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_ptp_selx_s cn; */
+};
+typedef union bdk_dtx_ptp_selx bdk_dtx_ptp_selx_t;
+
+static inline uint64_t BDK_DTX_PTP_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_PTP_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0fe838000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_PTP_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_PTP_SELX(a) bdk_dtx_ptp_selx_t
+#define bustype_BDK_DTX_PTP_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_PTP_SELX(a) "DTX_PTP_SELX"
+#define busnum_BDK_DTX_PTP_SELX(a) (a)
+#define arguments_BDK_DTX_PTP_SELX(a) (a),-1,-1,-1
+
+/* NOTE(review): DTX debug-transmitter CSRs for the RAD block. These exist
+ * on both CN83XX and CN88XX (hence the two model checks per address
+ * function, both returning the same physical address). Same generated
+ * pattern as the rest of this file; prefer regenerating over
+ * hand-editing. */
+/**
+ * Register (RSL) dtx_rad_bcst_rsp
+ *
+ * DTX RAD Control Register
+ */
+union bdk_dtx_rad_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_rad_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_rad_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_rad_bcst_rsp bdk_dtx_rad_bcst_rsp_t;
+
+#define BDK_DTX_RAD_BCST_RSP BDK_DTX_RAD_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_RAD_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RAD_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feb80080ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0feb80080ll;
+ __bdk_csr_fatal("DTX_RAD_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RAD_BCST_RSP bdk_dtx_rad_bcst_rsp_t
+#define bustype_BDK_DTX_RAD_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RAD_BCST_RSP "DTX_RAD_BCST_RSP"
+#define busnum_BDK_DTX_RAD_BCST_RSP 0
+#define arguments_BDK_DTX_RAD_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rad_ctl
+ *
+ * DTX RAD Control Register
+ */
+union bdk_dtx_rad_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_rad_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_rad_ctl_s cn; */
+};
+typedef union bdk_dtx_rad_ctl bdk_dtx_rad_ctl_t;
+
+#define BDK_DTX_RAD_CTL BDK_DTX_RAD_CTL_FUNC()
+static inline uint64_t BDK_DTX_RAD_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RAD_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feb80060ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0feb80060ll;
+ __bdk_csr_fatal("DTX_RAD_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RAD_CTL bdk_dtx_rad_ctl_t
+#define bustype_BDK_DTX_RAD_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RAD_CTL "DTX_RAD_CTL"
+#define busnum_BDK_DTX_RAD_CTL 0
+#define arguments_BDK_DTX_RAD_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rad_dat#
+ *
+ * DTX RAD Raw Data Register
+ */
+union bdk_dtx_rad_datx
+{
+ uint64_t u;
+ struct bdk_dtx_rad_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_rad_datx_s cn; */
+};
+typedef union bdk_dtx_rad_datx bdk_dtx_rad_datx_t;
+
+static inline uint64_t BDK_DTX_RAD_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RAD_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb80040ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0feb80040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_RAD_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RAD_DATX(a) bdk_dtx_rad_datx_t
+#define bustype_BDK_DTX_RAD_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RAD_DATX(a) "DTX_RAD_DATX"
+#define busnum_BDK_DTX_RAD_DATX(a) (a)
+#define arguments_BDK_DTX_RAD_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rad_ena#
+ *
+ * DTX RAD Data Enable Register
+ */
+union bdk_dtx_rad_enax
+{
+ uint64_t u;
+ struct bdk_dtx_rad_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_rad_enax_s cn; */
+};
+typedef union bdk_dtx_rad_enax bdk_dtx_rad_enax_t;
+
+static inline uint64_t BDK_DTX_RAD_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RAD_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb80020ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0feb80020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_RAD_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RAD_ENAX(a) bdk_dtx_rad_enax_t
+#define bustype_BDK_DTX_RAD_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RAD_ENAX(a) "DTX_RAD_ENAX"
+#define busnum_BDK_DTX_RAD_ENAX(a) (a)
+#define arguments_BDK_DTX_RAD_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rad_sel#
+ *
+ * DTX RAD Select Register
+ */
+union bdk_dtx_rad_selx
+{
+ uint64_t u;
+ struct bdk_dtx_rad_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_rad_selx_s cn; */
+};
+typedef union bdk_dtx_rad_selx bdk_dtx_rad_selx_t;
+
+static inline uint64_t BDK_DTX_RAD_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RAD_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb80000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0feb80000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_RAD_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RAD_SELX(a) bdk_dtx_rad_selx_t
+#define bustype_BDK_DTX_RAD_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RAD_SELX(a) "DTX_RAD_SELX"
+#define busnum_BDK_DTX_RAD_SELX(a) (a)
+#define arguments_BDK_DTX_RAD_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rgx#_bcst_rsp
+ *
+ * DTX RGX Control Register
+ */
+union bdk_dtx_rgxx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_rgxx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_rgxx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_rgxx_bcst_rsp bdk_dtx_rgxx_bcst_rsp_t;
+
+static inline uint64_t BDK_DTX_RGXX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RGXX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe740080ll + 0x8000ll * ((a) & 0x0);
+ __bdk_csr_fatal("DTX_RGXX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RGXX_BCST_RSP(a) bdk_dtx_rgxx_bcst_rsp_t
+#define bustype_BDK_DTX_RGXX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RGXX_BCST_RSP(a) "DTX_RGXX_BCST_RSP"
+#define busnum_BDK_DTX_RGXX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_RGXX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rgx#_ctl
+ *
+ * DTX RGX Control Register
+ */
+union bdk_dtx_rgxx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_rgxx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_rgxx_ctl_s cn; */
+};
+typedef union bdk_dtx_rgxx_ctl bdk_dtx_rgxx_ctl_t;
+
+static inline uint64_t BDK_DTX_RGXX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RGXX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0fe740060ll + 0x8000ll * ((a) & 0x0);
+ __bdk_csr_fatal("DTX_RGXX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RGXX_CTL(a) bdk_dtx_rgxx_ctl_t
+#define bustype_BDK_DTX_RGXX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RGXX_CTL(a) "DTX_RGXX_CTL"
+#define busnum_BDK_DTX_RGXX_CTL(a) (a)
+#define arguments_BDK_DTX_RGXX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rgx#_dat#
+ *
+ * DTX RGX Raw Data Register
+ */
+union bdk_dtx_rgxx_datx
+{
+    uint64_t u;
+    struct bdk_dtx_rgxx_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rgxx_datx_s cn; */
+};
+typedef union bdk_dtx_rgxx_datx bdk_dtx_rgxx_datx_t;
+
+/* Physical CSR address of DTX_RGX(a)_DAT(b).  Valid only on CN81XX with
+   a==0 and b<=1; anything else is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RGXX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RGXX_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+        return 0x87e0fe740040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_RGXX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_RGXX_DATX(a,b) bdk_dtx_rgxx_datx_t
+#define bustype_BDK_DTX_RGXX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RGXX_DATX(a,b) "DTX_RGXX_DATX"
+#define busnum_BDK_DTX_RGXX_DATX(a,b) (a)
+#define arguments_BDK_DTX_RGXX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_rgx#_ena#
+ *
+ * DTX RGX Data Enable Register
+ */
+union bdk_dtx_rgxx_enax
+{
+    uint64_t u;
+    struct bdk_dtx_rgxx_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rgxx_enax_s cn; */
+};
+typedef union bdk_dtx_rgxx_enax bdk_dtx_rgxx_enax_t;
+
+/* Physical CSR address of DTX_RGX(a)_ENA(b).  Valid only on CN81XX with
+   a==0 and b<=1; anything else is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RGXX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RGXX_ENAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+        return 0x87e0fe740020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_RGXX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_RGXX_ENAX(a,b) bdk_dtx_rgxx_enax_t
+#define bustype_BDK_DTX_RGXX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RGXX_ENAX(a,b) "DTX_RGXX_ENAX"
+#define busnum_BDK_DTX_RGXX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_RGXX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_rgx#_sel#
+ *
+ * DTX RGX Select Register
+ */
+union bdk_dtx_rgxx_selx
+{
+    uint64_t u;
+    struct bdk_dtx_rgxx_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rgxx_selx_s cn; */
+};
+typedef union bdk_dtx_rgxx_selx bdk_dtx_rgxx_selx_t;
+
+/* Physical CSR address of DTX_RGX(a)_SEL(b).  Valid only on CN81XX with
+   a==0 and b<=1; anything else is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RGXX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RGXX_SELX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+        return 0x87e0fe740000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_RGXX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_RGXX_SELX(a,b) bdk_dtx_rgxx_selx_t
+#define bustype_BDK_DTX_RGXX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RGXX_SELX(a,b) "DTX_RGXX_SELX"
+#define busnum_BDK_DTX_RGXX_SELX(a,b) (a)
+#define arguments_BDK_DTX_RGXX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_rnm_bcst_rsp
+ *
+ * DTX RNM Control Register
+ */
+union bdk_dtx_rnm_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_rnm_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rnm_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_rnm_bcst_rsp bdk_dtx_rnm_bcst_rsp_t;
+
+#define BDK_DTX_RNM_BCST_RSP BDK_DTX_RNM_BCST_RSP_FUNC()
+/* Single-instance register: the CSR physical address is a fixed constant. */
+static inline uint64_t BDK_DTX_RNM_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RNM_BCST_RSP_FUNC(void)
+{
+    return 0x87e0fe200080ll;
+}
+
+#define typedef_BDK_DTX_RNM_BCST_RSP bdk_dtx_rnm_bcst_rsp_t
+#define bustype_BDK_DTX_RNM_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RNM_BCST_RSP "DTX_RNM_BCST_RSP"
+#define busnum_BDK_DTX_RNM_BCST_RSP 0
+#define arguments_BDK_DTX_RNM_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rnm_ctl
+ *
+ * DTX RNM Control Register
+ */
+/* NOTE(review): the "DTX_MIO_ENA(0..1)" wording in [ECHOEN] below appears in
+   every DTX block and looks like generator boilerplate; for this block the
+   enable registers are presumably DTX_RNM_ENA(0..1) -- confirm with the HRM. */
+union bdk_dtx_rnm_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_rnm_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rnm_ctl_s cn; */
+};
+typedef union bdk_dtx_rnm_ctl bdk_dtx_rnm_ctl_t;
+
+#define BDK_DTX_RNM_CTL BDK_DTX_RNM_CTL_FUNC()
+/* Single-instance register: the CSR physical address is a fixed constant. */
+static inline uint64_t BDK_DTX_RNM_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RNM_CTL_FUNC(void)
+{
+    return 0x87e0fe200060ll;
+}
+
+#define typedef_BDK_DTX_RNM_CTL bdk_dtx_rnm_ctl_t
+#define bustype_BDK_DTX_RNM_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RNM_CTL "DTX_RNM_CTL"
+#define busnum_BDK_DTX_RNM_CTL 0
+#define arguments_BDK_DTX_RNM_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rnm_dat#
+ *
+ * DTX RNM Raw Data Register
+ */
+union bdk_dtx_rnm_datx
+{
+    uint64_t u;
+    struct bdk_dtx_rnm_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rnm_datx_s cn; */
+};
+typedef union bdk_dtx_rnm_datx bdk_dtx_rnm_datx_t;
+
+/* Physical CSR address of DTX_RNM_DAT(a) for a<=1 (all models); out-of-range
+   indices are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RNM_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RNM_DATX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe200040ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_RNM_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RNM_DATX(a) bdk_dtx_rnm_datx_t
+#define bustype_BDK_DTX_RNM_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RNM_DATX(a) "DTX_RNM_DATX"
+#define busnum_BDK_DTX_RNM_DATX(a) (a)
+#define arguments_BDK_DTX_RNM_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rnm_ena#
+ *
+ * DTX RNM Data Enable Register
+ */
+union bdk_dtx_rnm_enax
+{
+    uint64_t u;
+    struct bdk_dtx_rnm_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rnm_enax_s cn; */
+};
+typedef union bdk_dtx_rnm_enax bdk_dtx_rnm_enax_t;
+
+/* Physical CSR address of DTX_RNM_ENA(a) for a<=1 (all models); out-of-range
+   indices are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RNM_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RNM_ENAX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe200020ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_RNM_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RNM_ENAX(a) bdk_dtx_rnm_enax_t
+#define bustype_BDK_DTX_RNM_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RNM_ENAX(a) "DTX_RNM_ENAX"
+#define busnum_BDK_DTX_RNM_ENAX(a) (a)
+#define arguments_BDK_DTX_RNM_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rnm_sel#
+ *
+ * DTX RNM Select Register
+ */
+union bdk_dtx_rnm_selx
+{
+    uint64_t u;
+    struct bdk_dtx_rnm_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rnm_selx_s cn; */
+};
+typedef union bdk_dtx_rnm_selx bdk_dtx_rnm_selx_t;
+
+/* Physical CSR address of DTX_RNM_SEL(a) for a<=1 (all models); out-of-range
+   indices are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RNM_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RNM_SELX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe200000ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_RNM_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RNM_SELX(a) bdk_dtx_rnm_selx_t
+#define bustype_BDK_DTX_RNM_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RNM_SELX(a) "DTX_RNM_SELX"
+#define busnum_BDK_DTX_RNM_SELX(a) (a)
+#define arguments_BDK_DTX_RNM_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rst_bcst_rsp
+ *
+ * DTX RST Control Register
+ */
+union bdk_dtx_rst_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_rst_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rst_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_rst_bcst_rsp bdk_dtx_rst_bcst_rsp_t;
+
+#define BDK_DTX_RST_BCST_RSP BDK_DTX_RST_BCST_RSP_FUNC()
+/* Single-instance register: the CSR physical address is a fixed constant. */
+static inline uint64_t BDK_DTX_RST_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RST_BCST_RSP_FUNC(void)
+{
+    return 0x87e0fe030080ll;
+}
+
+#define typedef_BDK_DTX_RST_BCST_RSP bdk_dtx_rst_bcst_rsp_t
+#define bustype_BDK_DTX_RST_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RST_BCST_RSP "DTX_RST_BCST_RSP"
+#define busnum_BDK_DTX_RST_BCST_RSP 0
+#define arguments_BDK_DTX_RST_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rst_ctl
+ *
+ * DTX RST Control Register
+ */
+/* NOTE(review): the "DTX_MIO_ENA(0..1)" wording in [ECHOEN] below appears in
+   every DTX block and looks like generator boilerplate; for this block the
+   enable registers are presumably DTX_RST_ENA(0..1) -- confirm with the HRM. */
+union bdk_dtx_rst_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_rst_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rst_ctl_s cn; */
+};
+typedef union bdk_dtx_rst_ctl bdk_dtx_rst_ctl_t;
+
+#define BDK_DTX_RST_CTL BDK_DTX_RST_CTL_FUNC()
+/* Single-instance register: the CSR physical address is a fixed constant. */
+static inline uint64_t BDK_DTX_RST_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RST_CTL_FUNC(void)
+{
+    return 0x87e0fe030060ll;
+}
+
+#define typedef_BDK_DTX_RST_CTL bdk_dtx_rst_ctl_t
+#define bustype_BDK_DTX_RST_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RST_CTL "DTX_RST_CTL"
+#define busnum_BDK_DTX_RST_CTL 0
+#define arguments_BDK_DTX_RST_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rst_dat#
+ *
+ * DTX RST Raw Data Register
+ */
+union bdk_dtx_rst_datx
+{
+    uint64_t u;
+    struct bdk_dtx_rst_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rst_datx_s cn; */
+};
+typedef union bdk_dtx_rst_datx bdk_dtx_rst_datx_t;
+
+/* Physical CSR address of DTX_RST_DAT(a) for a<=1 (all models); out-of-range
+   indices are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RST_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RST_DATX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe030040ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_RST_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RST_DATX(a) bdk_dtx_rst_datx_t
+#define bustype_BDK_DTX_RST_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RST_DATX(a) "DTX_RST_DATX"
+#define busnum_BDK_DTX_RST_DATX(a) (a)
+#define arguments_BDK_DTX_RST_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rst_ena#
+ *
+ * DTX RST Data Enable Register
+ */
+union bdk_dtx_rst_enax
+{
+    uint64_t u;
+    struct bdk_dtx_rst_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rst_enax_s cn; */
+};
+typedef union bdk_dtx_rst_enax bdk_dtx_rst_enax_t;
+
+/* Physical CSR address of DTX_RST_ENA(a) for a<=1 (all models); out-of-range
+   indices are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RST_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RST_ENAX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe030020ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_RST_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RST_ENAX(a) bdk_dtx_rst_enax_t
+#define bustype_BDK_DTX_RST_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RST_ENAX(a) "DTX_RST_ENAX"
+#define busnum_BDK_DTX_RST_ENAX(a) (a)
+#define arguments_BDK_DTX_RST_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_rst_sel#
+ *
+ * DTX RST Select Register
+ */
+union bdk_dtx_rst_selx
+{
+    uint64_t u;
+    struct bdk_dtx_rst_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_rst_selx_s cn; */
+};
+typedef union bdk_dtx_rst_selx bdk_dtx_rst_selx_t;
+
+/* Physical CSR address of DTX_RST_SEL(a) for a<=1 (all models); out-of-range
+   indices are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_RST_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_RST_SELX(unsigned long a)
+{
+    if (a<=1)
+        return 0x87e0fe030000ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_RST_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_RST_SELX(a) bdk_dtx_rst_selx_t
+#define bustype_BDK_DTX_RST_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_RST_SELX(a) "DTX_RST_SELX"
+#define busnum_BDK_DTX_RST_SELX(a) (a)
+#define arguments_BDK_DTX_RST_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sata#_bcst_rsp
+ *
+ * DTX SATA Control Register
+ */
+union bdk_dtx_satax_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_satax_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_satax_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_satax_bcst_rsp bdk_dtx_satax_bcst_rsp_t;
+
+/* Physical CSR address of DTX_SATA(a)_BCST_RSP.  The per-model range checks
+   mirror each chip's SATA instance count (CN81XX: 2, CN83XX: 6, CN88XX: 16,
+   CN9XXX: 4); the AND mask bounds the index used in the address arithmetic.
+   Invalid (model, index) pairs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_SATAX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SATAX_BCST_RSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0fe880080ll + 0x8000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x87e0fe880080ll + 0x8000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x87e0fe880080ll + 0x8000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x87e0fe880080ll + 0x8000ll * ((a) & 0x3);
+    __bdk_csr_fatal("DTX_SATAX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SATAX_BCST_RSP(a) bdk_dtx_satax_bcst_rsp_t
+#define bustype_BDK_DTX_SATAX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SATAX_BCST_RSP(a) "DTX_SATAX_BCST_RSP"
+#define busnum_BDK_DTX_SATAX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_SATAX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sata#_ctl
+ *
+ * DTX SATA Control Register
+ */
+/* NOTE(review): the "DTX_MIO_ENA(0..1)" wording in [ECHOEN] below appears in
+   every DTX block and looks like generator boilerplate; for this block the
+   enable registers are presumably DTX_SATA(a)_ENA(0..1) -- confirm with HRM. */
+union bdk_dtx_satax_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_satax_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_satax_ctl_s cn; */
+};
+typedef union bdk_dtx_satax_ctl bdk_dtx_satax_ctl_t;
+
+/* Physical CSR address of DTX_SATA(a)_CTL.  Per-model range checks mirror
+   each chip's SATA instance count; invalid pairs go to __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_SATAX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SATAX_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0fe880060ll + 0x8000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x87e0fe880060ll + 0x8000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x87e0fe880060ll + 0x8000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x87e0fe880060ll + 0x8000ll * ((a) & 0x3);
+    __bdk_csr_fatal("DTX_SATAX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SATAX_CTL(a) bdk_dtx_satax_ctl_t
+#define bustype_BDK_DTX_SATAX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SATAX_CTL(a) "DTX_SATAX_CTL"
+#define busnum_BDK_DTX_SATAX_CTL(a) (a)
+#define arguments_BDK_DTX_SATAX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sata#_dat#
+ *
+ * DTX SATA Raw Data Register
+ */
+union bdk_dtx_satax_datx
+{
+    uint64_t u;
+    struct bdk_dtx_satax_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_satax_datx_s cn; */
+};
+typedef union bdk_dtx_satax_datx bdk_dtx_satax_datx_t;
+
+/* Physical CSR address of DTX_SATA(a)_DAT(b), b<=1 on all models; per-model
+   checks bound a by the chip's SATA instance count.  Invalid combinations
+   are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_SATAX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SATAX_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe880040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=5) && (b<=1)))
+        return 0x87e0fe880040ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=15) && (b<=1)))
+        return 0x87e0fe880040ll + 0x8000ll * ((a) & 0xf) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe880040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_SATAX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SATAX_DATX(a,b) bdk_dtx_satax_datx_t
+#define bustype_BDK_DTX_SATAX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SATAX_DATX(a,b) "DTX_SATAX_DATX"
+#define busnum_BDK_DTX_SATAX_DATX(a,b) (a)
+#define arguments_BDK_DTX_SATAX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_sata#_ena#
+ *
+ * DTX SATA Data Enable Register
+ */
+union bdk_dtx_satax_enax
+{
+    uint64_t u;
+    struct bdk_dtx_satax_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_satax_enax_s cn; */
+};
+typedef union bdk_dtx_satax_enax bdk_dtx_satax_enax_t;
+
+/* Physical CSR address of DTX_SATA(a)_ENA(b), b<=1 on all models; per-model
+   checks bound a by the chip's SATA instance count.  Invalid combinations
+   are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_SATAX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SATAX_ENAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe880020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=5) && (b<=1)))
+        return 0x87e0fe880020ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=15) && (b<=1)))
+        return 0x87e0fe880020ll + 0x8000ll * ((a) & 0xf) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe880020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_SATAX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SATAX_ENAX(a,b) bdk_dtx_satax_enax_t
+#define bustype_BDK_DTX_SATAX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SATAX_ENAX(a,b) "DTX_SATAX_ENAX"
+#define busnum_BDK_DTX_SATAX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_SATAX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_sata#_sel#
+ *
+ * DTX SATA Select Register
+ */
+union bdk_dtx_satax_selx
+{
+    uint64_t u;
+    struct bdk_dtx_satax_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_satax_selx_s cn; */
+};
+typedef union bdk_dtx_satax_selx bdk_dtx_satax_selx_t;
+
+/* Physical CSR address of DTX_SATA(a)_SEL(b), b<=1 on all models; per-model
+   checks bound a by the chip's SATA instance count.  Invalid combinations
+   are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_SATAX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SATAX_SELX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe880000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=5) && (b<=1)))
+        return 0x87e0fe880000ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=15) && (b<=1)))
+        return 0x87e0fe880000ll + 0x8000ll * ((a) & 0xf) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=1)))
+        return 0x87e0fe880000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_SATAX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SATAX_SELX(a,b) bdk_dtx_satax_selx_t
+#define bustype_BDK_DTX_SATAX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SATAX_SELX(a,b) "DTX_SATAX_SELX"
+#define busnum_BDK_DTX_SATAX_SELX(a,b) (a)
+#define arguments_BDK_DTX_SATAX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_sli#_bcst_rsp
+ *
+ * DTX SLI Control Register
+ */
+union bdk_dtx_slix_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_slix_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_slix_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_slix_bcst_rsp bdk_dtx_slix_bcst_rsp_t;
+
+/* Physical CSR address of DTX_SLI(a)_BCST_RSP.  CN81XX/CN83XX expose a single
+   SLI (a==0), CN88XX exposes two; other (model, index) pairs are reported
+   through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_SLIX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SLIX_BCST_RSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return 0x87e0feba0080ll + 0x8000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+        return 0x87e0feba0080ll + 0x8000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0feba0080ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_SLIX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SLIX_BCST_RSP(a) bdk_dtx_slix_bcst_rsp_t
+#define bustype_BDK_DTX_SLIX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SLIX_BCST_RSP(a) "DTX_SLIX_BCST_RSP"
+#define busnum_BDK_DTX_SLIX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_SLIX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sli#_ctl
+ *
+ * DTX SLI Control Register
+ */
+/* NOTE(review): the "DTX_MIO_ENA(0..1)" wording in [ECHOEN] below appears in
+   every DTX block and looks like generator boilerplate; for this block the
+   enable registers are presumably DTX_SLI(a)_ENA(0..1) -- confirm with HRM. */
+union bdk_dtx_slix_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_slix_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_slix_ctl_s cn; */
+};
+typedef union bdk_dtx_slix_ctl bdk_dtx_slix_ctl_t;
+
+/* Physical CSR address of DTX_SLI(a)_CTL.  CN81XX/CN83XX: a==0 only;
+   CN88XX: a<=1.  Other pairs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_SLIX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SLIX_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return 0x87e0feba0060ll + 0x8000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+        return 0x87e0feba0060ll + 0x8000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0feba0060ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_SLIX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SLIX_CTL(a) bdk_dtx_slix_ctl_t
+#define bustype_BDK_DTX_SLIX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SLIX_CTL(a) "DTX_SLIX_CTL"
+#define busnum_BDK_DTX_SLIX_CTL(a) (a)
+#define arguments_BDK_DTX_SLIX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sli#_dat#
+ *
+ * DTX SLI Raw Data Register
+ */
+union bdk_dtx_slix_datx
+{
+    uint64_t u;
+    struct bdk_dtx_slix_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_slix_datx_s cn; */
+};
+typedef union bdk_dtx_slix_datx bdk_dtx_slix_datx_t;
+
+/* Physical CSR address of DTX_SLI(a)_DAT(b), b<=1; CN81XX/CN83XX: a==0 only,
+   CN88XX: a<=1.  Invalid combinations go to __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_SLIX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SLIX_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+        return 0x87e0feba0040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+        return 0x87e0feba0040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0feba0040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_SLIX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SLIX_DATX(a,b) bdk_dtx_slix_datx_t
+#define bustype_BDK_DTX_SLIX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SLIX_DATX(a,b) "DTX_SLIX_DATX"
+#define busnum_BDK_DTX_SLIX_DATX(a,b) (a)
+#define arguments_BDK_DTX_SLIX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_sli#_ena#
+ *
+ * DTX SLI Data Enable Register
+ */
+union bdk_dtx_slix_enax
+{
+ uint64_t u;
+ struct bdk_dtx_slix_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_slix_enax_s cn; */
+};
+typedef union bdk_dtx_slix_enax bdk_dtx_slix_enax_t;
+
+/* Address helper for the register above. __bdk_csr_fatal() handles invalid
+   (model, index) arguments; it is presumably noreturn -- TODO confirm, since
+   the function otherwise falls off the end without a return value.
+   NOTE(review): on CN81XX/CN83XX "(a) & 0x0" is always 0 (only a==0 valid). */
+static inline uint64_t BDK_DTX_SLIX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SLIX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0feba0020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x87e0feba0020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feba0020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_SLIX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SLIX_ENAX(a,b) bdk_dtx_slix_enax_t
+#define bustype_BDK_DTX_SLIX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SLIX_ENAX(a,b) "DTX_SLIX_ENAX"
+#define busnum_BDK_DTX_SLIX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_SLIX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_sli#_sel#
+ *
+ * DTX SLI Select Register
+ */
+union bdk_dtx_slix_selx
+{
+ uint64_t u;
+ struct bdk_dtx_slix_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_slix_selx_s cn; */
+};
+typedef union bdk_dtx_slix_selx bdk_dtx_slix_selx_t;
+
+/* Address helper for the register above. __bdk_csr_fatal() handles invalid
+   (model, index) arguments; it is presumably noreturn -- TODO confirm, since
+   the function otherwise falls off the end without a return value.
+   NOTE(review): on CN81XX/CN83XX "(a) & 0x0" is always 0 (only a==0 valid). */
+static inline uint64_t BDK_DTX_SLIX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SLIX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=1)))
+ return 0x87e0feba0000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x87e0feba0000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feba0000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_SLIX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SLIX_SELX(a,b) bdk_dtx_slix_selx_t
+#define bustype_BDK_DTX_SLIX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SLIX_SELX(a,b) "DTX_SLIX_SELX"
+#define busnum_BDK_DTX_SLIX_SELX(a,b) (a)
+#define arguments_BDK_DTX_SLIX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_smmu#_bcst_rsp
+ *
+ * DTX SMMU Control Register
+ */
+union bdk_dtx_smmux_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_smmux_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmux_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_smmux_bcst_rsp bdk_dtx_smmux_bcst_rsp_t;
+
+/* Address helper for the register above (CN9XXX only, single instance).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return.
+   NOTE(review): "(a) & 0x0" is always 0 -- only a==0 is valid. */
+static inline uint64_t BDK_DTX_SMMUX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x87e0fe980080ll + 0x8000ll * ((a) & 0x0);
+ __bdk_csr_fatal("DTX_SMMUX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUX_BCST_RSP(a) bdk_dtx_smmux_bcst_rsp_t
+#define bustype_BDK_DTX_SMMUX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUX_BCST_RSP(a) "DTX_SMMUX_BCST_RSP"
+#define busnum_BDK_DTX_SMMUX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_SMMUX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_smmu#_ctl
+ *
+ * DTX SMMU Control Register
+ */
+union bdk_dtx_smmux_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_smmux_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmux_ctl_s cn; */
+};
+typedef union bdk_dtx_smmux_ctl bdk_dtx_smmux_ctl_t;
+
+/* Address helper for the register above (CN9XXX only, single instance).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return.
+   NOTE(review): "(a) & 0x0" is always 0 -- only a==0 is valid. */
+static inline uint64_t BDK_DTX_SMMUX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x87e0fe980060ll + 0x8000ll * ((a) & 0x0);
+ __bdk_csr_fatal("DTX_SMMUX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUX_CTL(a) bdk_dtx_smmux_ctl_t
+#define bustype_BDK_DTX_SMMUX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUX_CTL(a) "DTX_SMMUX_CTL"
+#define busnum_BDK_DTX_SMMUX_CTL(a) (a)
+#define arguments_BDK_DTX_SMMUX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_smmu#_dat#
+ *
+ * DTX SMMU Raw Data Register
+ */
+union bdk_dtx_smmux_datx
+{
+ uint64_t u;
+ struct bdk_dtx_smmux_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmux_datx_s cn; */
+};
+typedef union bdk_dtx_smmux_datx bdk_dtx_smmux_datx_t;
+
+/* Address helper for the register above (CN9XXX only). __bdk_csr_fatal()
+   handles invalid arguments; presumably noreturn -- TODO confirm, since the
+   function otherwise falls off the end without a return value.
+   NOTE(review): "(a) & 0x0" is always 0 -- only a==0 is valid. */
+static inline uint64_t BDK_DTX_SMMUX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a==0) && (b<=1)))
+ return 0x87e0fe980040ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_SMMUX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUX_DATX(a,b) bdk_dtx_smmux_datx_t
+#define bustype_BDK_DTX_SMMUX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUX_DATX(a,b) "DTX_SMMUX_DATX"
+#define busnum_BDK_DTX_SMMUX_DATX(a,b) (a)
+#define arguments_BDK_DTX_SMMUX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_smmu#_ena#
+ *
+ * DTX SMMU Data Enable Register
+ */
+union bdk_dtx_smmux_enax
+{
+ uint64_t u;
+ struct bdk_dtx_smmux_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmux_enax_s cn; */
+};
+typedef union bdk_dtx_smmux_enax bdk_dtx_smmux_enax_t;
+
+/* Address helper for the register above (CN9XXX only). __bdk_csr_fatal()
+   handles invalid arguments; presumably noreturn -- TODO confirm, since the
+   function otherwise falls off the end without a return value.
+   NOTE(review): "(a) & 0x0" is always 0 -- only a==0 is valid. */
+static inline uint64_t BDK_DTX_SMMUX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a==0) && (b<=1)))
+ return 0x87e0fe980020ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_SMMUX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUX_ENAX(a,b) bdk_dtx_smmux_enax_t
+#define bustype_BDK_DTX_SMMUX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUX_ENAX(a,b) "DTX_SMMUX_ENAX"
+#define busnum_BDK_DTX_SMMUX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_SMMUX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_smmu#_sel#
+ *
+ * DTX SMMU Select Register
+ */
+union bdk_dtx_smmux_selx
+{
+ uint64_t u;
+ struct bdk_dtx_smmux_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmux_selx_s cn; */
+};
+typedef union bdk_dtx_smmux_selx bdk_dtx_smmux_selx_t;
+
+/* Address helper for the register above (CN9XXX only). __bdk_csr_fatal()
+   handles invalid arguments; presumably noreturn -- TODO confirm, since the
+   function otherwise falls off the end without a return value.
+   NOTE(review): "(a) & 0x0" is always 0 -- only a==0 is valid. */
+static inline uint64_t BDK_DTX_SMMUX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a==0) && (b<=1)))
+ return 0x87e0fe980000ll + 0x8000ll * ((a) & 0x0) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_SMMUX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUX_SELX(a,b) bdk_dtx_smmux_selx_t
+#define bustype_BDK_DTX_SMMUX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUX_SELX(a,b) "DTX_SMMUX_SELX"
+#define busnum_BDK_DTX_SMMUX_SELX(a,b) (a)
+#define arguments_BDK_DTX_SMMUX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_smmus#_bcst_rsp
+ *
+ * DTX SMMUS Control Register
+ */
+union bdk_dtx_smmusx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_smmusx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmusx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_smmusx_bcst_rsp bdk_dtx_smmusx_bcst_rsp_t;
+
+/* Address helper for the register above (CN9XXX, instances 0..2, 0x8000
+   stride). __bdk_csr_fatal() handles invalid arguments; presumably noreturn
+   -- TODO confirm, since the function otherwise falls off the end. */
+static inline uint64_t BDK_DTX_SMMUSX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUSX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0fe988080ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_SMMUSX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUSX_BCST_RSP(a) bdk_dtx_smmusx_bcst_rsp_t
+#define bustype_BDK_DTX_SMMUSX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUSX_BCST_RSP(a) "DTX_SMMUSX_BCST_RSP"
+#define busnum_BDK_DTX_SMMUSX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_SMMUSX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_smmus#_ctl
+ *
+ * DTX SMMUS Control Register
+ */
+union bdk_dtx_smmusx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_smmusx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmusx_ctl_s cn; */
+};
+typedef union bdk_dtx_smmusx_ctl bdk_dtx_smmusx_ctl_t;
+
+/* Address helper for the register above (CN9XXX, instances 0..2, 0x8000
+   stride). __bdk_csr_fatal() handles invalid arguments; presumably noreturn
+   -- TODO confirm, since the function otherwise falls off the end. */
+static inline uint64_t BDK_DTX_SMMUSX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUSX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=2))
+ return 0x87e0fe988060ll + 0x8000ll * ((a) & 0x3);
+ __bdk_csr_fatal("DTX_SMMUSX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUSX_CTL(a) bdk_dtx_smmusx_ctl_t
+#define bustype_BDK_DTX_SMMUSX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUSX_CTL(a) "DTX_SMMUSX_CTL"
+#define busnum_BDK_DTX_SMMUSX_CTL(a) (a)
+#define arguments_BDK_DTX_SMMUSX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_smmus#_dat#
+ *
+ * DTX SMMUS Raw Data Register
+ */
+union bdk_dtx_smmusx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_smmusx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmusx_datx_s cn; */
+};
+typedef union bdk_dtx_smmusx_datx bdk_dtx_smmusx_datx_t;
+
+/* Address helper for the register above (CN9XXX, a in 0..2, b in 0..1).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return. */
+static inline uint64_t BDK_DTX_SMMUSX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUSX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe988040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_SMMUSX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUSX_DATX(a,b) bdk_dtx_smmusx_datx_t
+#define bustype_BDK_DTX_SMMUSX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUSX_DATX(a,b) "DTX_SMMUSX_DATX"
+#define busnum_BDK_DTX_SMMUSX_DATX(a,b) (a)
+#define arguments_BDK_DTX_SMMUSX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_smmus#_ena#
+ *
+ * DTX SMMUS Data Enable Register
+ */
+union bdk_dtx_smmusx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_smmusx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmusx_enax_s cn; */
+};
+typedef union bdk_dtx_smmusx_enax bdk_dtx_smmusx_enax_t;
+
+/* Address helper for the register above (CN9XXX, a in 0..2, b in 0..1).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return. */
+static inline uint64_t BDK_DTX_SMMUSX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUSX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe988020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_SMMUSX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUSX_ENAX(a,b) bdk_dtx_smmusx_enax_t
+#define bustype_BDK_DTX_SMMUSX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUSX_ENAX(a,b) "DTX_SMMUSX_ENAX"
+#define busnum_BDK_DTX_SMMUSX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_SMMUSX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_smmus#_sel#
+ *
+ * DTX SMMUS Select Register
+ */
+union bdk_dtx_smmusx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_smmusx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_smmusx_selx_s cn; */
+};
+typedef union bdk_dtx_smmusx_selx bdk_dtx_smmusx_selx_t;
+
+/* Address helper for the register above (CN9XXX, a in 0..2, b in 0..1).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return. */
+static inline uint64_t BDK_DTX_SMMUSX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SMMUSX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=2) && (b<=1)))
+ return 0x87e0fe988000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_SMMUSX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_SMMUSX_SELX(a,b) bdk_dtx_smmusx_selx_t
+#define bustype_BDK_DTX_SMMUSX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SMMUSX_SELX(a,b) "DTX_SMMUSX_SELX"
+#define busnum_BDK_DTX_SMMUSX_SELX(a,b) (a)
+#define arguments_BDK_DTX_SMMUSX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_sso_bcst_rsp
+ *
+ * DTX SSO Control Register
+ */
+union bdk_dtx_sso_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_sso_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_sso_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_sso_bcst_rsp bdk_dtx_sso_bcst_rsp_t;
+
+/* Address helper (single instance, no index). __bdk_csr_fatal() handles
+   unsupported models; presumably noreturn -- TODO confirm, since the
+   function otherwise falls off the end without a return value. */
+#define BDK_DTX_SSO_BCST_RSP BDK_DTX_SSO_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_SSO_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SSO_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feb00080ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feb00080ll;
+ __bdk_csr_fatal("DTX_SSO_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SSO_BCST_RSP bdk_dtx_sso_bcst_rsp_t
+#define bustype_BDK_DTX_SSO_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SSO_BCST_RSP "DTX_SSO_BCST_RSP"
+#define busnum_BDK_DTX_SSO_BCST_RSP 0
+#define arguments_BDK_DTX_SSO_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sso_ctl
+ *
+ * DTX SSO Control Register
+ */
+union bdk_dtx_sso_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_sso_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_sso_ctl_s cn; */
+};
+typedef union bdk_dtx_sso_ctl bdk_dtx_sso_ctl_t;
+
+/* Address helper (single instance, no index). __bdk_csr_fatal() handles
+   unsupported models; presumably noreturn -- TODO confirm, since the
+   function otherwise falls off the end without a return value. */
+#define BDK_DTX_SSO_CTL BDK_DTX_SSO_CTL_FUNC()
+static inline uint64_t BDK_DTX_SSO_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SSO_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feb00060ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feb00060ll;
+ __bdk_csr_fatal("DTX_SSO_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SSO_CTL bdk_dtx_sso_ctl_t
+#define bustype_BDK_DTX_SSO_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SSO_CTL "DTX_SSO_CTL"
+#define busnum_BDK_DTX_SSO_CTL 0
+#define arguments_BDK_DTX_SSO_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sso_dat#
+ *
+ * DTX SSO Raw Data Register
+ */
+union bdk_dtx_sso_datx
+{
+ uint64_t u;
+ struct bdk_dtx_sso_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_sso_datx_s cn; */
+};
+typedef union bdk_dtx_sso_datx bdk_dtx_sso_datx_t;
+
+/* Address helper for the register above (a in 0..1 on CN83XX/CN9XXX).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return. */
+static inline uint64_t BDK_DTX_SSO_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SSO_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb00040ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb00040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_SSO_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SSO_DATX(a) bdk_dtx_sso_datx_t
+#define bustype_BDK_DTX_SSO_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SSO_DATX(a) "DTX_SSO_DATX"
+#define busnum_BDK_DTX_SSO_DATX(a) (a)
+#define arguments_BDK_DTX_SSO_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sso_ena#
+ *
+ * DTX SSO Data Enable Register
+ */
+union bdk_dtx_sso_enax
+{
+ uint64_t u;
+ struct bdk_dtx_sso_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_sso_enax_s cn; */
+};
+typedef union bdk_dtx_sso_enax bdk_dtx_sso_enax_t;
+
+/* Address helper for the register above (a in 0..1 on CN83XX/CN9XXX).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return. */
+static inline uint64_t BDK_DTX_SSO_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SSO_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb00020ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb00020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_SSO_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SSO_ENAX(a) bdk_dtx_sso_enax_t
+#define bustype_BDK_DTX_SSO_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SSO_ENAX(a) "DTX_SSO_ENAX"
+#define busnum_BDK_DTX_SSO_ENAX(a) (a)
+#define arguments_BDK_DTX_SSO_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_sso_sel#
+ *
+ * DTX SSO Select Register
+ */
+union bdk_dtx_sso_selx
+{
+ uint64_t u;
+ struct bdk_dtx_sso_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_sso_selx_s cn; */
+};
+typedef union bdk_dtx_sso_selx bdk_dtx_sso_selx_t;
+
+/* Address helper for the register above (a in 0..1 on CN83XX/CN9XXX).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return. */
+static inline uint64_t BDK_DTX_SSO_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_SSO_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb00000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb00000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_SSO_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_SSO_SELX(a) bdk_dtx_sso_selx_t
+#define bustype_BDK_DTX_SSO_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_SSO_SELX(a) "DTX_SSO_SELX"
+#define busnum_BDK_DTX_SSO_SELX(a) (a)
+#define arguments_BDK_DTX_SSO_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tim_bcst_rsp
+ *
+ * DTX TIM Control Register
+ */
+union bdk_dtx_tim_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_tim_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tim_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_tim_bcst_rsp bdk_dtx_tim_bcst_rsp_t;
+
+/* Address helper (single instance, no index). __bdk_csr_fatal() handles
+   unsupported models; presumably noreturn -- TODO confirm, since the
+   function otherwise falls off the end without a return value. */
+#define BDK_DTX_TIM_BCST_RSP BDK_DTX_TIM_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_TIM_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TIM_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feac0080ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feac0080ll;
+ __bdk_csr_fatal("DTX_TIM_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TIM_BCST_RSP bdk_dtx_tim_bcst_rsp_t
+#define bustype_BDK_DTX_TIM_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TIM_BCST_RSP "DTX_TIM_BCST_RSP"
+#define busnum_BDK_DTX_TIM_BCST_RSP 0
+#define arguments_BDK_DTX_TIM_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tim_ctl
+ *
+ * DTX TIM Control Register
+ */
+union bdk_dtx_tim_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_tim_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tim_ctl_s cn; */
+};
+typedef union bdk_dtx_tim_ctl bdk_dtx_tim_ctl_t;
+
+/* Address helper (single instance, no index). __bdk_csr_fatal() handles
+   unsupported models; presumably noreturn -- TODO confirm, since the
+   function otherwise falls off the end without a return value. */
+#define BDK_DTX_TIM_CTL BDK_DTX_TIM_CTL_FUNC()
+static inline uint64_t BDK_DTX_TIM_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TIM_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0feac0060ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0feac0060ll;
+ __bdk_csr_fatal("DTX_TIM_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TIM_CTL bdk_dtx_tim_ctl_t
+#define bustype_BDK_DTX_TIM_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TIM_CTL "DTX_TIM_CTL"
+#define busnum_BDK_DTX_TIM_CTL 0
+#define arguments_BDK_DTX_TIM_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tim_dat#
+ *
+ * DTX TIM Raw Data Register
+ */
+union bdk_dtx_tim_datx
+{
+ uint64_t u;
+ struct bdk_dtx_tim_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tim_datx_s cn; */
+};
+typedef union bdk_dtx_tim_datx bdk_dtx_tim_datx_t;
+
+/* Address helper for the register above (a in 0..1 on CN83XX/CN9XXX).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return. */
+static inline uint64_t BDK_DTX_TIM_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TIM_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feac0040ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feac0040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_TIM_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TIM_DATX(a) bdk_dtx_tim_datx_t
+#define bustype_BDK_DTX_TIM_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TIM_DATX(a) "DTX_TIM_DATX"
+#define busnum_BDK_DTX_TIM_DATX(a) (a)
+#define arguments_BDK_DTX_TIM_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tim_ena#
+ *
+ * DTX TIM Data Enable Register
+ */
+union bdk_dtx_tim_enax
+{
+ uint64_t u;
+ struct bdk_dtx_tim_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tim_enax_s cn; */
+};
+typedef union bdk_dtx_tim_enax bdk_dtx_tim_enax_t;
+
+/* Address helper for the register above (a in 0..1 on CN83XX/CN9XXX).
+   __bdk_csr_fatal() handles invalid arguments; presumably noreturn -- TODO
+   confirm, since the function otherwise falls off the end without a return. */
+static inline uint64_t BDK_DTX_TIM_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TIM_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feac0020ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feac0020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_TIM_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TIM_ENAX(a) bdk_dtx_tim_enax_t
+#define bustype_BDK_DTX_TIM_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TIM_ENAX(a) "DTX_TIM_ENAX"
+#define busnum_BDK_DTX_TIM_ENAX(a) (a)
+#define arguments_BDK_DTX_TIM_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tim_sel#
+ *
+ * DTX TIM Select Register
+ */
+union bdk_dtx_tim_selx
+{
+ uint64_t u;
+ struct bdk_dtx_tim_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tim_selx_s cn; */
+};
+typedef union bdk_dtx_tim_selx bdk_dtx_tim_selx_t;
+
+/* NOTE(review): generated CSR accessor; index is masked to the valid range and
+   unsupported model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_TIM_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TIM_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feac0000ll + 8ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feac0000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_TIM_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TIM_SELX(a) bdk_dtx_tim_selx_t
+#define bustype_BDK_DTX_TIM_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TIM_SELX(a) "DTX_TIM_SELX"
+#define busnum_BDK_DTX_TIM_SELX(a) (a)
+#define arguments_BDK_DTX_TIM_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tns_bcst_rsp
+ *
+ * DTX TNS Control Register
+ */
+union bdk_dtx_tns_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_tns_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tns_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_tns_bcst_rsp bdk_dtx_tns_bcst_rsp_t;
+
+/* NOTE(review): generated CSR accessor; present on CN88XX only — other models
+   trap in __bdk_csr_fatal(). */
+#define BDK_DTX_TNS_BCST_RSP BDK_DTX_TNS_BCST_RSP_FUNC()
+static inline uint64_t BDK_DTX_TNS_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TNS_BCST_RSP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0fea10080ll;
+ __bdk_csr_fatal("DTX_TNS_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TNS_BCST_RSP bdk_dtx_tns_bcst_rsp_t
+#define bustype_BDK_DTX_TNS_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TNS_BCST_RSP "DTX_TNS_BCST_RSP"
+#define busnum_BDK_DTX_TNS_BCST_RSP 0
+#define arguments_BDK_DTX_TNS_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tns_ctl
+ *
+ * DTX TNS Control Register
+ */
+union bdk_dtx_tns_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_tns_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tns_ctl_s cn; */
+};
+typedef union bdk_dtx_tns_ctl bdk_dtx_tns_ctl_t;
+
+/* NOTE(review): generated CSR accessor; present on CN88XX only — other models
+   trap in __bdk_csr_fatal(). */
+#define BDK_DTX_TNS_CTL BDK_DTX_TNS_CTL_FUNC()
+static inline uint64_t BDK_DTX_TNS_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TNS_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x87e0fea10060ll;
+ __bdk_csr_fatal("DTX_TNS_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TNS_CTL bdk_dtx_tns_ctl_t
+#define bustype_BDK_DTX_TNS_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TNS_CTL "DTX_TNS_CTL"
+#define busnum_BDK_DTX_TNS_CTL 0
+#define arguments_BDK_DTX_TNS_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tns_dat#
+ *
+ * DTX TNS Raw Data Register
+ */
+union bdk_dtx_tns_datx
+{
+ uint64_t u;
+ struct bdk_dtx_tns_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tns_datx_s cn; */
+};
+typedef union bdk_dtx_tns_datx bdk_dtx_tns_datx_t;
+
+/* NOTE(review): generated CSR accessor; index is masked to the valid range and
+   unsupported model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_TNS_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TNS_DATX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fea10040ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_TNS_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TNS_DATX(a) bdk_dtx_tns_datx_t
+#define bustype_BDK_DTX_TNS_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TNS_DATX(a) "DTX_TNS_DATX"
+#define busnum_BDK_DTX_TNS_DATX(a) (a)
+#define arguments_BDK_DTX_TNS_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tns_ena#
+ *
+ * DTX TNS Data Enable Register
+ */
+union bdk_dtx_tns_enax
+{
+ uint64_t u;
+ struct bdk_dtx_tns_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tns_enax_s cn; */
+};
+typedef union bdk_dtx_tns_enax bdk_dtx_tns_enax_t;
+
+/* NOTE(review): generated CSR accessor; index is masked to the valid range and
+   unsupported model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_TNS_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TNS_ENAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fea10020ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_TNS_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TNS_ENAX(a) bdk_dtx_tns_enax_t
+#define bustype_BDK_DTX_TNS_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TNS_ENAX(a) "DTX_TNS_ENAX"
+#define busnum_BDK_DTX_TNS_ENAX(a) (a)
+#define arguments_BDK_DTX_TNS_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_tns_sel#
+ *
+ * DTX TNS Select Register
+ */
+union bdk_dtx_tns_selx
+{
+ uint64_t u;
+ struct bdk_dtx_tns_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_tns_selx_s cn; */
+};
+typedef union bdk_dtx_tns_selx bdk_dtx_tns_selx_t;
+
+/* NOTE(review): generated CSR accessor; index is masked to the valid range and
+   unsupported model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_TNS_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_TNS_SELX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fea10000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_TNS_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_TNS_SELX(a) bdk_dtx_tns_selx_t
+#define bustype_BDK_DTX_TNS_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_TNS_SELX(a) "DTX_TNS_SELX"
+#define busnum_BDK_DTX_TNS_SELX(a) (a)
+#define arguments_BDK_DTX_TNS_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_uaa#_bcst_rsp
+ *
+ * DTX UAA Control Register
+ */
+union bdk_dtx_uaax_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_uaax_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_uaax_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_uaax_bcst_rsp bdk_dtx_uaax_bcst_rsp_t;
+
+/* NOTE(review): generated CSR accessor; per-model base and index range differ
+   (CN88XX uses a different base); invalid combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_UAAX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_UAAX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0fe140080ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0fe140080ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fe120080ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0fe140080ll + 0x8000ll * ((a) & 0x7);
+ __bdk_csr_fatal("DTX_UAAX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_UAAX_BCST_RSP(a) bdk_dtx_uaax_bcst_rsp_t
+#define bustype_BDK_DTX_UAAX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_UAAX_BCST_RSP(a) "DTX_UAAX_BCST_RSP"
+#define busnum_BDK_DTX_UAAX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_UAAX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_uaa#_ctl
+ *
+ * DTX UAA Control Register
+ */
+union bdk_dtx_uaax_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_uaax_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_uaax_ctl_s cn; */
+};
+typedef union bdk_dtx_uaax_ctl bdk_dtx_uaax_ctl_t;
+
+/* NOTE(review): generated CSR accessor; per-model base and index range differ
+   (CN88XX uses a different base); invalid combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_UAAX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_UAAX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0fe140060ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x87e0fe140060ll + 0x8000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0fe120060ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0fe140060ll + 0x8000ll * ((a) & 0x7);
+ __bdk_csr_fatal("DTX_UAAX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_UAAX_CTL(a) bdk_dtx_uaax_ctl_t
+#define bustype_BDK_DTX_UAAX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_UAAX_CTL(a) "DTX_UAAX_CTL"
+#define busnum_BDK_DTX_UAAX_CTL(a) (a)
+#define arguments_BDK_DTX_UAAX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_uaa#_dat#
+ *
+ * DTX UAA Raw Data Register
+ */
+union bdk_dtx_uaax_datx
+{
+ uint64_t u;
+ struct bdk_dtx_uaax_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_uaax_datx_s cn; */
+};
+typedef union bdk_dtx_uaax_datx bdk_dtx_uaax_datx_t;
+
+/* NOTE(review): generated two-index CSR accessor; both indices are masked and
+   invalid model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_UAAX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_UAAX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe140040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe140040ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe120040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=1)))
+ return 0x87e0fe140040ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_UAAX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_UAAX_DATX(a,b) bdk_dtx_uaax_datx_t
+#define bustype_BDK_DTX_UAAX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_UAAX_DATX(a,b) "DTX_UAAX_DATX"
+#define busnum_BDK_DTX_UAAX_DATX(a,b) (a)
+#define arguments_BDK_DTX_UAAX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_uaa#_ena#
+ *
+ * DTX UAA Data Enable Register
+ */
+union bdk_dtx_uaax_enax
+{
+ uint64_t u;
+ struct bdk_dtx_uaax_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_uaax_enax_s cn; */
+};
+typedef union bdk_dtx_uaax_enax bdk_dtx_uaax_enax_t;
+
+/* NOTE(review): generated two-index CSR accessor; both indices are masked and
+   invalid model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_UAAX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_UAAX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe140020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe140020ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe120020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=1)))
+ return 0x87e0fe140020ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_UAAX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_UAAX_ENAX(a,b) bdk_dtx_uaax_enax_t
+#define bustype_BDK_DTX_UAAX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_UAAX_ENAX(a,b) "DTX_UAAX_ENAX"
+#define busnum_BDK_DTX_UAAX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_UAAX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_uaa#_sel#
+ *
+ * DTX UAA Select Register
+ */
+union bdk_dtx_uaax_selx
+{
+ uint64_t u;
+ struct bdk_dtx_uaax_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_uaax_selx_s cn; */
+};
+typedef union bdk_dtx_uaax_selx bdk_dtx_uaax_selx_t;
+
+/* NOTE(review): generated two-index CSR accessor; both indices are masked and
+   invalid model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_UAAX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_UAAX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe140000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=3) && (b<=1)))
+ return 0x87e0fe140000ll + 0x8000ll * ((a) & 0x3) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e0fe120000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=1)))
+ return 0x87e0fe140000ll + 0x8000ll * ((a) & 0x7) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_UAAX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_UAAX_SELX(a,b) bdk_dtx_uaax_selx_t
+#define bustype_BDK_DTX_UAAX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_UAAX_SELX(a,b) "DTX_UAAX_SELX"
+#define busnum_BDK_DTX_UAAX_SELX(a,b) (a)
+#define arguments_BDK_DTX_UAAX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_usbdrd#_bcst_rsp
+ *
+ * DTX USBDRD Control Register
+ */
+union bdk_dtx_usbdrdx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_usbdrdx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_usbdrdx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_usbdrdx_bcst_rsp bdk_dtx_usbdrdx_bcst_rsp_t;
+
+/* NOTE(review): generated CSR accessor; index is masked to the valid range and
+   unsupported model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_USBDRDX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBDRDX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0feb40080ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb40080ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb40080ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_USBDRDX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBDRDX_BCST_RSP(a) bdk_dtx_usbdrdx_bcst_rsp_t
+#define bustype_BDK_DTX_USBDRDX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBDRDX_BCST_RSP(a) "DTX_USBDRDX_BCST_RSP"
+#define busnum_BDK_DTX_USBDRDX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_USBDRDX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_usbdrd#_ctl
+ *
+ * DTX USBDRD Control Register
+ */
+union bdk_dtx_usbdrdx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_usbdrdx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_usbdrdx_ctl_s cn; */
+};
+typedef union bdk_dtx_usbdrdx_ctl bdk_dtx_usbdrdx_ctl_t;
+
+/* NOTE(review): generated CSR accessor; index is masked to the valid range and
+   unsupported model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_USBDRDX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBDRDX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x87e0feb40060ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0feb40060ll + 0x8000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0feb40060ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_USBDRDX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBDRDX_CTL(a) bdk_dtx_usbdrdx_ctl_t
+#define bustype_BDK_DTX_USBDRDX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBDRDX_CTL(a) "DTX_USBDRDX_CTL"
+#define busnum_BDK_DTX_USBDRDX_CTL(a) (a)
+#define arguments_BDK_DTX_USBDRDX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_usbdrd#_dat#
+ *
+ * DTX USBDRD Raw Data Register
+ */
+union bdk_dtx_usbdrdx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_usbdrdx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_usbdrdx_datx_s cn; */
+};
+typedef union bdk_dtx_usbdrdx_datx bdk_dtx_usbdrdx_datx_t;
+
+/* NOTE(review): generated two-index CSR accessor; both indices are masked and
+   invalid model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_USBDRDX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBDRDX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_USBDRDX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBDRDX_DATX(a,b) bdk_dtx_usbdrdx_datx_t
+#define bustype_BDK_DTX_USBDRDX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBDRDX_DATX(a,b) "DTX_USBDRDX_DATX"
+#define busnum_BDK_DTX_USBDRDX_DATX(a,b) (a)
+#define arguments_BDK_DTX_USBDRDX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_usbdrd#_ena#
+ *
+ * DTX USBDRD Data Enable Register
+ */
+union bdk_dtx_usbdrdx_enax
+{
+ uint64_t u;
+ struct bdk_dtx_usbdrdx_enax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+ buses. Normally only one block will drive each bit. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_usbdrdx_enax_s cn; */
+};
+typedef union bdk_dtx_usbdrdx_enax bdk_dtx_usbdrdx_enax_t;
+
+/* NOTE(review): generated two-index CSR accessor; both indices are masked and
+   invalid model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_USBDRDX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBDRDX_ENAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_USBDRDX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBDRDX_ENAX(a,b) bdk_dtx_usbdrdx_enax_t
+#define bustype_BDK_DTX_USBDRDX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBDRDX_ENAX(a,b) "DTX_USBDRDX_ENAX"
+#define busnum_BDK_DTX_USBDRDX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_USBDRDX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_usbdrd#_sel#
+ *
+ * DTX USBDRD Select Register
+ */
+union bdk_dtx_usbdrdx_selx
+{
+ uint64_t u;
+ struct bdk_dtx_usbdrdx_selx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+ uint64_t value : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_usbdrdx_selx_s cn; */
+};
+typedef union bdk_dtx_usbdrdx_selx bdk_dtx_usbdrdx_selx_t;
+
+/* NOTE(review): generated two-index CSR accessor; both indices are masked and
+   invalid model/index combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_USBDRDX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBDRDX_SELX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_USBDRDX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBDRDX_SELX(a,b) bdk_dtx_usbdrdx_selx_t
+#define bustype_BDK_DTX_USBDRDX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBDRDX_SELX(a,b) "DTX_USBDRDX_SELX"
+#define busnum_BDK_DTX_USBDRDX_SELX(a,b) (a)
+#define arguments_BDK_DTX_USBDRDX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_usbh#_bcst_rsp
+ *
+ * DTX USBH Control Register
+ */
+union bdk_dtx_usbhx_bcst_rsp
+{
+ uint64_t u;
+ struct bdk_dtx_usbhx_bcst_rsp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+ uint64_t ena : 1; /**< [ 0: 0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_usbhx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_usbhx_bcst_rsp bdk_dtx_usbhx_bcst_rsp_t;
+
+/* NOTE(review): generated CSR accessor; present on CN88XX only — other models
+   trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_USBHX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBHX_BCST_RSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0feb40080ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_USBHX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBHX_BCST_RSP(a) bdk_dtx_usbhx_bcst_rsp_t
+#define bustype_BDK_DTX_USBHX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBHX_BCST_RSP(a) "DTX_USBHX_BCST_RSP"
+#define busnum_BDK_DTX_USBHX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_USBHX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_usbh#_ctl
+ *
+ * DTX USBH Control Register
+ */
+union bdk_dtx_usbhx_ctl
+{
+ uint64_t u;
+ struct bdk_dtx_usbhx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+ uint64_t swap : 1; /**< [ 0: 0](R/W) Swap the high and low 36-bit debug bus outputs. */
+ uint64_t echoen : 1; /**< [ 1: 1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+ debug data. Not applicable when software directly reads the DAT(0..1) registers.
+ For diagnostic use only. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t active : 1; /**< [ 4: 4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_usbhx_ctl_s cn; */
+};
+typedef union bdk_dtx_usbhx_ctl bdk_dtx_usbhx_ctl_t;
+
+/* NOTE(review): generated CSR accessor; present on CN88XX only — other models
+   trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_USBHX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBHX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0feb40060ll + 0x8000ll * ((a) & 0x1);
+ __bdk_csr_fatal("DTX_USBHX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBHX_CTL(a) bdk_dtx_usbhx_ctl_t
+#define bustype_BDK_DTX_USBHX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBHX_CTL(a) "DTX_USBHX_CTL"
+#define busnum_BDK_DTX_USBHX_CTL(a) (a)
+#define arguments_BDK_DTX_USBHX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_usbh#_dat#
+ *
+ * DTX USBH Raw Data Register
+ */
+union bdk_dtx_usbhx_datx
+{
+ uint64_t u;
+ struct bdk_dtx_usbhx_datx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+ uint64_t raw : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+ ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_dtx_usbhx_datx_s cn; */
+};
+typedef union bdk_dtx_usbhx_datx bdk_dtx_usbhx_datx_t;
+
+/* NOTE(review): generated two-index CSR accessor; CN88XX only — both indices are
+   masked and invalid combinations trap in __bdk_csr_fatal(). */
+static inline uint64_t BDK_DTX_USBHX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBHX_DATX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+ return 0x87e0feb40040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("DTX_USBHX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBHX_DATX(a,b) bdk_dtx_usbhx_datx_t
+#define bustype_BDK_DTX_USBHX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBHX_DATX(a,b) "DTX_USBHX_DATX"
+#define busnum_BDK_DTX_USBHX_DATX(a,b) (a)
+#define arguments_BDK_DTX_USBHX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_usbh#_ena#
+ *
+ * DTX USBH Data Enable Register
+ */
+union bdk_dtx_usbhx_enax
+{
+    uint64_t u;
+    struct bdk_dtx_usbhx_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_usbhx_enax_s cn; */
+};
+typedef union bdk_dtx_usbhx_enax bdk_dtx_usbhx_enax_t;
+
+/* Address helper: a = USBH instance (0x8000 stride), b = enable word (8-byte
+   stride), both 0..1 on CN88XX only; otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_USBHX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBHX_ENAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0feb40020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_USBHX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBHX_ENAX(a,b) bdk_dtx_usbhx_enax_t
+#define bustype_BDK_DTX_USBHX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBHX_ENAX(a,b) "DTX_USBHX_ENAX"
+#define busnum_BDK_DTX_USBHX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_USBHX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_usbh#_sel#
+ *
+ * DTX USBH Select Register
+ */
+union bdk_dtx_usbhx_selx
+{
+    uint64_t u;
+    struct bdk_dtx_usbhx_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_usbhx_selx_s cn; */
+};
+typedef union bdk_dtx_usbhx_selx bdk_dtx_usbhx_selx_t;
+
+/* Address helper: a = USBH instance (0x8000 stride), b = select word (8-byte
+   stride), both 0..1 on CN88XX only; otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_USBHX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_USBHX_SELX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0feb40000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_USBHX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_USBHX_SELX(a,b) bdk_dtx_usbhx_selx_t
+#define bustype_BDK_DTX_USBHX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_USBHX_SELX(a,b) "DTX_USBHX_SELX"
+#define busnum_BDK_DTX_USBHX_SELX(a,b) (a)
+#define arguments_BDK_DTX_USBHX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_xcp#_bcst_rsp
+ *
+ * DTX XCP Control Register
+ * NOTE(review): despite the generated title, this register holds the
+ * broadcast-response enable (see [ENA] below) -- confirm against the HRM.
+ */
+union bdk_dtx_xcpx_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_xcpx_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcpx_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_xcpx_bcst_rsp bdk_dtx_xcpx_bcst_rsp_t;
+
+/* Address helper: a = XCP instance (0..1, CN9XXX only, 0x8000 stride);
+   otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCPX_BCST_RSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCPX_BCST_RSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe960080ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_XCPX_BCST_RSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCPX_BCST_RSP(a) bdk_dtx_xcpx_bcst_rsp_t
+#define bustype_BDK_DTX_XCPX_BCST_RSP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCPX_BCST_RSP(a) "DTX_XCPX_BCST_RSP"
+#define busnum_BDK_DTX_XCPX_BCST_RSP(a) (a)
+#define arguments_BDK_DTX_XCPX_BCST_RSP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_xcp#_ctl
+ *
+ * DTX XCP Control Register
+ */
+union bdk_dtx_xcpx_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_xcpx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcpx_ctl_s cn; */
+};
+typedef union bdk_dtx_xcpx_ctl bdk_dtx_xcpx_ctl_t;
+
+/* Address helper: a = XCP instance (0..1, CN9XXX only, 0x8000 stride);
+   otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCPX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCPX_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe960060ll + 0x8000ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_XCPX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCPX_CTL(a) bdk_dtx_xcpx_ctl_t
+#define bustype_BDK_DTX_XCPX_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCPX_CTL(a) "DTX_XCPX_CTL"
+#define busnum_BDK_DTX_XCPX_CTL(a) (a)
+#define arguments_BDK_DTX_XCPX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_xcp#_dat#
+ *
+ * DTX XCP Raw Data Register
+ */
+union bdk_dtx_xcpx_datx
+{
+    uint64_t u;
+    struct bdk_dtx_xcpx_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcpx_datx_s cn; */
+};
+typedef union bdk_dtx_xcpx_datx bdk_dtx_xcpx_datx_t;
+
+/* Address helper: a = XCP instance (0x8000 stride), b = raw-data word (8-byte
+   stride), both 0..1 on CN9XXX only; otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCPX_DATX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCPX_DATX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe960040ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_XCPX_DATX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCPX_DATX(a,b) bdk_dtx_xcpx_datx_t
+#define bustype_BDK_DTX_XCPX_DATX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCPX_DATX(a,b) "DTX_XCPX_DATX"
+#define busnum_BDK_DTX_XCPX_DATX(a,b) (a)
+#define arguments_BDK_DTX_XCPX_DATX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_xcp#_ena#
+ *
+ * DTX XCP Data Enable Register
+ */
+union bdk_dtx_xcpx_enax
+{
+    uint64_t u;
+    struct bdk_dtx_xcpx_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcpx_enax_s cn; */
+};
+typedef union bdk_dtx_xcpx_enax bdk_dtx_xcpx_enax_t;
+
+/* Address helper: a = XCP instance (0x8000 stride), b = enable word (8-byte
+   stride), both 0..1 on CN9XXX only; otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCPX_ENAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCPX_ENAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe960020ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_XCPX_ENAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCPX_ENAX(a,b) bdk_dtx_xcpx_enax_t
+#define bustype_BDK_DTX_XCPX_ENAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCPX_ENAX(a,b) "DTX_XCPX_ENAX"
+#define busnum_BDK_DTX_XCPX_ENAX(a,b) (a)
+#define arguments_BDK_DTX_XCPX_ENAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_xcp#_sel#
+ *
+ * DTX XCP Select Register
+ */
+union bdk_dtx_xcpx_selx
+{
+    uint64_t u;
+    struct bdk_dtx_xcpx_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcpx_selx_s cn; */
+};
+typedef union bdk_dtx_xcpx_selx bdk_dtx_xcpx_selx_t;
+
+/* Address helper: a = XCP instance (0x8000 stride), b = select word (8-byte
+   stride), both 0..1 on CN9XXX only; otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCPX_SELX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCPX_SELX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+        return 0x87e0fe960000ll + 0x8000ll * ((a) & 0x1) + 8ll * ((b) & 0x1);
+    __bdk_csr_fatal("DTX_XCPX_SELX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCPX_SELX(a,b) bdk_dtx_xcpx_selx_t
+#define bustype_BDK_DTX_XCPX_SELX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCPX_SELX(a,b) "DTX_XCPX_SELX"
+#define busnum_BDK_DTX_XCPX_SELX(a,b) (a)
+#define arguments_BDK_DTX_XCPX_SELX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) dtx_xcv_bcst_rsp
+ *
+ * DTX XCV Control Register
+ * NOTE(review): despite the generated title, this register holds the
+ * broadcast-response enable (see [ENA] below) -- confirm against the HRM.
+ */
+union bdk_dtx_xcv_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_xcv_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcv_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_xcv_bcst_rsp bdk_dtx_xcv_bcst_rsp_t;
+
+#define BDK_DTX_XCV_BCST_RSP BDK_DTX_XCV_BCST_RSP_FUNC()
+/* Address helper: single XCV instance, CN81XX only; other models report a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCV_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCV_BCST_RSP_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return 0x87e0fe6d8080ll;
+    __bdk_csr_fatal("DTX_XCV_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCV_BCST_RSP bdk_dtx_xcv_bcst_rsp_t
+#define bustype_BDK_DTX_XCV_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCV_BCST_RSP "DTX_XCV_BCST_RSP"
+#define busnum_BDK_DTX_XCV_BCST_RSP 0
+#define arguments_BDK_DTX_XCV_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_xcv_ctl
+ *
+ * DTX XCV Control Register
+ */
+union bdk_dtx_xcv_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_xcv_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcv_ctl_s cn; */
+};
+typedef union bdk_dtx_xcv_ctl bdk_dtx_xcv_ctl_t;
+
+#define BDK_DTX_XCV_CTL BDK_DTX_XCV_CTL_FUNC()
+/* Address helper: single XCV instance, CN81XX only; other models report a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCV_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCV_CTL_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return 0x87e0fe6d8060ll;
+    __bdk_csr_fatal("DTX_XCV_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCV_CTL bdk_dtx_xcv_ctl_t
+#define bustype_BDK_DTX_XCV_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCV_CTL "DTX_XCV_CTL"
+#define busnum_BDK_DTX_XCV_CTL 0
+#define arguments_BDK_DTX_XCV_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_xcv_dat#
+ *
+ * DTX XCV Raw Data Register
+ */
+union bdk_dtx_xcv_datx
+{
+    uint64_t u;
+    struct bdk_dtx_xcv_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcv_datx_s cn; */
+};
+typedef union bdk_dtx_xcv_datx bdk_dtx_xcv_datx_t;
+
+/* Address helper: a = raw-data word (0..1, 8-byte stride), CN81XX only;
+   otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCV_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCV_DATX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0fe6d8040ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_XCV_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCV_DATX(a) bdk_dtx_xcv_datx_t
+#define bustype_BDK_DTX_XCV_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCV_DATX(a) "DTX_XCV_DATX"
+#define busnum_BDK_DTX_XCV_DATX(a) (a)
+#define arguments_BDK_DTX_XCV_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_xcv_ena#
+ *
+ * DTX XCV Data Enable Register
+ */
+union bdk_dtx_xcv_enax
+{
+    uint64_t u;
+    struct bdk_dtx_xcv_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcv_enax_s cn; */
+};
+typedef union bdk_dtx_xcv_enax bdk_dtx_xcv_enax_t;
+
+/* Address helper: a = enable word (0..1, 8-byte stride), CN81XX only;
+   otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCV_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCV_ENAX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0fe6d8020ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_XCV_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCV_ENAX(a) bdk_dtx_xcv_enax_t
+#define bustype_BDK_DTX_XCV_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCV_ENAX(a) "DTX_XCV_ENAX"
+#define busnum_BDK_DTX_XCV_ENAX(a) (a)
+#define arguments_BDK_DTX_XCV_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_xcv_sel#
+ *
+ * DTX XCV Select Register
+ */
+union bdk_dtx_xcv_selx
+{
+    uint64_t u;
+    struct bdk_dtx_xcv_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_xcv_selx_s cn; */
+};
+typedef union bdk_dtx_xcv_selx bdk_dtx_xcv_selx_t;
+
+/* Address helper: a = select word (0..1, 8-byte stride), CN81XX only;
+   otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_XCV_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_XCV_SELX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x87e0fe6d8000ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_XCV_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_XCV_SELX(a) bdk_dtx_xcv_selx_t
+#define bustype_BDK_DTX_XCV_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_XCV_SELX(a) "DTX_XCV_SELX"
+#define busnum_BDK_DTX_XCV_SELX(a) (a)
+#define arguments_BDK_DTX_XCV_SELX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_zip_bcst_rsp
+ *
+ * DTX ZIP Control Register
+ * NOTE(review): despite the generated title, this register holds the
+ * broadcast-response enable (see [ENA] below) -- confirm against the HRM.
+ */
+union bdk_dtx_zip_bcst_rsp
+{
+    uint64_t u;
+    struct bdk_dtx_zip_bcst_rsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 1;  /**< [  0:  0](R/W) Enable this DTX instance as the responder to DTX broadcast read/write operations. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_zip_bcst_rsp_s cn; */
+};
+typedef union bdk_dtx_zip_bcst_rsp bdk_dtx_zip_bcst_rsp_t;
+
+#define BDK_DTX_ZIP_BCST_RSP BDK_DTX_ZIP_BCST_RSP_FUNC()
+/* Address helper: same fixed address on CN83XX, CN88XX and CN9XXX;
+   other models report a fatal CSR error. */
+static inline uint64_t BDK_DTX_ZIP_BCST_RSP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_ZIP_BCST_RSP_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return 0x87e0fe9c0080ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return 0x87e0fe9c0080ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x87e0fe9c0080ll;
+    __bdk_csr_fatal("DTX_ZIP_BCST_RSP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_ZIP_BCST_RSP bdk_dtx_zip_bcst_rsp_t
+#define bustype_BDK_DTX_ZIP_BCST_RSP BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_ZIP_BCST_RSP "DTX_ZIP_BCST_RSP"
+#define busnum_BDK_DTX_ZIP_BCST_RSP 0
+#define arguments_BDK_DTX_ZIP_BCST_RSP -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_zip_ctl
+ *
+ * DTX ZIP Control Register
+ */
+union bdk_dtx_zip_ctl
+{
+    uint64_t u;
+    struct bdk_dtx_zip_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+#else /* Word 0 - Little Endian */
+        uint64_t swap                  : 1;  /**< [  0:  0](R/W) Swap the high and low 36-bit debug bus outputs. */
+        uint64_t echoen                : 1;  /**< [  1:  1](R/W) Drive debug bus with the value in DTX_MIO_ENA(0..1) instead of normal block
+                                                                 debug data. Not applicable when software directly reads the DAT(0..1) registers.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_2_3          : 2;
+        uint64_t active                : 1;  /**< [  4:  4](R/W) Force block's gated clocks on, so that the state of idle signals may be captured. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_zip_ctl_s cn; */
+};
+typedef union bdk_dtx_zip_ctl bdk_dtx_zip_ctl_t;
+
+#define BDK_DTX_ZIP_CTL BDK_DTX_ZIP_CTL_FUNC()
+/* Address helper: same fixed address on CN83XX, CN88XX and CN9XXX;
+   other models report a fatal CSR error. */
+static inline uint64_t BDK_DTX_ZIP_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_ZIP_CTL_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return 0x87e0fe9c0060ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+        return 0x87e0fe9c0060ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x87e0fe9c0060ll;
+    __bdk_csr_fatal("DTX_ZIP_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_ZIP_CTL bdk_dtx_zip_ctl_t
+#define bustype_BDK_DTX_ZIP_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_ZIP_CTL "DTX_ZIP_CTL"
+#define busnum_BDK_DTX_ZIP_CTL 0
+#define arguments_BDK_DTX_ZIP_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) dtx_zip_dat#
+ *
+ * DTX ZIP Raw Data Register
+ */
+union bdk_dtx_zip_datx
+{
+    uint64_t u;
+    struct bdk_dtx_zip_datx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+#else /* Word 0 - Little Endian */
+        uint64_t raw                   : 36; /**< [ 35: 0](RO/H) Raw debug data captured by the DTX before the ENA is applied. This gives the
+                                                                 ability to peek into blocks during an OCLA capture without OCLA reconfiguration. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_zip_datx_s cn; */
+};
+typedef union bdk_dtx_zip_datx bdk_dtx_zip_datx_t;
+
+/* Address helper: a = raw-data word (0..1, 8-byte stride); same address map on
+   CN83XX, CN88XX and CN9XXX, otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_ZIP_DATX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_ZIP_DATX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x87e0fe9c0040ll + 8ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0fe9c0040ll + 8ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe9c0040ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_ZIP_DATX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_ZIP_DATX(a) bdk_dtx_zip_datx_t
+#define bustype_BDK_DTX_ZIP_DATX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_ZIP_DATX(a) "DTX_ZIP_DATX"
+#define busnum_BDK_DTX_ZIP_DATX(a) (a)
+#define arguments_BDK_DTX_ZIP_DATX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_zip_ena#
+ *
+ * DTX ZIP Data Enable Register
+ */
+union bdk_dtx_zip_enax
+{
+    uint64_t u;
+    struct bdk_dtx_zip_enax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+#else /* Word 0 - Little Endian */
+        uint64_t ena                   : 36; /**< [ 35: 0](R/W) Output enable vector of which bits to drive onto the low/high 36-bit debug
+                                                                 buses. Normally only one block will drive each bit. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_zip_enax_s cn; */
+};
+typedef union bdk_dtx_zip_enax bdk_dtx_zip_enax_t;
+
+/* Address helper: a = enable word (0..1, 8-byte stride); same address map on
+   CN83XX, CN88XX and CN9XXX, otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_ZIP_ENAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_ZIP_ENAX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x87e0fe9c0020ll + 8ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0fe9c0020ll + 8ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe9c0020ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_ZIP_ENAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_ZIP_ENAX(a) bdk_dtx_zip_enax_t
+#define bustype_BDK_DTX_ZIP_ENAX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_ZIP_ENAX(a) "DTX_ZIP_ENAX"
+#define busnum_BDK_DTX_ZIP_ENAX(a) (a)
+#define arguments_BDK_DTX_ZIP_ENAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) dtx_zip_sel#
+ *
+ * DTX ZIP Select Register
+ */
+union bdk_dtx_zip_selx
+{
+    uint64_t u;
+    struct bdk_dtx_zip_selx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+#else /* Word 0 - Little Endian */
+        uint64_t value                 : 24; /**< [ 23: 0](R/W) Debug select. Selects which signals to drive onto low/high 36-bit debug buses. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_dtx_zip_selx_s cn; */
+};
+typedef union bdk_dtx_zip_selx bdk_dtx_zip_selx_t;
+
+/* Address helper: a = select word (0..1, 8-byte stride); same address map on
+   CN83XX, CN88XX and CN9XXX, otherwise reports a fatal CSR error. */
+static inline uint64_t BDK_DTX_ZIP_SELX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_DTX_ZIP_SELX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x87e0fe9c0000ll + 8ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0fe9c0000ll + 8ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x87e0fe9c0000ll + 8ll * ((a) & 0x1);
+    __bdk_csr_fatal("DTX_ZIP_SELX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_DTX_ZIP_SELX(a) bdk_dtx_zip_selx_t
+#define bustype_BDK_DTX_ZIP_SELX(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_DTX_ZIP_SELX(a) "DTX_ZIP_SELX"
+#define busnum_BDK_DTX_ZIP_SELX(a) (a)
+#define arguments_BDK_DTX_ZIP_SELX(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_DTX_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ecam.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ecam.h
new file mode 100644
index 0000000000..d8a425160a
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-ecam.h
@@ -0,0 +1,1245 @@
+#ifndef __BDK_CSRS_ECAM_H__
+#define __BDK_CSRS_ECAM_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium ECAM.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration ecam_bar_e
+ *
+ * ECAM Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+/* Base physical address and size of ECAM unit {a}'s PF BAR0 (CSR space) and
+ * PF BAR2 (configuration space window); {a} is the ECAM unit number. */
+#define BDK_ECAM_BAR_E_ECAMX_PF_BAR0(a) (0x87e048000000ll + 0x1000000ll * (a))
+#define BDK_ECAM_BAR_E_ECAMX_PF_BAR0_SIZE 0x100000ull
+#define BDK_ECAM_BAR_E_ECAMX_PF_BAR2(a) (0x848000000000ll + 0x1000000000ll * (a))
+#define BDK_ECAM_BAR_E_ECAMX_PF_BAR2_SIZE 0x1000000000ull
+
+/**
+ * Structure ecam_cfg_addr_s
+ *
+ * ECAM Configuration Address Structure
+ * ECAM load and store operations form an address with this structure: 8-bit, 16-bit, 32-bit and
+ * 64-bit read and write operations are supported to this region.
+ */
+/* NOTE(review): auto-generated. The per-model variants below differ in field
+ * widths: the generic "s" and "cn9" layouts have a 5-bit [IO] and a 6-bit
+ * [DMN] domain field, while cn81xx/cn88xx/cn83xx have a 1-bit [IO] and keep
+ * bits <33:28> reserved. Do not hand-edit; regenerate from the HRM instead. */
+union bdk_ecam_cfg_addr_s
+{
+ uint64_t u;
+ struct bdk_ecam_cfg_addr_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t io : 5; /**< [ 51: 47] Indicates I/O space. */
+ uint64_t reserved_46 : 1;
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0..3) DID. 0x48 + ECAM number. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Setup. Allow certain PCC
+ configuration registers to be written for boot-time initialization. Treated as 0
+ unless in secure mode. */
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t dmn : 6; /**< [ 33: 28] Domain number.
+ Internal:
+ \<33:32\> is SMMU number, \<31:28\> is the bus-numbering space within the SMMU (0x0 or PEM
+ ID). */
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t dmn : 6; /**< [ 33: 28] Domain number.
+ Internal:
+ \<33:32\> is SMMU number, \<31:28\> is the bus-numbering space within the SMMU (0x0 or PEM
+ ID). */
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Setup. Allow certain PCC
+ configuration registers to be written for boot-time initialization. Treated as 0
+ unless in secure mode. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0..3) DID. 0x48 + ECAM number. */
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t reserved_46 : 1;
+ uint64_t io : 5; /**< [ 51: 47] Indicates I/O space. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_ecam_cfg_addr_s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t io : 5; /**< [ 51: 47] Indicates I/O space. */
+ uint64_t reserved_46 : 1;
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0) DID. 0x48. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ This was intended to allow certain PCC configuration registers to be written for
+ boot-time initialization. Treated as 0 unless in secure mode.
+
+ PEM also uses this flag to write certain CS2 registers, e.g. PCIEEP_BAR0_MASKL,
+ but software should be using PEM()_CFG_WR instead of the ECAM for that. */
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t dmn : 6; /**< [ 33: 28] Domain number.
+ Internal:
+ \<33:32\> is SMMU number, \<31:28\> is the bus-numbering space within the SMMU (0x0 or PEM
+ ID). */
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t dmn : 6; /**< [ 33: 28] Domain number.
+ Internal:
+ \<33:32\> is SMMU number, \<31:28\> is the bus-numbering space within the SMMU (0x0 or PEM
+ ID). */
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ This was intended to allow certain PCC configuration registers to be written for
+ boot-time initialization. Treated as 0 unless in secure mode.
+
+ PEM also uses this flag to write certain CS2 registers, e.g. PCIEEP_BAR0_MASKL,
+ but software should be using PEM()_CFG_WR instead of the ECAM for that. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0) DID. 0x48. */
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t reserved_46 : 1;
+ uint64_t io : 5; /**< [ 51: 47] Indicates I/O space. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_ecam_cfg_addr_s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t io : 1; /**< [ 47: 47] Indicates I/O space. */
+ uint64_t reserved_46 : 1;
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0) DID. 0x48 + ECAM number. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Setup. Allow certain PCC
+ configuration registers to be written for boot-time initialization. Treated as 0
+ unless in secure mode. */
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t reserved_28_33 : 6;
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t reserved_28_33 : 6;
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Setup. Allow certain PCC
+ configuration registers to be written for boot-time initialization. Treated as 0
+ unless in secure mode. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0) DID. 0x48 + ECAM number. */
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t reserved_46 : 1;
+ uint64_t io : 1; /**< [ 47: 47] Indicates I/O space. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_ecam_cfg_addr_s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t io : 1; /**< [ 47: 47] Indicates I/O space. */
+ uint64_t reserved_46 : 1;
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0..3) DID. 0x48 + ECAM number. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Setup. Allow certain PCC
+ configuration registers to be written for boot-time initialization. Treated as 0
+ unless in secure mode. */
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t reserved_28_33 : 6;
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t reserved_28_33 : 6;
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Setup. Allow certain PCC
+ configuration registers to be written for boot-time initialization. Treated as 0
+ unless in secure mode. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0..3) DID. 0x48 + ECAM number. */
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t reserved_46 : 1;
+ uint64_t io : 1; /**< [ 47: 47] Indicates I/O space. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_ecam_cfg_addr_s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t io : 1; /**< [ 47: 47] Indicates I/O space. */
+ uint64_t reserved_46 : 1;
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0..1) DID. 0x48 + ECAM number. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Setup. Allow certain PCC
+ configuration registers to be written for boot-time initialization. Treated as 0
+ unless in secure mode. */
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t reserved_28_33 : 6;
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 12; /**< [ 11: 0] Register address within the device. */
+ uint64_t func : 8; /**< [ 19: 12] Function number. Note this assumes an ARI device; for external PCI devices that do not
+ support ARI this contains both the device and function number. */
+ uint64_t bus : 8; /**< [ 27: 20] Bus number. */
+ uint64_t reserved_28_33 : 6;
+ uint64_t bcst : 1; /**< [ 34: 34] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Broadcast. Write to all PCC
+ blocks for fast configuration. Treated as 0 unless in secure mode and SETUP is
+ set. */
+ uint64_t setup : 1; /**< [ 35: 35] Reserved, MBZ.
+ Internal:
+ Reserved for future use - Setup. Allow certain PCC
+ configuration registers to be written for boot-time initialization. Treated as 0
+ unless in secure mode. */
+ uint64_t did : 8; /**< [ 43: 36] ECAM(0..1) DID. 0x48 + ECAM number. */
+ uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
+ uint64_t reserved_46 : 1;
+ uint64_t io : 1; /**< [ 47: 47] Indicates I/O space. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+
+/**
+ * Register (RSL) ecam#_bus#_nsdis
+ *
+ * ECAM Bus Nonsecure Disable Registers
+ */
+union bdk_ecamx_busx_nsdis
+{
+ uint64_t u;
+ struct bdk_ecamx_busx_nsdis_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dis : 1; /**< [ 0: 0](R/W) Disable ECAM bus in nonsecure mode. If set, the indexed ECAM bus number is RAO/WI
+ when accessed via the ECAM space with nonsecure transactions. Note this affects only ECAM
+ configuration access, not normal I/O mapped memory accesses to the device. ECAM 0, bus 0
+ (corresponding to RSL devices) is not generally disabled, instead may be used to disable
+ RSL discovery. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 1; /**< [ 0: 0](R/W) Disable ECAM bus in nonsecure mode. If set, the indexed ECAM bus number is RAO/WI
+ when accessed via the ECAM space with nonsecure transactions. Note this affects only ECAM
+ configuration access, not normal I/O mapped memory accesses to the device. ECAM 0, bus 0
+ (corresponding to RSL devices) is not generally disabled, instead may be used to disable
+ RSL discovery. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_busx_nsdis_s cn; */
+};
+typedef union bdk_ecamx_busx_nsdis bdk_ecamx_busx_nsdis_t;
+
+/* Address of ECAM(a)_BUS(b)_NSDIS; a = ECAM unit (per-model range checked),
+ * b = bus number 0..255. Calls __bdk_csr_fatal() when out of range. */
+static inline uint64_t BDK_ECAMX_BUSX_NSDIS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_BUSX_NSDIS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=255)))
+ return 0x87e048030000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=255)))
+ return 0x87e048030000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=255)))
+ return 0x87e048030000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xff);
+ __bdk_csr_fatal("ECAMX_BUSX_NSDIS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_BUSX_NSDIS(a,b) bdk_ecamx_busx_nsdis_t
+#define bustype_BDK_ECAMX_BUSX_NSDIS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_BUSX_NSDIS(a,b) "ECAMX_BUSX_NSDIS"
+#define device_bar_BDK_ECAMX_BUSX_NSDIS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_BUSX_NSDIS(a,b) (a)
+#define arguments_BDK_ECAMX_BUSX_NSDIS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_bus#_sdis
+ *
+ * ECAM Bus Secure Disable Registers
+ */
+union bdk_ecamx_busx_sdis
+{
+ uint64_t u;
+ struct bdk_ecamx_busx_sdis_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) Secure ECAM bus. If set, the indexed ECAM bus number is secured and RAO/WI when
+ accessed via the ECAM space with nonsecure transactions. This bit overrides
+ ECAM()_BUS()_NSDIS[DIS]. */
+ uint64_t dis : 1; /**< [ 0: 0](SR/W) Disable ECAM bus in secure mode. If set, the indexed ECAM bus number is RAO/WI when
+ accessed via the ECAM space with secure transactions. This bit is similar to the non-
+ secure ECAM()_BUS()_NSDIS[DIS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 1; /**< [ 0: 0](SR/W) Disable ECAM bus in secure mode. If set, the indexed ECAM bus number is RAO/WI when
+ accessed via the ECAM space with secure transactions. This bit is similar to the non-
+ secure ECAM()_BUS()_NSDIS[DIS]. */
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) Secure ECAM bus. If set, the indexed ECAM bus number is secured and RAO/WI when
+ accessed via the ECAM space with nonsecure transactions. This bit overrides
+ ECAM()_BUS()_NSDIS[DIS]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_busx_sdis_s cn; */
+};
+typedef union bdk_ecamx_busx_sdis bdk_ecamx_busx_sdis_t;
+
+/* Address of ECAM(a)_BUS(b)_SDIS; same index ranges as the NSDIS variant. */
+static inline uint64_t BDK_ECAMX_BUSX_SDIS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_BUSX_SDIS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=255)))
+ return 0x87e048020000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=255)))
+ return 0x87e048020000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=255)))
+ return 0x87e048020000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xff);
+ __bdk_csr_fatal("ECAMX_BUSX_SDIS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_BUSX_SDIS(a,b) bdk_ecamx_busx_sdis_t
+#define bustype_BDK_ECAMX_BUSX_SDIS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_BUSX_SDIS(a,b) "ECAMX_BUSX_SDIS"
+#define device_bar_BDK_ECAMX_BUSX_SDIS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_BUSX_SDIS(a,b) (a)
+#define arguments_BDK_ECAMX_BUSX_SDIS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_bus#_skill
+ *
+ * ECAM Bus Secure Disable Registers
+ */
+/* NOTE(review): the generated title above says "Secure Disable" but the field
+ * below is a write-one-to-set bus *kill* — presumably a copy of the SDIS title
+ * in the generator's source data; confirm against the HRM before relying on it. */
+union bdk_ecamx_busx_skill
+{
+ uint64_t u;
+ struct bdk_ecamx_busx_skill_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t skill : 1; /**< [ 0: 0](SR/W1S) ECAM bus kill.
+ Write one to set. Once set, cannot be cleared until soft reset. If set,
+ the indexed ECAM bus/function/device number is RAO/WI when accessed via
+ the ECAM space with any (secure/nonsecure) transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t skill : 1; /**< [ 0: 0](SR/W1S) ECAM bus kill.
+ Write one to set. Once set, cannot be cleared until soft reset. If set,
+ the indexed ECAM bus/function/device number is RAO/WI when accessed via
+ the ECAM space with any (secure/nonsecure) transactions. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_busx_skill_s cn; */
+};
+typedef union bdk_ecamx_busx_skill bdk_ecamx_busx_skill_t;
+
+/* Address of ECAM(a)_BUS(b)_SKILL. Note CN88XX support is pass 2.x only. */
+static inline uint64_t BDK_ECAMX_BUSX_SKILL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_BUSX_SKILL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=255)))
+ return 0x87e048080000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=255)))
+ return 0x87e048080000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=3) && (b<=255)))
+ return 0x87e048080000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xff);
+ __bdk_csr_fatal("ECAMX_BUSX_SKILL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_BUSX_SKILL(a,b) bdk_ecamx_busx_skill_t
+#define bustype_BDK_ECAMX_BUSX_SKILL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_BUSX_SKILL(a,b) "ECAMX_BUSX_SKILL"
+#define device_bar_BDK_ECAMX_BUSX_SKILL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_BUSX_SKILL(a,b) (a)
+#define arguments_BDK_ECAMX_BUSX_SKILL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_const
+ *
+ * ECAM Constants Register
+ * This register contains constants for software discovery.
+ */
+union bdk_ecamx_const
+{
+ uint64_t u;
+ struct bdk_ecamx_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t domains : 8; /**< [ 15: 8](RO) Number of domains. */
+ uint64_t ecams : 8; /**< [ 7: 0](RO) Number of ECAM units. */
+#else /* Word 0 - Little Endian */
+ uint64_t ecams : 8; /**< [ 7: 0](RO) Number of ECAM units. */
+ uint64_t domains : 8; /**< [ 15: 8](RO) Number of domains. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* CN8 lacks the [DOMAINS] field; only [ECAMS] is implemented. */
+ struct bdk_ecamx_const_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t ecams : 8; /**< [ 7: 0](RO) Number of ECAM units. */
+#else /* Word 0 - Little Endian */
+ uint64_t ecams : 8; /**< [ 7: 0](RO) Number of ECAM units. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_ecamx_const_s cn9; */
+};
+typedef union bdk_ecamx_const bdk_ecamx_const_t;
+
+/* Address of ECAM(a)_CONST; note CN88XX is absent from the model list here. */
+static inline uint64_t BDK_ECAMX_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e048000200ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e048000200ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x87e048000200ll + 0x1000000ll * ((a) & 0x0);
+ __bdk_csr_fatal("ECAMX_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_CONST(a) bdk_ecamx_const_t
+#define bustype_BDK_ECAMX_CONST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_CONST(a) "ECAMX_CONST"
+#define device_bar_BDK_ECAMX_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_CONST(a) (a)
+#define arguments_BDK_ECAMX_CONST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ecam#_dev#_nsdis
+ *
+ * ECAM Device Nonsecure Disable Registers
+ */
+union bdk_ecamx_devx_nsdis
+{
+ uint64_t u;
+ struct bdk_ecamx_devx_nsdis_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dis : 1; /**< [ 0: 0](R/W) Disable ECAM device in nonsecure mode. If set, the specified device
+ number on bus 0 are RAO/WI when accessed via the ECAM space with
+ nonsecure transactions. Note this affects only ECAM configuration
+ access, not normal I/O mapped memory accesses to the device. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 1; /**< [ 0: 0](R/W) Disable ECAM device in nonsecure mode. If set, the specified device
+ number on bus 0 are RAO/WI when accessed via the ECAM space with
+ nonsecure transactions. Note this affects only ECAM configuration
+ access, not normal I/O mapped memory accesses to the device. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_devx_nsdis_s cn; */
+};
+typedef union bdk_ecamx_devx_nsdis bdk_ecamx_devx_nsdis_t;
+
+/* Address of ECAM(a)_DEV(b)_NSDIS; b is a device number 0..31 (bus 0 only). */
+static inline uint64_t BDK_ECAMX_DEVX_NSDIS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_DEVX_NSDIS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=31)))
+ return 0x87e048070000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=31)))
+ return 0x87e048070000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=31)))
+ return 0x87e048070000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1f);
+ __bdk_csr_fatal("ECAMX_DEVX_NSDIS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_DEVX_NSDIS(a,b) bdk_ecamx_devx_nsdis_t
+#define bustype_BDK_ECAMX_DEVX_NSDIS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_DEVX_NSDIS(a,b) "ECAMX_DEVX_NSDIS"
+#define device_bar_BDK_ECAMX_DEVX_NSDIS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_DEVX_NSDIS(a,b) (a)
+#define arguments_BDK_ECAMX_DEVX_NSDIS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_dev#_sdis
+ *
+ * ECAM Device Secure Disable Registers
+ */
+union bdk_ecamx_devx_sdis
+{
+ uint64_t u;
+ struct bdk_ecamx_devx_sdis_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) Secure ECAM device. If set, the indexed device number on bus 0 are
+ secured and RAO/WI when accessed via the ECAM space with nonsecure
+ transactions. This bit overrides ECAM()_DEV()_NSDIS[DIS]. */
+ uint64_t dis : 1; /**< [ 0: 0](SR/W) Disable ECAM device in secure mode. If set, ECAM secure
+ read/write operations to the indexed device number on bus 0
+ are RAO/WI when accessed via the ECAM space. This bit is
+ similar to the nonsecure ECAM()_DEV()_NSDIS[DIS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 1; /**< [ 0: 0](SR/W) Disable ECAM device in secure mode. If set, ECAM secure
+ read/write operations to the indexed device number on bus 0
+ are RAO/WI when accessed via the ECAM space. This bit is
+ similar to the nonsecure ECAM()_DEV()_NSDIS[DIS]. */
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) Secure ECAM device. If set, the indexed device number on bus 0 are
+ secured and RAO/WI when accessed via the ECAM space with nonsecure
+ transactions. This bit overrides ECAM()_DEV()_NSDIS[DIS]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_devx_sdis_s cn; */
+};
+typedef union bdk_ecamx_devx_sdis bdk_ecamx_devx_sdis_t;
+
+/* Address of ECAM(a)_DEV(b)_SDIS; same index ranges as the NSDIS variant. */
+static inline uint64_t BDK_ECAMX_DEVX_SDIS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_DEVX_SDIS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=31)))
+ return 0x87e048060000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=31)))
+ return 0x87e048060000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=31)))
+ return 0x87e048060000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1f);
+ __bdk_csr_fatal("ECAMX_DEVX_SDIS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_DEVX_SDIS(a,b) bdk_ecamx_devx_sdis_t
+#define bustype_BDK_ECAMX_DEVX_SDIS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_DEVX_SDIS(a,b) "ECAMX_DEVX_SDIS"
+#define device_bar_BDK_ECAMX_DEVX_SDIS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_DEVX_SDIS(a,b) (a)
+#define arguments_BDK_ECAMX_DEVX_SDIS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_dev#_skill
+ *
+ * ECAM Device Secure Disable Registers
+ */
+/* NOTE(review): as with ecam#_bus#_skill, the generated title reads "Secure
+ * Disable" while the field is a W1S device *kill* — verify against the HRM. */
+union bdk_ecamx_devx_skill
+{
+ uint64_t u;
+ struct bdk_ecamx_devx_skill_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t skill : 1; /**< [ 0: 0](SR/W1S) ECAM device kill.
+ Write one to set. Once set, cannot be cleared until soft reset. If set,
+ the indexed ECAM bus/function/device number is RAO/WI when accessed via
+ the ECAM space with any (secure/nonsecure) transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t skill : 1; /**< [ 0: 0](SR/W1S) ECAM device kill.
+ Write one to set. Once set, cannot be cleared until soft reset. If set,
+ the indexed ECAM bus/function/device number is RAO/WI when accessed via
+ the ECAM space with any (secure/nonsecure) transactions. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_devx_skill_s cn; */
+};
+typedef union bdk_ecamx_devx_skill bdk_ecamx_devx_skill_t;
+
+/* Address of ECAM(a)_DEV(b)_SKILL. Note CN88XX support is pass 2.x only. */
+static inline uint64_t BDK_ECAMX_DEVX_SKILL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_DEVX_SKILL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=31)))
+ return 0x87e0480a0000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=31)))
+ return 0x87e0480a0000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=3) && (b<=31)))
+ return 0x87e0480a0000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0x1f);
+ __bdk_csr_fatal("ECAMX_DEVX_SKILL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_DEVX_SKILL(a,b) bdk_ecamx_devx_skill_t
+#define bustype_BDK_ECAMX_DEVX_SKILL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_DEVX_SKILL(a,b) "ECAMX_DEVX_SKILL"
+#define device_bar_BDK_ECAMX_DEVX_SKILL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_DEVX_SKILL(a,b) (a)
+#define arguments_BDK_ECAMX_DEVX_SKILL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_dom#_bus#_permit
+ *
+ * ECAM Domain Bus Permit Registers
+ * This register sets the permissions for an ECAM access to a device
+ * using a given ECAM bus number.
+ *
+ * Index {b} corresponds to the ECAM address's domain (address's ECAM_CFG_ADDR_S[DMN]).
+ * ECAM()_DOM()_CONST[PERMIT] is used to discover for which domains this register is
+ * implemented; nonimplemented indices are RAZ.
+ *
+ * Index {c} corresponds to the ECAM address's bus number (address's ECAM_CFG_ADDR_S[BUS]).
+ */
+union bdk_ecamx_domx_busx_permit
+{
+ uint64_t u;
+ struct bdk_ecamx_domx_busx_permit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t kill : 1; /**< [ 7: 7](SR/W1S) Kill the device. Once written with one, stays
+ set until warm chip reset. If set, no access
+ allowed by any initiator. */
+ uint64_t reserved_4_6 : 3;
+ uint64_t xcp1_dis : 1; /**< [ 3: 3](SR/W) XCP1 disable. As with [SEC_DIS], but for accesses initiated by XCP1 (MCP). */
+ uint64_t xcp0_dis : 1; /**< [ 2: 2](SR/W) XCP0 disable. As with [SEC_DIS], but for accesses initiated by XCP0 (SCP). */
+ uint64_t nsec_dis : 1; /**< [ 1: 1](SR/W) Nonsecure disable. As with [SEC_DIS], but for accesses initiated by non-secure devices
+ excluding XCP0/XCP1. */
+ uint64_t sec_dis : 1; /**< [ 0: 0](SR/W) Secure disable. */
+#else /* Word 0 - Little Endian */
+ uint64_t sec_dis : 1; /**< [ 0: 0](SR/W) Secure disable. */
+ uint64_t nsec_dis : 1; /**< [ 1: 1](SR/W) Nonsecure disable. As with [SEC_DIS], but for accesses initiated by non-secure devices
+ excluding XCP0/XCP1. */
+ uint64_t xcp0_dis : 1; /**< [ 2: 2](SR/W) XCP0 disable. As with [SEC_DIS], but for accesses initiated by XCP0 (SCP). */
+ uint64_t xcp1_dis : 1; /**< [ 3: 3](SR/W) XCP1 disable. As with [SEC_DIS], but for accesses initiated by XCP1 (MCP). */
+ uint64_t reserved_4_6 : 3;
+ uint64_t kill : 1; /**< [ 7: 7](SR/W1S) Kill the device. Once written with one, stays
+ set until warm chip reset. If set, no access
+ allowed by any initiator. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_domx_busx_permit_s cn; */
+};
+typedef union bdk_ecamx_domx_busx_permit bdk_ecamx_domx_busx_permit_t;
+
+/* Address of ECAM(a)_DOM(b)_BUS(c)_PERMIT; CN9XXX only, a==0, b in {0,1,2},
+ * c is a bus number 0..255. Same base (0x87e048020000) as the CN8 SDIS
+ * register file, with an 0x800-byte stride per domain. */
+static inline uint64_t BDK_ECAMX_DOMX_BUSX_PERMIT(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_DOMX_BUSX_PERMIT(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a==0) && ((b==0) || (b==1) || (b==2)) && (c<=255)))
+ return 0x87e048020000ll + 0x1000000ll * ((a) & 0x0) + 0x800ll * ((b) & 0x3) + 8ll * ((c) & 0xff);
+ __bdk_csr_fatal("ECAMX_DOMX_BUSX_PERMIT", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_ECAMX_DOMX_BUSX_PERMIT(a,b,c) bdk_ecamx_domx_busx_permit_t
+#define bustype_BDK_ECAMX_DOMX_BUSX_PERMIT(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_DOMX_BUSX_PERMIT(a,b,c) "ECAMX_DOMX_BUSX_PERMIT"
+#define device_bar_BDK_ECAMX_DOMX_BUSX_PERMIT(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_DOMX_BUSX_PERMIT(a,b,c) (a)
+#define arguments_BDK_ECAMX_DOMX_BUSX_PERMIT(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) ecam#_dom#_const
+ *
+ * ECAM Constants Register
+ * This register contains constants for software discovery.
+ *
+ * Index {b} indicates the domain for which the attributes are to be returned.
+ */
+union bdk_ecamx_domx_const
+{
+ uint64_t u;
+ struct bdk_ecamx_domx_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t pres : 1; /**< [ 5: 5](RO) If implemented. */
+ uint64_t permit : 1; /**< [ 4: 4](RO) If set, for this domain number, the ECAM()_DOM()_BUS()_PERMIT,
+ ECAM()_DOM()_RSL()_PERMIT, and ECAM()_DOM()_DEV()_PERMIT registers are implemented. */
+ uint64_t smmu : 4; /**< [ 3: 0](RO) Attached SMMU number. */
+#else /* Word 0 - Little Endian */
+ uint64_t smmu : 4; /**< [ 3: 0](RO) Attached SMMU number. */
+ uint64_t permit : 1; /**< [ 4: 4](RO) If set, for this domain number, the ECAM()_DOM()_BUS()_PERMIT,
+ ECAM()_DOM()_RSL()_PERMIT, and ECAM()_DOM()_DEV()_PERMIT registers are implemented. */
+ uint64_t pres : 1; /**< [ 5: 5](RO) If implemented. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_domx_const_s cn; */
+};
+typedef union bdk_ecamx_domx_const bdk_ecamx_domx_const_t;
+
+static inline uint64_t BDK_ECAMX_DOMX_CONST(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_DOMX_CONST(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a==0) && (b<=63)))
+ return 0x87e048000400ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0x3f);
+ __bdk_csr_fatal("ECAMX_DOMX_CONST", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_DOMX_CONST(a,b) bdk_ecamx_domx_const_t
+#define bustype_BDK_ECAMX_DOMX_CONST(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_DOMX_CONST(a,b) "ECAMX_DOMX_CONST"
+#define device_bar_BDK_ECAMX_DOMX_CONST(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_DOMX_CONST(a,b) (a)
+#define arguments_BDK_ECAMX_DOMX_CONST(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_dom#_dev#_permit
+ *
+ * ECAM Domain Device Permit Registers
+ * This register sets the permissions for a ECAM access to a bus 0 device.
+ * This register is used when the bus number is 0; i.e. address's ECAM_CFG_ADDR_S[BUS] = 0x0.
+ *
+ * Index {b} corresponds to the ECAM address's domain (address's ECAM_CFG_ADDR_S[DOMAIN]).
+ * ECAM()_DOM()_CONST[PERMIT] is used to discover for which domains this register is
+ * implemented; nonimplemented indices are RAZ.
+ *
+ * Index {c} corresponds to the bus 0 non-ARI device number (address's
+ * ECAM_CFG_ADDR_S[FUNC]\<7:3\>).
+ */
+union bdk_ecamx_domx_devx_permit
+{
+ uint64_t u;
+ struct bdk_ecamx_domx_devx_permit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t kill : 1; /**< [ 7: 7](SR/W1S) Kill the device. Once written with one, stays
+ set until warm chip reset. If set, no access
+ allowed by any initiator. */
+ uint64_t reserved_4_6 : 3;
+ uint64_t xcp1_dis : 1; /**< [ 3: 3](SR/W) XCP1 disable. As with [SEC_DIS], but for accesses initiated by XCP1 (MCP). */
+ uint64_t xcp0_dis : 1; /**< [ 2: 2](SR/W) XCP0 disable. As with [SEC_DIS], but for accesses initiated by XCP0 (SCP). */
+ uint64_t nsec_dis : 1; /**< [ 1: 1](SR/W) Nonsecure disable. As with [SEC_DIS], but for accesses initiated by non-secure devices
+ excluding XCP0/XCP1. */
+ uint64_t sec_dis : 1; /**< [ 0: 0](SR/W) Secure disable. */
+#else /* Word 0 - Little Endian */
+ uint64_t sec_dis : 1; /**< [ 0: 0](SR/W) Secure disable. */
+ uint64_t nsec_dis : 1; /**< [ 1: 1](SR/W) Nonsecure disable. As with [SEC_DIS], but for accesses initiated by non-secure devices
+ excluding XCP0/XCP1. */
+ uint64_t xcp0_dis : 1; /**< [ 2: 2](SR/W) XCP0 disable. As with [SEC_DIS], but for accesses initiated by XCP0 (SCP). */
+ uint64_t xcp1_dis : 1; /**< [ 3: 3](SR/W) XCP1 disable. As with [SEC_DIS], but for accesses initiated by XCP1 (MCP). */
+ uint64_t reserved_4_6 : 3;
+ uint64_t kill : 1; /**< [ 7: 7](SR/W1S) Kill the device. Once written with one, stays
+ set until warm chip reset. If set, no access
+ allowed by any initiator. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_domx_devx_permit_s cn; */
+};
+typedef union bdk_ecamx_domx_devx_permit bdk_ecamx_domx_devx_permit_t;
+
+static inline uint64_t BDK_ECAMX_DOMX_DEVX_PERMIT(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_DOMX_DEVX_PERMIT(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a==0) && ((b==0) || (b==1) || (b==2)) && (c<=31)))
+ return 0x87e048040000ll + 0x1000000ll * ((a) & 0x0) + 0x800ll * ((b) & 0x3) + 8ll * ((c) & 0x1f);
+ __bdk_csr_fatal("ECAMX_DOMX_DEVX_PERMIT", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_ECAMX_DOMX_DEVX_PERMIT(a,b,c) bdk_ecamx_domx_devx_permit_t
+#define bustype_BDK_ECAMX_DOMX_DEVX_PERMIT(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_DOMX_DEVX_PERMIT(a,b,c) "ECAMX_DOMX_DEVX_PERMIT"
+#define device_bar_BDK_ECAMX_DOMX_DEVX_PERMIT(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_DOMX_DEVX_PERMIT(a,b,c) (a)
+#define arguments_BDK_ECAMX_DOMX_DEVX_PERMIT(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) ecam#_dom#_rsl#_permit
+ *
+ * ECAM Domain Device Permit Registers
+ * This register sets the permissions for an ECAM access to an RSL device.
+ * This register is used when the domain and bus point to RSL; i.e.
+ * address's ECAM_CFG_ADDR_S[DOMAIN]=PCC_DEV_CON_E::PCCBR_MRML\<21:16\>,
+ * ECAM_CFG_ADDR_S[BUS] = PCC_DEV_CON_E::PCCBR_MRML\<15:8\>.
+ *
+ * Index {b} corresponds to the ECAM address's domain (address's ECAM_CFG_ADDR_S[DOMAIN]).
+ * ECAM()_DOM()_CONST[PERMIT] is used to discover for which domains this register is
+ * implemented; nonimplemented indices are RAZ.
+ *
+ * Index {c} corresponds to the RSL device number (address's ECAM_CFG_ADDR_S[FUNC]).
+ */
+union bdk_ecamx_domx_rslx_permit
+{
+ uint64_t u;
+ struct bdk_ecamx_domx_rslx_permit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t kill : 1; /**< [ 7: 7](SR/W1S) Kill the device. Once written with one, stays
+ set until warm chip reset. If set, no access
+ allowed by any initiator. */
+ uint64_t reserved_4_6 : 3;
+ uint64_t xcp1_dis : 1; /**< [ 3: 3](SR/W) XCP1 disable. As with [SEC_DIS], but for accesses initiated by XCP1 (MCP). */
+ uint64_t xcp0_dis : 1; /**< [ 2: 2](SR/W) XCP0 disable. As with [SEC_DIS], but for accesses initiated by XCP0 (SCP). */
+ uint64_t nsec_dis : 1; /**< [ 1: 1](SR/W) Nonsecure disable. As with [SEC_DIS], but for accesses initiated by non-secure devices
+ excluding XCP0/XCP1. */
+ uint64_t sec_dis : 1; /**< [ 0: 0](SR/W) Secure disable. */
+#else /* Word 0 - Little Endian */
+ uint64_t sec_dis : 1; /**< [ 0: 0](SR/W) Secure disable. */
+ uint64_t nsec_dis : 1; /**< [ 1: 1](SR/W) Nonsecure disable. As with [SEC_DIS], but for accesses initiated by non-secure devices
+ excluding XCP0/XCP1. */
+ uint64_t xcp0_dis : 1; /**< [ 2: 2](SR/W) XCP0 disable. As with [SEC_DIS], but for accesses initiated by XCP0 (SCP). */
+ uint64_t xcp1_dis : 1; /**< [ 3: 3](SR/W) XCP1 disable. As with [SEC_DIS], but for accesses initiated by XCP1 (MCP). */
+ uint64_t reserved_4_6 : 3;
+ uint64_t kill : 1; /**< [ 7: 7](SR/W1S) Kill the device. Once written with one, stays
+ set until warm chip reset. If set, no access
+ allowed by any initiator. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_domx_rslx_permit_s cn; */
+};
+typedef union bdk_ecamx_domx_rslx_permit bdk_ecamx_domx_rslx_permit_t;
+
+static inline uint64_t BDK_ECAMX_DOMX_RSLX_PERMIT(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_DOMX_RSLX_PERMIT(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a==0) && ((b==0) || (b==1) || (b==2)) && (c<=255)))
+ return 0x87e048060000ll + 0x1000000ll * ((a) & 0x0) + 0x800ll * ((b) & 0x3) + 8ll * ((c) & 0xff);
+ __bdk_csr_fatal("ECAMX_DOMX_RSLX_PERMIT", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_ECAMX_DOMX_RSLX_PERMIT(a,b,c) bdk_ecamx_domx_rslx_permit_t
+#define bustype_BDK_ECAMX_DOMX_RSLX_PERMIT(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_DOMX_RSLX_PERMIT(a,b,c) "ECAMX_DOMX_RSLX_PERMIT"
+#define device_bar_BDK_ECAMX_DOMX_RSLX_PERMIT(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_DOMX_RSLX_PERMIT(a,b,c) (a)
+#define arguments_BDK_ECAMX_DOMX_RSLX_PERMIT(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) ecam#_nop_of
+ *
+ * ECAM No-Operation Ones Faulting Register
+ */
+union bdk_ecamx_nop_of
+{
+ uint64_t u;
+ struct bdk_ecamx_nop_of_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ones : 64; /**< [ 63: 0](RO) Used internally to handle disabled read/write transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t ones : 64; /**< [ 63: 0](RO) Used internally to handle disabled read/write transactions. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_nop_of_s cn; */
+};
+typedef union bdk_ecamx_nop_of bdk_ecamx_nop_of_t;
+
+static inline uint64_t BDK_ECAMX_NOP_OF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_NOP_OF(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e048000000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e048000000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e048000000ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("ECAMX_NOP_OF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_NOP_OF(a) bdk_ecamx_nop_of_t
+#define bustype_BDK_ECAMX_NOP_OF(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_NOP_OF(a) "ECAMX_NOP_OF"
+#define device_bar_BDK_ECAMX_NOP_OF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_NOP_OF(a) (a)
+#define arguments_BDK_ECAMX_NOP_OF(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ecam#_nop_onf
+ *
+ * ECAM No-Operation Ones Non-Faulting Register
+ */
+union bdk_ecamx_nop_onf
+{
+ uint64_t u;
+ struct bdk_ecamx_nop_onf_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ones : 64; /**< [ 63: 0](RO) Used internally to handle disabled read/write transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t ones : 64; /**< [ 63: 0](RO) Used internally to handle disabled read/write transactions. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_nop_onf_s cn; */
+};
+typedef union bdk_ecamx_nop_onf bdk_ecamx_nop_onf_t;
+
+static inline uint64_t BDK_ECAMX_NOP_ONF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_NOP_ONF(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e048000080ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e048000080ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e048000080ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("ECAMX_NOP_ONF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_NOP_ONF(a) bdk_ecamx_nop_onf_t
+#define bustype_BDK_ECAMX_NOP_ONF(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_NOP_ONF(a) "ECAMX_NOP_ONF"
+#define device_bar_BDK_ECAMX_NOP_ONF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_NOP_ONF(a) (a)
+#define arguments_BDK_ECAMX_NOP_ONF(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ecam#_nop_zf
+ *
+ * ECAM No-Operation Zero Faulting Register
+ */
+union bdk_ecamx_nop_zf
+{
+ uint64_t u;
+ struct bdk_ecamx_nop_zf_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t zeros : 64; /**< [ 63: 0](RO) Used internally to handle disabled read/write transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t zeros : 64; /**< [ 63: 0](RO) Used internally to handle disabled read/write transactions. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_nop_zf_s cn; */
+};
+typedef union bdk_ecamx_nop_zf bdk_ecamx_nop_zf_t;
+
+static inline uint64_t BDK_ECAMX_NOP_ZF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_NOP_ZF(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e048000100ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e048000100ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e048000100ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("ECAMX_NOP_ZF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_NOP_ZF(a) bdk_ecamx_nop_zf_t
+#define bustype_BDK_ECAMX_NOP_ZF(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_NOP_ZF(a) "ECAMX_NOP_ZF"
+#define device_bar_BDK_ECAMX_NOP_ZF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_NOP_ZF(a) (a)
+#define arguments_BDK_ECAMX_NOP_ZF(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ecam#_nop_znf
+ *
+ * ECAM No-Operation Zero Non-Faulting Register
+ */
+union bdk_ecamx_nop_znf
+{
+ uint64_t u;
+ struct bdk_ecamx_nop_znf_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t zeros : 64; /**< [ 63: 0](RO) Used internally to handle disabled read/write transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t zeros : 64; /**< [ 63: 0](RO) Used internally to handle disabled read/write transactions. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_nop_znf_s cn; */
+};
+typedef union bdk_ecamx_nop_znf bdk_ecamx_nop_znf_t;
+
+static inline uint64_t BDK_ECAMX_NOP_ZNF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_NOP_ZNF(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e048000180ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e048000180ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=3))
+ return 0x87e048000180ll + 0x1000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("ECAMX_NOP_ZNF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_NOP_ZNF(a) bdk_ecamx_nop_znf_t
+#define bustype_BDK_ECAMX_NOP_ZNF(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_NOP_ZNF(a) "ECAMX_NOP_ZNF"
+#define device_bar_BDK_ECAMX_NOP_ZNF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_NOP_ZNF(a) (a)
+#define arguments_BDK_ECAMX_NOP_ZNF(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) ecam#_rsl#_nsdis
+ *
+ * ECAM RSL Function Nonsecure Disable Registers
+ * This register is only implemented for ECAM0 which sources RSL.
+ */
+union bdk_ecamx_rslx_nsdis
+{
+ uint64_t u;
+ struct bdk_ecamx_rslx_nsdis_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dis : 1; /**< [ 0: 0](R/W) Disable ECAM RSL function in nonsecure mode. If set, the specified RSL function number
+ (under ECAM 0 bus 1) is RAO/WI when accessed via the ECAM space with nonsecure
+ transactions. Note this affects only ECAM configuration access, not normal I/O mapped
+ memory accesses to the device. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 1; /**< [ 0: 0](R/W) Disable ECAM RSL function in nonsecure mode. If set, the specified RSL function number
+ (under ECAM 0 bus 1) is RAO/WI when accessed via the ECAM space with nonsecure
+ transactions. Note this affects only ECAM configuration access, not normal I/O mapped
+ memory accesses to the device. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_rslx_nsdis_s cn; */
+};
+typedef union bdk_ecamx_rslx_nsdis bdk_ecamx_rslx_nsdis_t;
+
+static inline uint64_t BDK_ECAMX_RSLX_NSDIS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_RSLX_NSDIS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=255)))
+ return 0x87e048050000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=255)))
+ return 0x87e048050000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=255)))
+ return 0x87e048050000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xff);
+ __bdk_csr_fatal("ECAMX_RSLX_NSDIS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_RSLX_NSDIS(a,b) bdk_ecamx_rslx_nsdis_t
+#define bustype_BDK_ECAMX_RSLX_NSDIS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_RSLX_NSDIS(a,b) "ECAMX_RSLX_NSDIS"
+#define device_bar_BDK_ECAMX_RSLX_NSDIS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_RSLX_NSDIS(a,b) (a)
+#define arguments_BDK_ECAMX_RSLX_NSDIS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_rsl#_sdis
+ *
+ * ECAM RSL Function Secure Disable Registers
+ * This register is only implemented for ECAM0 which sources RSL.
+ */
+union bdk_ecamx_rslx_sdis
+{
+ uint64_t u;
+ struct bdk_ecamx_rslx_sdis_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) Secure ECAM RSL function. If set, the indexed RSL function number (under ECAM 0
+ bus 1) is secured and RAO/WI when accessed via the ECAM space with nonsecure
+ transactions. This bit overrides ECAM()_RSL()_NSDIS[DIS]. */
+ uint64_t dis : 1; /**< [ 0: 0](SR/W) Disable ECAM RSL function in secure mode. If set, ECAM secure read/write operations to the
+ indexed
+ RSL function number (under ECAM 0 bus 1) are RAO/WI when accessed via the ECAM
+ space. This bit is similar to the nonsecure ECAM()_RSL()_NSDIS[DIS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dis : 1; /**< [ 0: 0](SR/W) Disable ECAM RSL function in secure mode. If set, ECAM secure read/write operations to the
+ indexed
+ RSL function number (under ECAM 0 bus 1) are RAO/WI when accessed via the ECAM
+ space. This bit is similar to the nonsecure ECAM()_RSL()_NSDIS[DIS]. */
+ uint64_t sec : 1; /**< [ 1: 1](SR/W) Secure ECAM RSL function. If set, the indexed RSL function number (under ECAM 0
+ bus 1) is secured and RAO/WI when accessed via the ECAM space with nonsecure
+ transactions. This bit overrides ECAM()_RSL()_NSDIS[DIS]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_rslx_sdis_s cn; */
+};
+typedef union bdk_ecamx_rslx_sdis bdk_ecamx_rslx_sdis_t;
+
+static inline uint64_t BDK_ECAMX_RSLX_SDIS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_RSLX_SDIS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=255)))
+ return 0x87e048040000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=255)))
+ return 0x87e048040000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=3) && (b<=255)))
+ return 0x87e048040000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xff);
+ __bdk_csr_fatal("ECAMX_RSLX_SDIS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_RSLX_SDIS(a,b) bdk_ecamx_rslx_sdis_t
+#define bustype_BDK_ECAMX_RSLX_SDIS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_RSLX_SDIS(a,b) "ECAMX_RSLX_SDIS"
+#define device_bar_BDK_ECAMX_RSLX_SDIS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_RSLX_SDIS(a,b) (a)
+#define arguments_BDK_ECAMX_RSLX_SDIS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_rsl#_skill
+ *
+ * ECAM RSL Function Secure Disable Registers
+ * This register is only implemented for ECAM0 which sources RSL.
+ */
+union bdk_ecamx_rslx_skill
+{
+ uint64_t u;
+ struct bdk_ecamx_rslx_skill_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t skill : 1; /**< [ 0: 0](SR/W1S) ECAM function kill.
+ Write one to set. Once set, cannot be cleared until soft reset. If set,
+ the indexed ECAM bus/function/device number is RAO/WI when accessed via
+ the ECAM space with any (secure/nonsecure) transactions. */
+#else /* Word 0 - Little Endian */
+ uint64_t skill : 1; /**< [ 0: 0](SR/W1S) ECAM function kill.
+ Write one to set. Once set, cannot be cleared until soft reset. If set,
+ the indexed ECAM bus/function/device number is RAO/WI when accessed via
+ the ECAM space with any (secure/nonsecure) transactions. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_rslx_skill_s cn; */
+};
+typedef union bdk_ecamx_rslx_skill bdk_ecamx_rslx_skill_t;
+
+static inline uint64_t BDK_ECAMX_RSLX_SKILL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_RSLX_SKILL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=255)))
+ return 0x87e048090000ll + 0x1000000ll * ((a) & 0x0) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=255)))
+ return 0x87e048090000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=3) && (b<=255)))
+ return 0x87e048090000ll + 0x1000000ll * ((a) & 0x3) + 8ll * ((b) & 0xff);
+ __bdk_csr_fatal("ECAMX_RSLX_SKILL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_RSLX_SKILL(a,b) bdk_ecamx_rslx_skill_t
+#define bustype_BDK_ECAMX_RSLX_SKILL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_RSLX_SKILL(a,b) "ECAMX_RSLX_SKILL"
+#define device_bar_BDK_ECAMX_RSLX_SKILL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_RSLX_SKILL(a,b) (a)
+#define arguments_BDK_ECAMX_RSLX_SKILL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) ecam#_scratch
+ *
+ * INTERNAL: ECAM Scratch Register
+ */
+union bdk_ecamx_scratch
+{
+ uint64_t u;
+ struct bdk_ecamx_scratch_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Test register for CSR access. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Test register for CSR access. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_ecamx_scratch_s cn; */
+};
+typedef union bdk_ecamx_scratch bdk_ecamx_scratch_t;
+
+static inline uint64_t BDK_ECAMX_SCRATCH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_ECAMX_SCRATCH(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x87e048080000ll + 0x1000000ll * ((a) & 0x0);
+ __bdk_csr_fatal("ECAMX_SCRATCH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_ECAMX_SCRATCH(a) bdk_ecamx_scratch_t
+#define bustype_BDK_ECAMX_SCRATCH(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_ECAMX_SCRATCH(a) "ECAMX_SCRATCH"
+#define device_bar_BDK_ECAMX_SCRATCH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_ECAMX_SCRATCH(a) (a)
+#define arguments_BDK_ECAMX_SCRATCH(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_ECAM_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gic.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gic.h
new file mode 100644
index 0000000000..664b48990c
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gic.h
@@ -0,0 +1,8178 @@
+#ifndef __BDK_CSRS_GIC_H__
+#define __BDK_CSRS_GIC_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium GIC.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration gic_bar_e
+ *
+ * GIC Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_GIC_BAR_E_GIC_PF_BAR0 (0x801000000000ll)
+#define BDK_GIC_BAR_E_GIC_PF_BAR0_SIZE 0x20000ull
+#define BDK_GIC_BAR_E_GIC_PF_BAR2 (0x801000020000ll)
+#define BDK_GIC_BAR_E_GIC_PF_BAR2_SIZE 0x20000ull
+#define BDK_GIC_BAR_E_GIC_PF_BAR4 (0x801080000000ll)
+#define BDK_GIC_BAR_E_GIC_PF_BAR4_SIZE 0x1000000ull
+
+/**
+ * Enumeration gic_int_req_e
+ *
+ * GIC Performance Counter Enumeration
+ * Enumerates the index of GIC_INT_REQ()_PC.
+ */
+#define BDK_GIC_INT_REQ_E_GICD_CLRSPI_NSR_PC (1)
+#define BDK_GIC_INT_REQ_E_GICD_CLRSPI_SR_PC (3)
+#define BDK_GIC_INT_REQ_E_GICD_SETSPI_NSR_PC (0)
+#define BDK_GIC_INT_REQ_E_GICD_SETSPI_SR_PC (2)
+#define BDK_GIC_INT_REQ_E_GICR_CLRLPIR (6)
+#define BDK_GIC_INT_REQ_E_GICR_SETLPIR (5)
+#define BDK_GIC_INT_REQ_E_GITS_TRANSLATER (4)
+
+/**
+ * Enumeration gits_cmd_err_e
+ *
+ * GIC ITS Command Error Enumeration
+ * The actual 24-bit ITS command SEI is defined as {8'h01,
+ * GITS_CMD_TYPE_E(8-bit), GITS_CMD_ERR_E(8-bit)}.
+ */
+#define BDK_GITS_CMD_ERR_E_CSEI_CMD_TO (0xe0)
+#define BDK_GITS_CMD_ERR_E_CSEI_COLLECTION_OOR (3)
+#define BDK_GITS_CMD_ERR_E_CSEI_DEVICE_OOR (1)
+#define BDK_GITS_CMD_ERR_E_CSEI_ID_OOR (5)
+#define BDK_GITS_CMD_ERR_E_CSEI_ITE_INVALID (0x10)
+#define BDK_GITS_CMD_ERR_E_CSEI_ITTSIZE_OOR (2)
+#define BDK_GITS_CMD_ERR_E_CSEI_PHYSICALID_OOR (6)
+#define BDK_GITS_CMD_ERR_E_CSEI_SYNCACK_INVALID (0xe1)
+#define BDK_GITS_CMD_ERR_E_CSEI_TA_INVALID (0xfe)
+#define BDK_GITS_CMD_ERR_E_CSEI_UNMAPPED_COLLECTION (9)
+#define BDK_GITS_CMD_ERR_E_CSEI_UNMAPPED_DEVICE (4)
+#define BDK_GITS_CMD_ERR_E_CSEI_UNMAPPED_INTERRUPT (7)
+#define BDK_GITS_CMD_ERR_E_CSEI_UNSUPPORTED_CMD (0xff)
+
+/**
+ * Enumeration gits_cmd_type_e
+ *
+ * GIC ITS Command Type Enumeration
+ * Enumerates the ITS commands.
+ */
+#define BDK_GITS_CMD_TYPE_E_CMD_CLEAR (4)
+#define BDK_GITS_CMD_TYPE_E_CMD_DISCARD (0xf)
+#define BDK_GITS_CMD_TYPE_E_CMD_INT (3)
+#define BDK_GITS_CMD_TYPE_E_CMD_INV (0xc)
+#define BDK_GITS_CMD_TYPE_E_CMD_INVALL (0xd)
+#define BDK_GITS_CMD_TYPE_E_CMD_MAPC (9)
+#define BDK_GITS_CMD_TYPE_E_CMD_MAPD (8)
+#define BDK_GITS_CMD_TYPE_E_CMD_MAPI (0xb)
+#define BDK_GITS_CMD_TYPE_E_CMD_MAPVI (0xa)
+#define BDK_GITS_CMD_TYPE_E_CMD_MOVALL (0xe)
+#define BDK_GITS_CMD_TYPE_E_CMD_MOVI (1)
+#define BDK_GITS_CMD_TYPE_E_CMD_SYNC (5)
+#define BDK_GITS_CMD_TYPE_E_CMD_UDF (0)
+
+/**
+ * Structure gits_cmd_clear_s
+ *
+ * GIC ITS Clear Command Structure
+ */
+union bdk_gits_cmd_clear_s
+{
+ uint64_t u[4];
+ struct bdk_gits_cmd_clear_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dev_id : 32; /**< [ 63: 32] Interrupt device ID. */
+ uint64_t reserved_8_31 : 24;
+ uint64_t cmd_type : 8; /**< [ 7: 0] Command type. Indicates GITS_CMD_TYPE_E::CMD_CLEAR. */
+#else /* Word 0 - Little Endian */
+ uint64_t cmd_type : 8; /**< [ 7: 0] Command type. Indicates GITS_CMD_TYPE_E::CMD_CLEAR. */
+ uint64_t reserved_8_31 : 24;
+ uint64_t dev_id : 32; /**< [ 63: 32] Interrupt device ID. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+ uint64_t reserved_96_127 : 32;
+ uint64_t int_id : 32; /**< [ 95: 64] Interrupt ID to be translated. */
+#else /* Word 1 - Little Endian */
+ uint64_t int_id : 32; /**< [ 95: 64] Interrupt ID to be translated. */
+ uint64_t reserved_96_127 : 32;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+ uint64_t reserved_128_191 : 64;
+#else /* Word 2 - Little Endian */
+ uint64_t reserved_128_191 : 64;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+ uint64_t reserved_192_255 : 64;
+#else /* Word 3 - Little Endian */
+ uint64_t reserved_192_255 : 64;
+#endif /* Word 3 - End */
+ } s;
+ /* struct bdk_gits_cmd_clear_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_discard_s
+ *
+ * GIC ITS Discard Command Structure
+ */
+union bdk_gits_cmd_discard_s
+{
+ uint64_t u[4];
+ struct bdk_gits_cmd_discard_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dev_id : 32; /**< [ 63: 32] Device ID. */
+ uint64_t reserved_8_31 : 24;
+ uint64_t cmd_type : 8; /**< [ 7: 0] Command type. Indicates GITS_CMD_TYPE_E::CMD_DISCARD. */
+#else /* Word 0 - Little Endian */
+ uint64_t cmd_type : 8; /**< [ 7: 0] Command type. Indicates GITS_CMD_TYPE_E::CMD_DISCARD. */
+ uint64_t reserved_8_31 : 24;
+ uint64_t dev_id : 32; /**< [ 63: 32] Device ID. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+ uint64_t reserved_96_127 : 32;
+ uint64_t int_id : 32; /**< [ 95: 64] Interrupt ID. */
+#else /* Word 1 - Little Endian */
+ uint64_t int_id : 32; /**< [ 95: 64] Interrupt ID. */
+ uint64_t reserved_96_127 : 32;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+ uint64_t reserved_128_191 : 64;
+#else /* Word 2 - Little Endian */
+ uint64_t reserved_128_191 : 64;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+ uint64_t reserved_192_255 : 64;
+#else /* Word 3 - Little Endian */
+ uint64_t reserved_192_255 : 64;
+#endif /* Word 3 - End */
+ } s;
+ /* struct bdk_gits_cmd_discard_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_int_s
+ *
+ * GIC ITS INT Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. [CMD_TYPE] in word 0 selects INT.
+ */
+union bdk_gits_cmd_int_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_int_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_INT. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_INT. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_96_127       : 32;
+        uint64_t int_id                : 32; /**< [ 95: 64] Interrupt ID to be translated. */
+#else /* Word 1 - Little Endian */
+        uint64_t int_id                : 32; /**< [ 95: 64] Interrupt ID to be translated. */
+        uint64_t reserved_96_127       : 32;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t reserved_128_191      : 64;
+#else /* Word 2 - Little Endian */
+        uint64_t reserved_128_191      : 64;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_int_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_inv_s
+ *
+ * GIC ITS INV Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. [CMD_TYPE] in word 0 selects INV.
+ */
+union bdk_gits_cmd_inv_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_inv_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_INV. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_INV. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_96_127       : 32;
+        uint64_t int_id                : 32; /**< [ 95: 64] Reserved. */
+#else /* Word 1 - Little Endian */
+        uint64_t int_id                : 32; /**< [ 95: 64] Reserved. */
+        uint64_t reserved_96_127       : 32;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t reserved_128_191      : 64;
+#else /* Word 2 - Little Endian */
+        uint64_t reserved_128_191      : 64;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_inv_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_invall_s
+ *
+ * GIC ITS INVALL Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. Only [CMD_TYPE] (word 0) and the
+ * collection ID [CID] (word 2) carry payload; all other bits are reserved.
+ */
+union bdk_gits_cmd_invall_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_invall_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_INVALL. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_INVALL. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_64_127       : 64;
+#else /* Word 1 - Little Endian */
+        uint64_t reserved_64_127       : 64;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t reserved_144_191      : 48;
+        uint64_t cid                   : 16; /**< [143:128] Interrupt collection ID. */
+#else /* Word 2 - Little Endian */
+        uint64_t cid                   : 16; /**< [143:128] Interrupt collection ID. */
+        uint64_t reserved_144_191      : 48;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_invall_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_mapc_s
+ *
+ * GIC ITS MAPC Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. Maps collection [CID] to the
+ * redistributor at target address [TA]; [V] validates the mapping.
+ */
+union bdk_gits_cmd_mapc_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_mapc_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MAPC. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MAPC. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_64_127       : 64;
+#else /* Word 1 - Little Endian */
+        uint64_t reserved_64_127       : 64;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t v                     : 1;  /**< [191:191] Valid bit. Specifies whether the ITT address and size are valid. When [V] is
+                                                                 zero, this command unmaps the specified device and translation request from
+                                                                 that device will be discarded. */
+        uint64_t reserved_176_190      : 15;
+        uint64_t ta                    : 32; /**< [175:144] Target address. Specifies the physical address of the redistributor to which
+                                                                 interrupts for the collection will be forwarded. */
+        uint64_t cid                   : 16; /**< [143:128] Interrupt collection ID. */
+#else /* Word 2 - Little Endian */
+        uint64_t cid                   : 16; /**< [143:128] Interrupt collection ID. */
+        uint64_t ta                    : 32; /**< [175:144] Target address. Specifies the physical address of the redistributor to which
+                                                                 interrupts for the collection will be forwarded. */
+        uint64_t reserved_176_190      : 15;
+        uint64_t v                     : 1;  /**< [191:191] Valid bit. Specifies whether the ITT address and size are valid. When [V] is
+                                                                 zero, this command unmaps the specified device and translation request from
+                                                                 that device will be discarded. */
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_mapc_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_mapd_s
+ *
+ * GIC ITS MAPD Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. Maps [DEV_ID] to the interrupt
+ * translation table at [ITTA] with ID width [SIZE]; [V] validates the mapping.
+ */
+union bdk_gits_cmd_mapd_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_mapd_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MAPD. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MAPD. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_69_127       : 59;
+        uint64_t size                  : 5;  /**< [ 68: 64] Number of bits of interrupt ID supported for this device, minus one. */
+#else /* Word 1 - Little Endian */
+        uint64_t size                  : 5;  /**< [ 68: 64] Number of bits of interrupt ID supported for this device, minus one. */
+        uint64_t reserved_69_127       : 59;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t v                     : 1;  /**< [191:191] Valid bit. Specifies whether the ITT address and size are valid. When [V] is zero,
+                                                                 this command unmaps the specified device and translation request from that
+                                                                 device will be discarded. */
+        uint64_t reserved_176_190      : 15;
+        uint64_t itta                  : 40; /**< [175:136] ITT address. Specifies bits \<47:8\> of the physical address of the interrupt
+                                                                 translation table. Bits \<7:0\> of the physical address are zero. */
+        uint64_t reserved_128_135      : 8;
+#else /* Word 2 - Little Endian */
+        uint64_t reserved_128_135      : 8;
+        uint64_t itta                  : 40; /**< [175:136] ITT address. Specifies bits \<47:8\> of the physical address of the interrupt
+                                                                 translation table. Bits \<7:0\> of the physical address are zero. */
+        uint64_t reserved_176_190      : 15;
+        uint64_t v                     : 1;  /**< [191:191] Valid bit. Specifies whether the ITT address and size are valid. When [V] is zero,
+                                                                 this command unmaps the specified device and translation request from that
+                                                                 device will be discarded. */
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_mapd_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_mapi_s
+ *
+ * GIC ITS MAPI Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. [CMD_TYPE] in word 0 selects MAPI.
+ */
+union bdk_gits_cmd_mapi_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_mapi_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MAPI. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MAPI. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_96_127       : 32;
+        uint64_t int_id                : 32; /**< [ 95: 64] Reserved. */
+#else /* Word 1 - Little Endian */
+        uint64_t int_id                : 32; /**< [ 95: 64] Reserved. */
+        uint64_t reserved_96_127       : 32;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t reserved_144_191      : 48;
+        uint64_t cid                   : 16; /**< [143:128] Collection. Specifies the interrupt collection of which the interrupt with identifier
+                                                                 physical ID is a member. */
+#else /* Word 2 - Little Endian */
+        uint64_t cid                   : 16; /**< [143:128] Collection. Specifies the interrupt collection of which the interrupt with identifier
+                                                                 physical ID is a member. */
+        uint64_t reserved_144_191      : 48;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_mapi_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_mapvi_s
+ *
+ * GIC ITS MAPVI Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. Unlike MAPI, word 1 carries both an
+ * [INT_ID] and a [PHY_ID] field.
+ */
+union bdk_gits_cmd_mapvi_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_mapvi_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MAPVI. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MAPVI. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t phy_id                : 32; /**< [127: 96] Reserved. */
+        uint64_t int_id                : 32; /**< [ 95: 64] Reserved. */
+#else /* Word 1 - Little Endian */
+        uint64_t int_id                : 32; /**< [ 95: 64] Reserved. */
+        uint64_t phy_id                : 32; /**< [127: 96] Reserved. */
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t reserved_144_191      : 48;
+        uint64_t cid                   : 16; /**< [143:128] Collection. Specifies the interrupt collection of which the interrupt with identifier
+                                                                 physical ID is a member. */
+#else /* Word 2 - Little Endian */
+        uint64_t cid                   : 16; /**< [143:128] Collection. Specifies the interrupt collection of which the interrupt with identifier
+                                                                 physical ID is a member. */
+        uint64_t reserved_144_191      : 48;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_mapvi_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_movall_s
+ *
+ * GIC ITS MOVALL Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. Moves interrupts from the old
+ * redistributor [TA1] (word 2) to the new redistributor [TA2] (word 3).
+ */
+union bdk_gits_cmd_movall_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_movall_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MOVALL. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MOVALL. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_64_127       : 64;
+#else /* Word 1 - Little Endian */
+        uint64_t reserved_64_127       : 64;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t reserved_176_191      : 16;
+        uint64_t ta1                   : 32; /**< [175:144] Target address 1. Specifies the old redistributor. */
+        uint64_t reserved_128_143      : 16;
+#else /* Word 2 - Little Endian */
+        uint64_t reserved_128_143      : 16;
+        uint64_t ta1                   : 32; /**< [175:144] Target address 1. Specifies the old redistributor. */
+        uint64_t reserved_176_191      : 16;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_240_255      : 16;
+        uint64_t ta2                   : 32; /**< [239:208] Target address 2. Specifies the new redistributor. */
+        uint64_t reserved_192_207      : 16;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_207      : 16;
+        uint64_t ta2                   : 32; /**< [239:208] Target address 2. Specifies the new redistributor. */
+        uint64_t reserved_240_255      : 16;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_movall_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_movi_s
+ *
+ * GIC ITS MOVI Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. Re-targets interrupt [INT_ID] of
+ * device [DEV_ID] to collection [CID].
+ */
+union bdk_gits_cmd_movi_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_movi_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MOVI. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_MOVI. */
+        uint64_t reserved_8_31         : 24;
+        uint64_t dev_id                : 32; /**< [ 63: 32] Interrupt device ID. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_96_127       : 32;
+        uint64_t int_id                : 32; /**< [ 95: 64] Interrupt ID to be translated. */
+#else /* Word 1 - Little Endian */
+        uint64_t int_id                : 32; /**< [ 95: 64] Interrupt ID to be translated. */
+        uint64_t reserved_96_127       : 32;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t reserved_144_191      : 48;
+        uint64_t cid                   : 16; /**< [143:128] Interrupt collection ID. */
+#else /* Word 2 - Little Endian */
+        uint64_t cid                   : 16; /**< [143:128] Interrupt collection ID. */
+        uint64_t reserved_144_191      : 48;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_movi_s_s cn; */
+};
+
+/**
+ * Structure gits_cmd_sync_s
+ *
+ * GIC ITS SYNC Command Structure
+ * Encoded as four 64-bit words (u[4]); the bitfield layout is given for
+ * both big- and little-endian builds. Word 2 names the redistributor
+ * target address [TA] the synchronization applies to.
+ */
+union bdk_gits_cmd_sync_s
+{
+    uint64_t u[4];
+    struct bdk_gits_cmd_sync_s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_SYNC. */
+#else /* Word 0 - Little Endian */
+        uint64_t cmd_type              : 8;  /**< [  7:  0] Command type. Indicates GITS_CMD_TYPE_E::CMD_SYNC. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+        uint64_t reserved_64_127       : 64;
+#else /* Word 1 - Little Endian */
+        uint64_t reserved_64_127       : 64;
+#endif /* Word 1 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 2 - Big Endian */
+        uint64_t reserved_176_191      : 16;
+        uint64_t ta                    : 32; /**< [175:144] Target address of the redistributor 0. */
+        uint64_t reserved_128_143      : 16;
+#else /* Word 2 - Little Endian */
+        uint64_t reserved_128_143      : 16;
+        uint64_t ta                    : 32; /**< [175:144] Target address of the redistributor 0. */
+        uint64_t reserved_176_191      : 16;
+#endif /* Word 2 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 3 - Big Endian */
+        uint64_t reserved_192_255      : 64;
+#else /* Word 3 - Little Endian */
+        uint64_t reserved_192_255      : 64;
+#endif /* Word 3 - End */
+    } s;
+    /* struct bdk_gits_cmd_sync_s_s cn; */
+};
+
+/**
+ * Register (NCB) gic_bist_statusr
+ *
+ * GIC Implementation BIST Status Register
+ * This register contains the BIST status for the GIC memories (including ITS and RDB).
+ * Single 64-bit word; only bits \<8:0\> are defined.
+ */
+union bdk_gic_bist_statusr
+{
+    uint64_t u;
+    struct bdk_gic_bist_statusr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_9_63         : 55;
+        uint64_t bist                  : 9;  /**< [  8:  0](RO/H) Memory BIST status:
+                                                                 0 = Pass.
+                                                                 1 = Fail.
+
+                                                                 Internal:
+                                                                 [8:0]= [cic2cic_ig_buf, lpi_cfg_buf, lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+#else /* Word 0 - Little Endian */
+        uint64_t bist                  : 9;  /**< [  8:  0](RO/H) Memory BIST status:
+                                                                 0 = Pass.
+                                                                 1 = Fail.
+
+                                                                 Internal:
+                                                                 [8:0]= [cic2cic_ig_buf, lpi_cfg_buf, lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+        uint64_t reserved_9_63         : 55;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_bist_statusr_s cn; */
+};
+typedef union bdk_gic_bist_statusr bdk_gic_bist_statusr_t;
+
+#define BDK_GIC_BIST_STATUSR BDK_GIC_BIST_STATUSR_FUNC()
+static inline uint64_t BDK_GIC_BIST_STATUSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_BIST_STATUSR_FUNC(void)
+{
+    /* Register exists only on CN8XXX parts. NOTE(review): assumes
+       __bdk_csr_fatal() does not return — confirm it is marked noreturn,
+       otherwise control falls off the end of this non-void function. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+        return 0x801000010020ll;
+    __bdk_csr_fatal("GIC_BIST_STATUSR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_BIST_STATUSR bdk_gic_bist_statusr_t
+#define bustype_BDK_GIC_BIST_STATUSR BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_BIST_STATUSR "GIC_BIST_STATUSR"
+#define device_bar_BDK_GIC_BIST_STATUSR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_BIST_STATUSR 0
+#define arguments_BDK_GIC_BIST_STATUSR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_bp_test0
+ *
+ * INTERNAL: GIC Backpressure Test Register
+ * Single 64-bit word; diagnostic-only fields for injecting random
+ * backpressure ([ENABLE], [BP_CFG]) paced by [LFSR_FREQ].
+ */
+union bdk_gic_bp_test0
+{
+    uint64_t u;
+    struct bdk_gic_bp_test0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t enable                : 4;  /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+                                                                 Internal:
+                                                                 Once a bit is set, random backpressure is generated
+                                                                 at the corresponding point to allow for more frequent backpressure.
+                                                                 \<63\> = Limit RDB NCBI transactions. Never limit 100% of the time.
+                                                                 \<62\> = Limit ITS NCBI transactions. Never limit 100% of the time.
+                                                                 \<61\> = Limit RDB interrupt message handling via NCBO. Never limit 100% of the time.
+                                                                 \<60\> = Limit ITS interrupt message handling via NCBO. Never limit 100% of the time. */
+        uint64_t reserved_24_59        : 36;
+        uint64_t bp_cfg                : 8;  /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+                                                                 Internal:
+                                                                 There are 2 backpressure configuration bits per enable, with the two bits
+                                                                 defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+                                                                 0x3=25% of the time.
+                                                                 \<23:22\> = Config 3.
+                                                                 \<21:20\> = Config 2.
+                                                                 \<19:18\> = Config 1.
+                                                                 \<17:16\> = Config 0. */
+        uint64_t reserved_12_15        : 4;
+        uint64_t lfsr_freq             : 12; /**< [ 11:  0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+        uint64_t lfsr_freq             : 12; /**< [ 11:  0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+        uint64_t reserved_12_15        : 4;
+        uint64_t bp_cfg                : 8;  /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+                                                                 Internal:
+                                                                 There are 2 backpressure configuration bits per enable, with the two bits
+                                                                 defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+                                                                 0x3=25% of the time.
+                                                                 \<23:22\> = Config 3.
+                                                                 \<21:20\> = Config 2.
+                                                                 \<19:18\> = Config 1.
+                                                                 \<17:16\> = Config 0. */
+        uint64_t reserved_24_59        : 36;
+        uint64_t enable                : 4;  /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+                                                                 Internal:
+                                                                 Once a bit is set, random backpressure is generated
+                                                                 at the corresponding point to allow for more frequent backpressure.
+                                                                 \<63\> = Limit RDB NCBI transactions. Never limit 100% of the time.
+                                                                 \<62\> = Limit ITS NCBI transactions. Never limit 100% of the time.
+                                                                 \<61\> = Limit RDB interrupt message handling via NCBO. Never limit 100% of the time.
+                                                                 \<60\> = Limit ITS interrupt message handling via NCBO. Never limit 100% of the time. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_bp_test0_s cn; */
+};
+typedef union bdk_gic_bp_test0 bdk_gic_bp_test0_t;
+
+#define BDK_GIC_BP_TEST0 BDK_GIC_BP_TEST0_FUNC()
+static inline uint64_t BDK_GIC_BP_TEST0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_BP_TEST0_FUNC(void)
+{
+    /* Register exists only on CN9XXX parts. NOTE(review): assumes
+       __bdk_csr_fatal() does not return — confirm it is marked noreturn. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x8010000100a0ll;
+    __bdk_csr_fatal("GIC_BP_TEST0", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_BP_TEST0 bdk_gic_bp_test0_t
+#define bustype_BDK_GIC_BP_TEST0 BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_BP_TEST0 "GIC_BP_TEST0"
+#define device_bar_BDK_GIC_BP_TEST0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_BP_TEST0 0
+#define arguments_BDK_GIC_BP_TEST0 -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_bp_test1
+ *
+ * INTERNAL: GIC Backpressure Test Register
+ * Single 64-bit word; same field layout as GIC_BP_TEST0 but the [ENABLE]
+ * bits and config 3 are reserved here.
+ */
+union bdk_gic_bp_test1
+{
+    uint64_t u;
+    struct bdk_gic_bp_test1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t enable                : 4;  /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+                                                                 Internal:
+                                                                 Once a bit is set, random backpressure is generated
+                                                                 at the corresponding point to allow for more frequent backpressure.
+                                                                 \<63\> = Reserved.
+                                                                 \<62\> = Reserved.
+                                                                 \<61\> = Reserved.
+                                                                 \<60\> = Reserved. TBD?: Limit messages to AP CIMs. */
+        uint64_t reserved_24_59        : 36;
+        uint64_t bp_cfg                : 8;  /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+                                                                 Internal:
+                                                                 There are 2 backpressure configuration bits per enable, with the two bits
+                                                                 defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+                                                                 0x3=25% of the time.
+                                                                 \<23:22\> = Reserved.
+                                                                 \<21:20\> = Config 2.
+                                                                 \<19:18\> = Config 1.
+                                                                 \<17:16\> = Config 0. */
+        uint64_t reserved_12_15        : 4;
+        uint64_t lfsr_freq             : 12; /**< [ 11:  0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+        uint64_t lfsr_freq             : 12; /**< [ 11:  0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+        uint64_t reserved_12_15        : 4;
+        uint64_t bp_cfg                : 8;  /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+                                                                 Internal:
+                                                                 There are 2 backpressure configuration bits per enable, with the two bits
+                                                                 defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+                                                                 0x3=25% of the time.
+                                                                 \<23:22\> = Reserved.
+                                                                 \<21:20\> = Config 2.
+                                                                 \<19:18\> = Config 1.
+                                                                 \<17:16\> = Config 0. */
+        uint64_t reserved_24_59        : 36;
+        uint64_t enable                : 4;  /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+                                                                 Internal:
+                                                                 Once a bit is set, random backpressure is generated
+                                                                 at the corresponding point to allow for more frequent backpressure.
+                                                                 \<63\> = Reserved.
+                                                                 \<62\> = Reserved.
+                                                                 \<61\> = Reserved.
+                                                                 \<60\> = Reserved. TBD?: Limit messages to AP CIMs. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_bp_test1_s cn; */
+};
+typedef union bdk_gic_bp_test1 bdk_gic_bp_test1_t;
+
+#define BDK_GIC_BP_TEST1 BDK_GIC_BP_TEST1_FUNC()
+static inline uint64_t BDK_GIC_BP_TEST1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_BP_TEST1_FUNC(void)
+{
+    /* Register exists only on CN9XXX parts. NOTE(review): assumes
+       __bdk_csr_fatal() does not return — confirm it is marked noreturn. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x8010000100b0ll;
+    __bdk_csr_fatal("GIC_BP_TEST1", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_BP_TEST1 bdk_gic_bp_test1_t
+#define bustype_BDK_GIC_BP_TEST1 BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_BP_TEST1 "GIC_BP_TEST1"
+#define device_bar_BDK_GIC_BP_TEST1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_BP_TEST1 0
+#define arguments_BDK_GIC_BP_TEST1 -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_cfg_ctlr
+ *
+ * GIC Implementation Secure Configuration Control Register
+ * This register configures GIC features.
+ * Single 64-bit word. The default layout (s) applies to CN9/CN81XX/CN83XX;
+ * CN88XX uses a variant (cn88xx) that lacks [DIS_REDIST_LPI_AGGR_MERGE].
+ */
+union bdk_gic_cfg_ctlr
+{
+    uint64_t u;
+    struct bdk_gic_cfg_ctlr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_34_63        : 30;
+        uint64_t dis_redist_lpi_aggr_merge : 1;/**< [ 33: 33](SR/W) Disable aggressive SETLPIR merging in redistributors. */
+        uint64_t dis_cpu_if_load_balancer : 1;/**< [ 32: 32](SR/W) Disable the CPU interface load balancer. */
+        uint64_t reserved_10_31        : 22;
+        uint64_t dis_lpi_pend_cache    : 1;  /**< [  9:  9](SR/W) Disable the LPI pending table cache. */
+        uint64_t dis_lpi_cfg_cache     : 1;  /**< [  8:  8](SR/W) Disable the LPI configuration cache. */
+        uint64_t dis_inv_hct           : 1;  /**< [  7:  7](SR/W) Disable HW invalidating ITS HCT during ITS disable process. */
+        uint64_t dis_its_cdtc          : 1;  /**< [  6:  6](SR/W) Disable 1-entry device table cache in ITS CEU. */
+        uint64_t dis_its_itlb          : 1;  /**< [  5:  5](SR/W) Disable ITS ITLB (interrupt translation entry lookup buffer). */
+        uint64_t dis_its_dtlb          : 1;  /**< [  4:  4](SR/W) Disable ITS DTLB (device table entry lookup buffer). */
+        uint64_t reserved_3            : 1;
+        uint64_t root_dist             : 1;  /**< [  2:  2](SR/W) Specifies whether the distributor on this socket is root.
+                                                                 0 = Distributor is not root.
+                                                                 1 = Distributor is root.
+
+                                                                 Out of reset, this field is set. EL3 firmware will clear this field as required for multi-
+                                                                 socket operation. */
+        uint64_t om                    : 2;  /**< [  1:  0](SR/W) Operation mode.
+                                                                 0x0 = Single-socket single-root mode.
+                                                                 0x1 = Reserved.
+                                                                 0x2 = Multisocket single-root mode.
+                                                                 0x3 = Multisocket multiroot mode. */
+#else /* Word 0 - Little Endian */
+        uint64_t om                    : 2;  /**< [  1:  0](SR/W) Operation mode.
+                                                                 0x0 = Single-socket single-root mode.
+                                                                 0x1 = Reserved.
+                                                                 0x2 = Multisocket single-root mode.
+                                                                 0x3 = Multisocket multiroot mode. */
+        uint64_t root_dist             : 1;  /**< [  2:  2](SR/W) Specifies whether the distributor on this socket is root.
+                                                                 0 = Distributor is not root.
+                                                                 1 = Distributor is root.
+
+                                                                 Out of reset, this field is set. EL3 firmware will clear this field as required for multi-
+                                                                 socket operation. */
+        uint64_t reserved_3            : 1;
+        uint64_t dis_its_dtlb          : 1;  /**< [  4:  4](SR/W) Disable ITS DTLB (device table entry lookup buffer). */
+        uint64_t dis_its_itlb          : 1;  /**< [  5:  5](SR/W) Disable ITS ITLB (interrupt translation entry lookup buffer). */
+        uint64_t dis_its_cdtc          : 1;  /**< [  6:  6](SR/W) Disable 1-entry device table cache in ITS CEU. */
+        uint64_t dis_inv_hct           : 1;  /**< [  7:  7](SR/W) Disable HW invalidating ITS HCT during ITS disable process. */
+        uint64_t dis_lpi_cfg_cache     : 1;  /**< [  8:  8](SR/W) Disable the LPI configuration cache. */
+        uint64_t dis_lpi_pend_cache    : 1;  /**< [  9:  9](SR/W) Disable the LPI pending table cache. */
+        uint64_t reserved_10_31        : 22;
+        uint64_t dis_cpu_if_load_balancer : 1;/**< [ 32: 32](SR/W) Disable the CPU interface load balancer. */
+        uint64_t dis_redist_lpi_aggr_merge : 1;/**< [ 33: 33](SR/W) Disable aggressive SETLPIR merging in redistributors. */
+        uint64_t reserved_34_63        : 30;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_cfg_ctlr_s cn9; */
+    /* struct bdk_gic_cfg_ctlr_s cn81xx; */
+    struct bdk_gic_cfg_ctlr_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63        : 31;
+        uint64_t dis_cpu_if_load_balancer : 1;/**< [ 32: 32](SR/W) Disable the CPU interface load balancer. */
+        uint64_t reserved_10_31        : 22;
+        uint64_t dis_lpi_pend_cache    : 1;  /**< [  9:  9](SR/W) Disable the LPI pending table cache. */
+        uint64_t dis_lpi_cfg_cache     : 1;  /**< [  8:  8](SR/W) Disable the LPI configuration cache. */
+        uint64_t dis_inv_hct           : 1;  /**< [  7:  7](SR/W) Disable HW invalidating ITS HCT during ITS disable process. */
+        uint64_t dis_its_cdtc          : 1;  /**< [  6:  6](SR/W) Disable 1-entry device table cache in ITS CEU. */
+        uint64_t dis_its_itlb          : 1;  /**< [  5:  5](SR/W) Disable ITS ITLB (interrupt translation entry lookup buffer). */
+        uint64_t dis_its_dtlb          : 1;  /**< [  4:  4](SR/W) Disable ITS DTLB (device table entry lookup buffer). */
+        uint64_t reserved_3            : 1;
+        uint64_t root_dist             : 1;  /**< [  2:  2](SR/W) Specifies whether the distributor on this socket is root.
+                                                                 0 = Distributor is not root.
+                                                                 1 = Distributor is root.
+
+                                                                 Out of reset, this field is set. EL3 firmware will clear this field as required for multi-
+                                                                 socket operation. */
+        uint64_t om                    : 2;  /**< [  1:  0](SR/W) Operation mode.
+                                                                 0x0 = Single-socket single-root mode.
+                                                                 0x1 = Reserved.
+                                                                 0x2 = Multisocket single-root mode.
+                                                                 0x3 = Multisocket multiroot mode. */
+#else /* Word 0 - Little Endian */
+        uint64_t om                    : 2;  /**< [  1:  0](SR/W) Operation mode.
+                                                                 0x0 = Single-socket single-root mode.
+                                                                 0x1 = Reserved.
+                                                                 0x2 = Multisocket single-root mode.
+                                                                 0x3 = Multisocket multiroot mode. */
+        uint64_t root_dist             : 1;  /**< [  2:  2](SR/W) Specifies whether the distributor on this socket is root.
+                                                                 0 = Distributor is not root.
+                                                                 1 = Distributor is root.
+
+                                                                 Out of reset, this field is set. EL3 firmware will clear this field as required for multi-
+                                                                 socket operation. */
+        uint64_t reserved_3            : 1;
+        uint64_t dis_its_dtlb          : 1;  /**< [  4:  4](SR/W) Disable ITS DTLB (device table entry lookup buffer). */
+        uint64_t dis_its_itlb          : 1;  /**< [  5:  5](SR/W) Disable ITS ITLB (interrupt translation entry lookup buffer). */
+        uint64_t dis_its_cdtc          : 1;  /**< [  6:  6](SR/W) Disable 1-entry device table cache in ITS CEU. */
+        uint64_t dis_inv_hct           : 1;  /**< [  7:  7](SR/W) Disable HW invalidating ITS HCT during ITS disable process. */
+        uint64_t dis_lpi_cfg_cache     : 1;  /**< [  8:  8](SR/W) Disable the LPI configuration cache. */
+        uint64_t dis_lpi_pend_cache    : 1;  /**< [  9:  9](SR/W) Disable the LPI pending table cache. */
+        uint64_t reserved_10_31        : 22;
+        uint64_t dis_cpu_if_load_balancer : 1;/**< [ 32: 32](SR/W) Disable the CPU interface load balancer. */
+        uint64_t reserved_33_63        : 31;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_gic_cfg_ctlr_s cn83xx; */
+};
+typedef union bdk_gic_cfg_ctlr bdk_gic_cfg_ctlr_t;
+
+#define BDK_GIC_CFG_CTLR BDK_GIC_CFG_CTLR_FUNC()
+static inline uint64_t BDK_GIC_CFG_CTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_CFG_CTLR_FUNC(void)
+{
+    /* Present on all supported models, so no CAVIUM_IS_MODEL() gate. */
+    return 0x801000010000ll;
+}
+
+#define typedef_BDK_GIC_CFG_CTLR bdk_gic_cfg_ctlr_t
+#define bustype_BDK_GIC_CFG_CTLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_CFG_CTLR "GIC_CFG_CTLR"
+#define device_bar_BDK_GIC_CFG_CTLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_CFG_CTLR 0
+#define arguments_BDK_GIC_CFG_CTLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_const
+ *
+ * GIC Constants Register
+ * This register contains constant for software discovery.
+ * All 64 bits are currently reserved.
+ */
+union bdk_gic_const
+{
+    uint64_t u;
+    struct bdk_gic_const_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_0_63         : 64;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_63         : 64;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_const_s cn; */
+};
+typedef union bdk_gic_const bdk_gic_const_t;
+
+#define BDK_GIC_CONST BDK_GIC_CONST_FUNC()
+static inline uint64_t BDK_GIC_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_CONST_FUNC(void)
+{
+    /* Register exists only on CN9XXX parts. NOTE(review): assumes
+       __bdk_csr_fatal() does not return — confirm it is marked noreturn. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x801000010088ll;
+    __bdk_csr_fatal("GIC_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_CONST bdk_gic_const_t
+#define bustype_BDK_GIC_CONST BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_CONST "GIC_CONST"
+#define device_bar_BDK_GIC_CONST 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_CONST 0
+#define arguments_BDK_GIC_CONST -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_csclk_active_pc
+ *
+ * GIC Conditional Sclk Clock Counter Register
+ * This register counts conditional clocks for power management.
+ * Single 64-bit word; [COUNT] occupies the full word.
+ */
+union bdk_gic_csclk_active_pc
+{
+    uint64_t u;
+    struct bdk_gic_csclk_active_pc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count                 : 64; /**< [ 63:  0](R/W/H) Count of conditional coprocessor-clock cycles since reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 64; /**< [ 63:  0](R/W/H) Count of conditional coprocessor-clock cycles since reset. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_csclk_active_pc_s cn; */
+};
+typedef union bdk_gic_csclk_active_pc bdk_gic_csclk_active_pc_t;
+
+#define BDK_GIC_CSCLK_ACTIVE_PC BDK_GIC_CSCLK_ACTIVE_PC_FUNC()
+static inline uint64_t BDK_GIC_CSCLK_ACTIVE_PC_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_CSCLK_ACTIVE_PC_FUNC(void)
+{
+    /* Register exists only on CN9XXX parts. NOTE(review): assumes
+       __bdk_csr_fatal() does not return — confirm it is marked noreturn. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x801000010090ll;
+    __bdk_csr_fatal("GIC_CSCLK_ACTIVE_PC", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_CSCLK_ACTIVE_PC bdk_gic_csclk_active_pc_t
+#define bustype_BDK_GIC_CSCLK_ACTIVE_PC BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_CSCLK_ACTIVE_PC "GIC_CSCLK_ACTIVE_PC"
+#define device_bar_BDK_GIC_CSCLK_ACTIVE_PC 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_CSCLK_ACTIVE_PC 0
+#define arguments_BDK_GIC_CSCLK_ACTIVE_PC -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_del3t_ctlr
+ *
+ * GIC Debug EL3 Trap Secure Control Register
+ * This register allows disabling the signaling of some DEL3T errors.
+ * The register address is identical on all supported models, but the
+ * field layout differs between the CN8XXX (cn8) and CN9XXX (cn9)
+ * variants below.
+ */
+union bdk_gic_del3t_ctlr
+{
+    uint64_t u;
+    struct bdk_gic_del3t_ctlr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_38_63        : 26;
+        uint64_t del3t_core_id         : 6;  /**< [ 37: 32](SR/W) Target CoreID for signaling of GIC DEL3T Errors. Legal range is [0,47]. */
+        uint64_t reserved_0_31         : 32;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_31         : 32;
+        uint64_t del3t_core_id         : 6;  /**< [ 37: 32](SR/W) Target CoreID for signaling of GIC DEL3T Errors. Legal range is [0,47]. */
+        uint64_t reserved_38_63        : 26;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gic_del3t_ctlr_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_38_63        : 26;
+        uint64_t del3t_core_id         : 6;  /**< [ 37: 32](SR/W) Target CoreID for signaling of GIC DEL3T Errors. Legal range is [0,47]. */
+        uint64_t reserved_11_31        : 21;
+        uint64_t del3t_dis             : 11; /**< [ 10:  0](SR/W) Disable signaling of DEL3T Errors.
+                                                                 Internal:
+                                                                 for del3t_dis[10:0]=
+                                                                 [ncbr_stdn,ncbr_fill,cic2cic_ig_buf, lpi_cfg_buf,
+                                                                 lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+#else /* Word 0 - Little Endian */
+        uint64_t del3t_dis             : 11; /**< [ 10:  0](SR/W) Disable signaling of DEL3T Errors.
+                                                                 Internal:
+                                                                 for del3t_dis[10:0]=
+                                                                 [ncbr_stdn,ncbr_fill,cic2cic_ig_buf, lpi_cfg_buf,
+                                                                 lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+        uint64_t reserved_11_31        : 21;
+        uint64_t del3t_core_id         : 6;  /**< [ 37: 32](SR/W) Target CoreID for signaling of GIC DEL3T Errors. Legal range is [0,47]. */
+        uint64_t reserved_38_63        : 26;
+#endif /* Word 0 - End */
+    } cn8;
+    struct bdk_gic_del3t_ctlr_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_38_63        : 26;
+        uint64_t del3t_core_id         : 6;  /**< [ 37: 32](SR/W) Target CoreID for signaling of GIC DEL3T Errors. Legal range is [0,23]. */
+        uint64_t reserved_11_31        : 21;
+        uint64_t del3t_dis             : 2;  /**< [ 10:  9](SR/W) Disable signaling of DEL3T Errors.
+                                                                 Internal:
+                                                                 for del3t_dis[10:9]=
+                                                                 [ncbr_stdn,ncbr_fill] in GIC. */
+        uint64_t reserved_0_8          : 9;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_8          : 9;
+        uint64_t del3t_dis             : 2;  /**< [ 10:  9](SR/W) Disable signaling of DEL3T Errors.
+                                                                 Internal:
+                                                                 for del3t_dis[10:9]=
+                                                                 [ncbr_stdn,ncbr_fill] in GIC. */
+        uint64_t reserved_11_31        : 21;
+        uint64_t del3t_core_id         : 6;  /**< [ 37: 32](SR/W) Target CoreID for signaling of GIC DEL3T Errors. Legal range is [0,23]. */
+        uint64_t reserved_38_63        : 26;
+#endif /* Word 0 - End */
+    } cn9;
+};
+typedef union bdk_gic_del3t_ctlr bdk_gic_del3t_ctlr_t;
+
+#define BDK_GIC_DEL3T_CTLR BDK_GIC_DEL3T_CTLR_FUNC()
+static inline uint64_t BDK_GIC_DEL3T_CTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_DEL3T_CTLR_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return 0x801000010060ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return 0x801000010060ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+        return 0x801000010060ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x801000010060ll;
+    __bdk_csr_fatal("GIC_DEL3T_CTLR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_DEL3T_CTLR bdk_gic_del3t_ctlr_t
+#define bustype_BDK_GIC_DEL3T_CTLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_DEL3T_CTLR "GIC_DEL3T_CTLR"
+#define device_bar_BDK_GIC_DEL3T_CTLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_DEL3T_CTLR 0
+#define arguments_BDK_GIC_DEL3T_CTLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_ecc_ctlr
+ *
+ * INTERNAL: GIC Implementation Secure ECC Control Register
+ *
+ * This register is reserved for backwards compatibility.
+ * On CN9XXX (cn9 variant below) every bit is reserved.
+ */
+union bdk_gic_ecc_ctlr
+{
+    uint64_t u;
+    struct bdk_gic_ecc_ctlr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63        : 15;
+        uint64_t ram_flip1             : 9;  /**< [ 48: 40](SR/W) Flip syndrome bits on write. Flip syndrome bits \<1\> on writes to the corresponding ram to
+                                                                 test single-bit or double-bit error handling. See COR_DIS bit definitions. */
+        uint64_t reserved_29_39        : 11;
+        uint64_t ram_flip0             : 9;  /**< [ 28: 20](SR/W) Flip syndrome bits on write. Flip syndrome bits \<0\> on writes to the corresponding ram to
+                                                                 test single-bit or double-bit error handling. See COR_DIS bit definitions. */
+        uint64_t reserved_9_19         : 11;
+        uint64_t cor_dis               : 9;  /**< [  8:  0](SR/W) RAM ECC correction disable.
+                                                                 Internal:
+                                                                 for cor_dis[8:0]= [cic2cic_ig_buf, lpi_cfg_buf,
+                                                                 lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+#else /* Word 0 - Little Endian */
+        uint64_t cor_dis               : 9;  /**< [  8:  0](SR/W) RAM ECC correction disable.
+                                                                 Internal:
+                                                                 for cor_dis[8:0]= [cic2cic_ig_buf, lpi_cfg_buf,
+                                                                 lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+        uint64_t reserved_9_19         : 11;
+        uint64_t ram_flip0             : 9;  /**< [ 28: 20](SR/W) Flip syndrome bits on write. Flip syndrome bits \<0\> on writes to the corresponding ram to
+                                                                 test single-bit or double-bit error handling. See COR_DIS bit definitions. */
+        uint64_t reserved_29_39        : 11;
+        uint64_t ram_flip1             : 9;  /**< [ 48: 40](SR/W) Flip syndrome bits on write. Flip syndrome bits \<1\> on writes to the corresponding ram to
+                                                                 test single-bit or double-bit error handling. See COR_DIS bit definitions. */
+        uint64_t reserved_49_63        : 15;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gic_ecc_ctlr_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_60_63        : 4;
+        uint64_t reserved_49_59        : 11;
+        uint64_t ram_flip1             : 9;  /**< [ 48: 40](SR/W) Flip syndrome bits on write. Flip syndrome bits \<1\> on writes to the corresponding ram to
+                                                                 test single-bit or double-bit error handling. See COR_DIS bit definitions. */
+        uint64_t reserved_29_39        : 11;
+        uint64_t ram_flip0             : 9;  /**< [ 28: 20](SR/W) Flip syndrome bits on write. Flip syndrome bits \<0\> on writes to the corresponding ram to
+                                                                 test single-bit or double-bit error handling. See COR_DIS bit definitions. */
+        uint64_t reserved_9_19         : 11;
+        uint64_t cor_dis               : 9;  /**< [  8:  0](SR/W) RAM ECC correction disable.
+                                                                 Internal:
+                                                                 for cor_dis[8:0]= [cic2cic_ig_buf, lpi_cfg_buf,
+                                                                 lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+#else /* Word 0 - Little Endian */
+        uint64_t cor_dis               : 9;  /**< [  8:  0](SR/W) RAM ECC correction disable.
+                                                                 Internal:
+                                                                 for cor_dis[8:0]= [cic2cic_ig_buf, lpi_cfg_buf,
+                                                                 lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+        uint64_t reserved_9_19         : 11;
+        uint64_t ram_flip0             : 9;  /**< [ 28: 20](SR/W) Flip syndrome bits on write. Flip syndrome bits \<0\> on writes to the corresponding ram to
+                                                                 test single-bit or double-bit error handling. See COR_DIS bit definitions. */
+        uint64_t reserved_29_39        : 11;
+        uint64_t ram_flip1             : 9;  /**< [ 48: 40](SR/W) Flip syndrome bits on write. Flip syndrome bits \<1\> on writes to the corresponding ram to
+                                                                 test single-bit or double-bit error handling. See COR_DIS bit definitions. */
+        uint64_t reserved_49_59        : 11;
+        uint64_t reserved_60_63        : 4;
+#endif /* Word 0 - End */
+    } cn8;
+    struct bdk_gic_ecc_ctlr_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_0_63         : 64;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_63         : 64;
+#endif /* Word 0 - End */
+    } cn9;
+};
+typedef union bdk_gic_ecc_ctlr bdk_gic_ecc_ctlr_t;
+
+#define BDK_GIC_ECC_CTLR BDK_GIC_ECC_CTLR_FUNC()
+static inline uint64_t BDK_GIC_ECC_CTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_ECC_CTLR_FUNC(void)
+{
+    return 0x801000010008ll;
+}
+
+#define typedef_BDK_GIC_ECC_CTLR bdk_gic_ecc_ctlr_t
+#define bustype_BDK_GIC_ECC_CTLR BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_ECC_CTLR "GIC_ECC_CTLR"
+#define device_bar_BDK_GIC_ECC_CTLR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_ECC_CTLR 0
+#define arguments_BDK_GIC_ECC_CTLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_ecc_int_statusr
+ *
+ * GIC Implementation ECC Error Interrupt Status Register
+ * This register contains the ECC error status for the GIC memories (including ITS and RDB).
+ * Only present on CN8XXX parts; accessing it on other models is a fatal
+ * error (see BDK_GIC_ECC_INT_STATUSR_FUNC below).
+ */
+union bdk_gic_ecc_int_statusr
+{
+    uint64_t u;
+    struct bdk_gic_ecc_int_statusr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_41_63        : 23;
+        uint64_t dbe                   : 9;  /**< [ 40: 32](R/W1C/H) RAM ECC DBE detected.
+                                                                 Internal:
+                                                                 [8:0] = [cic2cic_ig_buf, lpi_cfg_buf, lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+        uint64_t reserved_9_31         : 23;
+        uint64_t sbe                   : 9;  /**< [  8:  0](R/W1C/H) RAM ECC SBE detected.
+                                                                 Internal:
+                                                                 [8:0] = [cic2cic_ig_buf, lpi_cfg_buf, lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+#else /* Word 0 - Little Endian */
+        uint64_t sbe                   : 9;  /**< [  8:  0](R/W1C/H) RAM ECC SBE detected.
+                                                                 Internal:
+                                                                 [8:0] = [cic2cic_ig_buf, lpi_cfg_buf, lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+        uint64_t reserved_9_31         : 23;
+        uint64_t dbe                   : 9;  /**< [ 40: 32](R/W1C/H) RAM ECC DBE detected.
+                                                                 Internal:
+                                                                 [8:0] = [cic2cic_ig_buf, lpi_cfg_buf, lip_rmw_buf,
+                                                                 dtlb_mem,itlb_mem,hct_mem,cqf_mem,rdb_pktf_mem,aprf_mem] in GIC. */
+        uint64_t reserved_41_63        : 23;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_ecc_int_statusr_s cn; */
+};
+typedef union bdk_gic_ecc_int_statusr bdk_gic_ecc_int_statusr_t;
+
+#define BDK_GIC_ECC_INT_STATUSR BDK_GIC_ECC_INT_STATUSR_FUNC()
+static inline uint64_t BDK_GIC_ECC_INT_STATUSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_ECC_INT_STATUSR_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN8XXX))
+        return 0x801000010030ll;
+    __bdk_csr_fatal("GIC_ECC_INT_STATUSR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_ECC_INT_STATUSR bdk_gic_ecc_int_statusr_t
+#define bustype_BDK_GIC_ECC_INT_STATUSR BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_ECC_INT_STATUSR "GIC_ECC_INT_STATUSR"
+#define device_bar_BDK_GIC_ECC_INT_STATUSR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_ECC_INT_STATUSR 0
+#define arguments_BDK_GIC_ECC_INT_STATUSR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_int_req#_pc
+ *
+ * GIC Performance Counter Register
+ * Index enumerated by GIC_INT_REQ_E.
+ * Valid index range is 0..6, on CN9XXX only (see BDK_GIC_INT_REQX_PC below;
+ * the "& 0x7" there is defensive masking, the bounds check rejects a > 6).
+ */
+union bdk_gic_int_reqx_pc
+{
+    uint64_t u;
+    struct bdk_gic_int_reqx_pc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count                 : 64; /**< [ 63:  0](R/W/H) Performance count for each register. Increments each time the corresponding register is written. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 64; /**< [ 63:  0](R/W/H) Performance count for each register. Increments each time the corresponding register is written. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_int_reqx_pc_s cn; */
+};
+typedef union bdk_gic_int_reqx_pc bdk_gic_int_reqx_pc_t;
+
+static inline uint64_t BDK_GIC_INT_REQX_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_INT_REQX_PC(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=6))
+        return 0x801000010100ll + 8ll * ((a) & 0x7);
+    __bdk_csr_fatal("GIC_INT_REQX_PC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_INT_REQX_PC(a) bdk_gic_int_reqx_pc_t
+#define bustype_BDK_GIC_INT_REQX_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_INT_REQX_PC(a) "GIC_INT_REQX_PC"
+#define device_bar_BDK_GIC_INT_REQX_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_INT_REQX_PC(a) (a)
+#define arguments_BDK_GIC_INT_REQX_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gic_rdb_its_if_err_statusr
+ *
+ * GIC Redistributor Network ITS Interface Error Status Register
+ * This register holds the status of errors detected on the redistributor network interface to ITS.
+ */
+union bdk_gic_rdb_its_if_err_statusr
+{
+    uint64_t u;
+    struct bdk_gic_rdb_its_if_err_statusr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t v                     : 1;  /**< [ 61: 61](R/W1C/H) When set, the command error is valid. */
+        uint64_t m                     : 1;  /**< [ 60: 60](RO/H) When set, it means multiple errors have happened. It is meaningful only when [V]=1. */
+        uint64_t reserved_59           : 1;
+        uint64_t cmd                   : 3;  /**< [ 58: 56](RO/H) ITS Command. Relevant only when [V]=1. Command encodings are
+                                                                 SETLPIR = 0x1,
+                                                                 CLRLPIR = 0x2,
+                                                                 INVLPIR = 0x3,
+                                                                 INVALLR = 0x4,
+                                                                 SYNCR = 0x5,
+                                                                 MOVLPIR = 0x6, and
+                                                                 MOVALLR = 0x7. */
+        uint64_t reserved_52_55        : 4;
+        uint64_t err_multi_socket      : 1;  /**< [ 51: 51](RO/H) Invalid multi-socket message. Relevant only when [V]=1. Indicates incompatibility between
+                                                                 operation mode setting (GIC_CFG_CTLR[OM]) and the ITS message. */
+        uint64_t err_dest_gicr_id      : 1;  /**< [ 50: 50](RO/H) Invalid destination GICR (redistributor). Relevant only when [V]=1 and [CMD]=MOVLPIR or MOVALLR. */
+        uint64_t err_src_gicr_id       : 1;  /**< [ 49: 49](RO/H) Invalid source GICR (Redistributor). Relevant only when [V]=1. */
+        uint64_t err_int_id_range      : 1;  /**< [ 48: 48](RO/H) LPI interrupt ID out of range. Relevant only when [V]=1. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t dst_id                : 8;  /**< [ 43: 36](RO/H) DestID, specified as node_id[1:0], gicr_id[5:0]. Relevant only when [V]=1 and
+                                                                 [CMD]=MOVLPIR or MOVALLR. */
+        uint64_t src_id                : 8;  /**< [ 35: 28](RO/H) SourceID, specified as node_id[1:0], gicr_id[5:0]. It is meaningful only when [V]=1. */
+        uint64_t reserved_20_27        : 8;
+        uint64_t int_id                : 20; /**< [ 19:  0](RO/H) Interrupt ID in the ITS message (except for INVALLR, SYNCR, MOVALLR). It is meaningful
+                                                                 only when [V]=1. */
+#else /* Word 0 - Little Endian */
+        uint64_t int_id                : 20; /**< [ 19:  0](RO/H) Interrupt ID in the ITS message (except for INVALLR, SYNCR, MOVALLR). It is meaningful
+                                                                 only when [V]=1. */
+        uint64_t reserved_20_27        : 8;
+        uint64_t src_id                : 8;  /**< [ 35: 28](RO/H) SourceID, specified as node_id[1:0], gicr_id[5:0]. It is meaningful only when [V]=1. */
+        uint64_t dst_id                : 8;  /**< [ 43: 36](RO/H) DestID, specified as node_id[1:0], gicr_id[5:0]. Relevant only when [V]=1 and
+                                                                 [CMD]=MOVLPIR or MOVALLR. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t err_int_id_range      : 1;  /**< [ 48: 48](RO/H) LPI interrupt ID out of range. Relevant only when [V]=1. */
+        uint64_t err_src_gicr_id       : 1;  /**< [ 49: 49](RO/H) Invalid source GICR (Redistributor). Relevant only when [V]=1. */
+        uint64_t err_dest_gicr_id      : 1;  /**< [ 50: 50](RO/H) Invalid destination GICR (redistributor). Relevant only when [V]=1 and [CMD]=MOVLPIR or MOVALLR. */
+        uint64_t err_multi_socket      : 1;  /**< [ 51: 51](RO/H) Invalid multi-socket message. Relevant only when [V]=1. Indicates incompatibility between
+                                                                 operation mode setting (GIC_CFG_CTLR[OM]) and the ITS message. */
+        uint64_t reserved_52_55        : 4;
+        uint64_t cmd                   : 3;  /**< [ 58: 56](RO/H) ITS Command. Relevant only when [V]=1. Command encodings are
+                                                                 SETLPIR = 0x1,
+                                                                 CLRLPIR = 0x2,
+                                                                 INVLPIR = 0x3,
+                                                                 INVALLR = 0x4,
+                                                                 SYNCR = 0x5,
+                                                                 MOVLPIR = 0x6, and
+                                                                 MOVALLR = 0x7. */
+        uint64_t reserved_59           : 1;
+        uint64_t m                     : 1;  /**< [ 60: 60](RO/H) When set, it means multiple errors have happened. It is meaningful only when [V]=1. */
+        uint64_t v                     : 1;  /**< [ 61: 61](R/W1C/H) When set, the command error is valid. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_rdb_its_if_err_statusr_s cn; */
+};
+typedef union bdk_gic_rdb_its_if_err_statusr bdk_gic_rdb_its_if_err_statusr_t;
+
+#define BDK_GIC_RDB_ITS_IF_ERR_STATUSR BDK_GIC_RDB_ITS_IF_ERR_STATUSR_FUNC()
+static inline uint64_t BDK_GIC_RDB_ITS_IF_ERR_STATUSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_RDB_ITS_IF_ERR_STATUSR_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return 0x801000010070ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return 0x801000010070ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x801000010070ll;
+    __bdk_csr_fatal("GIC_RDB_ITS_IF_ERR_STATUSR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_RDB_ITS_IF_ERR_STATUSR bdk_gic_rdb_its_if_err_statusr_t
+#define bustype_BDK_GIC_RDB_ITS_IF_ERR_STATUSR BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_RDB_ITS_IF_ERR_STATUSR "GIC_RDB_ITS_IF_ERR_STATUSR"
+#define device_bar_BDK_GIC_RDB_ITS_IF_ERR_STATUSR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_RDB_ITS_IF_ERR_STATUSR 0
+#define arguments_BDK_GIC_RDB_ITS_IF_ERR_STATUSR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_rib_err_adrr
+ *
+ * GIC Implementation RIB Error Address Register
+ * This register holds the address of the first RIB error message.
+ * (The "ADRR" spelling follows the hardware register name.)
+ */
+union bdk_gic_rib_err_adrr
+{
+    uint64_t u;
+    struct bdk_gic_rib_err_adrr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t addr                  : 34; /**< [ 35:  2](RO/H) Address of the target CSR. It is meaningful only when GIC_RIB_ERR_STATUSR[V] is set. */
+        uint64_t node                  : 2;  /**< [  1:  0](RO/H) ID of the target node. It is meaningful only when GIC_RIB_ERR_STATUSR[V] is set. */
+#else /* Word 0 - Little Endian */
+        uint64_t node                  : 2;  /**< [  1:  0](RO/H) ID of the target node. It is meaningful only when GIC_RIB_ERR_STATUSR[V] is set. */
+        uint64_t addr                  : 34; /**< [ 35:  2](RO/H) Address of the target CSR. It is meaningful only when GIC_RIB_ERR_STATUSR[V] is set. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_rib_err_adrr_s cn; */
+};
+typedef union bdk_gic_rib_err_adrr bdk_gic_rib_err_adrr_t;
+
+#define BDK_GIC_RIB_ERR_ADRR BDK_GIC_RIB_ERR_ADRR_FUNC()
+static inline uint64_t BDK_GIC_RIB_ERR_ADRR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_RIB_ERR_ADRR_FUNC(void)
+{
+    return 0x801000010048ll;
+}
+
+#define typedef_BDK_GIC_RIB_ERR_ADRR bdk_gic_rib_err_adrr_t
+#define bustype_BDK_GIC_RIB_ERR_ADRR BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_RIB_ERR_ADRR "GIC_RIB_ERR_ADRR"
+#define device_bar_BDK_GIC_RIB_ERR_ADRR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_RIB_ERR_ADRR 0
+#define arguments_BDK_GIC_RIB_ERR_ADRR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_rib_err_statusr
+ *
+ * GIC Implementation RIB Error Status Register
+ * This register holds the status of the first RIB error message.
+ */
+union bdk_gic_rib_err_statusr
+{
+    uint64_t u;
+    struct bdk_gic_rib_err_statusr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t v                     : 1;  /**< [ 61: 61](R/W1C/H) When set, the command error is valid. */
+        uint64_t m                     : 1;  /**< [ 60: 60](RO/H) When set, it means multiple errors have happened. It is meaningful only when [V]=1. */
+        uint64_t reserved_56_59        : 4;
+        uint64_t dev_id                : 24; /**< [ 55: 32](RO/H) Device ID inside the RIB message. */
+        uint64_t reserved_29_31        : 3;
+        uint64_t secure                : 1;  /**< [ 28: 28](RO/H) Secure bit inside the RIB message. It is meaningful only when V=1. */
+        uint64_t reserved_20_27        : 8;
+        uint64_t int_id                : 20; /**< [ 19:  0](RO/H) Interrupt ID inside the RIB message. It is meaningful only when V=1. */
+#else /* Word 0 - Little Endian */
+        uint64_t int_id                : 20; /**< [ 19:  0](RO/H) Interrupt ID inside the RIB message. It is meaningful only when V=1. */
+        uint64_t reserved_20_27        : 8;
+        uint64_t secure                : 1;  /**< [ 28: 28](RO/H) Secure bit inside the RIB message. It is meaningful only when V=1. */
+        uint64_t reserved_29_31        : 3;
+        uint64_t dev_id                : 24; /**< [ 55: 32](RO/H) Device ID inside the RIB message. */
+        uint64_t reserved_56_59        : 4;
+        uint64_t m                     : 1;  /**< [ 60: 60](RO/H) When set, it means multiple errors have happened. It is meaningful only when [V]=1. */
+        uint64_t v                     : 1;  /**< [ 61: 61](R/W1C/H) When set, the command error is valid. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gic_rib_err_statusr_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t v                     : 1;  /**< [ 61: 61](R/W1C/H) When set, the command error is valid. */
+        uint64_t m                     : 1;  /**< [ 60: 60](RO/H) When set, it means multiple errors have happened. It is meaningful only when [V]=1. */
+        uint64_t reserved_53_59        : 7;
+        uint64_t dev_id                : 21; /**< [ 52: 32](RO/H) Device ID inside the RIB message. */
+        uint64_t reserved_29_31        : 3;
+        uint64_t secure                : 1;  /**< [ 28: 28](RO/H) Secure bit inside the RIB message. It is meaningful only when V=1. */
+        uint64_t reserved_20_27        : 8;
+        uint64_t int_id                : 20; /**< [ 19:  0](RO/H) Interrupt ID inside the RIB message. It is meaningful only when V=1. */
+#else /* Word 0 - Little Endian */
+        uint64_t int_id                : 20; /**< [ 19:  0](RO/H) Interrupt ID inside the RIB message. It is meaningful only when V=1. */
+        uint64_t reserved_20_27        : 8;
+        uint64_t secure                : 1;  /**< [ 28: 28](RO/H) Secure bit inside the RIB message. It is meaningful only when V=1. */
+        uint64_t reserved_29_31        : 3;
+        uint64_t dev_id                : 21; /**< [ 52: 32](RO/H) Device ID inside the RIB message. */
+        uint64_t reserved_53_59        : 7;
+        uint64_t m                     : 1;  /**< [ 60: 60](RO/H) When set, it means multiple errors have happened. It is meaningful only when [V]=1. */
+        uint64_t v                     : 1;  /**< [ 61: 61](R/W1C/H) When set, the command error is valid. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } cn8;
+    /* struct bdk_gic_rib_err_statusr_s cn9; */
+};
+typedef union bdk_gic_rib_err_statusr bdk_gic_rib_err_statusr_t;
+
+#define BDK_GIC_RIB_ERR_STATUSR BDK_GIC_RIB_ERR_STATUSR_FUNC()
+static inline uint64_t BDK_GIC_RIB_ERR_STATUSR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_RIB_ERR_STATUSR_FUNC(void)
+{
+    return 0x801000010040ll;
+}
+
+#define typedef_BDK_GIC_RIB_ERR_STATUSR bdk_gic_rib_err_statusr_t
+#define bustype_BDK_GIC_RIB_ERR_STATUSR BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_RIB_ERR_STATUSR "GIC_RIB_ERR_STATUSR"
+#define device_bar_BDK_GIC_RIB_ERR_STATUSR 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_RIB_ERR_STATUSR 0
+#define arguments_BDK_GIC_RIB_ERR_STATUSR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_scratch
+ *
+ * GIC Scratch Register
+ * This is a scratch register.
+ * Present at a fixed address on all models (no model check in the FUNC below).
+ */
+union bdk_gic_scratch
+{
+    uint64_t u;
+    struct bdk_gic_scratch_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t data                  : 64; /**< [ 63:  0](R/W) This is a scratch register. Reads and writes of this register have no side effects. */
+#else /* Word 0 - Little Endian */
+        uint64_t data                  : 64; /**< [ 63:  0](R/W) This is a scratch register. Reads and writes of this register have no side effects. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_scratch_s cn; */
+};
+typedef union bdk_gic_scratch bdk_gic_scratch_t;
+
+#define BDK_GIC_SCRATCH BDK_GIC_SCRATCH_FUNC()
+static inline uint64_t BDK_GIC_SCRATCH_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_SCRATCH_FUNC(void)
+{
+    return 0x801000010080ll;
+}
+
+#define typedef_BDK_GIC_SCRATCH bdk_gic_scratch_t
+#define bustype_BDK_GIC_SCRATCH BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_SCRATCH "GIC_SCRATCH"
+#define device_bar_BDK_GIC_SCRATCH 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_SCRATCH 0
+#define arguments_BDK_GIC_SCRATCH -1,-1,-1,-1
+
+/**
+ * Register (NCB) gic_sync_cfg
+ *
+ * GIC SYNC Configuration Register
+ * This register configures the behavior of ITS SYNC command.
+ */
+union bdk_gic_sync_cfg
+{
+    uint64_t u;
+    struct bdk_gic_sync_cfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t te                    : 1;  /**< [ 63: 63](R/W) Translation enable. If set, interrupt translation is enabled during
+                                                                 sync command execution. If clear, interrupt translation is disabled during sync
+                                                                 command execution. */
+        uint64_t reserved_32_62        : 31;
+        uint64_t tol                   : 32; /**< [ 31:  0](R/W) Time out limit. Timeout wait period for the ITS SYNC command. SYNC command will
+                                                                 wait for ACK from a GICR for at most [TOL] system-clock cycles. If ACK is not
+                                                                 received within [TOL] system-clock cycles, SYNC is timed out and considered
+                                                                 done. [TOL] = 0x0 means SYNC timeout scheme is not used and SYNC command always
+                                                                 waits for ACK. */
+#else /* Word 0 - Little Endian */
+        uint64_t tol                   : 32; /**< [ 31:  0](R/W) Time out limit. Timeout wait period for the ITS SYNC command. SYNC command will
+                                                                 wait for ACK from a GICR for at most [TOL] system-clock cycles. If ACK is not
+                                                                 received within [TOL] system-clock cycles, SYNC is timed out and considered
+                                                                 done. [TOL] = 0x0 means SYNC timeout scheme is not used and SYNC command always
+                                                                 waits for ACK. */
+        uint64_t reserved_32_62        : 31;
+        uint64_t te                    : 1;  /**< [ 63: 63](R/W) Translation enable. If set, interrupt translation is enabled during
+                                                                 sync command execution. If clear, interrupt translation is disabled during sync
+                                                                 command execution. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gic_sync_cfg_s cn; */
+};
+typedef union bdk_gic_sync_cfg bdk_gic_sync_cfg_t;
+
+#define BDK_GIC_SYNC_CFG BDK_GIC_SYNC_CFG_FUNC()
+static inline uint64_t BDK_GIC_SYNC_CFG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GIC_SYNC_CFG_FUNC(void)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return 0x801000010050ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return 0x801000010050ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+        return 0x801000010050ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return 0x801000010050ll;
+    __bdk_csr_fatal("GIC_SYNC_CFG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_GIC_SYNC_CFG bdk_gic_sync_cfg_t
+#define bustype_BDK_GIC_SYNC_CFG BDK_CSR_TYPE_NCB
+#define basename_BDK_GIC_SYNC_CFG "GIC_SYNC_CFG"
+#define device_bar_BDK_GIC_SYNC_CFG 0x0 /* PF_BAR0 */
+#define busnum_BDK_GIC_SYNC_CFG 0
+#define arguments_BDK_GIC_SYNC_CFG -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_cidr0
+ *
+ * GIC Distributor Component Identification Register 0
+ * The four CIDR registers sit at consecutive word addresses
+ * 0x...fff0 through 0x...fffc (see the *_FUNC helpers below).
+ */
+union bdk_gicd_cidr0
+{
+    uint32_t u;
+    struct bdk_gicd_cidr0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_cidr0_s cn; */
+};
+typedef union bdk_gicd_cidr0 bdk_gicd_cidr0_t;
+
+#define BDK_GICD_CIDR0 BDK_GICD_CIDR0_FUNC()
+static inline uint64_t BDK_GICD_CIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICD_CIDR0_FUNC(void)
+{
+    return 0x80100000fff0ll;
+}
+
+#define typedef_BDK_GICD_CIDR0 bdk_gicd_cidr0_t
+#define bustype_BDK_GICD_CIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICD_CIDR0 "GICD_CIDR0"
+#define device_bar_BDK_GICD_CIDR0 0x0 /* PF_BAR0 */
+#define busnum_BDK_GICD_CIDR0 0
+#define arguments_BDK_GICD_CIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_cidr1
+ *
+ * GIC Distributor Component Identification Register 1
+ */
+union bdk_gicd_cidr1
+{
+    uint32_t u;
+    struct bdk_gicd_cidr1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_cidr1_s cn; */
+};
+typedef union bdk_gicd_cidr1 bdk_gicd_cidr1_t;
+
+#define BDK_GICD_CIDR1 BDK_GICD_CIDR1_FUNC()
+static inline uint64_t BDK_GICD_CIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICD_CIDR1_FUNC(void)
+{
+    return 0x80100000fff4ll;
+}
+
+#define typedef_BDK_GICD_CIDR1 bdk_gicd_cidr1_t
+#define bustype_BDK_GICD_CIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICD_CIDR1 "GICD_CIDR1"
+#define device_bar_BDK_GICD_CIDR1 0x0 /* PF_BAR0 */
+#define busnum_BDK_GICD_CIDR1 0
+#define arguments_BDK_GICD_CIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_cidr2
+ *
+ * GIC Distributor Component Identification Register 2
+ */
+union bdk_gicd_cidr2
+{
+    uint32_t u;
+    struct bdk_gicd_cidr2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_cidr2_s cn; */
+};
+typedef union bdk_gicd_cidr2 bdk_gicd_cidr2_t;
+
+#define BDK_GICD_CIDR2 BDK_GICD_CIDR2_FUNC()
+static inline uint64_t BDK_GICD_CIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICD_CIDR2_FUNC(void)
+{
+    return 0x80100000fff8ll;
+}
+
+#define typedef_BDK_GICD_CIDR2 bdk_gicd_cidr2_t
+#define bustype_BDK_GICD_CIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICD_CIDR2 "GICD_CIDR2"
+#define device_bar_BDK_GICD_CIDR2 0x0 /* PF_BAR0 */
+#define busnum_BDK_GICD_CIDR2 0
+#define arguments_BDK_GICD_CIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_cidr3
+ *
+ * GIC Distributor Component Identification Register 3
+ */
+union bdk_gicd_cidr3
+{
+    uint32_t u;
+    struct bdk_gicd_cidr3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_cidr3_s cn; */
+};
+typedef union bdk_gicd_cidr3 bdk_gicd_cidr3_t;
+
+#define BDK_GICD_CIDR3 BDK_GICD_CIDR3_FUNC()
+static inline uint64_t BDK_GICD_CIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICD_CIDR3_FUNC(void)
+{
+    return 0x80100000fffcll;
+}
+
+#define typedef_BDK_GICD_CIDR3 bdk_gicd_cidr3_t
+#define bustype_BDK_GICD_CIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICD_CIDR3 "GICD_CIDR3"
+#define device_bar_BDK_GICD_CIDR3 0x0 /* PF_BAR0 */
+#define busnum_BDK_GICD_CIDR3 0
+#define arguments_BDK_GICD_CIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_clrspi_nsr
+ *
+ * GIC Distributor Clear SPI Pending Register
+ */
+union bdk_gicd_clrspi_nsr
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_clrspi_nsr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_10_31        : 22;
+        uint32_t spi_id                : 10; /**< [  9:  0](WO) Clear an SPI pending state (write-only). If the SPI is not pending, then the write has no
+                                                                 effect.
+
+                                                                 If the SPI ID is invalid, then the write has no effect.
+
+                                                                 If the register is written using a nonsecure access and the value specifies a secure SPI
+                                                                 and the value of the corresponding GICD_NSACR() register is less than 0x2 (i.e. does not
+                                                                 permit nonsecure accesses to clear the interrupt pending state), the write has no effect. */
+#else /* Word 0 - Little Endian */
+        uint32_t spi_id                : 10; /**< [  9:  0](WO) Clear an SPI pending state (write-only). If the SPI is not pending, then the write has no
+                                                                 effect.
+
+                                                                 If the SPI ID is invalid, then the write has no effect.
+
+                                                                 If the register is written using a nonsecure access and the value specifies a secure SPI
+                                                                 and the value of the corresponding GICD_NSACR() register is less than 0x2 (i.e. does not
+                                                                 permit nonsecure accesses to clear the interrupt pending state), the write has no effect. */
+        uint32_t reserved_10_31        : 22;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_clrspi_nsr_s cn; */
+};
+typedef union bdk_gicd_clrspi_nsr bdk_gicd_clrspi_nsr_t;
+
#define BDK_GICD_CLRSPI_NSR BDK_GICD_CLRSPI_NSR_FUNC()
/**
 * Return the fixed physical address of GICD_CLRSPI_NSR:
 * GIC distributor base (0x801000000000) plus offset 0x48.
 */
static inline uint64_t BDK_GICD_CLRSPI_NSR_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_CLRSPI_NSR_FUNC(void)
{
    return 0x801000000000ll + 0x48ll;
}

#define typedef_BDK_GICD_CLRSPI_NSR bdk_gicd_clrspi_nsr_t
#define bustype_BDK_GICD_CLRSPI_NSR BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_CLRSPI_NSR "GICD_CLRSPI_NSR"
#define device_bar_BDK_GICD_CLRSPI_NSR 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_CLRSPI_NSR 0
#define arguments_BDK_GICD_CLRSPI_NSR -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_clrspi_sr
+ *
+ * GIC Distributor Clear Secure SPI Pending Register
+ */
+union bdk_gicd_clrspi_sr
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_clrspi_sr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_10_31        : 22;
+        uint32_t spi_id                : 10; /**< [  9:  0](SWO) Clear an SPI pending state (write-only). If the SPI is not pending, then the write has no
+                                                                 effect.
+
+                                                                 If the SPI ID is invalid, then the write has no effect.
+
+                                                                 If the register is written using a nonsecure access, the write has no effect. */
+#else /* Word 0 - Little Endian */
+        uint32_t spi_id                : 10; /**< [  9:  0](SWO) Clear an SPI pending state (write-only). If the SPI is not pending, then the write has no
+                                                                 effect.
+
+                                                                 If the SPI ID is invalid, then the write has no effect.
+
+                                                                 If the register is written using a nonsecure access, the write has no effect. */
+        uint32_t reserved_10_31        : 22;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_clrspi_sr_s cn; */
+};
+typedef union bdk_gicd_clrspi_sr bdk_gicd_clrspi_sr_t;
+
#define BDK_GICD_CLRSPI_SR BDK_GICD_CLRSPI_SR_FUNC()
/**
 * Return the fixed physical address of GICD_CLRSPI_SR:
 * GIC distributor base (0x801000000000) plus offset 0x58.
 */
static inline uint64_t BDK_GICD_CLRSPI_SR_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_CLRSPI_SR_FUNC(void)
{
    return 0x801000000000ll + 0x58ll;
}

#define typedef_BDK_GICD_CLRSPI_SR bdk_gicd_clrspi_sr_t
#define bustype_BDK_GICD_CLRSPI_SR BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_CLRSPI_SR "GICD_CLRSPI_SR"
#define device_bar_BDK_GICD_CLRSPI_SR 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_CLRSPI_SR 0
#define arguments_BDK_GICD_CLRSPI_SR -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_icactiver#
+ *
+ * GIC Distributor Interrupt Clear-Active Registers
+ * Each bit in this register provides a clear-active bit for each SPI supported by the GIC.
+ * Writing one to a clear-active bit clears the active status of the corresponding SPI.
+ */
+union bdk_gicd_icactiverx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_icactiverx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not active. If read as one, the SPI is in active state.
+
+                                                                 Clear-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A clear-active bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not active. If read as one, the SPI is in active state.
+
+                                                                 Clear-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A clear-active bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* CN9XXX variant: same bit layout; the field is additionally tagged /H (hardware-modified). */
+    struct bdk_gicd_icactiverx_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C/H) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not active. If read as one, the SPI is in active state.
+
+                                                                 Clear-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A clear-active bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C/H) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not active. If read as one, the SPI is in active state.
+
+                                                                 Clear-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A clear-active bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } cn9;
+    /* struct bdk_gicd_icactiverx_cn9 cn81xx; */
+    /* struct bdk_gicd_icactiverx_s cn88xx; */
+    /* struct bdk_gicd_icactiverx_cn9 cn83xx; */
+};
+typedef union bdk_gicd_icactiverx bdk_gicd_icactiverx_t;
+
/**
 * Return the physical address of GICD_ICACTIVER(a).
 * Valid indices are 1..4 (covering SPIs 32..159); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_ICACTIVERX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_ICACTIVERX(unsigned long a)
{
    if ((1 <= a) && (a <= 4))
        return 0x801000000380ll + ((a & 0x7) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_ICACTIVERX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_ICACTIVERX(a) bdk_gicd_icactiverx_t
#define bustype_BDK_GICD_ICACTIVERX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_ICACTIVERX(a) "GICD_ICACTIVERX"
#define device_bar_BDK_GICD_ICACTIVERX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_ICACTIVERX(a) (a)
#define arguments_BDK_GICD_ICACTIVERX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_icenabler#
+ *
+ * GIC Distributor Interrupt Clear-Enable Registers
+ * Each bit in GICD_ICENABLER() provides a clear-enable bit for each SPI supported by the GIC.
+ * Writing one to a clear-enable bit disables forwarding of the corresponding SPI from the
+ * distributor to the CPU interfaces. Reading a bit identifies whether the SPI is enabled.
+ */
+union bdk_gicd_icenablerx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_icenablerx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SPI for SPI IDs in the range 159..32. Upon reading, if a bit is
+                                                                 zero, then the SPI is not enabled to be forwarded to the CPU interface. Upon reading, if a
+                                                                 bit is one, the SPI is enabled to be forwarded to the CPU interface. Clear-enable bits
+                                                                 corresponding to secure interrupts (either group 0 or group 1) may only be set by secure
+                                                                 accesses.
+
+                                                                 Writes to the register cannot be considered complete until the effects of the write are
+                                                                 visible throughout the affinity hierarchy. To ensure that an enable has been cleared,
+                                                                 software must write to this register with bits set to clear the required enables. Software
+                                                                 must then poll GICD_(S)CTLR[RWP] (register writes pending) until it has the value zero. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SPI for SPI IDs in the range 159..32. Upon reading, if a bit is
+                                                                 zero, then the SPI is not enabled to be forwarded to the CPU interface. Upon reading, if a
+                                                                 bit is one, the SPI is enabled to be forwarded to the CPU interface. Clear-enable bits
+                                                                 corresponding to secure interrupts (either group 0 or group 1) may only be set by secure
+                                                                 accesses.
+
+                                                                 Writes to the register cannot be considered complete until the effects of the write are
+                                                                 visible throughout the affinity hierarchy. To ensure that an enable has been cleared,
+                                                                 software must write to this register with bits set to clear the required enables. Software
+                                                                 must then poll GICD_(S)CTLR[RWP] (register writes pending) until it has the value zero. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_icenablerx_s cn; */
+};
+typedef union bdk_gicd_icenablerx bdk_gicd_icenablerx_t;
+
/**
 * Return the physical address of GICD_ICENABLER(a).
 * Valid indices are 1..4 (covering SPIs 32..159); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_ICENABLERX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_ICENABLERX(unsigned long a)
{
    if ((1 <= a) && (a <= 4))
        return 0x801000000180ll + ((a & 0x7) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_ICENABLERX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_ICENABLERX(a) bdk_gicd_icenablerx_t
#define bustype_BDK_GICD_ICENABLERX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_ICENABLERX(a) "GICD_ICENABLERX"
#define device_bar_BDK_GICD_ICENABLERX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_ICENABLERX(a) (a)
#define arguments_BDK_GICD_ICENABLERX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_icfgr#
+ *
+ * GIC Distributor SPI Configuration Registers
+ */
+union bdk_gicd_icfgrx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_icfgrx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W) Two bits per SPI. Defines whether an SPI is level-sensitive or edge-triggered.
+                                                                 Bit[1] is zero, the SPI is level-sensitive.
+                                                                 Bit[1] is one, the SPI is edge-triggered.
+                                                                 Bit[0] Reserved.
+
+                                                                 If SPI is a secure interrupt, then its corresponding field is RAZ/WI to nonsecure
+                                                                 accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W) Two bits per SPI. Defines whether an SPI is level-sensitive or edge-triggered.
+                                                                 Bit[1] is zero, the SPI is level-sensitive.
+                                                                 Bit[1] is one, the SPI is edge-triggered.
+                                                                 Bit[0] Reserved.
+
+                                                                 If SPI is a secure interrupt, then its corresponding field is RAZ/WI to nonsecure
+                                                                 accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_icfgrx_s cn; */
+};
+typedef union bdk_gicd_icfgrx bdk_gicd_icfgrx_t;
+
/**
 * Return the physical address of GICD_ICFGR(a).
 * Valid indices are 2..9 (the SPI configuration words); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_ICFGRX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_ICFGRX(unsigned long a)
{
    if ((2 <= a) && (a <= 9))
        return 0x801000000c00ll + ((a & 0xf) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_ICFGRX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_ICFGRX(a) bdk_gicd_icfgrx_t
#define bustype_BDK_GICD_ICFGRX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_ICFGRX(a) "GICD_ICFGRX"
#define device_bar_BDK_GICD_ICFGRX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_ICFGRX(a) (a)
#define arguments_BDK_GICD_ICFGRX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_icpendr#
+ *
+ * GIC Distributor Interrupt Clear-Pending Registers
+ * Each bit in GICD_ICPENDR() provides a clear-pending bit for each SPI supported by the GIC.
+ * Writing one to a clear-pending bit clears the pending status of the corresponding SPI.
+ */
+union bdk_gicd_icpendrx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_icpendrx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not pending. If read as one, the SPI is in pending state.
+
+                                                                 Clear-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+                                                                 be set by secure accesses.
+
+                                                                 A clear-pending bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not pending. If read as one, the SPI is in pending state.
+
+                                                                 Clear-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+                                                                 be set by secure accesses.
+
+                                                                 A clear-pending bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* CN9XXX variant: same bit layout; the field is additionally tagged /H (hardware-modified). */
+    struct bdk_gicd_icpendrx_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C/H) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not pending. If read as one, the SPI is in pending state.
+
+                                                                 Clear-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+                                                                 be set by secure accesses.
+
+                                                                 A clear-pending bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C/H) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not pending. If read as one, the SPI is in pending state.
+
+                                                                 Clear-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+                                                                 be set by secure accesses.
+
+                                                                 A clear-pending bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } cn9;
+    /* struct bdk_gicd_icpendrx_cn9 cn81xx; */
+    /* struct bdk_gicd_icpendrx_s cn88xx; */
+    /* struct bdk_gicd_icpendrx_cn9 cn83xx; */
+};
+typedef union bdk_gicd_icpendrx bdk_gicd_icpendrx_t;
+
/**
 * Return the physical address of GICD_ICPENDR(a).
 * Valid indices are 1..4 (covering SPIs 32..159); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_ICPENDRX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_ICPENDRX(unsigned long a)
{
    if ((1 <= a) && (a <= 4))
        return 0x801000000280ll + ((a & 0x7) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_ICPENDRX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_ICPENDRX(a) bdk_gicd_icpendrx_t
#define bustype_BDK_GICD_ICPENDRX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_ICPENDRX(a) "GICD_ICPENDRX"
#define device_bar_BDK_GICD_ICPENDRX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_ICPENDRX(a) (a)
#define arguments_BDK_GICD_ICPENDRX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_igroupr#
+ *
+ * GIC Distributor Secure Interrupt Group Registers
+ * The bit in this register for a particular SPI is concatenated with the corresponding
+ * bit for that SPI in GICD_IGRPMODR() to form a two-bit field that defines the
+ * interrupt group (G0S, G1S, G1NS) for that SPI.
+ */
+union bdk_gicd_igrouprx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_igrouprx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](SR/W) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If zero, then the SPI is
+                                                                 secure. If one, the SPI is nonsecure. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](SR/W) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If zero, then the SPI is
+                                                                 secure. If one, the SPI is nonsecure. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_igrouprx_s cn; */
+};
+typedef union bdk_gicd_igrouprx bdk_gicd_igrouprx_t;
+
/**
 * Return the physical address of GICD_IGROUPR(a).
 * Valid indices are 1..4 (covering SPIs 32..159); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_IGROUPRX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_IGROUPRX(unsigned long a)
{
    if ((1 <= a) && (a <= 4))
        return 0x801000000080ll + ((a & 0x7) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_IGROUPRX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_IGROUPRX(a) bdk_gicd_igrouprx_t
#define bustype_BDK_GICD_IGROUPRX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_IGROUPRX(a) "GICD_IGROUPRX"
#define device_bar_BDK_GICD_IGROUPRX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_IGROUPRX(a) (a)
#define arguments_BDK_GICD_IGROUPRX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_igrpmodr#
+ *
+ * GIC Distributor Interrupt Group Modifier Secure Registers
+ * The bit in this register for a particular SPI is concatenated with the
+ * corresponding bit for that SPI in GICD_IGROUPR() to form a two-bit field that defines
+ * the interrupt group (G0S, G1S, G1NS) for that SPI.
+ */
+union bdk_gicd_igrpmodrx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_igrpmodrx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](SR/W) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If zero, then
+                                                                 the SPI group is not modified. If one, then the SPI group is modified from group
+                                                                 0 to secure group 1. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](SR/W) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If zero, then
+                                                                 the SPI group is not modified. If one, then the SPI group is modified from group
+                                                                 0 to secure group 1. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_igrpmodrx_s cn; */
+};
+typedef union bdk_gicd_igrpmodrx bdk_gicd_igrpmodrx_t;
+
/**
 * Return the physical address of GICD_IGRPMODR(a).
 * Valid indices are 1..4 (covering SPIs 32..159); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_IGRPMODRX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_IGRPMODRX(unsigned long a)
{
    if ((1 <= a) && (a <= 4))
        return 0x801000000d00ll + ((a & 0x7) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_IGRPMODRX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_IGRPMODRX(a) bdk_gicd_igrpmodrx_t
#define bustype_BDK_GICD_IGRPMODRX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_IGRPMODRX(a) "GICD_IGRPMODRX"
#define device_bar_BDK_GICD_IGRPMODRX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_IGRPMODRX(a) (a)
#define arguments_BDK_GICD_IGRPMODRX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_iidr
+ *
+ * GIC Distributor Implementation Identification Register
+ * This 32-bit register is read-only and specifies the version and features supported by the
+ * distributor.
+ */
+union bdk_gicd_iidr
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_iidr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t productid             : 8;  /**< [ 31: 24](RO) An implementation defined product number for the device.
+                                                                 In CNXXXX, enumerated by PCC_PROD_E. */
+        uint32_t reserved_20_23        : 4;
+        uint32_t variant               : 4;  /**< [ 19: 16](RO) Indicates the major revision or variant of the product.
+                                                                 On CNXXXX, this is the major revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+        uint32_t revision              : 4;  /**< [ 15: 12](RO) Indicates the minor revision of the product.
+                                                                 On CNXXXX, this is the minor revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+        uint32_t implementer           : 12; /**< [ 11:  0](RO) Indicates the implementer:
+                                                                 0x34C = Cavium. */
+#else /* Word 0 - Little Endian */
+        uint32_t implementer           : 12; /**< [ 11:  0](RO) Indicates the implementer:
+                                                                 0x34C = Cavium. */
+        uint32_t revision              : 4;  /**< [ 15: 12](RO) Indicates the minor revision of the product.
+                                                                 On CNXXXX, this is the minor revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+        uint32_t variant               : 4;  /**< [ 19: 16](RO) Indicates the major revision or variant of the product.
+                                                                 On CNXXXX, this is the major revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+        uint32_t reserved_20_23        : 4;
+        uint32_t productid             : 8;  /**< [ 31: 24](RO) An implementation defined product number for the device.
+                                                                 In CNXXXX, enumerated by PCC_PROD_E. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_iidr_s cn; */
+};
+typedef union bdk_gicd_iidr bdk_gicd_iidr_t;
+
#define BDK_GICD_IIDR BDK_GICD_IIDR_FUNC()
/**
 * Return the fixed physical address of GICD_IIDR:
 * GIC distributor base (0x801000000000) plus offset 0x8.
 */
static inline uint64_t BDK_GICD_IIDR_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_IIDR_FUNC(void)
{
    return 0x801000000000ll + 0x8ll;
}

#define typedef_BDK_GICD_IIDR bdk_gicd_iidr_t
#define bustype_BDK_GICD_IIDR BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_IIDR "GICD_IIDR"
#define device_bar_BDK_GICD_IIDR 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_IIDR 0
#define arguments_BDK_GICD_IIDR -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_ipriorityr#
+ *
+ * GIC Distributor Interrupt Priority Registers
+ * Each byte in this register provides a priority field for each SPI supported by the GIC.
+ */
+union bdk_gicd_ipriorityrx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_ipriorityrx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W) Each byte corresponds to an SPI for SPI IDs in the range 159..32.
+
+                                                                 Priority fields corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses, or when GICD_(S)CTLR[DS] is one.
+
+                                                                 Byte accesses are permitted to these registers.
+
+                                                                 A priority field for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W) Each byte corresponds to an SPI for SPI IDs in the range 159..32.
+
+                                                                 Priority fields corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses, or when GICD_(S)CTLR[DS] is one.
+
+                                                                 Byte accesses are permitted to these registers.
+
+                                                                 A priority field for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_ipriorityrx_s cn; */
+};
+typedef union bdk_gicd_ipriorityrx bdk_gicd_ipriorityrx_t;
+
/**
 * Return the physical address of GICD_IPRIORITYR(a).
 * Valid indices are 8..39 (the SPI priority words); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_IPRIORITYRX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_IPRIORITYRX(unsigned long a)
{
    if ((8 <= a) && (a <= 39))
        return 0x801000000400ll + ((a & 0x3f) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_IPRIORITYRX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_IPRIORITYRX(a) bdk_gicd_ipriorityrx_t
#define bustype_BDK_GICD_IPRIORITYRX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_IPRIORITYRX(a) "GICD_IPRIORITYRX"
#define device_bar_BDK_GICD_IPRIORITYRX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_IPRIORITYRX(a) (a)
#define arguments_BDK_GICD_IPRIORITYRX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicd_irouter#
+ *
+ * GIC Distributor SPI Routing Registers
+ * These registers provide the routing information for the security state of the associated SPIs.
+ * Up to 64 bits of state to control the routing.
+ */
+union bdk_gicd_irouterx
+{
+    /* Raw 64-bit image of the register. */
+    uint64_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_irouterx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_40_63        : 24;
+        uint64_t a3                    : 8;  /**< [ 39: 32](RO) Specifies the affinity 3 level for the SPI. In CNXXXX implementation, 0x0. */
+        uint64_t irm                   : 1;  /**< [ 31: 31](R/W) Specifies the interrupt routing mode for the SPI.
+                                                                 0 = Route to the processor specified by the affinity levels A3.A2.A1.A0.
+                                                                 1 = Route to any one processor in the system (one-of-N). */
+        uint64_t reserved_24_30        : 7;
+        uint64_t a2                    : 8;  /**< [ 23: 16](R/W) Specifies the affinity 2 level for the SPI. */
+        uint64_t a1                    : 8;  /**< [ 15:  8](R/W) Specifies the affinity 1 level for the SPI. */
+        uint64_t a0                    : 8;  /**< [  7:  0](R/W) Specifies the affinity 0 level for the SPI. */
+#else /* Word 0 - Little Endian */
+        uint64_t a0                    : 8;  /**< [  7:  0](R/W) Specifies the affinity 0 level for the SPI. */
+        uint64_t a1                    : 8;  /**< [ 15:  8](R/W) Specifies the affinity 1 level for the SPI. */
+        uint64_t a2                    : 8;  /**< [ 23: 16](R/W) Specifies the affinity 2 level for the SPI. */
+        uint64_t reserved_24_30        : 7;
+        uint64_t irm                   : 1;  /**< [ 31: 31](R/W) Specifies the interrupt routing mode for the SPI.
+                                                                 0 = Route to the processor specified by the affinity levels A3.A2.A1.A0.
+                                                                 1 = Route to any one processor in the system (one-of-N). */
+        uint64_t a3                    : 8;  /**< [ 39: 32](RO) Specifies the affinity 3 level for the SPI. In CNXXXX implementation, 0x0. */
+        uint64_t reserved_40_63        : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_irouterx_s cn; */
+};
+typedef union bdk_gicd_irouterx bdk_gicd_irouterx_t;
+
/**
 * Return the physical address of GICD_IROUTER(a).
 * Valid indices are the SPI IDs 32..159; any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_IROUTERX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_IROUTERX(unsigned long a)
{
    if ((32 <= a) && (a <= 159))
        return 0x801000006000ll + ((a & 0xff) * 8ll); /* base + 8-byte stride */
    __bdk_csr_fatal("GICD_IROUTERX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_IROUTERX(a) bdk_gicd_irouterx_t
#define bustype_BDK_GICD_IROUTERX(a) BDK_CSR_TYPE_NCB
#define basename_BDK_GICD_IROUTERX(a) "GICD_IROUTERX"
#define device_bar_BDK_GICD_IROUTERX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_IROUTERX(a) (a)
#define arguments_BDK_GICD_IROUTERX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_isactiver#
+ *
+ * GIC Distributor Interrupt Set-Active Registers
+ * Each bit in this register provides a set-active bit for each SPI supported by the GIC.
+ * Writing one to a set-active bit sets the status of the corresponding SPI to active.
+ */
+union bdk_gicd_isactiverx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_isactiverx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not active. If read as one, the SPI is in active state.
+
+                                                                 Set-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A set-active bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not active. If read as one, the SPI is in active state.
+
+                                                                 Set-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A set-active bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* CN9XXX variant: same bit layout; the field is additionally tagged /H (hardware-modified). */
+    struct bdk_gicd_isactiverx_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S/H) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not active. If read as one, the SPI is in active state.
+
+                                                                 Set-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A set-active bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S/H) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not active. If read as one, the SPI is in active state.
+
+                                                                 Set-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A set-active bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } cn9;
+    /* struct bdk_gicd_isactiverx_cn9 cn81xx; */
+    /* struct bdk_gicd_isactiverx_s cn88xx; */
+    /* struct bdk_gicd_isactiverx_cn9 cn83xx; */
+};
+typedef union bdk_gicd_isactiverx bdk_gicd_isactiverx_t;
+
/**
 * Return the physical address of GICD_ISACTIVER(a).
 * Valid indices are 1..4 (covering SPIs 32..159); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_ISACTIVERX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_ISACTIVERX(unsigned long a)
{
    if ((1 <= a) && (a <= 4))
        return 0x801000000300ll + ((a & 0x7) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_ISACTIVERX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_ISACTIVERX(a) bdk_gicd_isactiverx_t
#define bustype_BDK_GICD_ISACTIVERX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_ISACTIVERX(a) "GICD_ISACTIVERX"
#define device_bar_BDK_GICD_ISACTIVERX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_ISACTIVERX(a) (a)
#define arguments_BDK_GICD_ISACTIVERX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_isenabler#
+ *
+ * GIC Distributor Interrupt Set-Enable Registers
+ * Each bit in GICD_ISENABLER() provides a set-enable bit for each SPI supported by the GIC.
+ * Writing one to a set-enable bit enables forwarding of the corresponding SPI from the
+ * distributor to the CPU interfaces.
+ */
+union bdk_gicd_isenablerx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_isenablerx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If zero, then
+                                                                 the SPI is not enabled to be forwarded to the CPU interface. If one, the SPI is
+                                                                 enabled to be forwarded to the CPU interface. Set-enable bits corresponding to
+                                                                 secure interrupts (either group 0 or group 1) may only be set by secure
+                                                                 accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If zero, then
+                                                                 the SPI is not enabled to be forwarded to the CPU interface. If one, the SPI is
+                                                                 enabled to be forwarded to the CPU interface. Set-enable bits corresponding to
+                                                                 secure interrupts (either group 0 or group 1) may only be set by secure
+                                                                 accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_isenablerx_s cn; */
+};
+typedef union bdk_gicd_isenablerx bdk_gicd_isenablerx_t;
+
/**
 * Return the physical address of GICD_ISENABLER(a).
 * Valid indices are 1..4 (covering SPIs 32..159); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_ISENABLERX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_ISENABLERX(unsigned long a)
{
    if ((1 <= a) && (a <= 4))
        return 0x801000000100ll + ((a & 0x7) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_ISENABLERX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_ISENABLERX(a) bdk_gicd_isenablerx_t
#define bustype_BDK_GICD_ISENABLERX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_ISENABLERX(a) "GICD_ISENABLERX"
#define device_bar_BDK_GICD_ISENABLERX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_ISENABLERX(a) (a)
#define arguments_BDK_GICD_ISENABLERX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_ispendr#
+ *
+ * GIC Distributor Interrupt Set-Pending Registers
+ * Each bit in GICD_ISPENDR() provides a set-pending bit for each SPI supported by the GIC.
+ * Writing one to a set-pending bit sets the status of the corresponding SPI to pending.
+ */
+union bdk_gicd_ispendrx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_ispendrx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not pending. If read as one, the SPI is in pending state.
+
+                                                                 Set-pending bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A set-pending bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not pending. If read as one, the SPI is in pending state.
+
+                                                                 Set-pending bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A set-pending bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* CN9XXX variant: same bit layout; the field is additionally tagged /H (hardware-modified). */
+    struct bdk_gicd_ispendrx_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S/H) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not pending. If read as one, the SPI is in pending state.
+
+                                                                 Set-pending bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A set-pending bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1S/H) Each bit corresponds to an SPI for SPI IDs in the range 159..32. If read as zero, then the
+                                                                 SPI
+                                                                 is not pending. If read as one, the SPI is in pending state.
+
+                                                                 Set-pending bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A set-pending bit for a secure SPI is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } cn9;
+    /* struct bdk_gicd_ispendrx_cn9 cn81xx; */
+    /* struct bdk_gicd_ispendrx_s cn88xx; */
+    /* struct bdk_gicd_ispendrx_cn9 cn83xx; */
+};
+typedef union bdk_gicd_ispendrx bdk_gicd_ispendrx_t;
+
/**
 * Return the physical address of GICD_ISPENDR(a).
 * Valid indices are 1..4 (covering SPIs 32..159); any other index is reported fatal.
 */
static inline uint64_t BDK_GICD_ISPENDRX(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_ISPENDRX(unsigned long a)
{
    if ((1 <= a) && (a <= 4))
        return 0x801000000200ll + ((a & 0x7) * 4ll); /* base + 4-byte stride */
    __bdk_csr_fatal("GICD_ISPENDRX", 1, a, 0, 0, 0);
}

#define typedef_BDK_GICD_ISPENDRX(a) bdk_gicd_ispendrx_t
#define bustype_BDK_GICD_ISPENDRX(a) BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_ISPENDRX(a) "GICD_ISPENDRX"
#define device_bar_BDK_GICD_ISPENDRX(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_ISPENDRX(a) (a)
#define arguments_BDK_GICD_ISPENDRX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicd_nsacr#
+ *
+ * GIC Distributor Nonsecure Access Control Secure Registers
+ */
+union bdk_gicd_nsacrx
+{
+    /* Raw 32-bit image of the register. */
+    uint32_t u;
+    /* Bit-field view; the #if arms mirror each other so bit positions match on both endiannesses. */
+    struct bdk_gicd_nsacrx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](SR/W) Two bits per SPI. Defines whether nonsecure access is permitted to secure SPI resources.
+                                                                 0x0 = No nonsecure access is permitted to fields associated with the corresponding SPI.
+                                                                 0x1 = Nonsecure read and write access is permitted to fields associated with the SPI in
+                                                                 GICD_ISPENDR(). A nonsecure write access to GICD_SETSPI_NSR is permitted to
+                                                                 set the pending state of the corresponding SPI.
+                                                                 0x2 = Adds nonsecure read and write access permissions to fields associated with the
+                                                                 corresponding SPI in GICD_ICPENDR(). A nonsecure write access to
+                                                                 GICD_CLRSPI_NSR is permitted to clear the pending state of the corresponding SPI. Also
+                                                                 adds nonsecure read access permission to fields associated with the corresponding SPI in
+                                                                 the GICD_ISACTIVER() and GICD_ICACTIVER().
+                                                                 0x3 = Adds nonsecure read and write access permission to fields associated with the
+                                                                 corresponding SPI in GICD_IROUTER().
+
+                                                                 This register is RAZ/WI for nonsecure accesses.
+
+                                                                 When GICD_(S)CTLR[DS] is one, this register is RAZ/WI. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](SR/W) Two bits per SPI. Defines whether nonsecure access is permitted to secure SPI resources.
+                                                                 0x0 = No nonsecure access is permitted to fields associated with the corresponding SPI.
+                                                                 0x1 = Nonsecure read and write access is permitted to fields associated with the SPI in
+                                                                 GICD_ISPENDR(). A nonsecure write access to GICD_SETSPI_NSR is permitted to
+                                                                 set the pending state of the corresponding SPI.
+                                                                 0x2 = Adds nonsecure read and write access permissions to fields associated with the
+                                                                 corresponding SPI in GICD_ICPENDR(). A nonsecure write access to
+                                                                 GICD_CLRSPI_NSR is permitted to clear the pending state of the corresponding SPI. Also
+                                                                 adds nonsecure read access permission to fields associated with the corresponding SPI in
+                                                                 the GICD_ISACTIVER() and GICD_ICACTIVER().
+                                                                 0x3 = Adds nonsecure read and write access permission to fields associated with the
+                                                                 corresponding SPI in GICD_IROUTER().
+
+                                                                 This register is RAZ/WI for nonsecure accesses.
+
+                                                                 When GICD_(S)CTLR[DS] is one, this register is RAZ/WI. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicd_nsacrx_s cn; */
+};
+typedef union bdk_gicd_nsacrx bdk_gicd_nsacrx_t;
+
+static inline uint64_t BDK_GICD_NSACRX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICD_NSACRX(unsigned long a)
+{
+ if ((a>=2)&&(a<=9))
+ return 0x801000000e00ll + 4ll * ((a) & 0xf);
+ __bdk_csr_fatal("GICD_NSACRX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICD_NSACRX(a) bdk_gicd_nsacrx_t
+#define bustype_BDK_GICD_NSACRX(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICD_NSACRX(a) "GICD_NSACRX"
+#define device_bar_BDK_GICD_NSACRX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GICD_NSACRX(a) (a)
+#define arguments_BDK_GICD_NSACRX(a) (a),-1,-1,-1
+
/**
 * Register (NCB32b) gicd_pidr0
 *
 * GIC Distributor Peripheral Identification Register 0
 */
union bdk_gicd_pidr0
{
    uint32_t u;
    struct bdk_gicd_pidr0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_8_31 : 24;
        uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number bits \<7:0\>; reads PCC_PIDR_PARTNUM0_E::GICD. */
#else /* Word 0 - Little Endian */
        uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number bits \<7:0\>; reads PCC_PIDR_PARTNUM0_E::GICD. */
        uint32_t reserved_8_31 : 24;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_pidr0_s cn; */
};
typedef union bdk_gicd_pidr0 bdk_gicd_pidr0_t;

#define BDK_GICD_PIDR0 BDK_GICD_PIDR0_FUNC()
static inline uint64_t BDK_GICD_PIDR0_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_PIDR0_FUNC(void)
{
    return 0x80100000ffe0ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_PIDR0 bdk_gicd_pidr0_t
#define bustype_BDK_GICD_PIDR0 BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_PIDR0 "GICD_PIDR0"
#define device_bar_BDK_GICD_PIDR0 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_PIDR0 0
#define arguments_BDK_GICD_PIDR0 -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_pidr1
 *
 * GIC Distributor Peripheral Identification Register 1
 */
union bdk_gicd_pidr1
{
    uint32_t u;
    struct bdk_gicd_pidr1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_8_31 : 24;
        uint32_t idcode : 4;   /**< [ 7: 4](RO) JEP106 identification code \<3:0\>; Cavium is 0x4C. */
        uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number bits \<11:8\>; reads PCC_PIDR_PARTNUM1_E::COMP. */
#else /* Word 0 - Little Endian */
        uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number bits \<11:8\>; reads PCC_PIDR_PARTNUM1_E::COMP. */
        uint32_t idcode : 4;   /**< [ 7: 4](RO) JEP106 identification code \<3:0\>; Cavium is 0x4C. */
        uint32_t reserved_8_31 : 24;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_pidr1_s cn; */
};
typedef union bdk_gicd_pidr1 bdk_gicd_pidr1_t;

#define BDK_GICD_PIDR1 BDK_GICD_PIDR1_FUNC()
static inline uint64_t BDK_GICD_PIDR1_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_PIDR1_FUNC(void)
{
    return 0x80100000ffe4ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_PIDR1 bdk_gicd_pidr1_t
#define bustype_BDK_GICD_PIDR1 BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_PIDR1 "GICD_PIDR1"
#define device_bar_BDK_GICD_PIDR1 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_PIDR1 0
#define arguments_BDK_GICD_PIDR1 -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_pidr2
 *
 * GIC Distributor Peripheral Identification Register 2
 */
union bdk_gicd_pidr2
{
    uint32_t u;
    struct bdk_gicd_pidr2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_8_31 : 24;
        uint32_t archrev : 4;     /**< [ 7: 4](RO) Architectural revision: 0x1=GICv1, 0x2=GICv2,
                                       0x3=GICv3, 0x4=GICv4, 0x5-0xF reserved. */
        uint32_t usesjepcode : 1; /**< [ 3: 3](RO) JEDEC assigned. */
        uint32_t jepid : 3;       /**< [ 2: 0](RO) JEP106 identification code \<6:4\>; Cavium is 0x4C. */
#else /* Word 0 - Little Endian */
        uint32_t jepid : 3;       /**< [ 2: 0](RO) JEP106 identification code \<6:4\>; Cavium is 0x4C. */
        uint32_t usesjepcode : 1; /**< [ 3: 3](RO) JEDEC assigned. */
        uint32_t archrev : 4;     /**< [ 7: 4](RO) Architectural revision: 0x1=GICv1, 0x2=GICv2,
                                       0x3=GICv3, 0x4=GICv4, 0x5-0xF reserved. */
        uint32_t reserved_8_31 : 24;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_pidr2_s cn; */
};
typedef union bdk_gicd_pidr2 bdk_gicd_pidr2_t;

#define BDK_GICD_PIDR2 BDK_GICD_PIDR2_FUNC()
static inline uint64_t BDK_GICD_PIDR2_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_PIDR2_FUNC(void)
{
    return 0x80100000ffe8ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_PIDR2 bdk_gicd_pidr2_t
#define bustype_BDK_GICD_PIDR2 BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_PIDR2 "GICD_PIDR2"
#define device_bar_BDK_GICD_PIDR2 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_PIDR2 0
#define arguments_BDK_GICD_PIDR2 -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_pidr3
 *
 * GIC Distributor Peripheral Identification Register 3
 */
union bdk_gicd_pidr3
{
    uint32_t u;
    struct bdk_gicd_pidr3_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_8_31 : 24;
        uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number; always 0x0 on CNXXXX. */
        uint32_t cmod : 4;   /**< [ 3: 0](RO) Customer modified. 0x1 = consult overall product
                                  information for product, major and minor pass numbers. */
#else /* Word 0 - Little Endian */
        uint32_t cmod : 4;   /**< [ 3: 0](RO) Customer modified. 0x1 = consult overall product
                                  information for product, major and minor pass numbers. */
        uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number; always 0x0 on CNXXXX. */
        uint32_t reserved_8_31 : 24;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_pidr3_s cn; */
};
typedef union bdk_gicd_pidr3 bdk_gicd_pidr3_t;

#define BDK_GICD_PIDR3 BDK_GICD_PIDR3_FUNC()
static inline uint64_t BDK_GICD_PIDR3_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_PIDR3_FUNC(void)
{
    return 0x80100000ffecll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_PIDR3 bdk_gicd_pidr3_t
#define bustype_BDK_GICD_PIDR3 BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_PIDR3 "GICD_PIDR3"
#define device_bar_BDK_GICD_PIDR3 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_PIDR3 0
#define arguments_BDK_GICD_PIDR3 -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_pidr4
 *
 * GIC Distributor Peripheral Identification Register 4
 */
union bdk_gicd_pidr4
{
    uint32_t u;
    struct bdk_gicd_pidr4_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_8_31 : 24;
        uint32_t cnt_4k : 4;            /**< [ 7: 4](RO) Reads 0x4: a 64 KB software-visible page. */
        uint32_t continuation_code : 4; /**< [ 3: 0](RO) JEP106 continuation code, least significant
                                             nibble; indicates Cavium. */
#else /* Word 0 - Little Endian */
        uint32_t continuation_code : 4; /**< [ 3: 0](RO) JEP106 continuation code, least significant
                                             nibble; indicates Cavium. */
        uint32_t cnt_4k : 4;            /**< [ 7: 4](RO) Reads 0x4: a 64 KB software-visible page. */
        uint32_t reserved_8_31 : 24;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_pidr4_s cn; */
};
typedef union bdk_gicd_pidr4 bdk_gicd_pidr4_t;

#define BDK_GICD_PIDR4 BDK_GICD_PIDR4_FUNC()
static inline uint64_t BDK_GICD_PIDR4_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_PIDR4_FUNC(void)
{
    return 0x80100000ffd0ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_PIDR4 bdk_gicd_pidr4_t
#define bustype_BDK_GICD_PIDR4 BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_PIDR4 "GICD_PIDR4"
#define device_bar_BDK_GICD_PIDR4 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_PIDR4 0
#define arguments_BDK_GICD_PIDR4 -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_pidr5
 *
 * GIC Distributor Peripheral Identification Register 5
 */
union bdk_gicd_pidr5
{
    uint32_t u;
    struct bdk_gicd_pidr5_s
    {
        /* Entire word is reserved. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_0_31 : 32;
#else /* Word 0 - Little Endian */
        uint32_t reserved_0_31 : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_pidr5_s cn; */
};
typedef union bdk_gicd_pidr5 bdk_gicd_pidr5_t;

#define BDK_GICD_PIDR5 BDK_GICD_PIDR5_FUNC()
static inline uint64_t BDK_GICD_PIDR5_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_PIDR5_FUNC(void)
{
    return 0x80100000ffd4ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_PIDR5 bdk_gicd_pidr5_t
#define bustype_BDK_GICD_PIDR5 BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_PIDR5 "GICD_PIDR5"
#define device_bar_BDK_GICD_PIDR5 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_PIDR5 0
#define arguments_BDK_GICD_PIDR5 -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_pidr6
 *
 * GIC Distributor Peripheral Identification Register 6
 */
union bdk_gicd_pidr6
{
    uint32_t u;
    struct bdk_gicd_pidr6_s
    {
        /* Entire word is reserved. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_0_31 : 32;
#else /* Word 0 - Little Endian */
        uint32_t reserved_0_31 : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_pidr6_s cn; */
};
typedef union bdk_gicd_pidr6 bdk_gicd_pidr6_t;

#define BDK_GICD_PIDR6 BDK_GICD_PIDR6_FUNC()
static inline uint64_t BDK_GICD_PIDR6_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_PIDR6_FUNC(void)
{
    return 0x80100000ffd8ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_PIDR6 bdk_gicd_pidr6_t
#define bustype_BDK_GICD_PIDR6 BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_PIDR6 "GICD_PIDR6"
#define device_bar_BDK_GICD_PIDR6 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_PIDR6 0
#define arguments_BDK_GICD_PIDR6 -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_pidr7
 *
 * GIC Distributor Peripheral Identification Register 7
 */
union bdk_gicd_pidr7
{
    uint32_t u;
    struct bdk_gicd_pidr7_s
    {
        /* Entire word is reserved. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_0_31 : 32;
#else /* Word 0 - Little Endian */
        uint32_t reserved_0_31 : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_pidr7_s cn; */
};
typedef union bdk_gicd_pidr7 bdk_gicd_pidr7_t;

#define BDK_GICD_PIDR7 BDK_GICD_PIDR7_FUNC()
static inline uint64_t BDK_GICD_PIDR7_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_PIDR7_FUNC(void)
{
    return 0x80100000ffdcll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_PIDR7 bdk_gicd_pidr7_t
#define bustype_BDK_GICD_PIDR7 BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_PIDR7 "GICD_PIDR7"
#define device_bar_BDK_GICD_PIDR7 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_PIDR7 0
#define arguments_BDK_GICD_PIDR7 -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_sctlr
 *
 * GIC Distributor (Secure) Control Register
 * Controls the behavior of the distributor.
 */
union bdk_gicd_sctlr
{
    uint32_t u;
    struct bdk_gicd_sctlr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t rwp : 1;         /**< [ 31: 31](RO/H) Register write pending. One while the effects of a
                                       write to GICD_(S)CTLR (group enable or affinity routing
                                       change) or a GICD_ICENABLER() SPI-disable are still
                                       propagating to descendants of the top-level
                                       redistributor; zero once visible everywhere. */
        uint32_t reserved_7_30 : 24;
        uint32_t ds : 1;          /**< [  6:  6](SR/W) Disable security. When set, nonsecure accesses may
                                       access and modify registers controlling group 0
                                       interrupts, and all GICD_(S)CTLR accesses see the
                                       single-security-state view with all bits accessible.
                                       If [DS] becomes one while [ARE_SNS] is one, ARE for the
                                       single security state is RAO/WI. RAO/WI if only a single
                                       security state is supported. Once set, only a hardware
                                       reset clears it. */
        uint32_t are_ns : 1;      /**< [  5:  5](SRO) Affinity routing enable, nonsecure state. Always one
                                       on CNXXXX (only affinity routing is supported); RAO/WI
                                       when ARE is one for the secure state. */
        uint32_t are_sns : 1;     /**< [  4:  4](RO) Affinity routing enable. Fixed RAO/WI on CNXXXX for
                                       both secure and nonsecure state. */
        uint32_t reserved_3 : 1;
        uint32_t enable_g1s : 1;  /**< [  2:  2](SR/W) Enable (1) / disable (0) secure group 1 interrupts. */
        uint32_t enable_g1ns : 1; /**< [  1:  1](R/W) Secure view: enables nonsecure group 1 interrupts
                                       (GICv2-style) and gates LPI forwarding to processors;
                                       after writing zero, poll [RWP] for the LPI effects.
                                       Nonsecure view: named ENABLE_G1A, enables nonsecure
                                       group 1 interrupts. */
        uint32_t enable_g0 : 1;   /**< [  0:  0](SR/W) Secure view or [DS] set: enable (1) / disable (0)
                                       group 0 interrupts. Nonsecure view: RES0 on CNXXXX
                                       since [ARE_NS] is RAO. */
#else /* Word 0 - Little Endian */
        uint32_t enable_g0 : 1;   /**< [  0:  0](SR/W) Secure view or [DS] set: enable (1) / disable (0)
                                       group 0 interrupts. Nonsecure view: RES0 on CNXXXX
                                       since [ARE_NS] is RAO. */
        uint32_t enable_g1ns : 1; /**< [  1:  1](R/W) Secure view: enables nonsecure group 1 interrupts
                                       (GICv2-style) and gates LPI forwarding to processors;
                                       after writing zero, poll [RWP] for the LPI effects.
                                       Nonsecure view: named ENABLE_G1A, enables nonsecure
                                       group 1 interrupts. */
        uint32_t enable_g1s : 1;  /**< [  2:  2](SR/W) Enable (1) / disable (0) secure group 1 interrupts. */
        uint32_t reserved_3 : 1;
        uint32_t are_sns : 1;     /**< [  4:  4](RO) Affinity routing enable. Fixed RAO/WI on CNXXXX for
                                       both secure and nonsecure state. */
        uint32_t are_ns : 1;      /**< [  5:  5](SRO) Affinity routing enable, nonsecure state. Always one
                                       on CNXXXX (only affinity routing is supported); RAO/WI
                                       when ARE is one for the secure state. */
        uint32_t ds : 1;          /**< [  6:  6](SR/W) Disable security. When set, nonsecure accesses may
                                       access and modify registers controlling group 0
                                       interrupts, and all GICD_(S)CTLR accesses see the
                                       single-security-state view with all bits accessible.
                                       If [DS] becomes one while [ARE_SNS] is one, ARE for the
                                       single security state is RAO/WI. RAO/WI if only a single
                                       security state is supported. Once set, only a hardware
                                       reset clears it. */
        uint32_t reserved_7_30 : 24;
        uint32_t rwp : 1;         /**< [ 31: 31](RO/H) Register write pending. One while the effects of a
                                       write to GICD_(S)CTLR (group enable or affinity routing
                                       change) or a GICD_ICENABLER() SPI-disable are still
                                       propagating to descendants of the top-level
                                       redistributor; zero once visible everywhere. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_sctlr_s cn; */
};
typedef union bdk_gicd_sctlr bdk_gicd_sctlr_t;

#define BDK_GICD_SCTLR BDK_GICD_SCTLR_FUNC()
static inline uint64_t BDK_GICD_SCTLR_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_SCTLR_FUNC(void)
{
    return 0x801000000000ll; /* distributor base, PF_BAR0 */
}

#define typedef_BDK_GICD_SCTLR bdk_gicd_sctlr_t
#define bustype_BDK_GICD_SCTLR BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_SCTLR "GICD_SCTLR"
#define device_bar_BDK_GICD_SCTLR 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_SCTLR 0
#define arguments_BDK_GICD_SCTLR -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_setspi_nsr
 *
 * GIC Distributor Set SPI Pending Register
 */
union bdk_gicd_setspi_nsr
{
    uint32_t u;
    struct bdk_gicd_setspi_nsr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_10_31 : 22;
        uint32_t spi_id : 10; /**< [  9:  0](WO) Write-only: sets the named SPI pending. No effect if
                                   the SPI is already pending, if the SPI ID is invalid,
                                   or if a nonsecure write names a secure SPI whose
                                   GICD_NSACR() value does not permit nonsecure
                                   set-pending. */
#else /* Word 0 - Little Endian */
        uint32_t spi_id : 10; /**< [  9:  0](WO) Write-only: sets the named SPI pending. No effect if
                                   the SPI is already pending, if the SPI ID is invalid,
                                   or if a nonsecure write names a secure SPI whose
                                   GICD_NSACR() value does not permit nonsecure
                                   set-pending. */
        uint32_t reserved_10_31 : 22;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_setspi_nsr_s cn; */
};
typedef union bdk_gicd_setspi_nsr bdk_gicd_setspi_nsr_t;

#define BDK_GICD_SETSPI_NSR BDK_GICD_SETSPI_NSR_FUNC()
static inline uint64_t BDK_GICD_SETSPI_NSR_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_SETSPI_NSR_FUNC(void)
{
    return 0x801000000040ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_SETSPI_NSR bdk_gicd_setspi_nsr_t
#define bustype_BDK_GICD_SETSPI_NSR BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_SETSPI_NSR "GICD_SETSPI_NSR"
#define device_bar_BDK_GICD_SETSPI_NSR 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_SETSPI_NSR 0
#define arguments_BDK_GICD_SETSPI_NSR -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_setspi_sr
 *
 * GIC Distributor Set Secure SPI Pending Register
 */
union bdk_gicd_setspi_sr
{
    uint32_t u;
    struct bdk_gicd_setspi_sr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_10_31 : 22;
        uint32_t spi_id : 10; /**< [  9:  0](SWO) Write-only: sets the named SPI pending. No effect if
                                   the SPI is already pending, if the SPI ID is invalid,
                                   or if the write is a nonsecure access. */
#else /* Word 0 - Little Endian */
        uint32_t spi_id : 10; /**< [  9:  0](SWO) Write-only: sets the named SPI pending. No effect if
                                   the SPI is already pending, if the SPI ID is invalid,
                                   or if the write is a nonsecure access. */
        uint32_t reserved_10_31 : 22;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_setspi_sr_s cn; */
};
typedef union bdk_gicd_setspi_sr bdk_gicd_setspi_sr_t;

#define BDK_GICD_SETSPI_SR BDK_GICD_SETSPI_SR_FUNC()
static inline uint64_t BDK_GICD_SETSPI_SR_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_SETSPI_SR_FUNC(void)
{
    return 0x801000000050ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_SETSPI_SR bdk_gicd_setspi_sr_t
#define bustype_BDK_GICD_SETSPI_SR BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_SETSPI_SR "GICD_SETSPI_SR"
#define device_bar_BDK_GICD_SETSPI_SR 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_SETSPI_SR 0
#define arguments_BDK_GICD_SETSPI_SR -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_sstatusr
 *
 * GIC Distributor (Secure) Status Register
 */
union bdk_gicd_sstatusr
{
    uint32_t u;
    struct bdk_gicd_sstatusr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_4_31 : 28;
        uint32_t wrod : 1; /**< [  3:  3](R/W) Set on a write to a read-only location; write one to clear. */
        uint32_t rwod : 1; /**< [  2:  2](R/W) Set on a read of a write-only location; write one to clear. */
        uint32_t wrd : 1;  /**< [  1:  1](R/W) Set on a write to a reserved location; write one to clear. */
        uint32_t rrd : 1;  /**< [  0:  0](R/W) Set on a read of a reserved location; write one to clear. */
#else /* Word 0 - Little Endian */
        uint32_t rrd : 1;  /**< [  0:  0](R/W) Set on a read of a reserved location; write one to clear. */
        uint32_t wrd : 1;  /**< [  1:  1](R/W) Set on a write to a reserved location; write one to clear. */
        uint32_t rwod : 1; /**< [  2:  2](R/W) Set on a read of a write-only location; write one to clear. */
        uint32_t wrod : 1; /**< [  3:  3](R/W) Set on a write to a read-only location; write one to clear. */
        uint32_t reserved_4_31 : 28;
#endif /* Word 0 - End */
    } s;
    /* CN9XXX variant: identical layout, fields are (R/W/H). */
    struct bdk_gicd_sstatusr_cn9
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_4_31 : 28;
        uint32_t wrod : 1; /**< [  3:  3](R/W/H) Set on a write to a read-only location; write one to clear. */
        uint32_t rwod : 1; /**< [  2:  2](R/W/H) Set on a read of a write-only location; write one to clear. */
        uint32_t wrd : 1;  /**< [  1:  1](R/W/H) Set on a write to a reserved location; write one to clear. */
        uint32_t rrd : 1;  /**< [  0:  0](R/W/H) Set on a read of a reserved location; write one to clear. */
#else /* Word 0 - Little Endian */
        uint32_t rrd : 1;  /**< [  0:  0](R/W/H) Set on a read of a reserved location; write one to clear. */
        uint32_t wrd : 1;  /**< [  1:  1](R/W/H) Set on a write to a reserved location; write one to clear. */
        uint32_t rwod : 1; /**< [  2:  2](R/W/H) Set on a read of a write-only location; write one to clear. */
        uint32_t wrod : 1; /**< [  3:  3](R/W/H) Set on a write to a read-only location; write one to clear. */
        uint32_t reserved_4_31 : 28;
#endif /* Word 0 - End */
    } cn9;
    /* struct bdk_gicd_sstatusr_cn9 cn81xx; */
    /* struct bdk_gicd_sstatusr_s cn88xx; */
    /* struct bdk_gicd_sstatusr_cn9 cn83xx; */
};
typedef union bdk_gicd_sstatusr bdk_gicd_sstatusr_t;

#define BDK_GICD_SSTATUSR BDK_GICD_SSTATUSR_FUNC()
static inline uint64_t BDK_GICD_SSTATUSR_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_SSTATUSR_FUNC(void)
{
    return 0x801000000010ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_SSTATUSR bdk_gicd_sstatusr_t
#define bustype_BDK_GICD_SSTATUSR BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_SSTATUSR "GICD_SSTATUSR"
#define device_bar_BDK_GICD_SSTATUSR 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_SSTATUSR 0
#define arguments_BDK_GICD_SSTATUSR -1,-1,-1,-1
+
/**
 * Register (NCB32b) gicd_typer
 *
 * GIC Distributor Type Register
 * Describes features supported by the distributor.
 */
union bdk_gicd_typer
{
    uint32_t u;
    struct bdk_gicd_typer_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_25_31 : 7;
        uint32_t a3v : 1;           /**< [ 24: 24](RO) Nonzero affinity-3 values supported. */
        uint32_t idbits : 5;        /**< [ 23: 19](RO) Interrupt identifier bits on the GIC stream
                                         protocol interface, minus one. */
        uint32_t dvis : 1;          /**< [ 18: 18](RO) Direct virtual LPI injection supported. */
        uint32_t lpis : 1;          /**< [ 17: 17](RO) Locality-specific peripheral interrupts supported. */
        uint32_t mbis : 1;          /**< [ 16: 16](RO) Message-based interrupts supported. */
        uint32_t lspi : 5;          /**< [ 15: 11](RO) Lockable SPI count; RES0 (unsupported in GICv3). */
        uint32_t securityextn : 1;  /**< [ 10: 10](RO) Security extension supported; clear when
                                         GICD_(S)CTLR[DS] is set. */
        uint32_t reserved_8_9 : 2;
        uint32_t cpunumber : 3;     /**< [  7:  5](RO) Reserved; unused in the CNXXXX implementation. */
        uint32_t itlinesnumber : 5; /**< [  4:  0](RO) Encodes the maximum number of SPIs. */
#else /* Word 0 - Little Endian */
        uint32_t itlinesnumber : 5; /**< [  4:  0](RO) Encodes the maximum number of SPIs. */
        uint32_t cpunumber : 3;     /**< [  7:  5](RO) Reserved; unused in the CNXXXX implementation. */
        uint32_t reserved_8_9 : 2;
        uint32_t securityextn : 1;  /**< [ 10: 10](RO) Security extension supported; clear when
                                         GICD_(S)CTLR[DS] is set. */
        uint32_t lspi : 5;          /**< [ 15: 11](RO) Lockable SPI count; RES0 (unsupported in GICv3). */
        uint32_t mbis : 1;          /**< [ 16: 16](RO) Message-based interrupts supported. */
        uint32_t lpis : 1;          /**< [ 17: 17](RO) Locality-specific peripheral interrupts supported. */
        uint32_t dvis : 1;          /**< [ 18: 18](RO) Direct virtual LPI injection supported. */
        uint32_t idbits : 5;        /**< [ 23: 19](RO) Interrupt identifier bits on the GIC stream
                                         protocol interface, minus one. */
        uint32_t a3v : 1;           /**< [ 24: 24](RO) Nonzero affinity-3 values supported. */
        uint32_t reserved_25_31 : 7;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gicd_typer_s cn; */
};
typedef union bdk_gicd_typer bdk_gicd_typer_t;

#define BDK_GICD_TYPER BDK_GICD_TYPER_FUNC()
static inline uint64_t BDK_GICD_TYPER_FUNC(void) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_GICD_TYPER_FUNC(void)
{
    return 0x801000000004ll; /* fixed address in PF_BAR0 */
}

#define typedef_BDK_GICD_TYPER bdk_gicd_typer_t
#define bustype_BDK_GICD_TYPER BDK_CSR_TYPE_NCB32b
#define basename_BDK_GICD_TYPER "GICD_TYPER"
#define device_bar_BDK_GICD_TYPER 0x0 /* PF_BAR0 */
#define busnum_BDK_GICD_TYPER 0
#define arguments_BDK_GICD_TYPER -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_cidr0
+ *
+ * GIC Redistributor Component Identification Register 0
+ */
+union bdk_gicrx_cidr0
+{
+ uint32_t u;
+ struct bdk_gicrx_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_cidr0_s cn; */
+};
+typedef union bdk_gicrx_cidr0 bdk_gicrx_cidr0_t;
+
+static inline uint64_t BDK_GICRX_CIDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_CIDR0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000fff0ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000fff0ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000fff0ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000fff0ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_CIDR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_CIDR0(a) bdk_gicrx_cidr0_t
+#define bustype_BDK_GICRX_CIDR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_CIDR0(a) "GICRX_CIDR0"
+#define device_bar_BDK_GICRX_CIDR0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_CIDR0(a) (a)
+#define arguments_BDK_GICRX_CIDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_cidr1
+ *
+ * GIC Redistributor Component Identification Register 1
+ */
+union bdk_gicrx_cidr1
+{
+ uint32_t u;
+ struct bdk_gicrx_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_cidr1_s cn; */
+};
+typedef union bdk_gicrx_cidr1 bdk_gicrx_cidr1_t;
+
+static inline uint64_t BDK_GICRX_CIDR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_CIDR1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000fff4ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000fff4ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000fff4ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000fff4ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_CIDR1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_CIDR1(a) bdk_gicrx_cidr1_t
+#define bustype_BDK_GICRX_CIDR1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_CIDR1(a) "GICRX_CIDR1"
+#define device_bar_BDK_GICRX_CIDR1(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_CIDR1(a) (a)
+#define arguments_BDK_GICRX_CIDR1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_cidr2
+ *
+ * GIC Redistributor Component Identification Register 2
+ */
+union bdk_gicrx_cidr2
+{
+    uint32_t u;
+    struct bdk_gicrx_cidr2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_cidr2_s cn; */
+};
+typedef union bdk_gicrx_cidr2 bdk_gicrx_cidr2_t;
+
+/* Model-dependent physical address of GICR(a)_CIDR2; the per-model guard
+   bounds the redistributor index 'a' and __bdk_csr_fatal() is reached when
+   no supported model matches (out-of-range index). */
+static inline uint64_t BDK_GICRX_CIDR2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_CIDR2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x80108000fff8ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x80108000fff8ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x80108000fff8ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x80108000fff8ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_CIDR2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_CIDR2(a) bdk_gicrx_cidr2_t
+#define bustype_BDK_GICRX_CIDR2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_CIDR2(a) "GICRX_CIDR2"
+#define device_bar_BDK_GICRX_CIDR2(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_CIDR2(a) (a)
+#define arguments_BDK_GICRX_CIDR2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_cidr3
+ *
+ * GIC Redistributor Component Identification Register 3
+ */
+union bdk_gicrx_cidr3
+{
+    uint32_t u;
+    struct bdk_gicrx_cidr3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+        uint32_t preamble              : 8;  /**< [  7:  0](RO) Preamble identification value. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_cidr3_s cn; */
+};
+typedef union bdk_gicrx_cidr3 bdk_gicrx_cidr3_t;
+
+/* Model-dependent physical address of GICR(a)_CIDR3; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_CIDR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_CIDR3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x80108000fffcll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x80108000fffcll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x80108000fffcll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x80108000fffcll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_CIDR3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_CIDR3(a) bdk_gicrx_cidr3_t
+#define bustype_BDK_GICRX_CIDR3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_CIDR3(a) "GICRX_CIDR3"
+#define device_bar_BDK_GICRX_CIDR3(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_CIDR3(a) (a)
+#define arguments_BDK_GICRX_CIDR3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_clrlpir
+ *
+ * GIC Redistributor Clear LPI Register
+ */
+union bdk_gicrx_clrlpir
+{
+    uint64_t u;
+    struct bdk_gicrx_clrlpir_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63        : 32;
+        uint64_t pid                   : 32; /**< [ 31:  0](WO) Physical ID of the LPI to be set as not pending. If the LPI is already not pending, the
+                                                                 write has no effect.
+                                                                 If the LPI with the physical ID is not implemented, the write has no effect.
+                                                                 If GICR()_(S)CTLR[ENABLE_LPIS] is zero, the write has no effect. */
+#else /* Word 0 - Little Endian */
+        uint64_t pid                   : 32; /**< [ 31:  0](WO) Physical ID of the LPI to be set as not pending. If the LPI is already not pending, the
+                                                                 write has no effect.
+                                                                 If the LPI with the physical ID is not implemented, the write has no effect.
+                                                                 If GICR()_(S)CTLR[ENABLE_LPIS] is zero, the write has no effect. */
+        uint64_t reserved_32_63        : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_clrlpir_s cn; */
+};
+typedef union bdk_gicrx_clrlpir bdk_gicrx_clrlpir_t;
+
+/* Model-dependent physical address of GICR(a)_CLRLPIR; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_CLRLPIR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_CLRLPIR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080000048ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080000048ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080000048ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080000048ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_CLRLPIR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_CLRLPIR(a) bdk_gicrx_clrlpir_t
+#define bustype_BDK_GICRX_CLRLPIR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_CLRLPIR(a) "GICRX_CLRLPIR"
+#define device_bar_BDK_GICRX_CLRLPIR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_CLRLPIR(a) (a)
+#define arguments_BDK_GICRX_CLRLPIR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_icactiver0
+ *
+ * GIC Redistributor Interrupt Clear-Active Register 0
+ * Each bit in GICR()_ICACTIVER0 provides a clear-active bit for an SGI or a
+ * PPI. Writing one to a clear-active bit clears the active status of the corresponding
+ * interrupt.
+ */
+union bdk_gicrx_icactiver0
+{
+    uint32_t u;
+    struct bdk_gicrx_icactiver0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SGI or a PPI for interrupt IDs in the range 31..0. If read as
+                                                                 zero, then the interrupt is not active. If read as one, the interrupt is in active state.
+
+                                                                 Clear-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A clear-active bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SGI or a PPI for interrupt IDs in the range 31..0. If read as
+                                                                 zero, then the interrupt is not active. If read as one, the interrupt is in active state.
+
+                                                                 Clear-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A clear-active bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* cn9 layout differs from 's' only in the field's access attribute (R/W1C/H). */
+    struct bdk_gicrx_icactiver0_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C/H) Each bit corresponds to an SGI or a PPI for interrupt IDs in the range 31..0. If read as
+                                                                 zero, then the interrupt is not active. If read as one, the interrupt is in active state.
+
+                                                                 Clear-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A clear-active bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C/H) Each bit corresponds to an SGI or a PPI for interrupt IDs in the range 31..0. If read as
+                                                                 zero, then the interrupt is not active. If read as one, the interrupt is in active state.
+
+                                                                 Clear-active bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 A clear-active bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } cn9;
+    /* struct bdk_gicrx_icactiver0_cn9 cn81xx; */
+    /* struct bdk_gicrx_icactiver0_s cn88xx; */
+    /* struct bdk_gicrx_icactiver0_cn9 cn83xx; */
+};
+typedef union bdk_gicrx_icactiver0 bdk_gicrx_icactiver0_t;
+
+/* Model-dependent physical address of GICR(a)_ICACTIVER0; __bdk_csr_fatal()
+   is reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_ICACTIVER0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_ICACTIVER0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080010380ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080010380ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080010380ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080010380ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_ICACTIVER0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_ICACTIVER0(a) bdk_gicrx_icactiver0_t
+#define bustype_BDK_GICRX_ICACTIVER0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_ICACTIVER0(a) "GICRX_ICACTIVER0"
+#define device_bar_BDK_GICRX_ICACTIVER0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_ICACTIVER0(a) (a)
+#define arguments_BDK_GICRX_ICACTIVER0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_icenabler0
+ *
+ * GIC Redistributor Interrupt Clear-Enable Register 0
+ * Each bit in GICR()_ICENABLER0 provides a clear-enable bit for an SGI or a PPI. Writing one to
+ * a
+ * clear-enable bit disables forwarding of the corresponding SGI or PPI from the redistributor
+ * to the CPU interfaces. Reading a bit identifies whether the interrupt is enabled.
+ */
+union bdk_gicrx_icenabler0
+{
+    uint32_t u;
+    struct bdk_gicrx_icenabler0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. Upon reading,
+                                                                 if a bit is zero, then the interrupt is not enabled to be forwarded to the CPU interface.
+                                                                 Upon reading, if a bit is one, the SPI is enabled to be forwarded to the CPU interface.
+
+                                                                 Clear-enable bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 Writes to the register cannot be considered complete until the effects of the write are
+                                                                 visible throughout the affinity hierarchy. To ensure that an enable has been cleared,
+                                                                 software must write to this register with bits set to clear the required enables. Software
+                                                                 must then poll GICR()_(S)CTLR[RWP] (register writes pending) until it has the value zero. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. Upon reading,
+                                                                 if a bit is zero, then the interrupt is not enabled to be forwarded to the CPU interface.
+                                                                 Upon reading, if a bit is one, the SPI is enabled to be forwarded to the CPU interface.
+
+                                                                 Clear-enable bits corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses.
+
+                                                                 Writes to the register cannot be considered complete until the effects of the write are
+                                                                 visible throughout the affinity hierarchy. To ensure that an enable has been cleared,
+                                                                 software must write to this register with bits set to clear the required enables. Software
+                                                                 must then poll GICR()_(S)CTLR[RWP] (register writes pending) until it has the value zero. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_icenabler0_s cn; */
+};
+typedef union bdk_gicrx_icenabler0 bdk_gicrx_icenabler0_t;
+
+/* Model-dependent physical address of GICR(a)_ICENABLER0; __bdk_csr_fatal()
+   is reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_ICENABLER0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_ICENABLER0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080010180ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080010180ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080010180ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080010180ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_ICENABLER0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_ICENABLER0(a) bdk_gicrx_icenabler0_t
+#define bustype_BDK_GICRX_ICENABLER0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_ICENABLER0(a) "GICRX_ICENABLER0"
+#define device_bar_BDK_GICRX_ICENABLER0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_ICENABLER0(a) (a)
+#define arguments_BDK_GICRX_ICENABLER0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_icfgr0
+ *
+ * GIC Redistributor Interrupt Configuration Register 0
+ */
+union bdk_gicrx_icfgr0
+{
+    uint32_t u;
+    struct bdk_gicrx_icfgr0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](RO) Two bits per SGI. Defines whether an SGI is level-sensitive or edge-triggered.
+                                                                 Note SGIs are always edge-triggered, so Bit[1] for an SGI is RAO and read-only.
+
+                                                                 Bit[1] is zero, the interrupt is level-sensitive.
+
+                                                                 Bit[1] is one, the interrupt is edge-triggered.
+
+                                                                 Bit[0] Reserved.
+
+                                                                 If a secure interrupt, then its corresponding field is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](RO) Two bits per SGI. Defines whether an SGI is level-sensitive or edge-triggered.
+                                                                 Note SGIs are always edge-triggered, so Bit[1] for an SGI is RAO and read-only.
+
+                                                                 Bit[1] is zero, the interrupt is level-sensitive.
+
+                                                                 Bit[1] is one, the interrupt is edge-triggered.
+
+                                                                 Bit[0] Reserved.
+
+                                                                 If a secure interrupt, then its corresponding field is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_icfgr0_s cn; */
+};
+typedef union bdk_gicrx_icfgr0 bdk_gicrx_icfgr0_t;
+
+/* Model-dependent physical address of GICR(a)_ICFGR0; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_ICFGR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_ICFGR0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080010c00ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080010c00ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080010c00ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080010c00ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_ICFGR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_ICFGR0(a) bdk_gicrx_icfgr0_t
+#define bustype_BDK_GICRX_ICFGR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_ICFGR0(a) "GICRX_ICFGR0"
+#define device_bar_BDK_GICRX_ICFGR0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_ICFGR0(a) (a)
+#define arguments_BDK_GICRX_ICFGR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_icfgr1
+ *
+ * GIC Redistributor Interrupt Configuration Register 1
+ * Redistributor interrupt configuration register 1.
+ */
+union bdk_gicrx_icfgr1
+{
+    uint32_t u;
+    struct bdk_gicrx_icfgr1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](RO) Two bits per PPI. Defines whether an PPI is level-sensitive or edge-triggered.
+
+                                                                 Bit[1] is zero, the interrupt is level-sensitive.
+
+                                                                 Bit[1] is one, the interrupt is edge-triggered.
+
+                                                                 Bit[0] Reserved.
+
+                                                                 If a secure interrupt, then its corresponding field is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](RO) Two bits per PPI. Defines whether an PPI is level-sensitive or edge-triggered.
+
+                                                                 Bit[1] is zero, the interrupt is level-sensitive.
+
+                                                                 Bit[1] is one, the interrupt is edge-triggered.
+
+                                                                 Bit[0] Reserved.
+
+                                                                 If a secure interrupt, then its corresponding field is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_icfgr1_s cn; */
+};
+typedef union bdk_gicrx_icfgr1 bdk_gicrx_icfgr1_t;
+
+/* Model-dependent physical address of GICR(a)_ICFGR1; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_ICFGR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_ICFGR1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080010c04ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080010c04ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080010c04ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080010c04ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_ICFGR1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_ICFGR1(a) bdk_gicrx_icfgr1_t
+#define bustype_BDK_GICRX_ICFGR1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_ICFGR1(a) "GICRX_ICFGR1"
+#define device_bar_BDK_GICRX_ICFGR1(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_ICFGR1(a) (a)
+#define arguments_BDK_GICRX_ICFGR1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_icpendr0
+ *
+ * GIC Redistributor Interrupt Clear-Pending Register 0
+ * Each bit in GICR()_ICPENDR0 provides a clear-pending bit for an SGI or a PPI. Writing one to a
+ * clear-pending bit clears the pending status of the corresponding interrupt.
+ */
+union bdk_gicrx_icpendr0
+{
+    uint32_t u;
+    struct bdk_gicrx_icpendr0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SGI or a PPI for interrupt IDs in the range 31..0. If read as
+                                                                 zero, then the interrupt is not pending. If read as one, the interrupt is in pending
+                                                                 state.
+
+                                                                 Clear-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+                                                                 be set by secure accesses.
+
+                                                                 A clear-pending bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C) Each bit corresponds to an SGI or a PPI for interrupt IDs in the range 31..0. If read as
+                                                                 zero, then the interrupt is not pending. If read as one, the interrupt is in pending
+                                                                 state.
+
+                                                                 Clear-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+                                                                 be set by secure accesses.
+
+                                                                 A clear-pending bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* cn9 layout differs from 's' only in the field's access attribute (R/W1C/H). */
+    struct bdk_gicrx_icpendr0_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C/H) Each bit corresponds to an SGI or a PPI for interrupt IDs in the range 31..0. If read as
+                                                                 zero, then the interrupt is not pending. If read as one, the interrupt is in pending
+                                                                 state.
+
+                                                                 Clear-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+                                                                 be set by secure accesses.
+
+                                                                 A clear-pending bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W1C/H) Each bit corresponds to an SGI or a PPI for interrupt IDs in the range 31..0. If read as
+                                                                 zero, then the interrupt is not pending. If read as one, the interrupt is in pending
+                                                                 state.
+
+                                                                 Clear-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+                                                                 be set by secure accesses.
+
+                                                                 A clear-pending bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } cn9;
+    /* struct bdk_gicrx_icpendr0_cn9 cn81xx; */
+    /* struct bdk_gicrx_icpendr0_s cn88xx; */
+    /* struct bdk_gicrx_icpendr0_cn9 cn83xx; */
+};
+typedef union bdk_gicrx_icpendr0 bdk_gicrx_icpendr0_t;
+
+/* Model-dependent physical address of GICR(a)_ICPENDR0; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_ICPENDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_ICPENDR0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080010280ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080010280ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080010280ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080010280ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_ICPENDR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_ICPENDR0(a) bdk_gicrx_icpendr0_t
+#define bustype_BDK_GICRX_ICPENDR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_ICPENDR0(a) "GICRX_ICPENDR0"
+#define device_bar_BDK_GICRX_ICPENDR0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_ICPENDR0(a) (a)
+#define arguments_BDK_GICRX_ICPENDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_igroupr0
+ *
+ * GIC Redistributor Interrupt Group Secure Register
+ */
+union bdk_gicrx_igroupr0
+{
+    uint32_t u;
+    struct bdk_gicrx_igroupr0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ppi                   : 16; /**< [ 31: 16](SR/W) Groups for PPIs.
+                                                                 0 = Group 0.
+                                                                 1 = Group 1. */
+        uint32_t sgi                   : 16; /**< [ 15:  0](SR/W) Groups for SGIs.
+                                                                 0 = Group 0.
+                                                                 1 = Group 1. */
+#else /* Word 0 - Little Endian */
+        uint32_t sgi                   : 16; /**< [ 15:  0](SR/W) Groups for SGIs.
+                                                                 0 = Group 0.
+                                                                 1 = Group 1. */
+        uint32_t ppi                   : 16; /**< [ 31: 16](SR/W) Groups for PPIs.
+                                                                 0 = Group 0.
+                                                                 1 = Group 1. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_igroupr0_s cn; */
+};
+typedef union bdk_gicrx_igroupr0 bdk_gicrx_igroupr0_t;
+
+/* Model-dependent physical address of GICR(a)_IGROUPR0; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_IGROUPR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_IGROUPR0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080010080ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080010080ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080010080ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080010080ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_IGROUPR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_IGROUPR0(a) bdk_gicrx_igroupr0_t
+#define bustype_BDK_GICRX_IGROUPR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_IGROUPR0(a) "GICRX_IGROUPR0"
+#define device_bar_BDK_GICRX_IGROUPR0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_IGROUPR0(a) (a)
+#define arguments_BDK_GICRX_IGROUPR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_igrpmodr0
+ *
+ * GIC Redistributor Interrupt Group Secure Register
+ * Control the group modifier for PPIs and SGIs, similar to GICD_IGRPMODR() for SPIs.
+ */
+union bdk_gicrx_igrpmodr0
+{
+    uint32_t u;
+    struct bdk_gicrx_igrpmodr0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ppi                   : 16; /**< [ 31: 16](SR/W) Group modifiers for PPIs.
+                                                                 0 = No group modification.
+                                                                 1 = Modify to group 1. */
+        uint32_t sgi                   : 16; /**< [ 15:  0](SR/W) Group modifiers for SGIs.
+                                                                 0 = No group modification.
+                                                                 1 = Modify to group 1. */
+#else /* Word 0 - Little Endian */
+        uint32_t sgi                   : 16; /**< [ 15:  0](SR/W) Group modifiers for SGIs.
+                                                                 0 = No group modification.
+                                                                 1 = Modify to group 1. */
+        uint32_t ppi                   : 16; /**< [ 31: 16](SR/W) Group modifiers for PPIs.
+                                                                 0 = No group modification.
+                                                                 1 = Modify to group 1. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_igrpmodr0_s cn; */
+};
+typedef union bdk_gicrx_igrpmodr0 bdk_gicrx_igrpmodr0_t;
+
+/* Model-dependent physical address of GICR(a)_IGRPMODR0; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_IGRPMODR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_IGRPMODR0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080010d00ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080010d00ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080010d00ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080010d00ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_IGRPMODR0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_IGRPMODR0(a) bdk_gicrx_igrpmodr0_t
+#define bustype_BDK_GICRX_IGRPMODR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_IGRPMODR0(a) "GICRX_IGRPMODR0"
+#define device_bar_BDK_GICRX_IGRPMODR0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_IGRPMODR0(a) (a)
+#define arguments_BDK_GICRX_IGRPMODR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_iidr
+ *
+ * GIC Redistributor Implementation Identification Register
+ * This 32-bit register is read-only and specifies the version and features supported by the
+ * redistributor.
+ */
+union bdk_gicrx_iidr
+{
+    uint32_t u;
+    struct bdk_gicrx_iidr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t productid             : 8;  /**< [ 31: 24](RO) An implementation defined product number for the device.
+                                                                 In CNXXXX, enumerated by PCC_PROD_E. */
+        uint32_t reserved_20_23        : 4;
+        uint32_t variant               : 4;  /**< [ 19: 16](RO) Indicates the major revision or variant of the product.
+                                                                 On CNXXXX, this is the major revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+        uint32_t revision              : 4;  /**< [ 15: 12](RO) Indicates the minor revision of the product.
+                                                                 On CNXXXX, this is the minor revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+        uint32_t implementer           : 12; /**< [ 11:  0](RO) Indicates the implementer:
+                                                                 0x34C = Cavium. */
+#else /* Word 0 - Little Endian */
+        uint32_t implementer           : 12; /**< [ 11:  0](RO) Indicates the implementer:
+                                                                 0x34C = Cavium. */
+        uint32_t revision              : 4;  /**< [ 15: 12](RO) Indicates the minor revision of the product.
+                                                                 On CNXXXX, this is the minor revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+        uint32_t variant               : 4;  /**< [ 19: 16](RO) Indicates the major revision or variant of the product.
+                                                                 On CNXXXX, this is the major revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+        uint32_t reserved_20_23        : 4;
+        uint32_t productid             : 8;  /**< [ 31: 24](RO) An implementation defined product number for the device.
+                                                                 In CNXXXX, enumerated by PCC_PROD_E. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_iidr_s cn; */
+};
+typedef union bdk_gicrx_iidr bdk_gicrx_iidr_t;
+
+/* Model-dependent physical address of GICR(a)_IIDR; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_IIDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_IIDR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x801080000004ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x801080000004ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x801080000004ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x801080000004ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_IIDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_IIDR(a) bdk_gicrx_iidr_t
+#define bustype_BDK_GICRX_IIDR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_IIDR(a) "GICRX_IIDR"
+#define device_bar_BDK_GICRX_IIDR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_IIDR(a) (a)
+#define arguments_BDK_GICRX_IIDR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_invallr
+ *
+ * GIC Redistributor LPI Invalidate All Register
+ * This register is write-only and causes the LPI configuration to be reloaded from the table in
+ * memory.
+ */
+union bdk_gicrx_invallr
+{
+    uint64_t u;
+    struct bdk_gicrx_invallr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_0_63         : 64;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_63         : 64;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_invallr_s cn; */
+};
+typedef union bdk_gicrx_invallr bdk_gicrx_invallr_t;
+
+/* Model-dependent physical address of GICR(a)_INVALLR; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_INVALLR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_INVALLR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x8010800000b0ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x8010800000b0ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x8010800000b0ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x8010800000b0ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_INVALLR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_INVALLR(a) bdk_gicrx_invallr_t
+#define bustype_BDK_GICRX_INVALLR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_INVALLR(a) "GICRX_INVALLR"
+#define device_bar_BDK_GICRX_INVALLR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_INVALLR(a) (a)
+#define arguments_BDK_GICRX_INVALLR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_invlpir
+ *
+ * GIC Redistributor Invalidate LPI Register
+ */
+union bdk_gicrx_invlpir
+{
+    uint64_t u;
+    struct bdk_gicrx_invlpir_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63        : 32;
+        uint64_t pid                   : 32; /**< [ 31:  0](WO) Physical LPI ID to be cleaned. The invalidate in the register name and the ITS command is
+                                                                 a misnomer. This actually results in a clean operation wherein the cached (in the
+                                                                 redistributor) pending state of the LPI is updated to the pending table held in memory and
+                                                                 its cached configuration is invalidated in the cache. */
+#else /* Word 0 - Little Endian */
+        uint64_t pid                   : 32; /**< [ 31:  0](WO) Physical LPI ID to be cleaned. The invalidate in the register name and the ITS command is
+                                                                 a misnomer. This actually results in a clean operation wherein the cached (in the
+                                                                 redistributor) pending state of the LPI is updated to the pending table held in memory and
+                                                                 its cached configuration is invalidated in the cache. */
+        uint64_t reserved_32_63        : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_invlpir_s cn; */
+};
+typedef union bdk_gicrx_invlpir bdk_gicrx_invlpir_t;
+
+/* Model-dependent physical address of GICR(a)_INVLPIR; __bdk_csr_fatal() is
+   reached when index 'a' exceeds the detected model's redistributor count. */
+static inline uint64_t BDK_GICRX_INVLPIR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_INVLPIR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x8010800000a0ll + 0x20000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+        return 0x8010800000a0ll + 0x20000ll * ((a) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+        return 0x8010800000a0ll + 0x20000ll * ((a) & 0x3f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+        return 0x8010800000a0ll + 0x20000ll * ((a) & 0x1f);
+    __bdk_csr_fatal("GICRX_INVLPIR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_INVLPIR(a) bdk_gicrx_invlpir_t
+#define bustype_BDK_GICRX_INVLPIR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_INVLPIR(a) "GICRX_INVLPIR"
+#define device_bar_BDK_GICRX_INVLPIR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_INVLPIR(a) (a)
+#define arguments_BDK_GICRX_INVLPIR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_ipriorityr#
+ *
+ * GIC Redistributor Interrupt Priority Registers
+ * Each byte in this register provides a priority field for each SGI or PPI supported by the
+ * GIC.
+ */
+union bdk_gicrx_ipriorityrx
+{
+    uint32_t u;
+    struct bdk_gicrx_ipriorityrx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W) Each byte corresponds to an SGI or PPI for interrupt IDs in the range 31..0.
+
+                                                                 Priority fields corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses, or when GICD_(S)CTLR[DS] is one.
+
+                                                                 Byte accesses are permitted to these registers.
+
+                                                                 A priority field for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+        uint32_t vec                   : 32; /**< [ 31:  0](R/W) Each byte corresponds to an SGI or PPI for interrupt IDs in the range 31..0.
+
+                                                                 Priority fields corresponding to secure interrupts (either group 0 or group 1)
+                                                                 may only be set by secure accesses, or when GICD_(S)CTLR[DS] is one.
+
+                                                                 Byte accesses are permitted to these registers.
+
+                                                                 A priority field for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gicrx_ipriorityrx_s cn; */
+};
+typedef union bdk_gicrx_ipriorityrx bdk_gicrx_ipriorityrx_t;
+
+/* Model-dependent physical address of GICR(a)_IPRIORITYR(b): 'a' selects the
+   redistributor, 'b' the 32-bit priority word (b<=7, i.e. 8 words covering 32
+   byte-wide priority fields). __bdk_csr_fatal() is reached when either index
+   is out of range for the detected model. */
+static inline uint64_t BDK_GICRX_IPRIORITYRX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_IPRIORITYRX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=7)))
+        return 0x801080010400ll + 0x20000ll * ((a) & 0x3) + 4ll * ((b) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=23) && (b<=7)))
+        return 0x801080010400ll + 0x20000ll * ((a) & 0x1f) + 4ll * ((b) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=47) && (b<=7)))
+        return 0x801080010400ll + 0x20000ll * ((a) & 0x3f) + 4ll * ((b) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=23) && (b<=7)))
+        return 0x801080010400ll + 0x20000ll * ((a) & 0x1f) + 4ll * ((b) & 0x7);
+    __bdk_csr_fatal("GICRX_IPRIORITYRX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GICRX_IPRIORITYRX(a,b) bdk_gicrx_ipriorityrx_t
+#define bustype_BDK_GICRX_IPRIORITYRX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_IPRIORITYRX(a,b) "GICRX_IPRIORITYRX"
+#define device_bar_BDK_GICRX_IPRIORITYRX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_IPRIORITYRX(a,b) (a)
+#define arguments_BDK_GICRX_IPRIORITYRX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) gicr#_isactiver0
+ *
+ * GIC Redistributor Interrupt Set-Active Register 0
+ * Each bit in GICR()_ISACTIVER0 provides a set-active bit for an SGI or a PPI. Writing one to a
+ * set-active bit sets the status of the corresponding interrupt to active.
+ */
+union bdk_gicrx_isactiver0
+{
+ uint32_t u;
+ struct bdk_gicrx_isactiver0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ read as zero, then the interrupt is not active. If read as one, the interrupt is
+ in active state.
+
+ Set-active bits corresponding to secure interrupts (either group 0 or group 1) may only be
+ set by secure accesses.
+
+ A set-active bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ read as zero, then the interrupt is not active. If read as one, the interrupt is
+ in active state.
+
+ Set-active bits corresponding to secure interrupts (either group 0 or group 1) may only be
+ set by secure accesses.
+
+ A set-active bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+ } s;
+ /* CN9XX parts differ only in the field's volatility marking (R/W1S/H). */
+ struct bdk_gicrx_isactiver0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S/H) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ read as zero, then the interrupt is not active. If read as one, the interrupt is
+ in active state.
+
+ Set-active bits corresponding to secure interrupts (either group 0 or group 1) may only be
+ set by secure accesses.
+
+ A set-active bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S/H) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ read as zero, then the interrupt is not active. If read as one, the interrupt is
+ in active state.
+
+ Set-active bits corresponding to secure interrupts (either group 0 or group 1) may only be
+ set by secure accesses.
+
+ A set-active bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_gicrx_isactiver0_cn9 cn81xx; */
+ /* struct bdk_gicrx_isactiver0_s cn88xx; */
+ /* struct bdk_gicrx_isactiver0_cn9 cn83xx; */
+};
+typedef union bdk_gicrx_isactiver0 bdk_gicrx_isactiver0_t;
+
+/* Physical address of GICR(a)_ISACTIVER0 for the running chip model. The
+ valid range of the redistributor index 'a' and the index mask are
+ model-dependent; __bdk_csr_fatal() is called for an out-of-range index or an
+ unknown model. */
+static inline uint64_t BDK_GICRX_ISACTIVER0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_ISACTIVER0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080010300ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080010300ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080010300ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080010300ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_ISACTIVER0", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_ISACTIVER0(a) bdk_gicrx_isactiver0_t
+#define bustype_BDK_GICRX_ISACTIVER0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_ISACTIVER0(a) "GICRX_ISACTIVER0"
+#define device_bar_BDK_GICRX_ISACTIVER0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_ISACTIVER0(a) (a)
+#define arguments_BDK_GICRX_ISACTIVER0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_isenabler0
+ *
+ * GIC Redistributor Interrupt Set-Enable Register 0
+ * Each bit in GICR()_ISENABLER0 provides a set-enable bit for an SGI or a PPI. Writing one
+ * to a set-enable bit enables forwarding of the corresponding SGI or PPI from the
+ * redistributor to the CPU interfaces.
+ */
+union bdk_gicrx_isenabler0
+{
+ uint32_t u;
+ struct bdk_gicrx_isenabler0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ zero, then the interrupt is not enabled to be forwarded to the CPU interface. If
+ one, the interrupt is enabled to be forwarded to the CPU interface. Set-enable
+ bits corresponding to secure interrupts (either group0 or group1) may only be
+ set by secure accesses. */
+#else /* Word 0 - Little Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ zero, then the interrupt is not enabled to be forwarded to the CPU interface. If
+ one, the interrupt is enabled to be forwarded to the CPU interface. Set-enable
+ bits corresponding to secure interrupts (either group0 or group1) may only be
+ set by secure accesses. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_isenabler0_s cn; */
+};
+typedef union bdk_gicrx_isenabler0 bdk_gicrx_isenabler0_t;
+
+/* Physical address of GICR(a)_ISENABLER0 for the running chip model. The
+ valid range of the redistributor index 'a' and the index mask are
+ model-dependent; __bdk_csr_fatal() is called for an out-of-range index or an
+ unknown model. */
+static inline uint64_t BDK_GICRX_ISENABLER0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_ISENABLER0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080010100ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080010100ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080010100ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080010100ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_ISENABLER0", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_ISENABLER0(a) bdk_gicrx_isenabler0_t
+#define bustype_BDK_GICRX_ISENABLER0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_ISENABLER0(a) "GICRX_ISENABLER0"
+#define device_bar_BDK_GICRX_ISENABLER0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_ISENABLER0(a) (a)
+#define arguments_BDK_GICRX_ISENABLER0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_ispendr0
+ *
+ * GIC Redistributor Interrupt Set-Pending Register 0
+ * Each bit in GICR()_ISPENDR0 provides a set-pending bit for an SGI or a PPI. Writing one
+ * to a set-pending bit sets the status of the corresponding interrupt to pending.
+ */
+union bdk_gicrx_ispendr0
+{
+ uint32_t u;
+ struct bdk_gicrx_ispendr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ read as zero, then the interrupt is not pending. If read as one, the interrupt
+ is in pending state.
+
+ Set-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+ be set by secure accesses.
+
+ A set-pending bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ read as zero, then the interrupt is not pending. If read as one, the interrupt
+ is in pending state.
+
+ Set-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+ be set by secure accesses.
+
+ A set-pending bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+ } s;
+ /* CN9XX parts differ only in the field's volatility marking (R/W1S/H). */
+ struct bdk_gicrx_ispendr0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S/H) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ read as zero, then the interrupt is not pending. If read as one, the interrupt
+ is in pending state.
+
+ Set-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+ be set by secure accesses.
+
+ A set-pending bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+ uint32_t vec : 32; /**< [ 31: 0](R/W1S/H) Each bit corresponds to an SGI or PPI for interrupt IDs in the range 31..0. If
+ read as zero, then the interrupt is not pending. If read as one, the interrupt
+ is in pending state.
+
+ Set-pending bits corresponding to secure interrupts (either group 0 or group 1) may only
+ be set by secure accesses.
+
+ A set-pending bit for a secure interrupt is RAZ/WI to nonsecure accesses. */
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_gicrx_ispendr0_cn9 cn81xx; */
+ /* struct bdk_gicrx_ispendr0_s cn88xx; */
+ /* struct bdk_gicrx_ispendr0_cn9 cn83xx; */
+};
+typedef union bdk_gicrx_ispendr0 bdk_gicrx_ispendr0_t;
+
+/* Physical address of GICR(a)_ISPENDR0 for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_ISPENDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_ISPENDR0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080010200ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080010200ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080010200ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080010200ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_ISPENDR0", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_ISPENDR0(a) bdk_gicrx_ispendr0_t
+#define bustype_BDK_GICRX_ISPENDR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_ISPENDR0(a) "GICRX_ISPENDR0"
+#define device_bar_BDK_GICRX_ISPENDR0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_ISPENDR0(a) (a)
+#define arguments_BDK_GICRX_ISPENDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_movallr
+ *
+ * GIC Redistributor LPI Move All Register
+ * This register is write-only and causes the LPI configuration to be reloaded from the table in
+ * memory.
+ */
+union bdk_gicrx_movallr
+{
+ uint64_t u;
+ struct bdk_gicrx_movallr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pa : 32; /**< [ 63: 32](WO) Target address \<47:16\>. Base address of the redistributor to which pending LPIs are to be
+ moved.
+ If GICR()_(S)CTLR[ENABLE_LPIS] is zero, the write has no effect. */
+ uint64_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_31 : 32;
+ uint64_t pa : 32; /**< [ 63: 32](WO) Target address \<47:16\>. Base address of the redistributor to which pending LPIs are to be
+ moved.
+ If GICR()_(S)CTLR[ENABLE_LPIS] is zero, the write has no effect. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_movallr_s cn; */
+};
+typedef union bdk_gicrx_movallr bdk_gicrx_movallr_t;
+
+/* Physical address of GICR(a)_MOVALLR for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_MOVALLR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_MOVALLR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000110ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000110ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000110ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000110ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_MOVALLR", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_MOVALLR(a) bdk_gicrx_movallr_t
+#define bustype_BDK_GICRX_MOVALLR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_MOVALLR(a) "GICRX_MOVALLR"
+#define device_bar_BDK_GICRX_MOVALLR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_MOVALLR(a) (a)
+#define arguments_BDK_GICRX_MOVALLR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_movlpir
+ *
+ * GIC Redistributor Move LPI Register
+ */
+union bdk_gicrx_movlpir
+{
+ uint64_t u;
+ struct bdk_gicrx_movlpir_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pa : 32; /**< [ 63: 32](WO) Target address \<47:16\>. Base address of the redistributor to which the physical LPI is to
+ be moved. */
+ uint64_t pid : 32; /**< [ 31: 0](WO) Physical LPI ID to be moved to the redistributor at [PA]. If the LPI with this
+ PID is unimplemented, the write has no effect.
+ If GICR()_(S)CTLR[ENABLE_LPIS] is zero, the write has no effect. */
+#else /* Word 0 - Little Endian */
+ uint64_t pid : 32; /**< [ 31: 0](WO) Physical LPI ID to be moved to the redistributor at [PA]. If the LPI with this
+ PID is unimplemented, the write has no effect.
+ If GICR()_(S)CTLR[ENABLE_LPIS] is zero, the write has no effect. */
+ uint64_t pa : 32; /**< [ 63: 32](WO) Target address \<47:16\>. Base address of the redistributor to which the physical LPI is to
+ be moved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_movlpir_s cn; */
+};
+typedef union bdk_gicrx_movlpir bdk_gicrx_movlpir_t;
+
+/* Physical address of GICR(a)_MOVLPIR for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_MOVLPIR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_MOVLPIR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000100ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000100ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000100ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000100ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_MOVLPIR", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_MOVLPIR(a) bdk_gicrx_movlpir_t
+#define bustype_BDK_GICRX_MOVLPIR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_MOVLPIR(a) "GICRX_MOVLPIR"
+#define device_bar_BDK_GICRX_MOVLPIR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_MOVLPIR(a) (a)
+#define arguments_BDK_GICRX_MOVLPIR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_nsacr
+ *
+ * GIC Redistributor Non-Secure Access Control Secure Registers
+ */
+union bdk_gicrx_nsacr
+{
+ uint32_t u;
+ struct bdk_gicrx_nsacr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vec : 32; /**< [ 31: 0](SR/W) Two bits per SGI or PPI. Defines whether nonsecure access is permitted to secure
+ interrupt resources.
+ 0x0 = No nonsecure access is permitted to fields associated with the corresponding
+ interrupt.
+ 0x1 = Nonsecure write access is permitted to generate secure group0 interrupts.
+ 0x2 = Adds nonsecure write access permissions to generate secure group1 interrupts.
+ 0x3 = Reserved. Treated as 0x1.
+
+ This register is RAZ/WI for nonsecure accesses.
+
+ When GICD_(S)CTLR[DS] is one, this register is RAZ/WI. */
+#else /* Word 0 - Little Endian */
+ uint32_t vec : 32; /**< [ 31: 0](SR/W) Two bits per SGI or PPI. Defines whether nonsecure access is permitted to secure
+ interrupt resources.
+ 0x0 = No nonsecure access is permitted to fields associated with the corresponding
+ interrupt.
+ 0x1 = Nonsecure write access is permitted to generate secure group0 interrupts.
+ 0x2 = Adds nonsecure write access permissions to generate secure group1 interrupts.
+ 0x3 = Reserved. Treated as 0x1.
+
+ This register is RAZ/WI for nonsecure accesses.
+
+ When GICD_(S)CTLR[DS] is one, this register is RAZ/WI. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_nsacr_s cn; */
+};
+typedef union bdk_gicrx_nsacr bdk_gicrx_nsacr_t;
+
+/* Physical address of GICR(a)_NSACR for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_NSACR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_NSACR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080010e00ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080010e00ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080010e00ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080010e00ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_NSACR", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_NSACR(a) bdk_gicrx_nsacr_t
+#define bustype_BDK_GICRX_NSACR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_NSACR(a) "GICRX_NSACR"
+#define device_bar_BDK_GICRX_NSACR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_NSACR(a) (a)
+#define arguments_BDK_GICRX_NSACR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_pendbaser
+ *
+ * GIC Redistributor LPI Pending Table Address Register
+ */
+union bdk_gicrx_pendbaser
+{
+ uint64_t u;
+ struct bdk_gicrx_pendbaser_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t pending_table_zero : 1; /**< [ 62: 62](WO) Pending zero:
+ 0 = The coarse-grained map for the LPI pending table is valid.
+ 1 = The pending table has been zeroed out. */
+ uint64_t reserved_59_61 : 3;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t pa : 36; /**< [ 51: 16](R/W) Physical address bits \<46:16\> for the LPI pending table. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pa : 36; /**< [ 51: 16](R/W) Physical address bits \<46:16\> for the LPI pending table. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_59_61 : 3;
+ uint64_t pending_table_zero : 1; /**< [ 62: 62](WO) Pending zero:
+ 0 = The coarse-grained map for the LPI pending table is valid.
+ 1 = The pending table has been zeroed out. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* CN88XX pass 1 lacks the cacheability/shareability attribute fields and
+ carries a narrower (32-bit) PA field. */
+ struct bdk_gicrx_pendbaser_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t pending_table_zero : 1; /**< [ 62: 62](WO) Pending zero:
+ 0 = The coarse-grained map for the LPI pending table is valid.
+ 1 = The pending table has been zeroed out. */
+ uint64_t reserved_48_61 : 14;
+ uint64_t pa : 32; /**< [ 47: 16](R/W) Physical address bits \<46:16\> for the LPI pending table. */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t pa : 32; /**< [ 47: 16](R/W) Physical address bits \<46:16\> for the LPI pending table. */
+ uint64_t reserved_48_61 : 14;
+ uint64_t pending_table_zero : 1; /**< [ 62: 62](WO) Pending zero:
+ 0 = The coarse-grained map for the LPI pending table is valid.
+ 1 = The pending table has been zeroed out. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* CN9XX widens the documented PA range to \<51:16\>. */
+ struct bdk_gicrx_pendbaser_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t pending_table_zero : 1; /**< [ 62: 62](WO) Pending zero:
+ 0 = The coarse-grained map for the LPI pending table is valid.
+ 1 = The pending table has been zeroed out. */
+ uint64_t reserved_59_61 : 3;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t pa : 36; /**< [ 51: 16](R/W) Physical address bits \<51:16\> for the LPI pending table.
+ Software must set bits \<51:46\> and \<43\> to zero. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pa : 36; /**< [ 51: 16](R/W) Physical address bits \<51:16\> for the LPI pending table.
+ Software must set bits \<51:46\> and \<43\> to zero. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_59_61 : 3;
+ uint64_t pending_table_zero : 1; /**< [ 62: 62](WO) Pending zero:
+ 0 = The coarse-grained map for the LPI pending table is valid.
+ 1 = The pending table has been zeroed out. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn9;
+ /* CN81XX uses a 32-bit PA field (\<47:16\>) with attribute fields present. */
+ struct bdk_gicrx_pendbaser_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t pending_table_zero : 1; /**< [ 62: 62](WO) Pending zero:
+ 0 = The coarse-grained map for the LPI pending table is valid.
+ 1 = The pending table has been zeroed out. */
+ uint64_t reserved_59_61 : 3;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_48_55 : 8;
+ uint64_t pa : 32; /**< [ 47: 16](R/W) Physical address bits \<46:16\> for the LPI pending table. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pa : 32; /**< [ 47: 16](R/W) Physical address bits \<46:16\> for the LPI pending table. */
+ uint64_t reserved_48_55 : 8;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_59_61 : 3;
+ uint64_t pending_table_zero : 1; /**< [ 62: 62](WO) Pending zero:
+ 0 = The coarse-grained map for the LPI pending table is valid.
+ 1 = The pending table has been zeroed out. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_gicrx_pendbaser_cn81xx cn83xx; */
+ /* struct bdk_gicrx_pendbaser_cn81xx cn88xxp2; */
+};
+typedef union bdk_gicrx_pendbaser bdk_gicrx_pendbaser_t;
+
+/* Physical address of GICR(a)_PENDBASER for the running chip model. The
+ valid range of the redistributor index 'a' and the index mask are
+ model-dependent; __bdk_csr_fatal() is called for an out-of-range index or an
+ unknown model. */
+static inline uint64_t BDK_GICRX_PENDBASER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PENDBASER(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000078ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000078ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000078ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000078ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PENDBASER", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_PENDBASER(a) bdk_gicrx_pendbaser_t
+#define bustype_BDK_GICRX_PENDBASER(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_PENDBASER(a) "GICRX_PENDBASER"
+#define device_bar_BDK_GICRX_PENDBASER(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PENDBASER(a) (a)
+#define arguments_BDK_GICRX_PENDBASER(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_pidr0
+ *
+ * GIC Redistributor Peripheral Identification Register 0
+ */
+union bdk_gicrx_pidr0
+{
+ uint32_t u;
+ struct bdk_gicrx_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GICR. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GICR. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_pidr0_s cn; */
+};
+typedef union bdk_gicrx_pidr0 bdk_gicrx_pidr0_t;
+
+/* Physical address of GICR(a)_PIDR0 for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_PIDR0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PIDR0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000ffe0ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000ffe0ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000ffe0ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000ffe0ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PIDR0", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_PIDR0(a) bdk_gicrx_pidr0_t
+#define bustype_BDK_GICRX_PIDR0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_PIDR0(a) "GICRX_PIDR0"
+#define device_bar_BDK_GICRX_PIDR0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PIDR0(a) (a)
+#define arguments_BDK_GICRX_PIDR0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_pidr1
+ *
+ * GIC Redistributor Peripheral Identification Register 1
+ */
+union bdk_gicrx_pidr1
+{
+ uint32_t u;
+ struct bdk_gicrx_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_pidr1_s cn; */
+};
+typedef union bdk_gicrx_pidr1 bdk_gicrx_pidr1_t;
+
+/* Physical address of GICR(a)_PIDR1 for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_PIDR1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PIDR1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000ffe4ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000ffe4ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000ffe4ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000ffe4ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PIDR1", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_PIDR1(a) bdk_gicrx_pidr1_t
+#define bustype_BDK_GICRX_PIDR1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_PIDR1(a) "GICRX_PIDR1"
+#define device_bar_BDK_GICRX_PIDR1(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PIDR1(a) (a)
+#define arguments_BDK_GICRX_PIDR1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_pidr2
+ *
+ * GIC Redistributor Peripheral Identification Register 2
+ */
+union bdk_gicrx_pidr2
+{
+ uint32_t u;
+ struct bdk_gicrx_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t archrev : 4; /**< [ 7: 4](RO) Architectural revision:
+ 0x1 = GICv1.
+ 0x2 = GICV2.
+ 0x3 = GICv3.
+ 0x4 = GICv4.
+ 0x5-0xF = Reserved. */
+ uint32_t usesjepcode : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t jepid : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+#else /* Word 0 - Little Endian */
+ uint32_t jepid : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+ uint32_t usesjepcode : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t archrev : 4; /**< [ 7: 4](RO) Architectural revision:
+ 0x1 = GICv1.
+ 0x2 = GICV2.
+ 0x3 = GICv3.
+ 0x4 = GICv4.
+ 0x5-0xF = Reserved. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_pidr2_s cn; */
+};
+typedef union bdk_gicrx_pidr2 bdk_gicrx_pidr2_t;
+
+/* Physical address of GICR(a)_PIDR2 for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_PIDR2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PIDR2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000ffe8ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000ffe8ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000ffe8ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000ffe8ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PIDR2", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_PIDR2(a) bdk_gicrx_pidr2_t
+#define bustype_BDK_GICRX_PIDR2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_PIDR2(a) "GICRX_PIDR2"
+#define device_bar_BDK_GICRX_PIDR2(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PIDR2(a) (a)
+#define arguments_BDK_GICRX_PIDR2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_pidr3
+ *
+ * GIC Redistributor Peripheral Identification Register 3
+ */
+union bdk_gicrx_pidr3
+{
+ uint32_t u;
+ struct bdk_gicrx_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cmod : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cmod : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_pidr3_s cn; */
+};
+typedef union bdk_gicrx_pidr3 bdk_gicrx_pidr3_t;
+
+/* Physical address of GICR(a)_PIDR3 for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_PIDR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PIDR3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000ffecll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000ffecll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000ffecll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000ffecll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PIDR3", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_PIDR3(a) bdk_gicrx_pidr3_t
+#define bustype_BDK_GICRX_PIDR3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_PIDR3(a) "GICRX_PIDR3"
+#define device_bar_BDK_GICRX_PIDR3(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PIDR3(a) (a)
+#define arguments_BDK_GICRX_PIDR3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_pidr4
+ *
+ * GIC Redistributor Peripheral Identification Register 4
+ */
+union bdk_gicrx_pidr4
+{
+ uint32_t u;
+ struct bdk_gicrx_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cnt_4k : 4; /**< [ 7: 4](RO) This field is 0x4, indicating a 64 KB software-visible page. */
+ uint32_t continuation_code : 4; /**< [ 3: 0](RO) 0x3 = Cavium JEP106 continuation code. */
+#else /* Word 0 - Little Endian */
+ uint32_t continuation_code : 4; /**< [ 3: 0](RO) 0x3 = Cavium JEP106 continuation code. */
+ uint32_t cnt_4k : 4; /**< [ 7: 4](RO) This field is 0x4, indicating a 64 KB software-visible page. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_pidr4_s cn; */
+};
+typedef union bdk_gicrx_pidr4 bdk_gicrx_pidr4_t;
+
+/* Physical address of GICR(a)_PIDR4 for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_PIDR4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PIDR4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000ffd0ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000ffd0ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000ffd0ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000ffd0ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PIDR4", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_PIDR4(a) bdk_gicrx_pidr4_t
+#define bustype_BDK_GICRX_PIDR4(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_PIDR4(a) "GICRX_PIDR4"
+#define device_bar_BDK_GICRX_PIDR4(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PIDR4(a) (a)
+#define arguments_BDK_GICRX_PIDR4(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_pidr5
+ *
+ * GIC Redistributor Peripheral Identification Register 5
+ */
+union bdk_gicrx_pidr5
+{
+ uint32_t u;
+ /* All 32 bits are reserved on every supported model. */
+ struct bdk_gicrx_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_pidr5_s cn; */
+};
+typedef union bdk_gicrx_pidr5 bdk_gicrx_pidr5_t;
+
+/* Physical address of GICR(a)_PIDR5 for the running chip model. The valid
+ range of the redistributor index 'a' and the index mask are model-dependent;
+ __bdk_csr_fatal() is called for an out-of-range index or an unknown model. */
+static inline uint64_t BDK_GICRX_PIDR5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PIDR5(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000ffd4ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000ffd4ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000ffd4ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000ffd4ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PIDR5", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_GICRX_PIDR5(a) bdk_gicrx_pidr5_t
+#define bustype_BDK_GICRX_PIDR5(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_PIDR5(a) "GICRX_PIDR5"
+#define device_bar_BDK_GICRX_PIDR5(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PIDR5(a) (a)
+#define arguments_BDK_GICRX_PIDR5(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_pidr6
+ *
+ * GIC Redistributor Peripheral Identification Register 6
+ */
+union bdk_gicrx_pidr6
+{
+ uint32_t u;
+ struct bdk_gicrx_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32; /* Entire word is reserved; register exists only for ID-space completeness. */
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32; /* Entire word is reserved; register exists only for ID-space completeness. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_pidr6_s cn; */
+};
+typedef union bdk_gicrx_pidr6 bdk_gicrx_pidr6_t;
+
+/* Physical address of GICR(a)_PIDR6: 0x20000-byte stride per redistributor index;
+ out-of-range index for every known model -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_PIDR6(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PIDR6(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000ffd8ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000ffd8ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000ffd8ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000ffd8ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PIDR6", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_PIDR6(a) bdk_gicrx_pidr6_t
+#define bustype_BDK_GICRX_PIDR6(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_PIDR6(a) "GICRX_PIDR6"
+#define device_bar_BDK_GICRX_PIDR6(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PIDR6(a) (a)
+#define arguments_BDK_GICRX_PIDR6(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_pidr7
+ *
+ * GIC Redistributor Peripheral Identification Register 7
+ */
+union bdk_gicrx_pidr7
+{
+ uint32_t u;
+ struct bdk_gicrx_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32; /* Entire word is reserved; register exists only for ID-space completeness. */
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32; /* Entire word is reserved; register exists only for ID-space completeness. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_pidr7_s cn; */
+};
+typedef union bdk_gicrx_pidr7 bdk_gicrx_pidr7_t;
+
+/* Physical address of GICR(a)_PIDR7: 0x20000-byte stride per redistributor index;
+ out-of-range index for every known model -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_PIDR7(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PIDR7(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000ffdcll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000ffdcll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000ffdcll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000ffdcll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PIDR7", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_PIDR7(a) bdk_gicrx_pidr7_t
+#define bustype_BDK_GICRX_PIDR7(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_PIDR7(a) "GICRX_PIDR7"
+#define device_bar_BDK_GICRX_PIDR7(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PIDR7(a) (a)
+#define arguments_BDK_GICRX_PIDR7(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_propbaser
+ *
+ * GIC Redistributor LPI Configuration Table Address Register
+ *
+ * Layout varies by chip: CN88XX pass 1 has only a 36-bit PA and no cacheability/
+ * shareability fields; CN81XX/CN83XX/CN88XX pass 2 have a 36-bit PA with attribute
+ * fields; CN9XXX widens the PA to 40 bits. The "s" view matches the widest layout.
+ */
+union bdk_gicrx_propbaser
+{
+ uint64_t u;
+ struct bdk_gicrx_propbaser_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t pa : 40; /**< [ 51: 12](R/W) Physical address bits \<46:12\> for the LPI configuration table. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t num_bits : 5; /**< [ 4: 0](R/W) The number of bits of LPI ID supported, minus one. If this value exceeds the value of
+ GICD_TYPER[IDBITS], then the number of bits must be treated as the value defined by
+ GICD_TYPER[IDBITS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_bits : 5; /**< [ 4: 0](R/W) The number of bits of LPI ID supported, minus one. If this value exceeds the value of
+ GICD_TYPER[IDBITS], then the number of bits must be treated as the value defined by
+ GICD_TYPER[IDBITS]. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t pa : 40; /**< [ 51: 12](R/W) Physical address bits \<46:12\> for the LPI configuration table. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gicrx_propbaser_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t pa : 36; /**< [ 47: 12](R/W) Physical address bits \<46:12\> for the LPI configuration table. */
+ uint64_t reserved_5_11 : 7;
+ uint64_t num_bits : 5; /**< [ 4: 0](R/W) The number of bits of LPI ID supported, minus one. If this value exceeds the value of
+ GICD_TYPER[IDBITS], then the number of bits must be treated as the value defined by
+ GICD_TYPER[IDBITS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_bits : 5; /**< [ 4: 0](R/W) The number of bits of LPI ID supported, minus one. If this value exceeds the value of
+ GICD_TYPER[IDBITS], then the number of bits must be treated as the value defined by
+ GICD_TYPER[IDBITS]. */
+ uint64_t reserved_5_11 : 7;
+ uint64_t pa : 36; /**< [ 47: 12](R/W) Physical address bits \<46:12\> for the LPI configuration table. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_gicrx_propbaser_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t pa : 40; /**< [ 51: 12](R/W) Physical address bits \<51:12\> for the LPI configuration table.
+ Software must set bits \<51:46\> and \<43\> to zero. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t num_bits : 5; /**< [ 4: 0](R/W) The number of bits of LPI ID supported, minus one. If this value exceeds the value of
+ GICD_TYPER[IDBITS], then the number of bits must be treated as the value defined by
+ GICD_TYPER[IDBITS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_bits : 5; /**< [ 4: 0](R/W) The number of bits of LPI ID supported, minus one. If this value exceeds the value of
+ GICD_TYPER[IDBITS], then the number of bits must be treated as the value defined by
+ GICD_TYPER[IDBITS]. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t pa : 40; /**< [ 51: 12](R/W) Physical address bits \<51:12\> for the LPI configuration table.
+ Software must set bits \<51:46\> and \<43\> to zero. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gicrx_propbaser_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_48_55 : 8;
+ uint64_t pa : 36; /**< [ 47: 12](R/W) Physical address bits \<46:12\> for the LPI configuration table. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t num_bits : 5; /**< [ 4: 0](R/W) The number of bits of LPI ID supported, minus one. If this value exceeds the value of
+ GICD_TYPER[IDBITS], then the number of bits must be treated as the value defined by
+ GICD_TYPER[IDBITS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_bits : 5; /**< [ 4: 0](R/W) The number of bits of LPI ID supported, minus one. If this value exceeds the value of
+ GICD_TYPER[IDBITS], then the number of bits must be treated as the value defined by
+ GICD_TYPER[IDBITS]. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t cacheability : 3; /**< [ 9: 7](R/W) Cacheability attributes. Ignored in CNXXXX. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attributes. Ignored in CNXXXX. */
+ uint64_t pa : 36; /**< [ 47: 12](R/W) Physical address bits \<46:12\> for the LPI configuration table. */
+ uint64_t reserved_48_55 : 8;
+ uint64_t outer_cacheability : 3; /**< [ 58: 56](R/W) Outer cacheability attributes. Ignored in CNXXXX. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_gicrx_propbaser_cn81xx cn83xx; */
+ /* struct bdk_gicrx_propbaser_cn81xx cn88xxp2; */
+};
+typedef union bdk_gicrx_propbaser bdk_gicrx_propbaser_t;
+
+/* Physical address of GICR(a)_PROPBASER: 0x20000-byte stride per redistributor index;
+ out-of-range index for every known model -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_PROPBASER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_PROPBASER(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000070ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000070ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000070ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000070ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_PROPBASER", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_PROPBASER(a) bdk_gicrx_propbaser_t
+#define bustype_BDK_GICRX_PROPBASER(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_PROPBASER(a) "GICRX_PROPBASER"
+#define device_bar_BDK_GICRX_PROPBASER(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_PROPBASER(a) (a)
+#define arguments_BDK_GICRX_PROPBASER(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_sctlr
+ *
+ * GIC Redistributor (Secure) Control Register
+ * This register controls the behavior of the nonsecure redistributor.
+ */
+union bdk_gicrx_sctlr
+{
+ uint32_t u;
+ struct bdk_gicrx_sctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t uwp : 1; /**< [ 31: 31](RO) Upstream write pending. Common to both security states. Read-only.
+ 0 = The effects of all upstream writes have been communicated to the parent
+ redistributor, including any generate SGI packets.
+ 1 = The effects of all upstream writes have not been communicated to the parent
+ redistributor, including any generate SGI packets. */
+ uint32_t reserved_4_30 : 27;
+ uint32_t rwp : 1; /**< [ 3: 3](RO) Register write pending. This bit indicates whether a register write for the current
+ security state (banked) is in progress or not.
+ 0 = The effect of all register writes are visible to all descendants of the
+ redistributor, including processors.
+ 1 = The effects of all register writes are not visible to all descendants of the
+ redistributor.
+
+ Note: this field tracks completion of writes to GICR()_ICENABLER0 that clear
+ the enable of one or more interrupts. */
+ uint32_t reserved_1_2 : 2;
+ uint32_t enable_lpis : 1; /**< [ 0: 0](R/W) Enable LPIs. Common to both security states. When this bit is clear,
+ writes to generate physical LPIs to GICR()_SETLPIR will be ignored.
+ When a write changes this bit from zero to one, this bit becomes RAO/WI and the
+ redistributor must load the pending table from memory to check for any pending interrupts. */
+#else /* Word 0 - Little Endian */
+ uint32_t enable_lpis : 1; /**< [ 0: 0](R/W) Enable LPIs. Common to both security states. When this bit is clear,
+ writes to generate physical LPIs to GICR()_SETLPIR will be ignored.
+ When a write changes this bit from zero to one, this bit becomes RAO/WI and the
+ redistributor must load the pending table from memory to check for any pending interrupts. */
+ uint32_t reserved_1_2 : 2;
+ uint32_t rwp : 1; /**< [ 3: 3](RO) Register write pending. This bit indicates whether a register write for the current
+ security state (banked) is in progress or not.
+ 0 = The effect of all register writes are visible to all descendants of the
+ redistributor, including processors.
+ 1 = The effects of all register writes are not visible to all descendants of the
+ redistributor.
+
+ Note: this field tracks completion of writes to GICR()_ICENABLER0 that clear
+ the enable of one or more interrupts. */
+ uint32_t reserved_4_30 : 27;
+ uint32_t uwp : 1; /**< [ 31: 31](RO) Upstream write pending. Common to both security states. Read-only.
+ 0 = The effects of all upstream writes have been communicated to the parent
+ redistributor, including any generate SGI packets.
+ 1 = The effects of all upstream writes have not been communicated to the parent
+ redistributor, including any generate SGI packets. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_sctlr_s cn; */
+};
+typedef union bdk_gicrx_sctlr bdk_gicrx_sctlr_t;
+
+/* Physical address of GICR(a)_SCTLR (offset 0 of the redistributor frame):
+ 0x20000-byte stride per redistributor index; out-of-range -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_SCTLR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_SCTLR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000000ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000000ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000000ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000000ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_SCTLR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_SCTLR(a) bdk_gicrx_sctlr_t
+#define bustype_BDK_GICRX_SCTLR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_SCTLR(a) "GICRX_SCTLR"
+#define device_bar_BDK_GICRX_SCTLR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_SCTLR(a) (a)
+#define arguments_BDK_GICRX_SCTLR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_seir
+ *
+ * GIC Redistributor Generate SEI Register
+ */
+union bdk_gicrx_seir
+{
+ uint32_t u;
+ struct bdk_gicrx_seir_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t syndrome : 16; /**< [ 15: 0](WO) Syndrome value for the SEI to be generated. If another write to this register occurs
+ before the previous has been forwarded to its recipients, the new value is ORed with the
+ existing value. [SYNDROME] is sticky and indicates that at least one error of a
+ class has occurred. */
+#else /* Word 0 - Little Endian */
+ uint32_t syndrome : 16; /**< [ 15: 0](WO) Syndrome value for the SEI to be generated. If another write to this register occurs
+ before the previous has been forwarded to its recipients, the new value is ORed with the
+ existing value. [SYNDROME] is sticky and indicates that at least one error of a
+ class has occurred. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_seir_s cn; */
+};
+typedef union bdk_gicrx_seir bdk_gicrx_seir_t;
+
+/* Physical address of GICR(a)_SEIR: 0x20000-byte stride per redistributor index;
+ out-of-range index for every known model -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_SEIR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_SEIR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000068ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000068ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000068ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000068ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_SEIR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_SEIR(a) bdk_gicrx_seir_t
+#define bustype_BDK_GICRX_SEIR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_SEIR(a) "GICRX_SEIR"
+#define device_bar_BDK_GICRX_SEIR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_SEIR(a) (a)
+#define arguments_BDK_GICRX_SEIR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_setdel3tr_el1s
+ *
+ * GIC Redistributor Set Non-Maskable Interrupt Secure Registers
+ */
+union bdk_gicrx_setdel3tr_el1s
+{
+ uint32_t u;
+ struct bdk_gicrx_setdel3tr_el1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vec : 32; /**< [ 31: 0](SWO) These write-only secure registers are used to generate DEL3T interrupts to the APs.
+ The value written into these registers is not used. There is no interrupt ID for DEL3Ts.
+ Whenever a register in this set is written, the DEL3T signal of the AP being
+ managed by that register is asserted.
+
+ Each register in this set is RAZ/WI for nonsecure accesses. */
+#else /* Word 0 - Little Endian */
+ uint32_t vec : 32; /**< [ 31: 0](SWO) These write-only secure registers are used to generate DEL3T interrupts to the APs.
+ The value written into these registers is not used. There is no interrupt ID for DEL3Ts.
+ Whenever a register in this set is written, the DEL3T signal of the AP being
+ managed by that register is asserted.
+
+ Each register in this set is RAZ/WI for nonsecure accesses. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_setdel3tr_el1s_s cn; */
+};
+typedef union bdk_gicrx_setdel3tr_el1s bdk_gicrx_setdel3tr_el1s_t;
+
+/* Physical address of GICR(a)_SETDEL3TR_EL1S: 0x20000-byte stride per redistributor
+ index; out-of-range index for every known model -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_SETDEL3TR_EL1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_SETDEL3TR_EL1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x80108000c000ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x80108000c000ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x80108000c000ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x80108000c000ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_SETDEL3TR_EL1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_SETDEL3TR_EL1S(a) bdk_gicrx_setdel3tr_el1s_t
+#define bustype_BDK_GICRX_SETDEL3TR_EL1S(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_SETDEL3TR_EL1S(a) "GICRX_SETDEL3TR_EL1S"
+#define device_bar_BDK_GICRX_SETDEL3TR_EL1S(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_SETDEL3TR_EL1S(a) (a)
+#define arguments_BDK_GICRX_SETDEL3TR_EL1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_setlpir
+ *
+ * GIC Redistributor Set LPI Register
+ */
+union bdk_gicrx_setlpir
+{
+ uint64_t u;
+ struct bdk_gicrx_setlpir_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t pid : 32; /**< [ 31: 0](WO) Physical ID of the LPI to be generated. If the LPI is already pending, the write has no
+ effect.
+ If the LPI with the physical ID is not implemented, the write has no effect.
+ If GICR()_(S)CTLR[ENABLE_LPIS] is zero, the write has no effect. */
+#else /* Word 0 - Little Endian */
+ uint64_t pid : 32; /**< [ 31: 0](WO) Physical ID of the LPI to be generated. If the LPI is already pending, the write has no
+ effect.
+ If the LPI with the physical ID is not implemented, the write has no effect.
+ If GICR()_(S)CTLR[ENABLE_LPIS] is zero, the write has no effect. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_setlpir_s cn; */
+};
+typedef union bdk_gicrx_setlpir bdk_gicrx_setlpir_t;
+
+/* Physical address of GICR(a)_SETLPIR: 0x20000-byte stride per redistributor index;
+ out-of-range index for every known model -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_SETLPIR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_SETLPIR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000040ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000040ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000040ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000040ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_SETLPIR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_SETLPIR(a) bdk_gicrx_setlpir_t
+#define bustype_BDK_GICRX_SETLPIR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_SETLPIR(a) "GICRX_SETLPIR"
+#define device_bar_BDK_GICRX_SETLPIR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_SETLPIR(a) (a)
+#define arguments_BDK_GICRX_SETLPIR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_sstatusr
+ *
+ * GIC Redistributor (Secure) Status Register
+ */
+union bdk_gicrx_sstatusr
+{
+ uint32_t u;
+ struct bdk_gicrx_sstatusr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t wrod : 1; /**< [ 3: 3](R/W1C/H) This bit is set if a write to a read-only location is detected. Software must write a one
+ to this bit to clear it. */
+ uint32_t rwod : 1; /**< [ 2: 2](R/W1C/H) This bit is set if a read to a write-only location is detected. Software must write a one
+ to this bit to clear it. */
+ uint32_t wrd : 1; /**< [ 1: 1](R/W1C/H) This bit is set if a write to a reserved location is detected. Software must write a one
+ to this bit to clear it. */
+ uint32_t rrd : 1; /**< [ 0: 0](R/W1C/H) This bit is set if a read to a reserved location is detected. Software must write a one to
+ this bit to clear it. */
+#else /* Word 0 - Little Endian */
+ uint32_t rrd : 1; /**< [ 0: 0](R/W1C/H) This bit is set if a read to a reserved location is detected. Software must write a one to
+ this bit to clear it. */
+ uint32_t wrd : 1; /**< [ 1: 1](R/W1C/H) This bit is set if a write to a reserved location is detected. Software must write a one
+ to this bit to clear it. */
+ uint32_t rwod : 1; /**< [ 2: 2](R/W1C/H) This bit is set if a read to a write-only location is detected. Software must write a one
+ to this bit to clear it. */
+ uint32_t wrod : 1; /**< [ 3: 3](R/W1C/H) This bit is set if a write to a read-only location is detected. Software must write a one
+ to this bit to clear it. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_sstatusr_s cn; */
+};
+typedef union bdk_gicrx_sstatusr bdk_gicrx_sstatusr_t;
+
+/* Physical address of GICR(a)_SSTATUSR: 0x20000-byte stride per redistributor index;
+ out-of-range index for every known model -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_SSTATUSR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_SSTATUSR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000010ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000010ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000010ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000010ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_SSTATUSR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_SSTATUSR(a) bdk_gicrx_sstatusr_t
+#define bustype_BDK_GICRX_SSTATUSR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_SSTATUSR(a) "GICRX_SSTATUSR"
+#define device_bar_BDK_GICRX_SSTATUSR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_SSTATUSR(a) (a)
+#define arguments_BDK_GICRX_SSTATUSR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_syncr
+ *
+ * GIC Redistributor Sync Register
+ */
+union bdk_gicrx_syncr
+{
+ uint32_t u;
+ struct bdk_gicrx_syncr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t busy : 1; /**< [ 0: 0](RO) Reserved. When this register is read, it will only return read-data with [BUSY] as zero when
+ none of the following operations are in progress:
+ * Any writes to GICR()_CLRLPIR within the redistributor.
+ * Any writes to GICR()_MOVLPIR within the redistributor.
+ * Any writes to GICR()_MOVALLR within the redistributor.
+ * Any writes to GICR()_INVLPIR within the redistributor.
+ * Any writes to GICR()_INVALLR within the redistributor.
+ * Any writes to another redistributor performed as a result of a previous write to
+ GICR()_MOVLPIR or GICR()_MOVALLR have completed and arrived at the target redistributor.
+ Including operations initiated by writing to GICR()_PENDBASER or GICR()_PROPBASER. */
+#else /* Word 0 - Little Endian */
+ uint32_t busy : 1; /**< [ 0: 0](RO) Reserved. When this register is read, it will only return read-data with [BUSY] as zero when
+ none of the following operations are in progress:
+ * Any writes to GICR()_CLRLPIR within the redistributor.
+ * Any writes to GICR()_MOVLPIR within the redistributor.
+ * Any writes to GICR()_MOVALLR within the redistributor.
+ * Any writes to GICR()_INVLPIR within the redistributor.
+ * Any writes to GICR()_INVALLR within the redistributor.
+ * Any writes to another redistributor performed as a result of a previous write to
+ GICR()_MOVLPIR or GICR()_MOVALLR have completed and arrived at the target redistributor.
+ Including operations initiated by writing to GICR()_PENDBASER or GICR()_PROPBASER. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gicrx_syncr_s cn; */
+};
+typedef union bdk_gicrx_syncr bdk_gicrx_syncr_t;
+
+/* Physical address of GICR(a)_SYNCR: 0x20000-byte stride per redistributor index;
+ out-of-range index for every known model -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_SYNCR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_SYNCR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x8010800000c0ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x8010800000c0ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x8010800000c0ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x8010800000c0ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_SYNCR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GICRX_SYNCR(a) bdk_gicrx_syncr_t
+#define bustype_BDK_GICRX_SYNCR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_SYNCR(a) "GICRX_SYNCR"
+#define device_bar_BDK_GICRX_SYNCR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_SYNCR(a) (a)
+#define arguments_BDK_GICRX_SYNCR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gicr#_typer
+ *
+ * GIC Redistributor Type Register
+ * This 64-bit read-only register is used to discover the properties of the redistributor and is
+ * always accessible regardless of the ARE setting for a security state.
+ */
+union bdk_gicrx_typer
+{
+ uint64_t u;
+ struct bdk_gicrx_typer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t a3 : 8; /**< [ 63: 56](RO) The affinity level 3 value for the redistributor. */
+ uint64_t a2 : 8; /**< [ 55: 48](RO/H) The affinity level 2 value for the redistributor. */
+ uint64_t a1 : 8; /**< [ 47: 40](RO/H) The affinity level 1 value for the redistributor. */
+ uint64_t a0 : 8; /**< [ 39: 32](RO/H) The affinity level 0 value for the redistributor. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t commonlpiaff : 2; /**< [ 25: 24](RAZ) The affinity level at which re-distributors share a LPI configuration table.
+ 0x0 = All re-distributors must share a config table.
+ 0x1 = All re-distributors with the same Aff3 value must share a LPI configuration table.
+ 0x2 = All re-distributors with the same Aff3.Aff2 value must share a LPI configuration.
+ table.
+ 0x3 = All re-distributors with the same Aff3.Aff2.Aff1 value must share an LPI
+ configuration table. */
+ uint64_t pn : 16; /**< [ 23: 8](RO/H) The processor number, a unique identifier for the processor understood by the ITS. Should
+ be the logical processor number supported by the redistributor, which is the redistributor
+ ID, ie. the variable a. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t dpgs : 1; /**< [ 5: 5](RAZ) GICR()_(S)CTLR[DPG*] bits are NOT supported. */
+ uint64_t last : 1; /**< [ 4: 4](RO/H) Last. This bit is only set for the last redistributor in a set of contiguous redistributor
+ register pages. Needs to be determined from fuse signals or SKU. */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed implementation:
+ 0 = Monolithic implementation.
+ 1 = Distributed implementation registers supported. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t plpis : 1; /**< [ 0: 0](RO) Physical LPIs supported:
+ 0 = Physical LPIs not supported.
+ 1 = Physical LPIs supported. */
+#else /* Word 0 - Little Endian */
+ uint64_t plpis : 1; /**< [ 0: 0](RO) Physical LPIs supported:
+ 0 = Physical LPIs not supported.
+ 1 = Physical LPIs supported. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed implementation:
+ 0 = Monolithic implementation.
+ 1 = Distributed implementation registers supported. */
+ uint64_t last : 1; /**< [ 4: 4](RO/H) Last. This bit is only set for the last redistributor in a set of contiguous redistributor
+ register pages. Needs to be determined from fuse signals or SKU. */
+ uint64_t dpgs : 1; /**< [ 5: 5](RAZ) GICR()_(S)CTLR[DPG*] bits are NOT supported. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pn : 16; /**< [ 23: 8](RO/H) The processor number, a unique identifier for the processor understood by the ITS. Should
+ be the logical processor number supported by the redistributor, which is the redistributor
+ ID, ie. the variable a. */
+ uint64_t commonlpiaff : 2; /**< [ 25: 24](RAZ) The affinity level at which re-distributors share a LPI configuration table.
+ 0x0 = All re-distributors must share a config table.
+ 0x1 = All re-distributors with the same Aff3 value must share a LPI configuration table.
+ 0x2 = All re-distributors with the same Aff3.Aff2 value must share a LPI configuration.
+ table.
+ 0x3 = All re-distributors with the same Aff3.Aff2.Aff1 value must share an LPI
+ configuration table. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t a0 : 8; /**< [ 39: 32](RO/H) The affinity level 0 value for the redistributor. */
+ uint64_t a1 : 8; /**< [ 47: 40](RO/H) The affinity level 1 value for the redistributor. */
+ uint64_t a2 : 8; /**< [ 55: 48](RO/H) The affinity level 2 value for the redistributor. */
+ uint64_t a3 : 8; /**< [ 63: 56](RO) The affinity level 3 value for the redistributor. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gicrx_typer_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t a3 : 8; /**< [ 63: 56](RO) The affinity level 3 value for the redistributor. */
+ uint64_t a2 : 8; /**< [ 55: 48](RO/H) The affinity level 2 value for the redistributor. */
+ uint64_t a1 : 8; /**< [ 47: 40](RO/H) The affinity level 1 value for the redistributor. */
+ uint64_t a0 : 8; /**< [ 39: 32](RO/H) The affinity level 0 value for the redistributor. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t pn : 16; /**< [ 23: 8](RO/H) The processor number, a unique identifier for the processor understood by the ITS. Should
+ be the logical processor number supported by the redistributor, which is the redistributor
+ ID, ie. the variable a. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t dpgs : 1; /**< [ 5: 5](RAZ) GICR()_(S)CTLR[DPG*] bits are NOT supported. */
+ uint64_t last : 1; /**< [ 4: 4](RO/H) Last. This bit is only set for the last redistributor in a set of contiguous redistributor
+ register pages. Needs to be determined from fuse signals or SKU. */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed implementation:
+ 0 = Monolithic implementation.
+ 1 = Distributed implementation registers supported. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t plpis : 1; /**< [ 0: 0](RO) Physical LPIs supported:
+ 0 = Physical LPIs not supported.
+ 1 = Physical LPIs supported. */
+#else /* Word 0 - Little Endian */
+ uint64_t plpis : 1; /**< [ 0: 0](RO) Physical LPIs supported:
+ 0 = Physical LPIs not supported.
+ 1 = Physical LPIs supported. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed implementation:
+ 0 = Monolithic implementation.
+ 1 = Distributed implementation registers supported. */
+ uint64_t last : 1; /**< [ 4: 4](RO/H) Last. This bit is only set for the last redistributor in a set of contiguous redistributor
+ register pages. Needs to be determined from fuse signals or SKU. */
+ uint64_t dpgs : 1; /**< [ 5: 5](RAZ) GICR()_(S)CTLR[DPG*] bits are NOT supported. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pn : 16; /**< [ 23: 8](RO/H) The processor number, a unique identifier for the processor understood by the ITS. Should
+ be the logical processor number supported by the redistributor, which is the redistributor
+ ID, ie. the variable a. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t a0 : 8; /**< [ 39: 32](RO/H) The affinity level 0 value for the redistributor. */
+ uint64_t a1 : 8; /**< [ 47: 40](RO/H) The affinity level 1 value for the redistributor. */
+ uint64_t a2 : 8; /**< [ 55: 48](RO/H) The affinity level 2 value for the redistributor. */
+ uint64_t a3 : 8; /**< [ 63: 56](RO) The affinity level 3 value for the redistributor. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gicrx_typer_s cn9; */
+};
+typedef union bdk_gicrx_typer bdk_gicrx_typer_t;
+
+/* Return the physical address of GICR(a)_TYPER for redistributor index 'a'.
+ * Each redistributor occupies a 0x20000-byte register frame above the common
+ * base 0x801080000008; the valid index range and the index mask depend on the
+ * detected chip model (CN81XX: 4, CN83XX/CN9XXX: 24, CN88XX: 48 indices).
+ * An out-of-range index or unknown model is reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_TYPER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_TYPER(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000008ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000008ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000008ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000008ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_TYPER", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GICRX_TYPER(a) bdk_gicrx_typer_t
+#define bustype_BDK_GICRX_TYPER(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GICRX_TYPER(a) "GICRX_TYPER"
+#define device_bar_BDK_GICRX_TYPER(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_TYPER(a) (a)
+#define arguments_BDK_GICRX_TYPER(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) gicr#_waker
+ *
+ * GIC Redistributor Wake Request Control Secure Register
+ * The same four fields ([SLEEP], [PS], [CA], [QUIESCENT]) are declared twice,
+ * once per bit order, selected by __BYTE_ORDER.  The cn9 layout (also aliased
+ * for CN81XX and CN83XX below) is bit-identical to the generic 's' layout and
+ * differs only in marking [CA] and [QUIESCENT] as hardware-updated (SRO/H
+ * instead of SRO).
+ */
+union bdk_gicrx_waker
+{
+ uint32_t u;
+ struct bdk_gicrx_waker_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t quiescent : 1; /**< [ 31: 31](SRO) Indicates that redistributor is quiescent and can be powered off. */
+ uint32_t reserved_3_30 : 28;
+ uint32_t ca : 1; /**< [ 2: 2](SRO) Children asleep.
+ When [PS] is one, the redistributor treats the interrupt group enables as zero
+ until a subsequent update to the enables is received. */
+ uint32_t ps : 1; /**< [ 1: 1](SR/W) Processor sleep.
+ 0 = The redistributor never asserts WakeRequest.
+ 1 = The redistributor must assert WakeRequest and hold interrupts as pending if an enable
+ bit is zero for an interrupt group and there is a pending interrupt for that group. */
+ uint32_t sleep : 1; /**< [ 0: 0](SR/W) Sleep.
+ 0 = The parent never asserts WakeRequest.
+ 1 = The parent must assert WakeRequest and hold interrupts as pending. */
+#else /* Word 0 - Little Endian */
+ uint32_t sleep : 1; /**< [ 0: 0](SR/W) Sleep.
+ 0 = The parent never asserts WakeRequest.
+ 1 = The parent must assert WakeRequest and hold interrupts as pending. */
+ uint32_t ps : 1; /**< [ 1: 1](SR/W) Processor sleep.
+ 0 = The redistributor never asserts WakeRequest.
+ 1 = The redistributor must assert WakeRequest and hold interrupts as pending if an enable
+ bit is zero for an interrupt group and there is a pending interrupt for that group. */
+ uint32_t ca : 1; /**< [ 2: 2](SRO) Children asleep.
+ When [PS] is one, the redistributor treats the interrupt group enables as zero
+ until a subsequent update to the enables is received. */
+ uint32_t reserved_3_30 : 28;
+ uint32_t quiescent : 1; /**< [ 31: 31](SRO) Indicates that redistributor is quiescent and can be powered off. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gicrx_waker_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t quiescent : 1; /**< [ 31: 31](SRO/H) Indicates that redistributor is quiescent and can be powered off. */
+ uint32_t reserved_3_30 : 28;
+ uint32_t ca : 1; /**< [ 2: 2](SRO/H) Children asleep.
+ When [PS] is one, the redistributor treats the interrupt group enables as zero
+ until a subsequent update to the enables is received. */
+ uint32_t ps : 1; /**< [ 1: 1](SR/W) Processor sleep.
+ 0 = The redistributor never asserts WakeRequest.
+ 1 = The redistributor must assert WakeRequest and hold interrupts as pending if an enable
+ bit is zero for an interrupt group and there is a pending interrupt for that group. */
+ uint32_t sleep : 1; /**< [ 0: 0](SR/W) Sleep.
+ 0 = The parent never asserts WakeRequest.
+ 1 = The parent must assert WakeRequest and hold interrupts as pending. */
+#else /* Word 0 - Little Endian */
+ uint32_t sleep : 1; /**< [ 0: 0](SR/W) Sleep.
+ 0 = The parent never asserts WakeRequest.
+ 1 = The parent must assert WakeRequest and hold interrupts as pending. */
+ uint32_t ps : 1; /**< [ 1: 1](SR/W) Processor sleep.
+ 0 = The redistributor never asserts WakeRequest.
+ 1 = The redistributor must assert WakeRequest and hold interrupts as pending if an enable
+ bit is zero for an interrupt group and there is a pending interrupt for that group. */
+ uint32_t ca : 1; /**< [ 2: 2](SRO/H) Children asleep.
+ When [PS] is one, the redistributor treats the interrupt group enables as zero
+ until a subsequent update to the enables is received. */
+ uint32_t reserved_3_30 : 28;
+ uint32_t quiescent : 1; /**< [ 31: 31](SRO/H) Indicates that redistributor is quiescent and can be powered off. */
+#endif /* Word 0 - End */
+ } cn9;
+ /* Per-model aliases: CN81XX/CN83XX share the cn9 layout, CN88XX the generic one. */
+ /* struct bdk_gicrx_waker_cn9 cn81xx; */
+ /* struct bdk_gicrx_waker_s cn88xx; */
+ /* struct bdk_gicrx_waker_cn9 cn83xx; */
+};
+typedef union bdk_gicrx_waker bdk_gicrx_waker_t;
+
+/* Return the physical address of GICR(a)_WAKER for redistributor index 'a'.
+ * Same per-model frame arithmetic as BDK_GICRX_TYPER (one 0x20000-byte frame
+ * per redistributor), but at register offset 0x14 within the frame.
+ * An out-of-range index or unknown model is reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GICRX_WAKER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GICRX_WAKER(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x801080000014ll + 0x20000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=23))
+ return 0x801080000014ll + 0x20000ll * ((a) & 0x1f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=47))
+ return 0x801080000014ll + 0x20000ll * ((a) & 0x3f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=23))
+ return 0x801080000014ll + 0x20000ll * ((a) & 0x1f);
+ __bdk_csr_fatal("GICRX_WAKER", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata consumed by the generic BDK_CSR_* accessor macros.
+ * NCB32b: this register is accessed as a 32-bit quantity. */
+#define typedef_BDK_GICRX_WAKER(a) bdk_gicrx_waker_t
+#define bustype_BDK_GICRX_WAKER(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GICRX_WAKER(a) "GICRX_WAKER"
+#define device_bar_BDK_GICRX_WAKER(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_GICRX_WAKER(a) (a)
+#define arguments_BDK_GICRX_WAKER(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gits_baser#
+ *
+ * GIC ITS Device Table Registers
+ * This set of 64-bit registers specify the base address and size of a number of implementation
+ * defined tables required by the ITS.
+ * An implementation can provide up to eight such registers.
+ * Where a register is not implemented, it is RES0.
+ * Bits [63:32] and bits [31:0] may be accessed independently.
+ */
+union bdk_gits_baserx
+{
+ uint64_t u;
+ struct bdk_gits_baserx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid:
+ 0 = No memory has been allocated to the table and if the type field is nonzero, the ITS
+ discards any writes to the interrupt translation page.
+ 1 = Memory has been allocated to the table by software. */
+ uint64_t indirect : 1; /**< [ 62: 62](RO) Indirect.This field indicates whether an implemented register specifies a single, flat
+ table or a two-level table where the first level
+ contains a list of descriptors. Note: this field is RAZ/WI for implementations that only
+ support flat tables.
+ 0 = Single level. [SIZE] indicates a number of pages used by the ITS to store data
+ associated with each table entry.
+ 1 = Two level. [SIZE] indicates a number of pages which contain an array of 64-bit
+ descriptors to pages that are used
+ to store the data associated with each table entry. Each 64-bit descriptor has the
+ following format:
+ * Bits\<63\> = Valid.
+ * Bits\<62:48\> = Reserved.
+ * Bits\<47:N\> = Physical address.
+ * Bits\<N-1:0\> = Reserved.
+ * Where N is the number of bits required to specify the page size.
+ Note: software must ensure that each pointer in the first level table specifies a unique
+ physical address otherwise the effects are unpredictable.
+ For a two level table, if an entry is invalid:
+ * If the type field specifies a valid table type other than interrupt collections, the
+ ITS
+ discards any writes to the interrupt translation page.
+ * If the type field specifies the interrupt collections table and GITS_TYPER.HCC is
+ zero,
+ the ITS discards any writes to the interrupt translation page. */
+ uint64_t cacheability : 3; /**< [ 61: 59](RO) Cacheability attribute:
+ 0x0 = Noncacheable, nonbufferable.
+ 0x1 = Noncacheable.
+ 0x2 = Read-allocate, writethrough.
+ 0x3 = Read-allocate, writeback.
+ 0x4 = Write-allocate, writethrough.
+ 0x5 = Write-allocate, writeback.
+ 0x6 = Read-allocate, write-allocate, writethrough.
+ 0x7 = Read-allocate, write-allocate, writeback.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t tbl_type : 3; /**< [ 58: 56](RO) This field is read-only and specifies the type of entity that requires entries in the
+ associated table. The field may have the following values:
+ 0x0 = Unimplemented. This register does not correspond to an ITS table and requires no
+ memory.
+ 0x1 = Devices. This register corresponds to a table that scales according to the number of
+ devices serviced by the ITS and requires
+ (Entry-size * number-of-devices) bytes of memory.
+ 0x2 = Virtual processors. This register corresponds to a table that scales according to
+ the number of virtual processors in the system and
+ requires (Entry-size * number-of-processors) bytes ofmemory.
+ 0x3 = Physical processors.
+ 0x4 = Interrupt collections.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ Software must always provision memory for GITS_BASER() registers where this field
+ indicate "devices","interrupt collections" or "physical processors". */
+ uint64_t reserved_12_55 : 44;
+ uint64_t shareability : 2; /**< [ 11: 10](RO) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t pagesize : 2; /**< [ 9: 8](R/W) Page size:
+ 0x0 = 4 KB pages.
+ 0x1 = 16 KB pages (not supported, reserved).
+ 0x2 = 64 KB pages.
+ 0x3 = Reserved. Treated as 64 KB pages. */
+ uint64_t size : 8; /**< [ 7: 0](R/W) Size. The number of pages of memory allocated to the table, minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 8; /**< [ 7: 0](R/W) Size. The number of pages of memory allocated to the table, minus one. */
+ uint64_t pagesize : 2; /**< [ 9: 8](R/W) Page size:
+ 0x0 = 4 KB pages.
+ 0x1 = 16 KB pages (not supported, reserved).
+ 0x2 = 64 KB pages.
+ 0x3 = Reserved. Treated as 64 KB pages. */
+ uint64_t shareability : 2; /**< [ 11: 10](RO) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t reserved_12_55 : 44;
+ uint64_t tbl_type : 3; /**< [ 58: 56](RO) This field is read-only and specifies the type of entity that requires entries in the
+ associated table. The field may have the following values:
+ 0x0 = Unimplemented. This register does not correspond to an ITS table and requires no
+ memory.
+ 0x1 = Devices. This register corresponds to a table that scales according to the number of
+ devices serviced by the ITS and requires
+ (Entry-size * number-of-devices) bytes of memory.
+ 0x2 = Virtual processors. This register corresponds to a table that scales according to
+ the number of virtual processors in the system and
+ requires (Entry-size * number-of-processors) bytes ofmemory.
+ 0x3 = Physical processors.
+ 0x4 = Interrupt collections.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ Software must always provision memory for GITS_BASER() registers where this field
+ indicate "devices","interrupt collections" or "physical processors". */
+ uint64_t cacheability : 3; /**< [ 61: 59](RO) Cacheability attribute:
+ 0x0 = Noncacheable, nonbufferable.
+ 0x1 = Noncacheable.
+ 0x2 = Read-allocate, writethrough.
+ 0x3 = Read-allocate, writeback.
+ 0x4 = Write-allocate, writethrough.
+ 0x5 = Write-allocate, writeback.
+ 0x6 = Read-allocate, write-allocate, writethrough.
+ 0x7 = Read-allocate, write-allocate, writeback.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t indirect : 1; /**< [ 62: 62](RO) Indirect.This field indicates whether an implemented register specifies a single, flat
+ table or a two-level table where the first level
+ contains a list of descriptors. Note: this field is RAZ/WI for implementations that only
+ support flat tables.
+ 0 = Single level. [SIZE] indicates a number of pages used by the ITS to store data
+ associated with each table entry.
+ 1 = Two level. [SIZE] indicates a number of pages which contain an array of 64-bit
+ descriptors to pages that are used
+ to store the data associated with each table entry. Each 64-bit descriptor has the
+ following format:
+ * Bits\<63\> = Valid.
+ * Bits\<62:48\> = Reserved.
+ * Bits\<47:N\> = Physical address.
+ * Bits\<N-1:0\> = Reserved.
+ * Where N is the number of bits required to specify the page size.
+ Note: software must ensure that each pointer in the first level table specifies a unique
+ physical address otherwise the effects are unpredictable.
+ For a two level table, if an entry is invalid:
+ * If the type field specifies a valid table type other than interrupt collections, the
+ ITS
+ discards any writes to the interrupt translation page.
+ * If the type field specifies the interrupt collections table and GITS_TYPER.HCC is
+ zero,
+ the ITS discards any writes to the interrupt translation page. */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid:
+ 0 = No memory has been allocated to the table and if the type field is nonzero, the ITS
+ discards any writes to the interrupt translation page.
+ 1 = Memory has been allocated to the table by software. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gits_baserx_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid:
+ 0 = No memory has been allocated to the table and if the type field is nonzero, the ITS
+ discards any writes to the interrupt translation page.
+ 1 = Memory has been allocated to the table by software. */
+ uint64_t indirect : 1; /**< [ 62: 62](RO) Indirect.This field indicates whether an implemented register specifies a single, flat
+ table or a two-level table where the first level
+ contains a list of descriptors. Note: this field is RAZ/WI for implementations that only
+ support flat tables.
+ 0 = Single level. [SIZE] indicates a number of pages used by the ITS to store data
+ associated with each table entry.
+ 1 = Two level. [SIZE] indicates a number of pages which contain an array of 64-bit
+ descriptors to pages that are used
+ to store the data associated with each table entry. Each 64-bit descriptor has the
+ following format:
+ * Bits\<63\> = Valid.
+ * Bits\<62:48\> = Reserved.
+ * Bits\<47:N\> = Physical address.
+ * Bits\<N-1:0\> = Reserved.
+ * Where N is the number of bits required to specify the page size.
+ Note: software must ensure that each pointer in the first level table specifies a unique
+ physical address otherwise the effects are unpredictable.
+ For a two level table, if an entry is invalid:
+ * If the type field specifies a valid table type other than interrupt collections, the
+ ITS
+ discards any writes to the interrupt translation page.
+ * If the type field specifies the interrupt collections table and GITS_TYPER.HCC is
+ zero,
+ the ITS discards any writes to the interrupt translation page. */
+ uint64_t cacheability : 3; /**< [ 61: 59](RO) Cacheability attribute:
+ 0x0 = Noncacheable, nonbufferable.
+ 0x1 = Noncacheable.
+ 0x2 = Read-allocate, writethrough.
+ 0x3 = Read-allocate, writeback.
+ 0x4 = Write-allocate, writethrough.
+ 0x5 = Write-allocate, writeback.
+ 0x6 = Read-allocate, write-allocate, writethrough.
+ 0x7 = Read-allocate, write-allocate, writeback.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t tbl_type : 3; /**< [ 58: 56](RO) This field is read-only and specifies the type of entity that requires entries in the
+ associated table. The field may have the following values:
+ 0x0 = Unimplemented. This register does not correspond to an ITS table and requires no
+ memory.
+ 0x1 = Devices. This register corresponds to a table that scales according to the number of
+ devices serviced by the ITS and requires
+ (Entry-size * number-of-devices) bytes of memory.
+ 0x2 = Virtual processors. This register corresponds to a table that scales according to
+ the number of virtual processors in the system and
+ requires (Entry-size * number-of-processors) bytes ofmemory.
+ 0x3 = Physical processors.
+ 0x4 = Interrupt collections.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ Software must always provision memory for GITS_BASER() registers where this field
+ indicate "devices","interrupt collections" or "physical processors". */
+ uint64_t entry_size : 8; /**< [ 55: 48](RO) This field is read-only and specifies the number of bytes per entry, minus one. */
+ uint64_t arsvd : 6; /**< [ 47: 42](R/W) Reserved; must be zero. This field will be ignored if not zero. */
+ uint64_t physical_address : 30; /**< [ 41: 12](R/W) Physical address. This field provides bits [41:12] of the base physical address of the
+ table.
+ Bits [11:0] of the base physical address are zero. The address must be aligned to the size
+ specified in the page size field. Otherwise the effect is CONSTRAINED UNPREDICTABLE, and
+ can
+ be one of the following:
+ * Bits X:12 (where X is derived from the page size) are treated as zero.
+ * The value of bits X:12 are used when calculating the address of a table access.
+
+ In CNXXXX where the address must be in DRAM this contains fewer than 48 bits of
+ physical address bits. */
+ uint64_t shareability : 2; /**< [ 11: 10](RO) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t pagesize : 2; /**< [ 9: 8](R/W) Page size:
+ 0x0 = 4 KB pages.
+ 0x1 = 16 KB pages (not supported, reserved).
+ 0x2 = 64 KB pages.
+ 0x3 = Reserved. Treated as 64 KB pages. */
+ uint64_t size : 8; /**< [ 7: 0](R/W) Size. The number of pages of memory allocated to the table, minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 8; /**< [ 7: 0](R/W) Size. The number of pages of memory allocated to the table, minus one. */
+ uint64_t pagesize : 2; /**< [ 9: 8](R/W) Page size:
+ 0x0 = 4 KB pages.
+ 0x1 = 16 KB pages (not supported, reserved).
+ 0x2 = 64 KB pages.
+ 0x3 = Reserved. Treated as 64 KB pages. */
+ uint64_t shareability : 2; /**< [ 11: 10](RO) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t physical_address : 30; /**< [ 41: 12](R/W) Physical address. This field provides bits [41:12] of the base physical address of the
+ table.
+ Bits [11:0] of the base physical address are zero. The address must be aligned to the size
+ specified in the page size field. Otherwise the effect is CONSTRAINED UNPREDICTABLE, and
+ can
+ be one of the following:
+ * Bits X:12 (where X is derived from the page size) are treated as zero.
+ * The value of bits X:12 are used when calculating the address of a table access.
+
+ In CNXXXX where the address must be in DRAM this contains fewer than 48 bits of
+ physical address bits. */
+ uint64_t arsvd : 6; /**< [ 47: 42](R/W) Reserved; must be zero. This field will be ignored if not zero. */
+ uint64_t entry_size : 8; /**< [ 55: 48](RO) This field is read-only and specifies the number of bytes per entry, minus one. */
+ uint64_t tbl_type : 3; /**< [ 58: 56](RO) This field is read-only and specifies the type of entity that requires entries in the
+ associated table. The field may have the following values:
+ 0x0 = Unimplemented. This register does not correspond to an ITS table and requires no
+ memory.
+ 0x1 = Devices. This register corresponds to a table that scales according to the number of
+ devices serviced by the ITS and requires
+ (Entry-size * number-of-devices) bytes of memory.
+ 0x2 = Virtual processors. This register corresponds to a table that scales according to
+ the number of virtual processors in the system and
+ requires (Entry-size * number-of-processors) bytes ofmemory.
+ 0x3 = Physical processors.
+ 0x4 = Interrupt collections.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ Software must always provision memory for GITS_BASER() registers where this field
+ indicate "devices","interrupt collections" or "physical processors". */
+ uint64_t cacheability : 3; /**< [ 61: 59](RO) Cacheability attribute:
+ 0x0 = Noncacheable, nonbufferable.
+ 0x1 = Noncacheable.
+ 0x2 = Read-allocate, writethrough.
+ 0x3 = Read-allocate, writeback.
+ 0x4 = Write-allocate, writethrough.
+ 0x5 = Write-allocate, writeback.
+ 0x6 = Read-allocate, write-allocate, writethrough.
+ 0x7 = Read-allocate, write-allocate, writeback.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t indirect : 1; /**< [ 62: 62](RO) Indirect.This field indicates whether an implemented register specifies a single, flat
+ table or a two-level table where the first level
+ contains a list of descriptors. Note: this field is RAZ/WI for implementations that only
+ support flat tables.
+ 0 = Single level. [SIZE] indicates a number of pages used by the ITS to store data
+ associated with each table entry.
+ 1 = Two level. [SIZE] indicates a number of pages which contain an array of 64-bit
+ descriptors to pages that are used
+ to store the data associated with each table entry. Each 64-bit descriptor has the
+ following format:
+ * Bits\<63\> = Valid.
+ * Bits\<62:48\> = Reserved.
+ * Bits\<47:N\> = Physical address.
+ * Bits\<N-1:0\> = Reserved.
+ * Where N is the number of bits required to specify the page size.
+ Note: software must ensure that each pointer in the first level table specifies a unique
+ physical address otherwise the effects are unpredictable.
+ For a two level table, if an entry is invalid:
+ * If the type field specifies a valid table type other than interrupt collections, the
+ ITS
+ discards any writes to the interrupt translation page.
+ * If the type field specifies the interrupt collections table and GITS_TYPER.HCC is
+ zero,
+ the ITS discards any writes to the interrupt translation page. */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid:
+ 0 = No memory has been allocated to the table and if the type field is nonzero, the ITS
+ discards any writes to the interrupt translation page.
+ 1 = Memory has been allocated to the table by software. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_gits_baserx_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid:
+ 0 = No memory has been allocated to the table and if the type field is nonzero, the ITS
+ discards any writes to the interrupt translation page.
+ 1 = Memory has been allocated to the table by software. */
+ uint64_t indirect : 1; /**< [ 62: 62](RO) Indirect.This field indicates whether an implemented register specifies a single, flat
+ table or a two-level table where the first level
+ contains a list of descriptors. Note: this field is RAZ/WI for implementations that only
+ support flat tables.
+ 0 = Single level. [SIZE] indicates a number of pages used by the ITS to store data
+ associated with each table entry.
+ 1 = Two level. [SIZE] indicates a number of pages which contain an array of 64-bit
+ descriptors to pages that are used
+ to store the data associated with each table entry. Each 64-bit descriptor has the
+ following format:
+ * Bits\<63\> = Valid.
+ * Bits\<62:52\> = Reserved.
+ * Bits\<51:N\> = Physical address.
+ * Bits\<N-1:0\> = Reserved.
+ * Where N is the number of bits required to specify the page size.
+ Note: software must ensure that each pointer in the first level table specifies a unique
+ physical address otherwise the effects are unpredictable.
+ For a two level table, if an entry is invalid:
+ * If the type field specifies a valid table type other than interrupt collections, the
+ ITS
+ discards any writes to the interrupt translation page.
+ * If the type field specifies the interrupt collections table and GITS_TYPER.HCC is
+ zero,
+ the ITS discards any writes to the interrupt translation page. */
+ uint64_t cacheability : 3; /**< [ 61: 59](R/W) Cacheability. The cacheability attributes of accesses to the table. If the Type field is
+ zero this field is RAZ/WI.
+ 0x0 = Device-nGnRnE.
+ 0x1 = Normal inner noncacheable.
+ 0x2 = Normal inner cacheable read-allocate, write-through.
+ 0x3 = Normal inner cacheable read-allocate, write-back.
+ 0x4 = Normal inner cacheable write-allocate,write-through.
+ 0x5 = Normal inner cacheable write-allocate,write-back.
+ 0x6 = Normal inner cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal inner cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t tbl_type : 3; /**< [ 58: 56](RO) This field is read-only and specifies the type of entity that requires entries in the
+ associated table. The field may have the following values:
+ 0x0 = Unimplemented. This register does not correspond to an ITS table and requires no
+ memory.
+ 0x1 = Devices. This register corresponds to a table that scales according to the number of
+ devices serviced by the ITS and requires
+ (Entry-size * number-of-devices) bytes of memory.
+ 0x2 = Virtual processors. This register corresponds to a table that scales according to
+ the number of virtual processors in the system and
+ requires (Entry-size * number-of-processors) bytes ofmemory.
+ 0x3 = Physical processors.
+ 0x4 = Interrupt collections.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ Software must always provision memory for GITS_BASER() registers where this field
+ indicate "devices","interrupt collections" or "physical processors". */
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t entry_size : 5; /**< [ 52: 48](RO) This field is read-only and specifies the number of bytes per entry, minus one. */
+ uint64_t physical_address : 36; /**< [ 47: 12](R/W) Physical address.
+ Software must configure this field to point to a valid DRAM base address.
+ When page size is 4 KB or 16 KB:
+ * This field provides bits \<47:12\> of the base physical address of the table.
+ * Bits \<51:48\> and \<11:0\> of the base physical address are zero.
+ * The address must be aligned to the size specified in the page size field.
+ Otherwise the effect is CONSTRAINED UNPREDICTABLE, and
+ can be one of the following:
+ * Bits X:12 (where X is derived from the page size) are treated as zero.
+ * The value of bits X:12 are used when calculating the address of a table access.
+
+ When page size is 64 KB:
+ * This field provides bits \<51:16\> of the base physical address of the table.
+ * Bits \<15:12\> of this field provide bits \<51:48\> of the base physical address.
+ * Bits \<15:0\> of the base physical address are zero.
+
+ In CNXXXX where the address must be in DRAM this contains fewer than 52 bits of
+ physical address bits. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t pagesize : 2; /**< [ 9: 8](R/W) Page size:
+ 0x0 = 4 KB pages.
+ 0x1 = 16 KB pages (not supported, reserved).
+ 0x2 = 64 KB pages.
+ 0x3 = Reserved. Treated as 64 KB pages. */
+ uint64_t size : 8; /**< [ 7: 0](R/W) Size. The number of pages of memory allocated to the table, minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 8; /**< [ 7: 0](R/W) Size. The number of pages of memory allocated to the table, minus one. */
+ uint64_t pagesize : 2; /**< [ 9: 8](R/W) Page size:
+ 0x0 = 4 KB pages.
+ 0x1 = 16 KB pages (not supported, reserved).
+ 0x2 = 64 KB pages.
+ 0x3 = Reserved. Treated as 64 KB pages. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t physical_address : 36; /**< [ 47: 12](R/W) Physical address.
+ Software must configure this field to point to a valid DRAM base address.
+ When page size is 4 KB or 16 KB:
+ * This field provides bits \<47:12\> of the base physical address of the table.
+ * Bits \<51:48\> and \<11:0\> of the base physical address are zero.
+ * The address must be aligned to the size specified in the page size field.
+ Otherwise the effect is CONSTRAINED UNPREDICTABLE, and
+ can be one of the following:
+ * Bits X:12 (where X is derived from the page size) are treated as zero.
+ * The value of bits X:12 are used when calculating the address of a table access.
+
+ When page size is 64 KB:
+ * This field provides bits \<51:16\> of the base physical address of the table.
+ * Bits \<15:12\> of this field provide bits \<51:48\> of the base physical address.
+ * Bits \<15:0\> of the base physical address are zero.
+
+ In CNXXXX where the address must be in DRAM this contains fewer than 52 bits of
+ physical address bits. */
+ uint64_t entry_size : 5; /**< [ 52: 48](RO) This field is read-only and specifies the number of bytes per entry, minus one. */
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t tbl_type : 3; /**< [ 58: 56](RO) This field is read-only and specifies the type of entity that requires entries in the
+ associated table. The field may have the following values:
+ 0x0 = Unimplemented. This register does not correspond to an ITS table and requires no
+ memory.
+ 0x1 = Devices. This register corresponds to a table that scales according to the number of
+ devices serviced by the ITS and requires
+ (Entry-size * number-of-devices) bytes of memory.
+ 0x2 = Virtual processors. This register corresponds to a table that scales according to
+ the number of virtual processors in the system and
+ requires (Entry-size * number-of-processors) bytes of memory.
+ 0x3 = Physical processors.
+ 0x4 = Interrupt collections.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ Software must always provision memory for GITS_BASER() registers where this field
+ indicate "devices","interrupt collections" or "physical processors". */
+ uint64_t cacheability : 3; /**< [ 61: 59](R/W) Cacheability. The cacheability attributes of accesses to the table. If the Type field is
+ zero this field is RAZ/WI.
+ 0x0 = Device-nGnRnE.
+ 0x1 = Normal inner noncacheable.
+ 0x2 = Normal inner cacheable read-allocate, write-through.
+ 0x3 = Normal inner cacheable read-allocate, write-back.
+ 0x4 = Normal inner cacheable write-allocate,write-through.
+ 0x5 = Normal inner cacheable write-allocate,write-back.
+ 0x6 = Normal inner cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal inner cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t indirect : 1; /**< [ 62: 62](RO) Indirect. This field indicates whether an implemented register specifies a single, flat
+ table or a two-level table where the first level
+ contains a list of descriptors. Note: this field is RAZ/WI for implementations that only
+ support flat tables.
+ 0 = Single level. [SIZE] indicates a number of pages used by the ITS to store data
+ associated with each table entry.
+ 1 = Two level. [SIZE] indicates a number of pages which contain an array of 64-bit
+ descriptors to pages that are used
+ to store the data associated with each table entry. Each 64-bit descriptor has the
+ following format:
+ * Bits\<63\> = Valid.
+ * Bits\<62:52\> = Reserved.
+ * Bits\<51:N\> = Physical address.
+ * Bits\<N-1:0\> = Reserved.
+ * Where N is the number of bits required to specify the page size.
+ Note: software must ensure that each pointer in the first level table specifies a unique
+ physical address otherwise the effects are unpredictable.
+ For a two level table, if an entry is invalid:
+ * If the type field specifies a valid table type other than interrupt collections, the
+ ITS
+ discards any writes to the interrupt translation page.
+ * If the type field specifies the interrupt collections table and GITS_TYPER.HCC is
+ zero,
+ the ITS discards any writes to the interrupt translation page. */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid:
+ 0 = No memory has been allocated to the table and if the type field is nonzero, the ITS
+ discards any writes to the interrupt translation page.
+ 1 = Memory has been allocated to the table by software. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gits_baserx_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid:
+ 0 = No memory has been allocated to the table and if the type field is nonzero, the ITS
+ discards any writes to the interrupt translation page.
+ 1 = Memory has been allocated to the table by software. */
+ uint64_t indirect : 1; /**< [ 62: 62](RO) Indirect. This field indicates whether an implemented register specifies a single, flat
+ table or a two-level table where the first level
+ contains a list of descriptors. Note: this field is RAZ/WI for implementations that only
+ support flat tables.
+ 0 = Single level. [SIZE] indicates a number of pages used by the ITS to store data
+ associated with each table entry.
+ 1 = Two level. [SIZE] indicates a number of pages which contain an array of 64-bit
+ descriptors to pages that are used
+ to store the data associated with each table entry. Each 64-bit descriptor has the
+ following format:
+ * Bits\<63\> = Valid.
+ * Bits\<62:48\> = Reserved.
+ * Bits\<47:N\> = Physical address.
+ * Bits\<N-1:0\> = Reserved.
+ * Where N is the number of bits required to specify the page size.
+ Note: software must ensure that each pointer in the first level table specifies a unique
+ physical address otherwise the effects are unpredictable.
+ For a two level table, if an entry is invalid:
+ * If the type field specifies a valid table type other than interrupt collections, the
+ ITS
+ discards any writes to the interrupt translation page.
+ * If the type field specifies the interrupt collections table and GITS_TYPER.HCC is
+ zero,
+ the ITS discards any writes to the interrupt translation page. */
+ uint64_t cacheability : 3; /**< [ 61: 59](R/W) Cacheability. The cacheability attributes of accesses to the table. If the Type field is
+ zero this field is RAZ/WI.
+ 0x0 = Device-nGnRnE.
+ 0x1 = Normal inner noncacheable.
+ 0x2 = Normal inner cacheable read-allocate, write-through.
+ 0x3 = Normal inner cacheable read-allocate, write-back.
+ 0x4 = Normal inner cacheable write-allocate,write-through.
+ 0x5 = Normal inner cacheable write-allocate,write-back.
+ 0x6 = Normal inner cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal inner cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t tbl_type : 3; /**< [ 58: 56](RO) This field is read-only and specifies the type of entity that requires entries in the
+ associated table. The field may have the following values:
+ 0x0 = Unimplemented. This register does not correspond to an ITS table and requires no
+ memory.
+ 0x1 = Devices. This register corresponds to a table that scales according to the number of
+ devices serviced by the ITS and requires
+ (Entry-size * number-of-devices) bytes of memory.
+ 0x2 = Virtual processors. This register corresponds to a table that scales according to
+ the number of virtual processors in the system and
+ requires (Entry-size * number-of-processors) bytes of memory.
+ 0x3 = Physical processors.
+ 0x4 = Interrupt collections.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ Software must always provision memory for GITS_BASER() registers where this field
+ indicate "devices","interrupt collections" or "physical processors". */
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t entry_size : 5; /**< [ 52: 48](RO) This field is read-only and specifies the number of bytes per entry, minus one. */
+ uint64_t arsvd : 6; /**< [ 47: 42](R/W) Reserved; must be zero. This field will be ignored if not zero. */
+ uint64_t physical_address : 30; /**< [ 41: 12](R/W) Physical address. This field provides bits [41:12] of the base physical address of the
+ table.
+ Bits [11:0] of the base physical address are zero. The address must be aligned to the size
+ specified in the page size field. Otherwise the effect is CONSTRAINED UNPREDICTABLE, and
+ can
+ be one of the following:
+ * Bits X:12 (where X is derived from the page size) are treated as zero.
+ * The value of bits X:12 are used when calculating the address of a table access.
+
+ In CNXXXX where the address must be in DRAM this contains fewer than 48 bits of
+ physical address bits. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t pagesize : 2; /**< [ 9: 8](R/W) Page size:
+ 0x0 = 4 KB pages.
+ 0x1 = 16 KB pages (not supported, reserved).
+ 0x2 = 64 KB pages.
+ 0x3 = Reserved. Treated as 64 KB pages. */
+ uint64_t size : 8; /**< [ 7: 0](R/W) Size. The number of pages of memory allocated to the table, minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 8; /**< [ 7: 0](R/W) Size. The number of pages of memory allocated to the table, minus one. */
+ uint64_t pagesize : 2; /**< [ 9: 8](R/W) Page size:
+ 0x0 = 4 KB pages.
+ 0x1 = 16 KB pages (not supported, reserved).
+ 0x2 = 64 KB pages.
+ 0x3 = Reserved. Treated as 64 KB pages. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t physical_address : 30; /**< [ 41: 12](R/W) Physical address. This field provides bits [41:12] of the base physical address of the
+ table.
+ Bits [11:0] of the base physical address are zero. The address must be aligned to the size
+ specified in the page size field. Otherwise the effect is CONSTRAINED UNPREDICTABLE, and
+ can
+ be one of the following:
+ * Bits X:12 (where X is derived from the page size) are treated as zero.
+ * The value of bits X:12 are used when calculating the address of a table access.
+
+ In CNXXXX where the address must be in DRAM this contains fewer than 48 bits of
+ physical address bits. */
+ uint64_t arsvd : 6; /**< [ 47: 42](R/W) Reserved; must be zero. This field will be ignored if not zero. */
+ uint64_t entry_size : 5; /**< [ 52: 48](RO) This field is read-only and specifies the number of bytes per entry, minus one. */
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t tbl_type : 3; /**< [ 58: 56](RO) This field is read-only and specifies the type of entity that requires entries in the
+ associated table. The field may have the following values:
+ 0x0 = Unimplemented. This register does not correspond to an ITS table and requires no
+ memory.
+ 0x1 = Devices. This register corresponds to a table that scales according to the number of
+ devices serviced by the ITS and requires
+ (Entry-size * number-of-devices) bytes of memory.
+ 0x2 = Virtual processors. This register corresponds to a table that scales according to
+ the number of virtual processors in the system and
+ requires (Entry-size * number-of-processors) bytes of memory.
+ 0x3 = Physical processors.
+ 0x4 = Interrupt collections.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ Software must always provision memory for GITS_BASER() registers where this field
+ indicate "devices","interrupt collections" or "physical processors". */
+ uint64_t cacheability : 3; /**< [ 61: 59](R/W) Cacheability. The cacheability attributes of accesses to the table. If the Type field is
+ zero this field is RAZ/WI.
+ 0x0 = Device-nGnRnE.
+ 0x1 = Normal inner noncacheable.
+ 0x2 = Normal inner cacheable read-allocate, write-through.
+ 0x3 = Normal inner cacheable read-allocate, write-back.
+ 0x4 = Normal inner cacheable write-allocate,write-through.
+ 0x5 = Normal inner cacheable write-allocate,write-back.
+ 0x6 = Normal inner cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal inner cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t indirect : 1; /**< [ 62: 62](RO) Indirect. This field indicates whether an implemented register specifies a single, flat
+ table or a two-level table where the first level
+ contains a list of descriptors. Note: this field is RAZ/WI for implementations that only
+ support flat tables.
+ 0 = Single level. [SIZE] indicates a number of pages used by the ITS to store data
+ associated with each table entry.
+ 1 = Two level. [SIZE] indicates a number of pages which contain an array of 64-bit
+ descriptors to pages that are used
+ to store the data associated with each table entry. Each 64-bit descriptor has the
+ following format:
+ * Bits\<63\> = Valid.
+ * Bits\<62:48\> = Reserved.
+ * Bits\<47:N\> = Physical address.
+ * Bits\<N-1:0\> = Reserved.
+ * Where N is the number of bits required to specify the page size.
+ Note: software must ensure that each pointer in the first level table specifies a unique
+ physical address otherwise the effects are unpredictable.
+ For a two level table, if an entry is invalid:
+ * If the type field specifies a valid table type other than interrupt collections, the
+ ITS
+ discards any writes to the interrupt translation page.
+ * If the type field specifies the interrupt collections table and GITS_TYPER.HCC is
+ zero,
+ the ITS discards any writes to the interrupt translation page. */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid:
+ 0 = No memory has been allocated to the table and if the type field is nonzero, the ITS
+ discards any writes to the interrupt translation page.
+ 1 = Memory has been allocated to the table by software. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_gits_baserx_cn81xx cn83xx; */
+ /* struct bdk_gits_baserx_cn81xx cn88xxp2; */
+};
+typedef union bdk_gits_baserx bdk_gits_baserx_t;
+
+/* Generated CSR address accessor: returns the physical address of GITS_BASER(a).
+ * On this model only index 0 is implemented; any other index is reported via
+ * __bdk_csr_fatal(). The '(a) & 0x0' masking is generator boilerplate (always 0
+ * for a single-instance register).
+ * NOTE(review): if __bdk_csr_fatal() can return, control falls off the end of
+ * this non-void function -- presumed noreturn, confirm against bdk-csr.h. */
+static inline uint64_t BDK_GITS_BASERX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_BASERX(unsigned long a)
+{
+ if (a==0)
+ return 0x801000020100ll + 8ll * ((a) & 0x0);
+ __bdk_csr_fatal("GITS_BASERX", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the generic BDK_CSR_* access helpers:
+ * value type, bus type, printable name, PCI BAR, bus number and argument list. */
+#define typedef_BDK_GITS_BASERX(a) bdk_gits_baserx_t
+#define bustype_BDK_GITS_BASERX(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GITS_BASERX(a) "GITS_BASERX"
+#define device_bar_BDK_GITS_BASERX(a) 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_BASERX(a) (a)
+#define arguments_BDK_GITS_BASERX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gits_baser#_rowi
+ *
+ * GIC ITS Table Registers
+ * This set of 64-bit registers specify the base address and size of a number of implementation
+ * defined tables required by the ITS:
+ * An implementation can provide up to eight such registers.
+ * Where a register is not implemented, it is RES0.
+ *
+ * This _ROWI layout covers the unimplemented rows (indices 1..7): every bit of
+ * the 64-bit word is reserved, per the RES0 statement above.
+ */
+union bdk_gits_baserx_rowi
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gits_baserx_rowi_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64; /* Entire word reserved (RES0). */
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64; /* Entire word reserved (RES0). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_baserx_rowi_s cn; */
+};
+typedef union bdk_gits_baserx_rowi bdk_gits_baserx_rowi_t;
+/* Generated CSR address accessor for the reserved GITS_BASER() rows.
+ * Valid indices are 1..7; index 0 is the implemented row handled by
+ * BDK_GITS_BASERX(). The returned address is the same 8-byte-strided
+ * GITS_BASER array base, indexed by 'a'. Out-of-range indices are
+ * reported via __bdk_csr_fatal().
+ * NOTE(review): if __bdk_csr_fatal() can return, control falls off the end of
+ * this non-void function -- presumed noreturn, confirm against bdk-csr.h. */
+static inline uint64_t BDK_GITS_BASERX_ROWI(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_BASERX_ROWI(unsigned long a)
+{
+ if ((a>=1)&&(a<=7))
+ return 0x801000020100ll + 8ll * ((a) & 0x7);
+ __bdk_csr_fatal("GITS_BASERX_ROWI", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the generic BDK_CSR_* access helpers. */
+#define typedef_BDK_GITS_BASERX_ROWI(a) bdk_gits_baserx_rowi_t
+#define bustype_BDK_GITS_BASERX_ROWI(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_GITS_BASERX_ROWI(a) "GITS_BASERX_ROWI"
+#define device_bar_BDK_GITS_BASERX_ROWI(a) 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_BASERX_ROWI(a) (a)
+#define arguments_BDK_GITS_BASERX_ROWI(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) gits_cbaser
+ *
+ * GIC ITS Command Queue Base Register
+ * This register holds the physical memory address of the ITS command queue.
+ * Note: when GITS_CBASER is successfully written, the value of GITS_CREADR is set to zero. See
+ * GIC
+ * spec for details on the ITS initialization sequence. Bits [63:32] and bits [31:0] may be
+ * accessed
+ * independently. When GITS_CTLR[ENABLED] is one or GITS_CTLR[QUIESCENT] is zero, this register is
+ * read-only.
+ */
+union bdk_gits_cbaser
+{
+ uint64_t u;
+ struct bdk_gits_cbaser_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid.
+ When set to one, indicates that memory has been allocated by software for the command
+ queue
+ When set to zero, no memory has been allocated to the command queue and the ITS discards
+ any writes to the interrupt translation page. */
+ uint64_t reserved_62 : 1;
+ uint64_t cacheability : 3; /**< [ 61: 59](RO) Cacheability attribute:
+ 0x0 = Noncacheable, nonbufferable.
+ 0x1 = Noncacheable.
+ 0x2 = Read-allocate, writethrough.
+ 0x3 = Read-allocate, writeback.
+ 0x4 = Write-allocate, writethrough.
+ 0x5 = Write-allocate, writeback.
+ 0x6 = Read-allocate, write-allocate, writethrough.
+ 0x7 = Read-allocate, write-allocate, writeback.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_56_58 : 3;
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_12_52 : 41;
+ uint64_t shareability : 2; /**< [ 11: 10](RO) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t reserved_8_9 : 2;
+ uint64_t size : 8; /**< [ 7: 0](R/W) The number of 4 KB pages of physical memory provided for the command queue, minus one.
+ The command queue is a circular buffer and wraps at physical address \<47:0\> + (4096 *
+ (SIZE+1)). */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 8; /**< [ 7: 0](R/W) The number of 4 KB pages of physical memory provided for the command queue, minus one.
+ The command queue is a circular buffer and wraps at physical address \<47:0\> + (4096 *
+ (SIZE+1)). */
+ uint64_t reserved_8_9 : 2;
+ uint64_t shareability : 2; /**< [ 11: 10](RO) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t reserved_12_52 : 41;
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_56_58 : 3;
+ uint64_t cacheability : 3; /**< [ 61: 59](RO) Cacheability attribute:
+ 0x0 = Noncacheable, nonbufferable.
+ 0x1 = Noncacheable.
+ 0x2 = Read-allocate, writethrough.
+ 0x3 = Read-allocate, writeback.
+ 0x4 = Write-allocate, writethrough.
+ 0x5 = Write-allocate, writeback.
+ 0x6 = Read-allocate, write-allocate, writethrough.
+ 0x7 = Read-allocate, write-allocate, writeback.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_62 : 1;
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid.
+ When set to one, indicates that memory has been allocated by software for the command
+ queue
+ When set to zero, no memory has been allocated to the command queue and the ITS discards
+ any writes to the interrupt translation page. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gits_cbaser_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid.
+ When set to one, indicates that memory has been allocated by software for the command
+ queue
+ When set to zero, no memory has been allocated to the command queue and the ITS discards
+ any writes to the interrupt translation page. */
+ uint64_t reserved_62 : 1;
+ uint64_t cacheability : 3; /**< [ 61: 59](RO) Cacheability attribute:
+ 0x0 = Noncacheable, nonbufferable.
+ 0x1 = Noncacheable.
+ 0x2 = Read-allocate, writethrough.
+ 0x3 = Read-allocate, writeback.
+ 0x4 = Write-allocate, writethrough.
+ 0x5 = Write-allocate, writeback.
+ 0x6 = Read-allocate, write-allocate, writethrough.
+ 0x7 = Read-allocate, write-allocate, writeback.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_48_58 : 11;
+ uint64_t arsvd : 6; /**< [ 47: 42](R/W) Reserved; must be zero. This field will be ignored if not zero. */
+ uint64_t physical_address : 30; /**< [ 41: 12](R/W) Physical address. Provides bits \<47:12\> of the physical address of the memory
+ containing the command queue. Bits \<11:0\> of the base address of the queue are
+ zero. */
+ uint64_t shareability : 2; /**< [ 11: 10](RO) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t reserved_8_9 : 2;
+ uint64_t size : 8; /**< [ 7: 0](R/W) The number of 4 KB pages of physical memory provided for the command queue, minus one.
+ The command queue is a circular buffer and wraps at physical address \<47:0\> + (4096 *
+ (SIZE+1)). */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 8; /**< [ 7: 0](R/W) The number of 4 KB pages of physical memory provided for the command queue, minus one.
+ The command queue is a circular buffer and wraps at physical address \<47:0\> + (4096 *
+ (SIZE+1)). */
+ uint64_t reserved_8_9 : 2;
+ uint64_t shareability : 2; /**< [ 11: 10](RO) Shareability attribute:
+ 0x0 = Accesses are nonshareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ Ignored in CNXXXX. */
+ uint64_t physical_address : 30; /**< [ 41: 12](R/W) Physical address. Provides bits \<47:12\> of the physical address of the memory
+ containing the command queue. Bits \<11:0\> of the base address of the queue are
+ zero. */
+ uint64_t arsvd : 6; /**< [ 47: 42](R/W) Reserved; must be zero. This field will be ignored if not zero. */
+ uint64_t reserved_48_58 : 11;
+ uint64_t cacheability : 3; /**< [ 61: 59](RO) Cacheability attribute:
+ 0x0 = Noncacheable, nonbufferable.
+ 0x1 = Noncacheable.
+ 0x2 = Read-allocate, writethrough.
+ 0x3 = Read-allocate, writeback.
+ 0x4 = Write-allocate, writethrough.
+ 0x5 = Write-allocate, writeback.
+ 0x6 = Read-allocate, write-allocate, writethrough.
+ 0x7 = Read-allocate, write-allocate, writeback.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_62 : 1;
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid.
+ When set to one, indicates that memory has been allocated by software for the command
+ queue
+ When set to zero, no memory has been allocated to the command queue and the ITS discards
+ any writes to the interrupt translation page. */
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_gits_cbaser_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid.
+ When set to one, indicates that memory has been allocated by software for the command
+ queue
+ When set to zero, no memory has been allocated to the command queue and the ITS discards
+ any writes to the interrupt translation page. */
+ uint64_t reserved_62 : 1;
+ uint64_t cacheability : 3; /**< [ 61: 59](R/W) Cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Device-nGnRnE.
+ 0x1 = Normal inner noncacheable.
+ 0x2 = Normal inner cacheable read-allocate, write-through.
+ 0x3 = Normal inner cacheable read-allocate, write-back.
+ 0x4 = Normal inner cacheable write-allocate, write-through.
+ 0x5 = Normal inner cacheable write-allocate, write-back.
+ 0x6 = Normal inner cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal inner cacheable read-allocate, write-allocate, write-back.
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_56_58 : 3;
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_52 : 1;
+ uint64_t physical_address : 40; /**< [ 51: 12](R/W) Physical address. Provides bits \<51:12\> of the physical address of the memory
+ containing the command queue. Bits \<11:0\> of the base address of the queue are
+ zero.
+ Software must configure this field to point to a valid DRAM base address.
+ If bits \<15:12\> are not all zeros, behavior is CONSTRAINED UNPREDICTABLE
+ and the result of the calculation of an address for a command queue read
+ can be corrupted. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attribute. The shareability attributes of accesses to the table.
+ 0x0 = Accesses are non-shareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_8_9 : 2;
+ uint64_t size : 8; /**< [ 7: 0](R/W) The number of 4 KB pages of physical memory provided for the command queue, minus one.
+ The command queue is a circular buffer and wraps at physical address \<47:0\> + (4096 *
+ (SIZE+1)). */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 8; /**< [ 7: 0](R/W) The number of 4 KB pages of physical memory provided for the command queue, minus one.
+ The command queue is a circular buffer and wraps at physical address \<47:0\> + (4096 *
+ (SIZE+1)). */
+ uint64_t reserved_8_9 : 2;
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attribute. The shareability attributes of accesses to the table.
+ 0x0 = Accesses are non-shareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t physical_address : 40; /**< [ 51: 12](R/W) Physical address. Provides bits \<51:12\> of the physical address of the memory
+ containing the command queue. Bits \<11:0\> of the base address of the queue are
+ zero.
+ Software must configure this field to point to a valid DRAM base address.
+ If bits \<15:12\> are not all zeros, behavior is CONSTRAINED UNPREDICTABLE
+ and the result of the calculation of an address for a command queue read
+ can be corrupted. */
+ uint64_t reserved_52 : 1;
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_56_58 : 3;
+ uint64_t cacheability : 3; /**< [ 61: 59](R/W) Cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Device-nGnRnE.
+ 0x1 = Normal inner noncacheable.
+ 0x2 = Normal inner cacheable read-allocate, write-through.
+ 0x3 = Normal inner cacheable read-allocate, write-back.
+ 0x4 = Normal inner cacheable write-allocate, write-through.
+ 0x5 = Normal inner cacheable write-allocate, write-back.
+ 0x6 = Normal inner cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal inner cacheable read-allocate, write-allocate, write-back.
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_62 : 1;
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid.
+ When set to one, indicates that memory has been allocated by software for the command
+ queue
+ When set to zero, no memory has been allocated to the command queue and the ITS discards
+ any writes to the interrupt translation page. */
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gits_cbaser_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid.
+ When set to one, indicates that memory has been allocated by software for the command
+ queue
+ When set to zero, no memory has been allocated to the command queue and the ITS discards
+ any writes to the interrupt translation page. */
+ uint64_t reserved_62 : 1;
+ uint64_t cacheability : 3; /**< [ 61: 59](R/W) Cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Device-nGnRnE.
+ 0x1 = Normal inner noncacheable.
+ 0x2 = Normal inner cacheable read-allocate, write-through.
+ 0x3 = Normal inner cacheable read-allocate, write-back.
+ 0x4 = Normal inner cacheable write-allocate, write-through.
+ 0x5 = Normal inner cacheable write-allocate, write-back.
+ 0x6 = Normal inner cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal inner cacheable read-allocate, write-allocate, write-back.
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_56_58 : 3;
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_48_52 : 5;
+ uint64_t arsvd : 6; /**< [ 47: 42](R/W) Reserved; must be zero. This field will be ignored if not zero. */
+ uint64_t physical_address : 30; /**< [ 41: 12](R/W) Physical address. Provides bits \<47:12\> of the physical address of the memory
+ containing the command queue. Bits \<11:0\> of the base address of the queue are
+ zero. */
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attribute. The shareability attributes of accesses to the table.
+ 0x0 = Accesses are non-shareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_8_9 : 2;
+ uint64_t size : 8; /**< [ 7: 0](R/W) The number of 4 KB pages of physical memory provided for the command queue, minus one.
+ The command queue is a circular buffer and wraps at physical address \<47:0\> + (4096 *
+ (SIZE+1)). */
+#else /* Word 0 - Little Endian */
+ uint64_t size : 8; /**< [ 7: 0](R/W) The number of 4 KB pages of physical memory provided for the command queue, minus one.
+ The command queue is a circular buffer and wraps at physical address \<47:0\> + (4096 *
+ (SIZE+1)). */
+ uint64_t reserved_8_9 : 2;
+ uint64_t shareability : 2; /**< [ 11: 10](R/W) Shareability attribute. The shareability attributes of accesses to the table.
+ 0x0 = Accesses are non-shareable.
+ 0x1 = Accesses are inner-shareable.
+ 0x2 = Accesses are outer-shareable.
+ 0x3 = Reserved. Treated as 0x0.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t physical_address : 30; /**< [ 41: 12](R/W) Physical address. Provides bits \<47:12\> of the physical address of the memory
+ containing the command queue. Bits \<11:0\> of the base address of the queue are
+ zero. */
+ uint64_t arsvd : 6; /**< [ 47: 42](R/W) Reserved; must be zero. This field will be ignored if not zero. */
+ uint64_t reserved_48_52 : 5;
+ uint64_t outer_cacheability : 3; /**< [ 55: 53](R/W) Outer cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Memory type defined in bits[61:59]; for normal memory outer cacheability is the same
+ as the inner cacheable.
+ 0x1 = Normal outer noncacheable.
+ 0x2 = Normal outer cacheable read-allocate, write-through.
+ 0x3 = Normal outer cacheable read-allocate, write-back.
+ 0x4 = Normal outer cacheable write-allocate, write-through.
+ 0x5 = Normal outer cacheable write-allocate, write-back.
+ 0x6 = Normal outer cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal outer cacheable read-allocate, write-allocate, write-back.
+
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_56_58 : 3;
+ uint64_t cacheability : 3; /**< [ 61: 59](R/W) Cacheability. The cacheability attributes of accesses to the table.
+ 0x0 = Device-nGnRnE.
+ 0x1 = Normal inner noncacheable.
+ 0x2 = Normal inner cacheable read-allocate, write-through.
+ 0x3 = Normal inner cacheable read-allocate, write-back.
+ 0x4 = Normal inner cacheable write-allocate, write-through.
+ 0x5 = Normal inner cacheable write-allocate, write-back.
+ 0x6 = Normal inner cacheable read-allocate, write-allocate, write-through.
+ 0x7 = Normal inner cacheable read-allocate, write-allocate, write-back.
+ In CNXXXX not implemented, ignored. */
+ uint64_t reserved_62 : 1;
+ uint64_t valid : 1; /**< [ 63: 63](R/W) Valid.
+ When set to one, indicates that memory has been allocated by software for the command
+ queue
+ When set to zero, no memory has been allocated to the command queue and the ITS discards
+ any writes to the interrupt translation page. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_gits_cbaser_cn81xx cn83xx; */
+ /* struct bdk_gits_cbaser_cn81xx cn88xxp2; */
+};
+typedef union bdk_gits_cbaser bdk_gits_cbaser_t;
+
+#define BDK_GITS_CBASER BDK_GITS_CBASER_FUNC()
+/* GITS_CBASER sits at a single fixed physical address, so the accessor takes no arguments. */
+static inline uint64_t BDK_GITS_CBASER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_CBASER_FUNC(void)
+{
+ return 0x801000020080ll;
+}
+
+#define typedef_BDK_GITS_CBASER bdk_gits_cbaser_t
+#define bustype_BDK_GITS_CBASER BDK_CSR_TYPE_NCB
+#define basename_BDK_GITS_CBASER "GITS_CBASER"
+#define device_bar_BDK_GITS_CBASER 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_CBASER 0
+#define arguments_BDK_GITS_CBASER -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_cidr0
+ *
+ * GIC ITS Component Identification Register 0
+ */
+union bdk_gits_cidr0
+{
+ uint32_t u;
+ struct bdk_gits_cidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_cidr0_s cn; */
+};
+typedef union bdk_gits_cidr0 bdk_gits_cidr0_t;
+
+#define BDK_GITS_CIDR0 BDK_GITS_CIDR0_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_CIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_CIDR0_FUNC(void)
+{
+ return 0x80100002fff0ll;
+}
+
+#define typedef_BDK_GITS_CIDR0 bdk_gits_cidr0_t
+#define bustype_BDK_GITS_CIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_CIDR0 "GITS_CIDR0"
+#define device_bar_BDK_GITS_CIDR0 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_CIDR0 0
+#define arguments_BDK_GITS_CIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_cidr1
+ *
+ * GIC ITS Component Identification Register 1
+ */
+union bdk_gits_cidr1
+{
+ uint32_t u;
+ struct bdk_gits_cidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_cidr1_s cn; */
+};
+typedef union bdk_gits_cidr1 bdk_gits_cidr1_t;
+
+#define BDK_GITS_CIDR1 BDK_GITS_CIDR1_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_CIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_CIDR1_FUNC(void)
+{
+ return 0x80100002fff4ll;
+}
+
+#define typedef_BDK_GITS_CIDR1 bdk_gits_cidr1_t
+#define bustype_BDK_GITS_CIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_CIDR1 "GITS_CIDR1"
+#define device_bar_BDK_GITS_CIDR1 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_CIDR1 0
+#define arguments_BDK_GITS_CIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_cidr2
+ *
+ * GIC ITS Component Identification Register 2
+ */
+union bdk_gits_cidr2
+{
+ uint32_t u;
+ struct bdk_gits_cidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_cidr2_s cn; */
+};
+typedef union bdk_gits_cidr2 bdk_gits_cidr2_t;
+
+#define BDK_GITS_CIDR2 BDK_GITS_CIDR2_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_CIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_CIDR2_FUNC(void)
+{
+ return 0x80100002fff8ll;
+}
+
+#define typedef_BDK_GITS_CIDR2 bdk_gits_cidr2_t
+#define bustype_BDK_GITS_CIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_CIDR2 "GITS_CIDR2"
+#define device_bar_BDK_GITS_CIDR2 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_CIDR2 0
+#define arguments_BDK_GITS_CIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_cidr3
+ *
+ * GIC ITS Component Identification Register 3
+ */
+union bdk_gits_cidr3
+{
+ uint32_t u;
+ struct bdk_gits_cidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+#else /* Word 0 - Little Endian */
+ uint32_t preamble : 8; /**< [ 7: 0](RO) Preamble identification value. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_cidr3_s cn; */
+};
+typedef union bdk_gits_cidr3 bdk_gits_cidr3_t;
+
+#define BDK_GITS_CIDR3 BDK_GITS_CIDR3_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_CIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_CIDR3_FUNC(void)
+{
+ return 0x80100002fffcll;
+}
+
+#define typedef_BDK_GITS_CIDR3 bdk_gits_cidr3_t
+#define bustype_BDK_GITS_CIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_CIDR3 "GITS_CIDR3"
+#define device_bar_BDK_GITS_CIDR3 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_CIDR3 0
+#define arguments_BDK_GITS_CIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB) gits_creadr
+ *
+ * GIC ITS Command Queue Read Register
+ * Offset in the ITS command queue from GITS_CBASER where the next command will be read by the
+ * ITS.
+ *
+ * The command queue is considered to be empty when GITS_CWRITER is equal to GITS_CREADR.
+ *
+ * The command queue is considered to be full when GITS_CWRITER is equal to (GITS_CREADR minus
+ * 32), taking wrapping into account.
+ *
+ * Note: when GITS_CBASER is written, the value of GITS_CREADR is set to zero.
+ */
+union bdk_gits_creadr
+{
+ uint64_t u;
+ struct bdk_gits_creadr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t offset : 15; /**< [ 19: 5](RO/H) Offset. Provides bits \<19:5\> of the offset from GITS_CBASER where the ITS will
+ read the next command. Bits \<4:0\> of the offset are zero. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t stalled : 1; /**< [ 0: 0](RAZ) ITS commands are not stalled due to an error. */
+#else /* Word 0 - Little Endian */
+ uint64_t stalled : 1; /**< [ 0: 0](RAZ) ITS commands are not stalled due to an error. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t offset : 15; /**< [ 19: 5](RO/H) Offset. Provides bits \<19:5\> of the offset from GITS_CBASER where the ITS will
+ read the next command. Bits \<4:0\> of the offset are zero. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* CN8XXX variant: bit 0 is reserved here; the [STALLED] field only exists in the
+ common/CN9 layout above. */
+ struct bdk_gits_creadr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t offset : 15; /**< [ 19: 5](RO/H) Offset. Provides bits \<19:5\> of the offset from GITS_CBASER where the ITS will
+ read the next command. Bits \<4:0\> of the offset are zero. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t offset : 15; /**< [ 19: 5](RO/H) Offset. Provides bits \<19:5\> of the offset from GITS_CBASER where the ITS will
+ read the next command. Bits \<4:0\> of the offset are zero. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gits_creadr_s cn9; */
+};
+typedef union bdk_gits_creadr bdk_gits_creadr_t;
+
+#define BDK_GITS_CREADR BDK_GITS_CREADR_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_CREADR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_CREADR_FUNC(void)
+{
+ return 0x801000020090ll;
+}
+
+#define typedef_BDK_GITS_CREADR bdk_gits_creadr_t
+#define bustype_BDK_GITS_CREADR BDK_CSR_TYPE_NCB
+#define basename_BDK_GITS_CREADR "GITS_CREADR"
+#define device_bar_BDK_GITS_CREADR 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_CREADR 0
+#define arguments_BDK_GITS_CREADR -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_ctlr
+ *
+ * GIC ITS Control Register
+ * This register controls the behavior of the interrupt translation service.
+ */
+union bdk_gits_ctlr
+{
+ uint32_t u;
+ struct bdk_gits_ctlr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t quiescent : 1; /**< [ 31: 31](RO/H) This bit indicates whether the ITS has completed all operations following a write of
+ enable to zero.
+ 0 = The ITS is not quiescent.
+ 1 = The ITS is quiescent, has completed all operations required to make any mapping data
+ consistent with external memory and may be powered off. Note: in CCPI
+ implementations,
+ the ITS must also have forwarded any required operations to the redistributors and
+ received confirmation that they have reached the appropriate redistributor. */
+ uint32_t reserved_1_30 : 30;
+ uint32_t enabled : 1; /**< [ 0: 0](R/W) Enabled:
+ 0 = ITS is disabled. Writes to the interrupt translation space will be ignored and no
+ further command queue entries will be processed.
+ 1 = ITS is enabled. Writes to the interrupt translation space will result in interrupt
+ translations and the command queue will be processed.
+
+ If a write to this register changes enabled from one to zero, the ITS must ensure that any
+ caches containing mapping data must be made
+ consistent with external memory and [QUIESCENT] must read as one until this has been
+ completed. */
+#else /* Word 0 - Little Endian */
+ uint32_t enabled : 1; /**< [ 0: 0](R/W) Enabled:
+ 0 = ITS is disabled. Writes to the interrupt translation space will be ignored and no
+ further command queue entries will be processed.
+ 1 = ITS is enabled. Writes to the interrupt translation space will result in interrupt
+ translations and the command queue will be processed.
+
+ If a write to this register changes enabled from one to zero, the ITS must ensure that any
+ caches containing mapping data must be made
+ consistent with external memory and [QUIESCENT] must read as one until this has been
+ completed. */
+ uint32_t reserved_1_30 : 30;
+ uint32_t quiescent : 1; /**< [ 31: 31](RO/H) This bit indicates whether the ITS has completed all operations following a write of
+ enable to zero.
+ 0 = The ITS is not quiescent.
+ 1 = The ITS is quiescent, has completed all operations required to make any mapping data
+ consistent with external memory and may be powered off. Note: in CCPI
+ implementations,
+ the ITS must also have forwarded any required operations to the redistributors and
+ received confirmation that they have reached the appropriate redistributor. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_ctlr_s cn; */
+};
+typedef union bdk_gits_ctlr bdk_gits_ctlr_t;
+
+#define BDK_GITS_CTLR BDK_GITS_CTLR_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_CTLR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_CTLR_FUNC(void)
+{
+ return 0x801000020000ll;
+}
+
+#define typedef_BDK_GITS_CTLR bdk_gits_ctlr_t
+#define bustype_BDK_GITS_CTLR BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_CTLR "GITS_CTLR"
+#define device_bar_BDK_GITS_CTLR 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_CTLR 0
+#define arguments_BDK_GITS_CTLR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gits_cwriter
+ *
+ * GIC ITS Command Queue Write Register
+ * Offset in the ITS command queue from GITS_CBASER where the next command will be written by
+ * software.
+ *
+ * The command queue is considered to be empty when GITS_CWRITER is equal to GITS_CREADR.
+ *
+ * The command queue is considered to be full when GITS_CWRITER is equal to (GITS_CREADR minus
+ * 32), taking wrapping into account.
+ *
+ * Each command in the queue comprises 32 bytes. See section 5.13 for details of the commands
+ * supported and the format of each command.
+ */
+union bdk_gits_cwriter
+{
+ uint64_t u;
+ struct bdk_gits_cwriter_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t offset : 15; /**< [ 19: 5](R/W) Offset. Provides bits \<19:5\> of the offset from GITS_CBASER where software will
+ write the next command. Bits \<4:0\> of the offset are zero. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t retry : 1; /**< [ 0: 0](RAZ) Retry of processing of ITS commands not supported. */
+#else /* Word 0 - Little Endian */
+ uint64_t retry : 1; /**< [ 0: 0](RAZ) Retry of processing of ITS commands not supported. */
+ uint64_t reserved_1_4 : 4;
+ uint64_t offset : 15; /**< [ 19: 5](R/W) Offset. Provides bits \<19:5\> of the offset from GITS_CBASER where software will
+ write the next command. Bits \<4:0\> of the offset are zero. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* CN8XXX variant: bit 0 is reserved here; the [RETRY] field only exists in the
+ common/CN9 layout above. */
+ struct bdk_gits_cwriter_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t offset : 15; /**< [ 19: 5](R/W) Offset. Provides bits \<19:5\> of the offset from GITS_CBASER where software will
+ write the next command. Bits \<4:0\> of the offset are zero. */
+ uint64_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_4 : 5;
+ uint64_t offset : 15; /**< [ 19: 5](R/W) Offset. Provides bits \<19:5\> of the offset from GITS_CBASER where software will
+ write the next command. Bits \<4:0\> of the offset are zero. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gits_cwriter_s cn9; */
+};
+typedef union bdk_gits_cwriter bdk_gits_cwriter_t;
+
+#define BDK_GITS_CWRITER BDK_GITS_CWRITER_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_CWRITER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_CWRITER_FUNC(void)
+{
+ return 0x801000020088ll;
+}
+
+#define typedef_BDK_GITS_CWRITER bdk_gits_cwriter_t
+#define bustype_BDK_GITS_CWRITER BDK_CSR_TYPE_NCB
+#define basename_BDK_GITS_CWRITER "GITS_CWRITER"
+#define device_bar_BDK_GITS_CWRITER 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_CWRITER 0
+#define arguments_BDK_GITS_CWRITER -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_iidr
+ *
+ * GIC ITS Implementation Identification Register
+ * This 32-bit register is read-only and specifies the version and features supported by the ITS.
+ */
+union bdk_gits_iidr
+{
+ uint32_t u;
+ struct bdk_gits_iidr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t productid : 8; /**< [ 31: 24](RO) An implementation defined product number for the device.
+ In CNXXXX, enumerated by PCC_PROD_E. */
+ uint32_t reserved_20_23 : 4;
+ uint32_t variant : 4; /**< [ 19: 16](RO) Indicates the major revision or variant of the product.
+ On CNXXXX, this is the major revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+ uint32_t revision : 4; /**< [ 15: 12](RO) Indicates the minor revision of the product.
+ On CNXXXX, this is the minor revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+ uint32_t implementer : 12; /**< [ 11: 0](RO) Indicates the implementer:
+ 0x34C = Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t implementer : 12; /**< [ 11: 0](RO) Indicates the implementer:
+ 0x34C = Cavium. */
+ uint32_t revision : 4; /**< [ 15: 12](RO) Indicates the minor revision of the product.
+ On CNXXXX, this is the minor revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+ uint32_t variant : 4; /**< [ 19: 16](RO) Indicates the major revision or variant of the product.
+ On CNXXXX, this is the major revision. See FUS_FUSE_NUM_E::CHIP_ID(). */
+ uint32_t reserved_20_23 : 4;
+ uint32_t productid : 8; /**< [ 31: 24](RO) An implementation defined product number for the device.
+ In CNXXXX, enumerated by PCC_PROD_E. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_iidr_s cn; */
+};
+typedef union bdk_gits_iidr bdk_gits_iidr_t;
+
+#define BDK_GITS_IIDR BDK_GITS_IIDR_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_IIDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_IIDR_FUNC(void)
+{
+ return 0x801000020004ll;
+}
+
+#define typedef_BDK_GITS_IIDR bdk_gits_iidr_t
+#define bustype_BDK_GITS_IIDR BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_IIDR "GITS_IIDR"
+#define device_bar_BDK_GITS_IIDR 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_IIDR 0
+#define arguments_BDK_GITS_IIDR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gits_imp_cseir
+ *
+ * GIC ITS Implementation Defined Command SEI Register
+ * This register holds the SEI status of the ITS command error.
+ */
+union bdk_gits_imp_cseir
+{
+ uint64_t u;
+ struct bdk_gits_imp_cseir_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t creadr : 15; /**< [ 51: 37](RO/H) The read pointer of the first command with error. */
+ uint64_t reserved_26_36 : 11;
+ uint64_t cwriter_oor : 1; /**< [ 25: 25](RO/H) When set, it means command write pointer is out of range. */
+ uint64_t m : 1; /**< [ 24: 24](RO/H) When set, it means multiple command errors have happened. */
+ uint64_t reserved_17_23 : 7;
+ uint64_t v : 1; /**< [ 16: 16](R/W1C/H) When set, the command error is valid. For meaning/encoding of {7'b0, V, CMD,
+ ERROR}, please see ITS Command error encodings in the GIC specification. Writing
+ one to this field will clear the whole register. */
+ uint64_t cmd : 8; /**< [ 15: 8](RO/H) Type field of first ITS command that has the error. */
+ uint64_t error : 8; /**< [ 7: 0](RO/H) Error code for the first error. */
+#else /* Word 0 - Little Endian */
+ uint64_t error : 8; /**< [ 7: 0](RO/H) Error code for the first error. */
+ uint64_t cmd : 8; /**< [ 15: 8](RO/H) Type field of first ITS command that has the error. */
+ uint64_t v : 1; /**< [ 16: 16](R/W1C/H) When set, the command error is valid. For meaning/encoding of {7'b0, V, CMD,
+ ERROR}, please see ITS Command error encodings in the GIC specification. Writing
+ one to this field will clear the whole register. */
+ uint64_t reserved_17_23 : 7;
+ uint64_t m : 1; /**< [ 24: 24](RO/H) When set, it means multiple command errors have happened. */
+ uint64_t cwriter_oor : 1; /**< [ 25: 25](RO/H) When set, it means command write pointer is out of range. */
+ uint64_t reserved_26_36 : 11;
+ uint64_t creadr : 15; /**< [ 51: 37](RO/H) The read pointer of the first command with error. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_imp_cseir_s cn; */
+};
+typedef union bdk_gits_imp_cseir bdk_gits_imp_cseir_t;
+
+#define BDK_GITS_IMP_CSEIR BDK_GITS_IMP_CSEIR_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_IMP_CSEIR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_IMP_CSEIR_FUNC(void)
+{
+ return 0x801000020020ll;
+}
+
+#define typedef_BDK_GITS_IMP_CSEIR bdk_gits_imp_cseir_t
+#define bustype_BDK_GITS_IMP_CSEIR BDK_CSR_TYPE_NCB
+#define basename_BDK_GITS_IMP_CSEIR "GITS_IMP_CSEIR"
+#define device_bar_BDK_GITS_IMP_CSEIR 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_IMP_CSEIR 0
+#define arguments_BDK_GITS_IMP_CSEIR -1,-1,-1,-1
+
+/**
+ * Register (NCB) gits_imp_tseir
+ *
+ * GIC ITS Implementation Defined Translator SEI Register
+ * This register holds the SEI status of the ITS translator error.
+ */
+union bdk_gits_imp_tseir
+{
+ uint64_t u;
+ struct bdk_gits_imp_tseir_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t v : 1; /**< [ 63: 63](R/W1C/H) When set, the translator error is valid. Write one to this field will clear [V], [M],
+ [DEV_ID], [INT_ID], and [ERROR]. */
+ uint64_t m : 1; /**< [ 62: 62](RO/H) When set, it means multiple errors have happened. */
+ uint64_t reserved_56_61 : 6;
+ uint64_t dev_id : 24; /**< [ 55: 32](RO/H) Input device ID to the interrupt translator. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t int_id : 20; /**< [ 27: 8](RO/H) Input interrupt ID to the interrupt translator. */
+ uint64_t error : 8; /**< [ 7: 0](RO/H) Error code for the first error. Valid encoding is enumerated by GITS_CMD_ERR_E
+ and one of GITS_CMD_ERR_E::CSEI_UNMAPPED_DEVICE,
+ GITS_CMD_ERR_E::CSEI_DEVICE_OOR, GITS_CMD_ERR_E::CSEI_ID_OOR,
+ GITS_CMD_ERR_E::CSEI_UNMAPPED_INTERRUPT, or
+ GITS_CMD_ERR_E::CSEI_UNMAPPED_COLLECTION. */
+#else /* Word 0 - Little Endian */
+ uint64_t error : 8; /**< [ 7: 0](RO/H) Error code for the first error. Valid encoding is enumerated by GITS_CMD_ERR_E
+ and one of GITS_CMD_ERR_E::CSEI_UNMAPPED_DEVICE,
+ GITS_CMD_ERR_E::CSEI_DEVICE_OOR, GITS_CMD_ERR_E::CSEI_ID_OOR,
+ GITS_CMD_ERR_E::CSEI_UNMAPPED_INTERRUPT, or
+ GITS_CMD_ERR_E::CSEI_UNMAPPED_COLLECTION. */
+ uint64_t int_id : 20; /**< [ 27: 8](RO/H) Input interrupt ID to the interrupt translator. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t dev_id : 24; /**< [ 55: 32](RO/H) Input device ID to the interrupt translator. */
+ uint64_t reserved_56_61 : 6;
+ uint64_t m : 1; /**< [ 62: 62](RO/H) When set, it means multiple errors have happened. */
+ uint64_t v : 1; /**< [ 63: 63](R/W1C/H) When set, the translator error is valid. Write one to this field will clear [V], [M],
+ [DEV_ID], [INT_ID], and [ERROR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* CN8XXX variant: [DEV_ID] is 21 bits ([52:32]) rather than 24 bits ([55:32]). */
+ struct bdk_gits_imp_tseir_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t v : 1; /**< [ 63: 63](R/W1C/H) When set, the translator error is valid. Write one to this field will clear [V], [M],
+ [DEV_ID], [INT_ID], and [ERROR]. */
+ uint64_t m : 1; /**< [ 62: 62](RO/H) When set, it means multiple errors have happened. */
+ uint64_t reserved_53_61 : 9;
+ uint64_t dev_id : 21; /**< [ 52: 32](RO/H) Input device ID to the interrupt translator. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t int_id : 20; /**< [ 27: 8](RO/H) Input interrupt ID to the interrupt translator. */
+ uint64_t error : 8; /**< [ 7: 0](RO/H) Error code for the first error. Valid encoding is enumerated by GITS_CMD_ERR_E
+ and one of GITS_CMD_ERR_E::CSEI_UNMAPPED_DEVICE,
+ GITS_CMD_ERR_E::CSEI_DEVICE_OOR, GITS_CMD_ERR_E::CSEI_ID_OOR,
+ GITS_CMD_ERR_E::CSEI_UNMAPPED_INTERRUPT, or
+ GITS_CMD_ERR_E::CSEI_UNMAPPED_COLLECTION. */
+#else /* Word 0 - Little Endian */
+ uint64_t error : 8; /**< [ 7: 0](RO/H) Error code for the first error. Valid encoding is enumerated by GITS_CMD_ERR_E
+ and one of GITS_CMD_ERR_E::CSEI_UNMAPPED_DEVICE,
+ GITS_CMD_ERR_E::CSEI_DEVICE_OOR, GITS_CMD_ERR_E::CSEI_ID_OOR,
+ GITS_CMD_ERR_E::CSEI_UNMAPPED_INTERRUPT, or
+ GITS_CMD_ERR_E::CSEI_UNMAPPED_COLLECTION. */
+ uint64_t int_id : 20; /**< [ 27: 8](RO/H) Input interrupt ID to the interrupt translator. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t dev_id : 21; /**< [ 52: 32](RO/H) Input device ID to the interrupt translator. */
+ uint64_t reserved_53_61 : 9;
+ uint64_t m : 1; /**< [ 62: 62](RO/H) When set, it means multiple errors have happened. */
+ uint64_t v : 1; /**< [ 63: 63](R/W1C/H) When set, the translator error is valid. Write one to this field will clear [V], [M],
+ [DEV_ID], [INT_ID], and [ERROR]. */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_gits_imp_tseir_s cn9; */
+};
+typedef union bdk_gits_imp_tseir bdk_gits_imp_tseir_t;
+
+#define BDK_GITS_IMP_TSEIR BDK_GITS_IMP_TSEIR_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_IMP_TSEIR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_IMP_TSEIR_FUNC(void)
+{
+ return 0x801000020028ll;
+}
+
+#define typedef_BDK_GITS_IMP_TSEIR bdk_gits_imp_tseir_t
+#define bustype_BDK_GITS_IMP_TSEIR BDK_CSR_TYPE_NCB
+#define basename_BDK_GITS_IMP_TSEIR "GITS_IMP_TSEIR"
+#define device_bar_BDK_GITS_IMP_TSEIR 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_IMP_TSEIR 0
+#define arguments_BDK_GITS_IMP_TSEIR -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_pidr0
+ *
+ * GIC ITS Peripheral Identification Register 0
+ */
+union bdk_gits_pidr0
+{
+ uint32_t u;
+ struct bdk_gits_pidr0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GITS. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum0 : 8; /**< [ 7: 0](RO) Part number \<7:0\>. Indicates PCC_PIDR_PARTNUM0_E::GITS. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_pidr0_s cn; */
+};
+typedef union bdk_gits_pidr0 bdk_gits_pidr0_t;
+
+#define BDK_GITS_PIDR0 BDK_GITS_PIDR0_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_PIDR0_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_PIDR0_FUNC(void)
+{
+ return 0x80100002ffe0ll;
+}
+
+#define typedef_BDK_GITS_PIDR0 bdk_gits_pidr0_t
+#define bustype_BDK_GITS_PIDR0 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_PIDR0 "GITS_PIDR0"
+#define device_bar_BDK_GITS_PIDR0 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_PIDR0 0
+#define arguments_BDK_GITS_PIDR0 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_pidr1
+ *
+ * GIC ITS Peripheral Identification Register 1
+ */
+union bdk_gits_pidr1
+{
+ uint32_t u;
+ struct bdk_gits_pidr1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+#else /* Word 0 - Little Endian */
+ uint32_t partnum1 : 4; /**< [ 3: 0](RO) Part number \<11:8\>. Indicates PCC_PIDR_PARTNUM1_E::COMP. */
+ uint32_t idcode : 4; /**< [ 7: 4](RO) JEP106 identification code \<3:0\>. Cavium code is 0x4C. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_pidr1_s cn; */
+};
+typedef union bdk_gits_pidr1 bdk_gits_pidr1_t;
+
+#define BDK_GITS_PIDR1 BDK_GITS_PIDR1_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_PIDR1_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_PIDR1_FUNC(void)
+{
+ return 0x80100002ffe4ll;
+}
+
+#define typedef_BDK_GITS_PIDR1 bdk_gits_pidr1_t
+#define bustype_BDK_GITS_PIDR1 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_PIDR1 "GITS_PIDR1"
+#define device_bar_BDK_GITS_PIDR1 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_PIDR1 0
+#define arguments_BDK_GITS_PIDR1 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_pidr2
+ *
+ * GIC ITS Peripheral Identification Register 2
+ */
+union bdk_gits_pidr2
+{
+ uint32_t u;
+ struct bdk_gits_pidr2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t archrev : 4; /**< [ 7: 4](RO) Architectural revision:
+ 0x1 = GICv1.
+ 0x2 = GICv2.
+ 0x3 = GICv3.
+ 0x4 = GICv4.
+ 0x5-0xF = Reserved. */
+ uint32_t usesjepcode : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t jepid : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+#else /* Word 0 - Little Endian */
+ uint32_t jepid : 3; /**< [ 2: 0](RO) JEP106 identification code \<6:4\>. Cavium code is 0x4C. */
+ uint32_t usesjepcode : 1; /**< [ 3: 3](RO) JEDEC assigned. */
+ uint32_t archrev : 4; /**< [ 7: 4](RO) Architectural revision:
+ 0x1 = GICv1.
+ 0x2 = GICv2.
+ 0x3 = GICv3.
+ 0x4 = GICv4.
+ 0x5-0xF = Reserved. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_pidr2_s cn; */
+};
+typedef union bdk_gits_pidr2 bdk_gits_pidr2_t;
+
+#define BDK_GITS_PIDR2 BDK_GITS_PIDR2_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_PIDR2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_PIDR2_FUNC(void)
+{
+ return 0x80100002ffe8ll;
+}
+
+#define typedef_BDK_GITS_PIDR2 bdk_gits_pidr2_t
+#define bustype_BDK_GITS_PIDR2 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_PIDR2 "GITS_PIDR2"
+#define device_bar_BDK_GITS_PIDR2 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_PIDR2 0
+#define arguments_BDK_GITS_PIDR2 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_pidr3
+ *
+ * GIC ITS Peripheral Identification Register 3
+ */
+union bdk_gits_pidr3
+{
+ uint32_t u;
+ struct bdk_gits_pidr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t cmod : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+#else /* Word 0 - Little Endian */
+ uint32_t cmod : 4; /**< [ 3: 0](RO) Customer modified. 0x1 = Overall product information should be consulted for
+ product, major and minor pass numbers. */
+ uint32_t revand : 4; /**< [ 7: 4](RO) Manufacturer revision number. For CNXXXX always 0x0. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_pidr3_s cn; */
+};
+typedef union bdk_gits_pidr3 bdk_gits_pidr3_t;
+
+#define BDK_GITS_PIDR3 BDK_GITS_PIDR3_FUNC()
+/* Fixed-address register; the accessor takes no node/index arguments. */
+static inline uint64_t BDK_GITS_PIDR3_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_PIDR3_FUNC(void)
+{
+ return 0x80100002ffecll;
+}
+
+#define typedef_BDK_GITS_PIDR3 bdk_gits_pidr3_t
+#define bustype_BDK_GITS_PIDR3 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_PIDR3 "GITS_PIDR3"
+#define device_bar_BDK_GITS_PIDR3 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_PIDR3 0
+#define arguments_BDK_GITS_PIDR3 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_pidr4
+ *
+ * GIC ITS Peripheral Identification Register 4
+ */
+union bdk_gits_pidr4
+{
+ uint32_t u;
+ struct bdk_gits_pidr4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cnt_4k : 4; /**< [ 7: 4](RO) 4 KB Count. This field is 0x4, indicating this is a 64 KB software-visible page. */
+ uint32_t continuation_code : 4; /**< [ 3: 0](RO) JEP106 continuation code, least significant nibble. Indicates Cavium. */
+#else /* Word 0 - Little Endian */
+ uint32_t continuation_code : 4; /**< [ 3: 0](RO) JEP106 continuation code, least significant nibble. Indicates Cavium. */
+ uint32_t cnt_4k : 4; /**< [ 7: 4](RO) 4 KB Count. This field is 0x4, indicating this is a 64 KB software-visible page. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_pidr4_s cn; */
+};
+typedef union bdk_gits_pidr4 bdk_gits_pidr4_t;
+
+#define BDK_GITS_PIDR4 BDK_GITS_PIDR4_FUNC()
+static inline uint64_t BDK_GITS_PIDR4_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_PIDR4_FUNC(void)
+{
+ return 0x80100002ffd0ll;
+}
+
+#define typedef_BDK_GITS_PIDR4 bdk_gits_pidr4_t
+#define bustype_BDK_GITS_PIDR4 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_PIDR4 "GITS_PIDR4"
+#define device_bar_BDK_GITS_PIDR4 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_PIDR4 0
+#define arguments_BDK_GITS_PIDR4 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_pidr5
+ *
+ * GIC ITS Peripheral Identification Register 5
+ */
+union bdk_gits_pidr5
+{
+ uint32_t u;
+ struct bdk_gits_pidr5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_pidr5_s cn; */
+};
+typedef union bdk_gits_pidr5 bdk_gits_pidr5_t;
+
+#define BDK_GITS_PIDR5 BDK_GITS_PIDR5_FUNC()
+static inline uint64_t BDK_GITS_PIDR5_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_PIDR5_FUNC(void)
+{
+ return 0x80100002ffd4ll;
+}
+
+#define typedef_BDK_GITS_PIDR5 bdk_gits_pidr5_t
+#define bustype_BDK_GITS_PIDR5 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_PIDR5 "GITS_PIDR5"
+#define device_bar_BDK_GITS_PIDR5 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_PIDR5 0
+#define arguments_BDK_GITS_PIDR5 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_pidr6
+ *
+ * GIC ITS Peripheral Identification Register 6
+ */
+union bdk_gits_pidr6
+{
+ uint32_t u;
+ struct bdk_gits_pidr6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_pidr6_s cn; */
+};
+typedef union bdk_gits_pidr6 bdk_gits_pidr6_t;
+
+#define BDK_GITS_PIDR6 BDK_GITS_PIDR6_FUNC()
+static inline uint64_t BDK_GITS_PIDR6_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_PIDR6_FUNC(void)
+{
+ return 0x80100002ffd8ll;
+}
+
+#define typedef_BDK_GITS_PIDR6 bdk_gits_pidr6_t
+#define bustype_BDK_GITS_PIDR6 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_PIDR6 "GITS_PIDR6"
+#define device_bar_BDK_GITS_PIDR6 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_PIDR6 0
+#define arguments_BDK_GITS_PIDR6 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_pidr7
+ *
+ * GIC ITS Peripheral Identification Register 7
+ */
+union bdk_gits_pidr7
+{
+ uint32_t u;
+ struct bdk_gits_pidr7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_pidr7_s cn; */
+};
+typedef union bdk_gits_pidr7 bdk_gits_pidr7_t;
+
+#define BDK_GITS_PIDR7 BDK_GITS_PIDR7_FUNC()
+static inline uint64_t BDK_GITS_PIDR7_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_PIDR7_FUNC(void)
+{
+ return 0x80100002ffdcll;
+}
+
+#define typedef_BDK_GITS_PIDR7 bdk_gits_pidr7_t
+#define bustype_BDK_GITS_PIDR7 BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_PIDR7 "GITS_PIDR7"
+#define device_bar_BDK_GITS_PIDR7 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_PIDR7 0
+#define arguments_BDK_GITS_PIDR7 -1,-1,-1,-1
+
+/**
+ * Register (NCB32b) gits_translater
+ *
+ * GIC ITS Translate Register
+ * This 32-bit register is write-only. The value written to this register specifies an interrupt
+ * identifier to be translated for the requesting device.
+ * A unique device identifier is provided for each requesting device and this is presented to the
+ * ITS on writes to this register. This device identifier
+ * is used to index a device table that maps the incoming device identifier to an interrupt
+ * translation table for that device.
+ *
+ * Note that writes to this register with a device identifier that has not been mapped will be
+ * ignored.
+ *
+ * Note that writes to this register with a device identifier that exceed the supported device
+ * identifier size will be ignored.
+ *
+ * Note that this register is provided to enable the generation (and translation) of message
+ * based interrupts from devices (e.g. MSI or MSI-X writes from PCIe devices).
+ *
+ * The register is at the same offset as GICD_SETSPI_NSR in the distributor and GICR()_SETLPIR in
+ * the redistributor to allow virtualization of guest operating systems
+ * that directly program devices simply by ensuring the address programmed by the guest can be
+ * translated by an SMMU to target GITS_TRANSLATER.
+ */
+union bdk_gits_translater
+{
+ uint32_t u;
+ struct bdk_gits_translater_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t int_id : 32; /**< [ 31: 0](WO/H) Interrupt ID. The ID of interrupt to be translated for the requesting device.
+
+ Note: the number of interrupt identifier bits is defined by
+ GITS_TYPER[IDBITS]. Nonzero identifier bits outside this range are ignored.
+
+ Note: 16-bit access to bits \<15:0\> of this register must be supported. When written by a
+ 16-bit transaction, bits \<31:16\> are written as zero. This register can not be accessed by
+ CPU. */
+#else /* Word 0 - Little Endian */
+ uint32_t int_id : 32; /**< [ 31: 0](WO/H) Interrupt ID. The ID of interrupt to be translated for the requesting device.
+
+ Note: the number of interrupt identifier bits is defined by
+ GITS_TYPER[IDBITS]. Nonzero identifier bits outside this range are ignored.
+
+ Note: 16-bit access to bits \<15:0\> of this register must be supported. When written by a
+ 16-bit transaction, bits \<31:16\> are written as zero. This register can not be accessed by
+ CPU. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gits_translater_s cn; */
+};
+typedef union bdk_gits_translater bdk_gits_translater_t;
+
+#define BDK_GITS_TRANSLATER BDK_GITS_TRANSLATER_FUNC()
+static inline uint64_t BDK_GITS_TRANSLATER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_TRANSLATER_FUNC(void)
+{
+ return 0x801000030040ll;
+}
+
+#define typedef_BDK_GITS_TRANSLATER bdk_gits_translater_t
+#define bustype_BDK_GITS_TRANSLATER BDK_CSR_TYPE_NCB32b
+#define basename_BDK_GITS_TRANSLATER "GITS_TRANSLATER"
+#define device_bar_BDK_GITS_TRANSLATER 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_TRANSLATER 0
+#define arguments_BDK_GITS_TRANSLATER -1,-1,-1,-1
+
+/**
+ * Register (NCB) gits_typer
+ *
+ * GIC ITS Type Register
+ * This register describes features supported by the ITS.
+ */
+union bdk_gits_typer
+{
+ uint64_t u;
+ struct bdk_gits_typer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_37_63 : 27;
+ uint64_t cil : 1; /**< [ 36: 36](RAZ) 0 = ITS supports 16-bit collection ID, GITS_TYPER[CID_BITS] is RES0.
+ 1 = GITS_TYPER[CID_BITS] indicates supported collection ID size
+ CNXXXX implementations do not support collections in external memory, this bit
+ reads as zero and number of collections supported is reported by GITS_TYPER[HCC]. */
+ uint64_t cid_bits : 4; /**< [ 35: 32](RAZ) Number of collection ID bits. The number of bits of collection ID - 1.
+ When GITS_TYPER.CIL==0, this field is RES0. */
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+
+ Internal:
+ Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on,
+ software must ensure that any hardware collections
+ are re-mapped following power-on. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported. See section 4.9.16.
+ 0 = Target addresses correspond to linear processor numbers. See section 5.4.6.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+        uint64_t devbits               : 5;  /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 21-bit device ID is defined
+ as {node_id[1:0], iob_id[2:0], stream_id[15:0]}. */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t cct : 1; /**< [ 2: 2](RAZ) Memory backed collection is not supported. */
+ uint64_t vlpi : 1; /**< [ 1: 1](RAZ) Reserved. Virtual LPIs and direct injection of Virtual LPIs supported.
+ This field is zero in GICv3 implementations. */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+#else /* Word 0 - Little Endian */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+ uint64_t vlpi : 1; /**< [ 1: 1](RAZ) Reserved. Virtual LPIs and direct injection of Virtual LPIs supported.
+ This field is zero in GICv3 implementations. */
+ uint64_t cct : 1; /**< [ 2: 2](RAZ) Memory backed collection is not supported. */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+        uint64_t devbits               : 5;  /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 21-bit device ID is defined
+ as {node_id[1:0], iob_id[2:0], stream_id[15:0]}. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported. See section 4.9.16.
+ 0 = Target addresses correspond to linear processor numbers. See section 5.4.6.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+
+ Internal:
+ Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on,
+ software must ensure that any hardware collections
+ are re-mapped following power-on. */
+ uint64_t cid_bits : 4; /**< [ 35: 32](RAZ) Number of collection ID bits. The number of bits of collection ID - 1.
+ When GITS_TYPER.CIL==0, this field is RES0. */
+ uint64_t cil : 1; /**< [ 36: 36](RAZ) 0 = ITS supports 16-bit collection ID, GITS_TYPER[CID_BITS] is RES0.
+ 1 = GITS_TYPER[CID_BITS] indicates supported collection ID size
+ CNXXXX implementations do not support collections in external memory, this bit
+ reads as zero and number of collections supported is reported by GITS_TYPER[HCC]. */
+ uint64_t reserved_37_63 : 27;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gits_typer_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+
+ Internal:
+ Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on,
+ software must ensure that any hardware collections
+ are re-mapped following power-on. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported. See section 4.9.16.
+ 0 = Target addresses correspond to linear processor numbers. See section 5.4.6.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+        uint64_t devbits               : 5;  /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 21-bit device ID is defined
+ as {node_id[1:0], iob_id[2:0], stream_id[15:0]}. */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+#else /* Word 0 - Little Endian */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+ uint64_t reserved_1_2 : 2;
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+        uint64_t devbits               : 5;  /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 21-bit device ID is defined
+ as {node_id[1:0], iob_id[2:0], stream_id[15:0]}. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported. See section 4.9.16.
+ 0 = Target addresses correspond to linear processor numbers. See section 5.4.6.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+
+ Internal:
+ Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on,
+ software must ensure that any hardware collections
+ are re-mapped following power-on. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_gits_typer_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_37_63 : 27;
+ uint64_t cil : 1; /**< [ 36: 36](RAZ) 0 = ITS supports 16-bit collection ID, GITS_TYPER[CID_BITS] is RES0.
+ 1 = GITS_TYPER[CID_BITS] indicates supported collection ID size
+ CNXXXX implementations do not support collections in external memory, this bit
+ reads as zero and number of collections supported is reported by GITS_TYPER[HCC]. */
+ uint64_t cid_bits : 4; /**< [ 35: 32](RAZ) Number of collection ID bits. The number of bits of collection ID - 1.
+ When GITS_TYPER.CIL==0, this field is RES0. */
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+ NOTE: Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on, software must ensure that any hardware collections are remapped following power-on.
+                                                                 A powered back on event is defined as cold reset is asserted and then deasserted from ITS
+ point of view. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported.
+ 0 = Target addresses correspond to linear processor numbers.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+ uint64_t devbits : 5; /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 24-bit device ID is defined
+ as {node_id[1:0], iob_id[1:0], ecam_id[3:0], stream_id[15:0]}. */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t cct : 1; /**< [ 2: 2](RAZ) Memory backed collection is not supported. */
+ uint64_t vlpi : 1; /**< [ 1: 1](RAZ) Reserved. Virtual LPIs and direct injection of Virtual LPIs supported.
+ This field is zero in GICv3 implementations. */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+#else /* Word 0 - Little Endian */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+ uint64_t vlpi : 1; /**< [ 1: 1](RAZ) Reserved. Virtual LPIs and direct injection of Virtual LPIs supported.
+ This field is zero in GICv3 implementations. */
+ uint64_t cct : 1; /**< [ 2: 2](RAZ) Memory backed collection is not supported. */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+ uint64_t devbits : 5; /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 24-bit device ID is defined
+ as {node_id[1:0], iob_id[1:0], ecam_id[3:0], stream_id[15:0]}. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported.
+ 0 = Target addresses correspond to linear processor numbers.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+ NOTE: Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on, software must ensure that any hardware collections are remapped following power-on.
+                                                                 A powered back on event is defined as cold reset is asserted and then deasserted from ITS
+ point of view. */
+ uint64_t cid_bits : 4; /**< [ 35: 32](RAZ) Number of collection ID bits. The number of bits of collection ID - 1.
+ When GITS_TYPER.CIL==0, this field is RES0. */
+ uint64_t cil : 1; /**< [ 36: 36](RAZ) 0 = ITS supports 16-bit collection ID, GITS_TYPER[CID_BITS] is RES0.
+ 1 = GITS_TYPER[CID_BITS] indicates supported collection ID size
+ CNXXXX implementations do not support collections in external memory, this bit
+ reads as zero and number of collections supported is reported by GITS_TYPER[HCC]. */
+ uint64_t reserved_37_63 : 27;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_gits_typer_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_37_63 : 27;
+ uint64_t cil : 1; /**< [ 36: 36](RAZ) 0 = ITS supports 16-bit collection ID, GITS_TYPER[CID_BITS] is RES0.
+ 1 = GITS_TYPER[CID_BITS] indicates supported collection ID size
+ CNXXXX implementations do not support collections in external memory, this bit
+ reads as zero and number of collections supported is reported by GITS_TYPER[HCC]. */
+ uint64_t cid_bits : 4; /**< [ 35: 32](RAZ) Number of collection ID bits. The number of bits of collection ID - 1.
+ When GITS_TYPER.CIL==0, this field is RES0. */
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+ NOTE: Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on, software must ensure that any hardware collections are remapped following power-on.
+                                                                 A powered back on event is defined as cold reset is asserted and then deasserted from ITS
+ point of view. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported.
+ 0 = Target addresses correspond to linear processor numbers.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+ uint64_t devbits : 5; /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 21-bit device ID is defined
+ as {node_id[1:0], iob_id[2:0], stream_id[15:0]}. */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t reserved_2 : 1;
+ uint64_t vlpi : 1; /**< [ 1: 1](RAZ) Reserved. Virtual LPIs and direct injection of Virtual LPIs supported.
+ This field is zero in GICv3 implementations. */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+#else /* Word 0 - Little Endian */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+ uint64_t vlpi : 1; /**< [ 1: 1](RAZ) Reserved. Virtual LPIs and direct injection of Virtual LPIs supported.
+ This field is zero in GICv3 implementations. */
+ uint64_t reserved_2 : 1;
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+ uint64_t devbits : 5; /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 21-bit device ID is defined
+ as {node_id[1:0], iob_id[2:0], stream_id[15:0]}. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported.
+ 0 = Target addresses correspond to linear processor numbers.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+ NOTE: Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on, software must ensure that any hardware collections are remapped following power-on.
+                                                                 A powered back on event is defined as cold reset is asserted and then deasserted from ITS
+ point of view. */
+ uint64_t cid_bits : 4; /**< [ 35: 32](RAZ) Number of collection ID bits. The number of bits of collection ID - 1.
+ When GITS_TYPER.CIL==0, this field is RES0. */
+ uint64_t cil : 1; /**< [ 36: 36](RAZ) 0 = ITS supports 16-bit collection ID, GITS_TYPER[CID_BITS] is RES0.
+ 1 = GITS_TYPER[CID_BITS] indicates supported collection ID size
+ CNXXXX implementations do not support collections in external memory, this bit
+ reads as zero and number of collections supported is reported by GITS_TYPER[HCC]. */
+ uint64_t reserved_37_63 : 27;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_gits_typer_cn81xx cn83xx; */
+ struct bdk_gits_typer_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_37_63 : 27;
+ uint64_t cil : 1; /**< [ 36: 36](RAZ) 0 = ITS supports 16-bit Collection ID, GITS_TYPER[CIDbits] is RES0.
+ 1 = GITS_TYPER[CIDBITS] indicates supported collection ID size
+ CNXXXX implementations do not support collections in external memory, this bit is
+ RAZ and number of Collections supported is reported by GITS_TYPER[HCC]. */
+ uint64_t cid_bits : 4; /**< [ 35: 32](RAZ) Number of collection ID bits. The number of bits of collection ID - 1.
+ When GITS_TYPER.CIL==0, this field is RES0. */
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+
+ NOTE: Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on, software must ensure that any hardware collections are remapped following power-on.
+                                                                 A powered back on event is defined as cold reset is asserted and then deasserted from ITS
+ point of view. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported.
+ 0 = Target addresses correspond to linear processor numbers.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+ uint64_t devbits : 5; /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 21-bit device ID is defined
+ as {node_id[1:0], iob_id[2:0], stream_id[15:0]}. */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t reserved_2 : 1;
+ uint64_t vlpi : 1; /**< [ 1: 1](RAZ) Reserved. Virtual LPIs and Direct injection of Virtual LPIs supported. This field is
+ clear in GICv3 implementations. */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+#else /* Word 0 - Little Endian */
+ uint64_t physical : 1; /**< [ 0: 0](RO) Reserved, one. */
+ uint64_t vlpi : 1; /**< [ 1: 1](RAZ) Reserved. Virtual LPIs and Direct injection of Virtual LPIs supported. This field is
+ clear in GICv3 implementations. */
+ uint64_t reserved_2 : 1;
+ uint64_t distributed : 1; /**< [ 3: 3](RO) Distributed ITS implementation supported. */
+ uint64_t itte_size : 4; /**< [ 7: 4](RO) ITT entry size. Number of bytes per entry, minus one. The ITT entry size
+ implemented is four bytes (32-bit). */
+ uint64_t idbits : 5; /**< [ 12: 8](RO) The number of interrupt identifier bits supported, minus one. */
+ uint64_t devbits : 5; /**< [ 17: 13](RO) The number of device identifier bits supported, minus one. The 21-bit device ID is defined
+ as {node_id[1:0], iob_id[2:0], stream_id[15:0]}. */
+ uint64_t seis : 1; /**< [ 18: 18](RO) Locally generated system error interrupts supported. */
+ uint64_t pta : 1; /**< [ 19: 19](RO) Physical target addresses supported.
+ 0 = Target addresses correspond to linear processor numbers.
+ 1 = Target addresses correspond to the base physical address of re-distributors. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t hcc : 8; /**< [ 31: 24](RO) Hardware collection count. The number of collections supported by the ITS without
+ provisioning of external memory. If this field is nonzero,
+ collections in the range zero to (HCC minus one) are solely maintained in storage within
+ the ITS.
+
+ NOTE: Note when this field is nonzero and an ITS is dynamically powered-off and back
+ on, software must ensure that any hardware collections are remapped following power-on.
+                                                                 A powered back on event is defined as cold reset is asserted and then deasserted from ITS
+ point of view. */
+ uint64_t cid_bits : 4; /**< [ 35: 32](RAZ) Number of collection ID bits. The number of bits of collection ID - 1.
+ When GITS_TYPER.CIL==0, this field is RES0. */
+ uint64_t cil : 1; /**< [ 36: 36](RAZ) 0 = ITS supports 16-bit Collection ID, GITS_TYPER[CIDbits] is RES0.
+ 1 = GITS_TYPER[CIDBITS] indicates supported collection ID size
+ CNXXXX implementations do not support collections in external memory, this bit is
+ RAZ and number of Collections supported is reported by GITS_TYPER[HCC]. */
+ uint64_t reserved_37_63 : 27;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_gits_typer bdk_gits_typer_t;
+
+#define BDK_GITS_TYPER BDK_GITS_TYPER_FUNC()
+static inline uint64_t BDK_GITS_TYPER_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GITS_TYPER_FUNC(void)
+{
+ return 0x801000020008ll;
+}
+
+#define typedef_BDK_GITS_TYPER bdk_gits_typer_t
+#define bustype_BDK_GITS_TYPER BDK_CSR_TYPE_NCB
+#define basename_BDK_GITS_TYPER "GITS_TYPER"
+#define device_bar_BDK_GITS_TYPER 0x2 /* PF_BAR2 */
+#define busnum_BDK_GITS_TYPER 0
+#define arguments_BDK_GITS_TYPER -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_GIC_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gser.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gser.h
new file mode 100644
index 0000000000..3e887fac7e
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gser.h
@@ -0,0 +1,12340 @@
+#ifndef __BDK_CSRS_GSER_H__
+#define __BDK_CSRS_GSER_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium GSER.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration gser_bar_e
+ *
+ * GSER Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_GSER_BAR_E_GSERX_PF_BAR0(a) (0x87e090000000ll + 0x1000000ll * (a))
+#define BDK_GSER_BAR_E_GSERX_PF_BAR0_SIZE 0x800000ull
+
+/**
+ * Enumeration gser_lmode_e
+ *
+ * GSER Lane Mode Enumeration
+ * Enumerates the SerDes lane modes. See GSER()_LANE_MODE[LMODE].
+ */
+#define BDK_GSER_LMODE_E_R_103125G_REFCLK15625_KR (5)
+#define BDK_GSER_LMODE_E_R_125G_REFCLK15625_KX (3)
+#define BDK_GSER_LMODE_E_R_125G_REFCLK15625_SGMII (6)
+#define BDK_GSER_LMODE_E_R_25G_REFCLK100 (0)
+#define BDK_GSER_LMODE_E_R_25G_REFCLK125 (9)
+#define BDK_GSER_LMODE_E_R_3125G_REFCLK15625_XAUI (4)
+#define BDK_GSER_LMODE_E_R_5G_REFCLK100 (1)
+#define BDK_GSER_LMODE_E_R_5G_REFCLK125 (0xa)
+#define BDK_GSER_LMODE_E_R_5G_REFCLK15625_QSGMII (7)
+#define BDK_GSER_LMODE_E_R_625G_REFCLK15625_RXAUI (8)
+#define BDK_GSER_LMODE_E_R_8G_REFCLK100 (2)
+#define BDK_GSER_LMODE_E_R_8G_REFCLK125 (0xb)
+
+/**
+ * Enumeration gser_qlm_e
+ *
+ * GSER QLM/CCPI Enumeration
+ * Enumerates the GSER to QLM.
+ */
+#define BDK_GSER_QLM_E_GSER0 (0)
+#define BDK_GSER_QLM_E_GSER1 (1)
+#define BDK_GSER_QLM_E_GSER10 (0xa)
+#define BDK_GSER_QLM_E_GSER11 (0xb)
+#define BDK_GSER_QLM_E_GSER12 (0xc)
+#define BDK_GSER_QLM_E_GSER13 (0xd)
+#define BDK_GSER_QLM_E_GSER2 (2)
+#define BDK_GSER_QLM_E_GSER3 (3)
+#define BDK_GSER_QLM_E_GSER4 (4)
+#define BDK_GSER_QLM_E_GSER5 (5)
+#define BDK_GSER_QLM_E_GSER6 (6)
+#define BDK_GSER_QLM_E_GSER7 (7)
+#define BDK_GSER_QLM_E_GSER8 (8)
+#define BDK_GSER_QLM_E_GSER9 (9)
+
+/**
+ * Register (RSL) gser#_ana_atest
+ *
+ * GSER Analog Test Register
+ */
+union bdk_gserx_ana_atest
+{
+ uint64_t u;
+ struct bdk_gserx_ana_atest_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ana_dac_b : 7; /**< [ 11: 5](R/W) Controls the B-side DAC input to the analog test block. Note that the GSER2
+ register
+ is tied to the analog test block. The other GSER()_ANA_ATEST registers are
+ unused. For diagnostic use only. */
+ uint64_t ana_dac_a : 5; /**< [ 4: 0](R/W) Controls the A-side DAC input to the analog test block. Note that the GSER2 register is
+ tied to the analog test block. The other GSER()_ANA_ATEST registers are unused.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t ana_dac_a : 5; /**< [ 4: 0](R/W) Controls the A-side DAC input to the analog test block. Note that the GSER2 register is
+ tied to the analog test block. The other GSER()_ANA_ATEST registers are unused.
+ For diagnostic use only. */
+ uint64_t ana_dac_b : 7; /**< [ 11: 5](R/W) Controls the B-side DAC input to the analog test block. Note that the GSER2
+ register
+ is tied to the analog test block. The other GSER()_ANA_ATEST registers are
+ unused. For diagnostic use only. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_ana_atest_s cn81xx; */
+ struct bdk_gserx_ana_atest_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ana_dac_b : 7; /**< [ 11: 5](R/W) Controls the B-side DAC input to the analog test block. Note that the QLM4 register
+ is tied to the analog test block, for non-CCPI links. Note that the CCPI4 register is tied
+ to the analog test block, for CCPI links. The other GSER()_ANA_ATEST registers are
+ unused. For diagnostic use only. */
+ uint64_t ana_dac_a : 5; /**< [ 4: 0](R/W) Controls the A-side DAC input to the analog test block. Note that the QLM4 register is
+ tied to the analog test block, for non-CCPI links. Note that the CCPI4 register is tied to
+ the analog test block, for CCPI links. The other GSER()_ANA_ATEST registers are unused.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t ana_dac_a : 5; /**< [ 4: 0](R/W) Controls the A-side DAC input to the analog test block. Note that the QLM4 register is
+ tied to the analog test block, for non-CCPI links. Note that the CCPI4 register is tied to
+ the analog test block, for CCPI links. The other GSER()_ANA_ATEST registers are unused.
+ For diagnostic use only. */
+ uint64_t ana_dac_b : 7; /**< [ 11: 5](R/W) Controls the B-side DAC input to the analog test block. Note that the QLM4 register
+ is tied to the analog test block, for non-CCPI links. Note that the CCPI4 register is tied
+ to the analog test block, for CCPI links. The other GSER()_ANA_ATEST registers are
+ unused. For diagnostic use only. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_ana_atest_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t ana_dac_b : 7; /**< [ 11: 5](R/W) Controls the B-side DAC input to the analog test block. Note that only
+ the GSER(4)_ANA_TEST[ANA_DAC_B] is tied to the analog test block.
+ The other GSER(0..3,5..6)_ANA_ATEST[ANA_DAC_B] are unused.
+ For diagnostic use only. */
+ uint64_t ana_dac_a : 5; /**< [ 4: 0](R/W) Controls A-side DAC input to the analog test block. Note that only
+                                                                 the GSER(4)_ANA_TEST[ANA_DAC_A] is tied to the analog test block.
+ The other GSER(0..3,5..6)_ANA_ATEST[ANA_DAC_A] are unused.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t ana_dac_a : 5; /**< [ 4: 0](R/W) Controls A-side DAC input to the analog test block. Note that only
+                                                                 the GSER(4)_ANA_TEST[ANA_DAC_A] is tied to the analog test block.
+ The other GSER(0..3,5..6)_ANA_ATEST[ANA_DAC_A] are unused.
+ For diagnostic use only. */
+ uint64_t ana_dac_b : 7; /**< [ 11: 5](R/W) Controls the B-side DAC input to the analog test block. Note that only
+ the GSER(4)_ANA_TEST[ANA_DAC_B] is tied to the analog test block.
+ The other GSER(0..3,5..6)_ANA_ATEST[ANA_DAC_B] are unused.
+ For diagnostic use only. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_ana_atest bdk_gserx_ana_atest_t;
+
+static inline uint64_t BDK_GSERX_ANA_ATEST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_ANA_ATEST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000800ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000800ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000800ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_ANA_ATEST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_ANA_ATEST(a) bdk_gserx_ana_atest_t
+#define bustype_BDK_GSERX_ANA_ATEST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_ANA_ATEST(a) "GSERX_ANA_ATEST"
+#define device_bar_BDK_GSERX_ANA_ATEST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_ANA_ATEST(a) (a)
+#define arguments_BDK_GSERX_ANA_ATEST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_ana_sel
+ *
+ * GSER Analog Select Register
+ */
+union bdk_gserx_ana_sel
+{
+ uint64_t u;
+ struct bdk_gserx_ana_sel_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t ana_sel : 9; /**< [ 8: 0](R/W) Controls the adr_global input to the analog test block. Note that the
+ GSER(2)_ANA_SEL.ANA_SEL register is tied to the analog test block.
+ The other GSER()_ANA_SEL registers are unused.
+ For diagnostic use only.
+
+ Used to power down the common clock input receiver to reduce power
+ consumption if the common clock input is not used.
+ If the common clock DLMC_REFCLK1_P/N input is unused program the GSER(2)_ANA_SEL.ANA_SEL
+ field to 0x1fd.
+ If the common clock DLMC_REFCLK0_P/N input is unused program the GSER(2)_ANA_SEL.ANA_SEL
+ field to 0x1fe.
+ If both common clock DLMC_REFCLK0_P/N and DLMC_REFCLK1_P/N inputs are unused program the
+ GSER(2)_ANA_SEL.ANA_SEL field to 0x1fc. */
+#else /* Word 0 - Little Endian */
+ uint64_t ana_sel : 9; /**< [ 8: 0](R/W) Controls the adr_global input to the analog test block. Note that the
+ GSER(2)_ANA_SEL.ANA_SEL register is tied to the analog test block.
+ The other GSER()_ANA_SEL registers are unused.
+ For diagnostic use only.
+
+ Used to power down the common clock input receiver to reduce power
+ consumption if the common clock input is not used.
+ If the common clock DLMC_REFCLK1_P/N input is unused program the GSER(2)_ANA_SEL.ANA_SEL
+ field to 0x1fd.
+ If the common clock DLMC_REFCLK0_P/N input is unused program the GSER(2)_ANA_SEL.ANA_SEL
+ field to 0x1fe.
+ If both common clock DLMC_REFCLK0_P/N and DLMC_REFCLK1_P/N inputs are unused program the
+ GSER(2)_ANA_SEL.ANA_SEL field to 0x1fc. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gserx_ana_sel_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t ana_sel : 9; /**< [ 8: 0](R/W) Controls the adr_global input to the analog test block. Note that the QLM0 register
+ is tied to the analog test block, for non-CCPI links. Note that the QLM8 register is tied
+ to the analog test block, for CCPI links. The other GSER()_ANA_SEL registers are unused.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t ana_sel : 9; /**< [ 8: 0](R/W) Controls the adr_global input to the analog test block. Note that the QLM0 register
+ is tied to the analog test block, for non-CCPI links. Note that the QLM8 register is tied
+ to the analog test block, for CCPI links. The other GSER()_ANA_SEL registers are unused.
+ For diagnostic use only. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_gserx_ana_sel_s cn81xx; */
+ struct bdk_gserx_ana_sel_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t ana_sel : 9; /**< [ 8: 0](R/W) Controls the adr_global input to the analog test block. Note that only
+ the GSER(4)_ANA_SEL.ANA_SEL register is tied to the analog test block.
+ The GSER(0..3,5..6)_ANA_SEL.ANA_SEL registers are unused.
+
+ Used to power down the common clock input receiver to reduce power consumption
+ if the common clock input is not used.
+ If the common clock QLMC_REFCLK1_P/N input is unused program the
+ GSER(4)_ANA_SEL.ANA_SEL field to 0x1FD.
+ If the common clock QLMC_REFCLK0_P/N input is unused program the
+ GSER(4)_ANA_SEL.ANA_SEL field to 0x1FE.
+ If both common clock QLMC_REFCLK0_P/N and QLMC_REFCLK1_P/N inputs are unused program the
+ GSER(4)_ANA_SEL[ANA_SEL] field to 0x1FC.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t ana_sel : 9; /**< [ 8: 0](R/W) Controls the adr_global input to the analog test block. Note that only
+ the GSER(4)_ANA_SEL.ANA_SEL register is tied to the analog test block.
+ The GSER(0..3,5..6)_ANA_SEL.ANA_SEL registers are unused.
+
+ Used to power down the common clock input receiver to reduce power consumption
+ if the common clock input is not used.
+ If the common clock QLMC_REFCLK1_P/N input is unused program the
+ GSER(4)_ANA_SEL.ANA_SEL field to 0x1FD.
+ If the common clock QLMC_REFCLK0_P/N input is unused program the
+ GSER(4)_ANA_SEL.ANA_SEL field to 0x1FE.
+ If both common clock QLMC_REFCLK0_P/N and QLMC_REFCLK1_P/N inputs are unused program the
+ GSER(4)_ANA_SEL[ANA_SEL] field to 0x1FC.
+ For diagnostic use only. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_gserx_ana_sel_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t ana_sel : 9; /**< [ 8: 0](R/W) Controls the adr_global input to the analog test block. Note that the
+ GSER(0)_ANA_SEL.ANA_SEL register is tied to the analog test block, for non-CCPI links.
+ Note that the GSER(8)_ANA_SEL.ANA_SEL register is tied to the analog test block, for
+ CCPI links. The other GSER()_ANA_SEL registers are unused.
+ For diagnostic use only.
+
+ For non-CCPI links used to power down the common clock input receiver to reduce power
+ consumption if the common clock input is not used.
+ If the common clock QLMC_REFCLK1_P/N input is unused, program GSER(0)_ANA_SEL[ANA_SEL]
+ to 0x1FD.
+ If the common clock QLMC_REFCLK0_P/N input is unused, program GSER(0)_ANA_SEL[ANA_SEL]
+ to 0x1FE.
+ If both common clock QLMC_REFCLK0_P/N and QLMC_REFCLK1_P/N inputs are unused, program
+ GSER(0)_ANA_SEL[ANA_SEL] to 0x1FC.
+
+ For CCPI links used to power down the common clock input receiver to reduce power
+ consumption if the common clock input is not used.
+ If the common clock OCIC_REF_CLK_P/N input is unused, program GSER(8)_ANA_SEL[ANA_SEL]
+ field to 0x1FC. */
+#else /* Word 0 - Little Endian */
+ uint64_t ana_sel : 9; /**< [ 8: 0](R/W) Controls the adr_global input to the analog test block. Note that the
+ GSER(0)_ANA_SEL.ANA_SEL register is tied to the analog test block, for non-CCPI links.
+ Note that the GSER(8)_ANA_SEL.ANA_SEL register is tied to the analog test block, for
+ CCPI links. The other GSER()_ANA_SEL registers are unused.
+ For diagnostic use only.
+
+ For non-CCPI links used to power down the common clock input receiver to reduce power
+ consumption if the common clock input is not used.
+ If the common clock QLMC_REFCLK1_P/N input is unused, program GSER(0)_ANA_SEL[ANA_SEL]
+ to 0x1FD.
+ If the common clock QLMC_REFCLK0_P/N input is unused, program GSER(0)_ANA_SEL[ANA_SEL]
+ to 0x1FE.
+ If both common clock QLMC_REFCLK0_P/N and QLMC_REFCLK1_P/N inputs are unused, program
+ GSER(0)_ANA_SEL[ANA_SEL] to 0x1FC.
+
+ For CCPI links used to power down the common clock input receiver to reduce power
+ consumption if the common clock input is not used.
+ If the common clock OCIC_REF_CLK_P/N input is unused, program GSER(8)_ANA_SEL[ANA_SEL]
+ field to 0x1FC. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_gserx_ana_sel bdk_gserx_ana_sel_t;
+
+static inline uint64_t BDK_GSERX_ANA_SEL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_ANA_SEL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000808ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000808ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000808ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_ANA_SEL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_ANA_SEL(a) bdk_gserx_ana_sel_t
+#define bustype_BDK_GSERX_ANA_SEL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_ANA_SEL(a) "GSERX_ANA_SEL"
+#define device_bar_BDK_GSERX_ANA_SEL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_ANA_SEL(a) (a)
+#define arguments_BDK_GSERX_ANA_SEL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_br_rx#_ctl
+ *
+ * GSER Base-R RX Control Register
+ */
+union bdk_gserx_br_rxx_ctl
+{
+ uint64_t u;
+ struct bdk_gserx_br_rxx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t rxt_adtmout_disable : 1; /**< [ 3: 3](R/W) For BASE-R links the terminating condition for link training receiver adaptation
+ is a 330 milliseconds time-out timer. When the receiver adaptation time-out timer
+ expires the receiver adaptation process is concluded and the link is considered good.
+ Note that when BASE-R link training is performed under software control,
+ (GSER()_BR_RX()_CTL[RXT_SWM] is set), the receiver adaptation time-out timer is disabled
+ and not used.
+
+ Set this bit to a one to disable the link training receiver adaptation time-out
+ timer during BASE-R link training under hardware control. For diagnostic use only. */
+ uint64_t rxt_swm : 1; /**< [ 2: 2](R/W) Set when RX BASE-R link training is to be performed under software control.
+
+ See GSER()_BR_RX()_EER[EXT_EER]. */
+ uint64_t rxt_preset : 1; /**< [ 1: 1](R/W) For all link training, this bit determines how to configure the preset bit in the
+ coefficient update message that is sent to the far end transmitter. When set, a one time
+ request is made that the coefficients be set to a state where equalization is turned off.
+
+ To perform a preset, set this bit prior to link training. Link training needs to be
+ disabled to complete the request and get the rxtrain state machine back to idle. Note that
+ it is illegal to set both the preset and initialize bits at the same time. For diagnostic
+ use only. */
+ uint64_t rxt_initialize : 1; /**< [ 0: 0](R/W) For all link training, this bit determines how to configure the initialize bit in the
+ coefficient update message that is sent to the far end transmitter of RX training. When
+ set, a request is made that the coefficients be set to its INITIALIZE state. To perform an
+ initialize prior to link training, set this bit prior to performing link training. Note
+ that it is illegal to set both the preset and initialize bits at the same time. Since the
+ far end transmitter is required to be initialized prior to starting link training, it is
+ not expected that software will need to set this bit. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxt_initialize : 1; /**< [ 0: 0](R/W) For all link training, this bit determines how to configure the initialize bit in the
+ coefficient update message that is sent to the far end transmitter of RX training. When
+ set, a request is made that the coefficients be set to its INITIALIZE state. To perform an
+ initialize prior to link training, set this bit prior to performing link training. Note
+ that it is illegal to set both the preset and initialize bits at the same time. Since the
+ far end transmitter is required to be initialized prior to starting link training, it is
+ not expected that software will need to set this bit. For diagnostic use only. */
+ uint64_t rxt_preset : 1; /**< [ 1: 1](R/W) For all link training, this bit determines how to configure the preset bit in the
+ coefficient update message that is sent to the far end transmitter. When set, a one time
+ request is made that the coefficients be set to a state where equalization is turned off.
+
+ To perform a preset, set this bit prior to link training. Link training needs to be
+ disabled to complete the request and get the rxtrain state machine back to idle. Note that
+ it is illegal to set both the preset and initialize bits at the same time. For diagnostic
+ use only. */
+ uint64_t rxt_swm : 1; /**< [ 2: 2](R/W) Set when RX BASE-R link training is to be performed under software control.
+
+ See GSER()_BR_RX()_EER[EXT_EER]. */
+ uint64_t rxt_adtmout_disable : 1; /**< [ 3: 3](R/W) For BASE-R links the terminating condition for link training receiver adaptation
+ is a 330 milliseconds time-out timer. When the receiver adaptation time-out timer
+ expires the receiver adaptation process is concluded and the link is considered good.
+ Note that when BASE-R link training is performed under software control,
+ (GSER()_BR_RX()_CTL[RXT_SWM] is set), the receiver adaptation time-out timer is disabled
+ and not used.
+
+ Set this bit to a one to disable the link training receiver adaptation time-out
+ timer during BASE-R link training under hardware control. For diagnostic use only. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gserx_br_rxx_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t reserved_3 : 1;
+ uint64_t rxt_swm : 1; /**< [ 2: 2](R/W) Set when RX BASE-R link training is to be performed under software control.
+
+ See GSER()_BR_RX()_EER[EXT_EER]. */
+ uint64_t rxt_preset : 1; /**< [ 1: 1](R/W) For all link training, this bit determines how to configure the preset bit in the
+ coefficient update message that is sent to the far end transmitter. When set, a one time
+ request is made that the coefficients be set to a state where equalization is turned off.
+
+ To perform a preset, set this bit prior to link training. Link training needs to be
+ disabled to complete the request and get the rxtrain state machine back to idle. Note that
+ it is illegal to set both the preset and initialize bits at the same time. For diagnostic
+ use only. */
+ uint64_t rxt_initialize : 1; /**< [ 0: 0](R/W) For all link training, this bit determines how to configure the initialize bit in the
+ coefficient update message that is sent to the far end transmitter of RX training. When
+ set, a request is made that the coefficients be set to its INITIALIZE state. To perform an
+ initialize prior to link training, set this bit prior to performing link training. Note
+ that it is illegal to set both the preset and initialize bits at the same time. Since the
+ far end transmitter is required to be initialized prior to starting link training, it is
+ not expected that software will need to set this bit. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxt_initialize : 1; /**< [ 0: 0](R/W) For all link training, this bit determines how to configure the initialize bit in the
+ coefficient update message that is sent to the far end transmitter of RX training. When
+ set, a request is made that the coefficients be set to its INITIALIZE state. To perform an
+ initialize prior to link training, set this bit prior to performing link training. Note
+ that it is illegal to set both the preset and initialize bits at the same time. Since the
+ far end transmitter is required to be initialized prior to starting link training, it is
+ not expected that software will need to set this bit. For diagnostic use only. */
+ uint64_t rxt_preset : 1; /**< [ 1: 1](R/W) For all link training, this bit determines how to configure the preset bit in the
+ coefficient update message that is sent to the far end transmitter. When set, a one time
+ request is made that the coefficients be set to a state where equalization is turned off.
+
+ To perform a preset, set this bit prior to link training. Link training needs to be
+ disabled to complete the request and get the rxtrain state machine back to idle. Note that
+ it is illegal to set both the preset and initialize bits at the same time. For diagnostic
+ use only. */
+ uint64_t rxt_swm : 1; /**< [ 2: 2](R/W) Set when RX BASE-R link training is to be performed under software control.
+
+ See GSER()_BR_RX()_EER[EXT_EER]. */
+ uint64_t reserved_3 : 1;
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_gserx_br_rxx_ctl_s cn81xx; */
+ /* struct bdk_gserx_br_rxx_ctl_s cn83xx; */
+ /* struct bdk_gserx_br_rxx_ctl_s cn88xxp2; */
+};
+typedef union bdk_gserx_br_rxx_ctl bdk_gserx_br_rxx_ctl_t;
+
+static inline uint64_t BDK_GSERX_BR_RXX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_BR_RXX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=3)))
+ return 0x87e090000400ll + 0x1000000ll * ((a) & 0x3) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090000400ll + 0x1000000ll * ((a) & 0x7) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090000400ll + 0x1000000ll * ((a) & 0xf) + 0x80ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_BR_RXX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_BR_RXX_CTL(a,b) bdk_gserx_br_rxx_ctl_t
+#define bustype_BDK_GSERX_BR_RXX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_BR_RXX_CTL(a,b) "GSERX_BR_RXX_CTL"
+#define device_bar_BDK_GSERX_BR_RXX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_BR_RXX_CTL(a,b) (a)
+#define arguments_BDK_GSERX_BR_RXX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_br_rx#_eer
+ *
+ * GSER Base-R RX Equalization Evaluation Request Register
+ * GSER software BASE-R RX link training equalization evaluation request (EER). A write to
+ * [RXT_EER] initiates a equalization request to the RAW PCS. A read of this register returns the
+ * equalization status message and a valid bit indicating it was updated. These registers are for
+ * diagnostic use only.
+ */
+union bdk_gserx_br_rxx_eer
+{
+ uint64_t u;
+ struct bdk_gserx_br_rxx_eer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t rxt_eer : 1; /**< [ 15: 15](WO) When RX BASE-R link training is being performed under software control,
+ (GSER()_BR_RX()_CTL[RXT_SWM] is set), writing this bit initiates an equalization
+ request to the RAW PCS. Reading this bit always returns a zero.
+
+ When auto-negotiated link training is not present and link speed \>= 5 Gbaud,
+ including XFI, receiver (only) equalization should be manually performed.
+ After GSER()_BR_RX()_CTL[RXT_SWM] is set, writing this CSR with
+ [RXT_EER]=1 initiates this manual equalization. The operation may take up to
+ 2 milliseconds, and then hardware sets [RXT_ESV]. The SerDes input should
+ be a pattern (something similar to the BASE-R training sequence, ideally)
+ during this receiver-only training. If DFE is to be disabled
+ (recommended for 5 Gbaud and below), do it prior to this receiver-only
+ initialization. (GSER()_LANE()_RX_VALBBD_CTRL_0, GSER()_LANE()_RX_VALBBD_CTRL_1,
+ and GSER()_LANE()_RX_VALBBD_CTRL_2 configure the DFE.) */
+ uint64_t rxt_esv : 1; /**< [ 14: 14](R/W) When performing an equalization request (RXT_EER), this bit, when set, indicates that the
+ Equalization Status (RXT_ESM) is valid. When issuing a RXT_EER request, it is expected
+ that RXT_ESV will get written to zero so that a valid RXT_ESM can be determined. */
+ uint64_t rxt_esm : 14; /**< [ 13: 0](RO) When performing an equalization request (RXT_EER), this is the equalization status message
+ from the RAW PCS. It is valid when RXT_ESV is set.
+
+ _ \<13:6\>: Figure of merit. An 8-bit output from the PHY indicating the quality of the
+ received data eye. A higher value indicates better link equalization, with 8'd0 indicating
+ worst equalization setting and 8'd255 indicating the best equalization setting.
+
+ _ \<5:4\>: RX recommended TXPOST direction change.
+
+ _ \<3:2\>: RX recommended TXMAIN direction change.
+
+ _ \<1:0\>: RX recommended TXPRE direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxt_esm : 14; /**< [ 13: 0](RO) When performing an equalization request (RXT_EER), this is the equalization status message
+ from the RAW PCS. It is valid when RXT_ESV is set.
+
+ _ \<13:6\>: Figure of merit. An 8-bit output from the PHY indicating the quality of the
+ received data eye. A higher value indicates better link equalization, with 8'd0 indicating
+ worst equalization setting and 8'd255 indicating the best equalization setting.
+
+ _ \<5:4\>: RX recommended TXPOST direction change.
+
+ _ \<3:2\>: RX recommended TXMAIN direction change.
+
+ _ \<1:0\>: RX recommended TXPRE direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_esv : 1; /**< [ 14: 14](R/W) When performing an equalization request (RXT_EER), this bit, when set, indicates that the
+ Equalization Status (RXT_ESM) is valid. When issuing a RXT_EER request, it is expected
+ that RXT_ESV will get written to zero so that a valid RXT_ESM can be determined. */
+ uint64_t rxt_eer : 1; /**< [ 15: 15](WO) When RX BASE-R link training is being performed under software control,
+ (GSER()_BR_RX()_CTL[RXT_SWM] is set), writing this bit initiates an equalization
+ request to the RAW PCS. Reading this bit always returns a zero.
+
+ When auto-negotiated link training is not present and link speed \>= 5 Gbaud,
+ including XFI, receiver (only) equalization should be manually performed.
+ After GSER()_BR_RX()_CTL[RXT_SWM] is set, writing this CSR with
+ [RXT_EER]=1 initiates this manual equalization. The operation may take up to
+ 2 milliseconds, and then hardware sets [RXT_ESV]. The SerDes input should
+ be a pattern (something similar to the BASE-R training sequence, ideally)
+ during this receiver-only training. If DFE is to be disabled
+ (recommended for 5 Gbaud and below), do it prior to this receiver-only
+ initialization. (GSER()_LANE()_RX_VALBBD_CTRL_0, GSER()_LANE()_RX_VALBBD_CTRL_1,
+ and GSER()_LANE()_RX_VALBBD_CTRL_2 configure the DFE.) */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_br_rxx_eer_s cn; */
+};
+typedef union bdk_gserx_br_rxx_eer bdk_gserx_br_rxx_eer_t;
+
+static inline uint64_t BDK_GSERX_BR_RXX_EER(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_BR_RXX_EER(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=3)))
+ return 0x87e090000418ll + 0x1000000ll * ((a) & 0x3) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090000418ll + 0x1000000ll * ((a) & 0x7) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090000418ll + 0x1000000ll * ((a) & 0xf) + 0x80ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_BR_RXX_EER", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_BR_RXX_EER(a,b) bdk_gserx_br_rxx_eer_t
+#define bustype_BDK_GSERX_BR_RXX_EER(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_BR_RXX_EER(a,b) "GSERX_BR_RXX_EER"
+#define device_bar_BDK_GSERX_BR_RXX_EER(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_BR_RXX_EER(a,b) (a)
+#define arguments_BDK_GSERX_BR_RXX_EER(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_br_tx#_ctl
+ *
+ * GSER Base-R TX Control Register
+ * Only bit 0 ([TXT_SWM]) is implemented; bits 63:1 are reserved.
+ */
+union bdk_gserx_br_txx_ctl
+{
+ uint64_t u;
+ struct bdk_gserx_br_txx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t txt_swm : 1; /**< [ 0: 0](R/W) Set when TX BASE-R link training is to be performed under software control. For diagnostic
+ use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t txt_swm : 1; /**< [ 0: 0](R/W) Set when TX BASE-R link training is to be performed under software control. For diagnostic
+ use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_br_txx_ctl_s cn; */
+};
+typedef union bdk_gserx_br_txx_ctl bdk_gserx_br_txx_ctl_t;
+
+/* Model-dependent physical address of GSER(a)_BR_TX(b)_CTL; fatal error via
+ * __bdk_csr_fatal() when (a,b) exceed the model's QLM/lane range. */
+static inline uint64_t BDK_GSERX_BR_TXX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_BR_TXX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=3)))
+ return 0x87e090000420ll + 0x1000000ll * ((a) & 0x3) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090000420ll + 0x1000000ll * ((a) & 0x7) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090000420ll + 0x1000000ll * ((a) & 0xf) + 0x80ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_BR_TXX_CTL", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata used by the BDK_CSR_* macro machinery. */
+#define typedef_BDK_GSERX_BR_TXX_CTL(a,b) bdk_gserx_br_txx_ctl_t
+#define bustype_BDK_GSERX_BR_TXX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_BR_TXX_CTL(a,b) "GSERX_BR_TXX_CTL"
+#define device_bar_BDK_GSERX_BR_TXX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_BR_TXX_CTL(a,b) (a)
+#define arguments_BDK_GSERX_BR_TXX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_br_tx#_cur
+ *
+ * GSER Base-R TX Coefficient Update Register
+ * Holds the 14-bit software-mode coefficient update ([TXT_CUR], bits 13:0);
+ * bits 63:14 are reserved.
+ */
+union bdk_gserx_br_txx_cur
+{
+ uint64_t u;
+ struct bdk_gserx_br_txx_cur_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t txt_cur : 14; /**< [ 13: 0](R/W) When TX BASE-R link training is being performed under software control,
+ (GSER()_BR_TX()_CTL[TXT_SWM] is set), this is the coefficient update to be written to the
+ PHY.
+ For diagnostic use only.
+ \<13:9\> = TX_POST\<4:0\>.
+ \<8:4\> = TX_SWING\<4:0\>.
+ \<3:0\> = TX_PRE\<3:0\>. */
+#else /* Word 0 - Little Endian */
+ uint64_t txt_cur : 14; /**< [ 13: 0](R/W) When TX BASE-R link training is being performed under software control,
+ (GSER()_BR_TX()_CTL[TXT_SWM] is set), this is the coefficient update to be written to the
+ PHY.
+ For diagnostic use only.
+ \<13:9\> = TX_POST\<4:0\>.
+ \<8:4\> = TX_SWING\<4:0\>.
+ \<3:0\> = TX_PRE\<3:0\>. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_br_txx_cur_s cn; */
+};
+typedef union bdk_gserx_br_txx_cur bdk_gserx_br_txx_cur_t;
+
+/* Model-dependent physical address of GSER(a)_BR_TX(b)_CUR; fatal error via
+ * __bdk_csr_fatal() when (a,b) exceed the model's QLM/lane range. */
+static inline uint64_t BDK_GSERX_BR_TXX_CUR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_BR_TXX_CUR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=3)))
+ return 0x87e090000438ll + 0x1000000ll * ((a) & 0x3) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090000438ll + 0x1000000ll * ((a) & 0x7) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090000438ll + 0x1000000ll * ((a) & 0xf) + 0x80ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_BR_TXX_CUR", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata used by the BDK_CSR_* macro machinery. */
+#define typedef_BDK_GSERX_BR_TXX_CUR(a,b) bdk_gserx_br_txx_cur_t
+#define bustype_BDK_GSERX_BR_TXX_CUR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_BR_TXX_CUR(a,b) "GSERX_BR_TXX_CUR"
+#define device_bar_BDK_GSERX_BR_TXX_CUR(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_BR_TXX_CUR(a,b) (a)
+#define arguments_BDK_GSERX_BR_TXX_CUR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_br_tx#_ini
+ *
+ * GSER Base-R TX Coefficient Tap Initialize Register
+ * GSER BASE-R link training TX taps equalization initialize value. When BASE-R hardware link
+ * training is enabled the transmitter
+ * equalizer taps (Pre/Swing/Post) are initialized with the values in this register. Also,
+ * during 10GBase-KR hardware link training if a
+ * coefficient update request message is received from the link partner with the initialize
+ * control bit set the local device transmitter
+ * taps (Pre/Swing/Post) will be updated with the values in this register.
+ * Layout: [TXT_POST_INIT]=13:9, [TXT_SWING_INIT]=8:4, [TXT_PRE_INIT]=3:0.
+ */
+union bdk_gserx_br_txx_ini
+{
+ uint64_t u;
+ struct bdk_gserx_br_txx_ini_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t txt_post_init : 5; /**< [ 13: 9](R/W/H) During TX BASE-R link training, the TX post-tap value that is used
+ when the initialize coefficients update is received. It is also the TX post-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_swing_init : 5; /**< [ 8: 4](R/W/H) During TX BASE-R link training, the TX swing-tap value that is used
+ when the initialize coefficients update is received. It is also the TX swing-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_pre_init : 4; /**< [ 3: 0](R/W/H) During TX BASE-R link training, the TX pre-tap value that is used
+ when the initialize coefficients update is received. It is also the TX pre-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t txt_pre_init : 4; /**< [ 3: 0](R/W/H) During TX BASE-R link training, the TX pre-tap value that is used
+ when the initialize coefficients update is received. It is also the TX pre-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_swing_init : 5; /**< [ 8: 4](R/W/H) During TX BASE-R link training, the TX swing-tap value that is used
+ when the initialize coefficients update is received. It is also the TX swing-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_post_init : 5; /**< [ 13: 9](R/W/H) During TX BASE-R link training, the TX post-tap value that is used
+ when the initialize coefficients update is received. It is also the TX post-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_br_txx_ini_s cn; */
+};
+typedef union bdk_gserx_br_txx_ini bdk_gserx_br_txx_ini_t;
+
+/* Model-dependent physical address of GSER(a)_BR_TX(b)_INI; fatal error via
+ * __bdk_csr_fatal() when (a,b) exceed the model's QLM/lane range.
+ * NOTE(review): unlike the neighboring BR_TX accessors there is no CN88XX
+ * case here — presumably this register does not exist on CN88XX; confirm
+ * against the CN88XX HRM. */
+static inline uint64_t BDK_GSERX_BR_TXX_INI(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_BR_TXX_INI(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=3)))
+ return 0x87e090000448ll + 0x1000000ll * ((a) & 0x3) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090000448ll + 0x1000000ll * ((a) & 0x7) + 0x80ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_BR_TXX_INI", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata used by the BDK_CSR_* macro machinery. */
+#define typedef_BDK_GSERX_BR_TXX_INI(a,b) bdk_gserx_br_txx_ini_t
+#define bustype_BDK_GSERX_BR_TXX_INI(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_BR_TXX_INI(a,b) "GSERX_BR_TXX_INI"
+#define device_bar_BDK_GSERX_BR_TXX_INI(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_BR_TXX_INI(a,b) (a)
+#define arguments_BDK_GSERX_BR_TXX_INI(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_br_tx#_tap
+ *
+ * GSER Base-R TX Coefficient Tap Register
+ * Read-only result taps after link training:
+ * [TXT_PRE]=13:10, [TXT_SWING]=9:5, [TXT_POST]=4:0.
+ */
+union bdk_gserx_br_txx_tap
+{
+ uint64_t u;
+ struct bdk_gserx_br_txx_tap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t txt_pre : 4; /**< [ 13: 10](RO/H) After TX BASE-R link training, this is the resultant PRE Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_swing : 5; /**< [ 9: 5](RO/H) After TX BASE-R link training, this is the resultant SWING Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_post : 5; /**< [ 4: 0](RO/H) After TX BASE-R link training, this is the resultant POST Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t txt_post : 5; /**< [ 4: 0](RO/H) After TX BASE-R link training, this is the resultant POST Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_swing : 5; /**< [ 9: 5](RO/H) After TX BASE-R link training, this is the resultant SWING Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_pre : 4; /**< [ 13: 10](RO/H) After TX BASE-R link training, this is the resultant PRE Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_br_txx_tap_s cn; */
+};
+typedef union bdk_gserx_br_txx_tap bdk_gserx_br_txx_tap_t;
+
+/* Model-dependent physical address of GSER(a)_BR_TX(b)_TAP; fatal error via
+ * __bdk_csr_fatal() when (a,b) exceed the model's QLM/lane range. */
+static inline uint64_t BDK_GSERX_BR_TXX_TAP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_BR_TXX_TAP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=3)))
+ return 0x87e090000440ll + 0x1000000ll * ((a) & 0x3) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090000440ll + 0x1000000ll * ((a) & 0x7) + 0x80ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090000440ll + 0x1000000ll * ((a) & 0xf) + 0x80ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_BR_TXX_TAP", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata used by the BDK_CSR_* macro machinery. */
+#define typedef_BDK_GSERX_BR_TXX_TAP(a,b) bdk_gserx_br_txx_tap_t
+#define bustype_BDK_GSERX_BR_TXX_TAP(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_BR_TXX_TAP(a,b) "GSERX_BR_TXX_TAP"
+#define device_bar_BDK_GSERX_BR_TXX_TAP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_BR_TXX_TAP(a,b) (a)
+#define arguments_BDK_GSERX_BR_TXX_TAP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_cfg
+ *
+ * GSER Configuration Register
+ * Selects the QLM/DLM protocol mode: [PCIE]=0, [ILA]=1 (reserved), [BGX]=2,
+ * [BGX_DUAL]=3, [BGX_QUAD]=4, [SATA]=5. The per-model variants below
+ * (cn81xx/cn88xx/cn83xx) have identical bit positions and differ only in
+ * their field descriptions.
+ */
+union bdk_gserx_cfg
+{
+ uint64_t u;
+ struct bdk_gserx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t sata : 1; /**< [ 5: 5](R/W) When set, indicates the GSER is configured for SATA mode. [SATA] must not be set
+ when either of [BGX,PCIE] are set. */
+ uint64_t bgx_quad : 1; /**< [ 4: 4](R/W) When set, indicates the QLM is in BGX quad aggregation mode. [BGX_QUAD] must only be
+ set when [BGX] is set and [BGX_DUAL] is clear.
+
+ When [BGX_QUAD] is set, GSER bundles all four lanes for one BGX controller.
+ [BGX_QUAD] must only be set for the XAUI/DXAUI protocols.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_dual : 1; /**< [ 3: 3](R/W) When set, indicates the QLM is in BGX dual aggregation mode. [BGX_DUAL] must only be
+ set when [BGX] is also set and [BGX_QUAD] is clear.
+
+ When [BGX_DUAL] is set, GSER bundles lanes 0 and 1 for one BGX controller and bundles
+ lanes 2 and 3 for another BGX controller. [BGX_DUAL] must only be set for the RXAUI
+ protocol.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx : 1; /**< [ 2: 2](R/W) When set, indicates the GSER is configured for BGX mode. [BGX] must not be set
+ when either of [PCIE,SATA] are set.
+
+ When [BGX] is set and both [BGX_DUAL,BGX_QUAD] are clear, GSER exposes each lane to an
+ independent BGX controller.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t ila : 1; /**< [ 1: 1](R/W) Reserved. */
+ uint64_t pcie : 1; /**< [ 0: 0](R/W/H) When set, indicates the GSER is configured for PCIE mode. [PCIE] must not be
+ set when either of [BGX,SATA] is set.
+
+ Internal:
+ Not used in CCPI QLMs. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie : 1; /**< [ 0: 0](R/W/H) When set, indicates the GSER is configured for PCIE mode. [PCIE] must not be
+ set when either of [BGX,SATA] is set.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t ila : 1; /**< [ 1: 1](R/W) Reserved. */
+ uint64_t bgx : 1; /**< [ 2: 2](R/W) When set, indicates the GSER is configured for BGX mode. [BGX] must not be set
+ when either of [PCIE,SATA] are set.
+
+ When [BGX] is set and both [BGX_DUAL,BGX_QUAD] are clear, GSER exposes each lane to an
+ independent BGX controller.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_dual : 1; /**< [ 3: 3](R/W) When set, indicates the QLM is in BGX dual aggregation mode. [BGX_DUAL] must only be
+ set when [BGX] is also set and [BGX_QUAD] is clear.
+
+ When [BGX_DUAL] is set, GSER bundles lanes 0 and 1 for one BGX controller and bundles
+ lanes 2 and 3 for another BGX controller. [BGX_DUAL] must only be set for the RXAUI
+ protocol.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_quad : 1; /**< [ 4: 4](R/W) When set, indicates the QLM is in BGX quad aggregation mode. [BGX_QUAD] must only be
+ set when [BGX] is set and [BGX_DUAL] is clear.
+
+ When [BGX_QUAD] is set, GSER bundles all four lanes for one BGX controller.
+ [BGX_QUAD] must only be set for the XAUI/DXAUI protocols.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t sata : 1; /**< [ 5: 5](R/W) When set, indicates the GSER is configured for SATA mode. [SATA] must not be set
+ when either of [BGX,PCIE] are set. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gserx_cfg_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t sata : 1; /**< [ 5: 5](R/W) When set, indicates the GSER is configured for SATA mode. [SATA] must not be set
+ when either of [BGX,PCIE] are set. [SATA] must only be set for DLM3 (i.e. GSER3). */
+ uint64_t bgx_quad : 1; /**< [ 4: 4](R/W) When set, indicates the QLM is in BGX quad aggregation mode. [BGX_QUAD] must only be
+ set when [BGX] is set and [BGX_DUAL] is clear.
+
+ When [BGX_QUAD] is set, GSER bundles all four lanes for one BGX controller.
+ [BGX_QUAD] must only be set for the XAUI/DXAUI protocols.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_dual : 1; /**< [ 3: 3](R/W) When set, indicates the QLM is in BGX dual aggregation mode. [BGX_DUAL] must only be
+ set when [BGX] is also set and [BGX_QUAD] is clear.
+
+ When [BGX_DUAL] is set, GSER bundles lanes 0 and 1 for one BGX controller and bundles
+ lanes 2 and 3 for another BGX controller. [BGX_DUAL] must only be set for the RXAUI
+ protocol.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx : 1; /**< [ 2: 2](R/W) When set, indicates the GSER is configured for BGX mode. [BGX] must not be set
+ when either of [PCIE,SATA] are set.
+
+ When [BGX] is set and both [BGX_DUAL,BGX_QUAD] are clear, GSER exposes each lane to an
+ independent BGX controller.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t ila : 1; /**< [ 1: 1](R/W) Reserved. */
+ uint64_t pcie : 1; /**< [ 0: 0](R/W/H) When set, indicates the GSER is configured for PCIE mode. [PCIE] must not be
+ set when either of [BGX,SATA] is set.
+
+ Internal:
+ Not used in CCPI QLMs. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie : 1; /**< [ 0: 0](R/W/H) When set, indicates the GSER is configured for PCIE mode. [PCIE] must not be
+ set when either of [BGX,SATA] is set.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t ila : 1; /**< [ 1: 1](R/W) Reserved. */
+ uint64_t bgx : 1; /**< [ 2: 2](R/W) When set, indicates the GSER is configured for BGX mode. [BGX] must not be set
+ when either of [PCIE,SATA] are set.
+
+ When [BGX] is set and both [BGX_DUAL,BGX_QUAD] are clear, GSER exposes each lane to an
+ independent BGX controller.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_dual : 1; /**< [ 3: 3](R/W) When set, indicates the QLM is in BGX dual aggregation mode. [BGX_DUAL] must only be
+ set when [BGX] is also set and [BGX_QUAD] is clear.
+
+ When [BGX_DUAL] is set, GSER bundles lanes 0 and 1 for one BGX controller and bundles
+ lanes 2 and 3 for another BGX controller. [BGX_DUAL] must only be set for the RXAUI
+ protocol.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_quad : 1; /**< [ 4: 4](R/W) When set, indicates the QLM is in BGX quad aggregation mode. [BGX_QUAD] must only be
+ set when [BGX] is set and [BGX_DUAL] is clear.
+
+ When [BGX_QUAD] is set, GSER bundles all four lanes for one BGX controller.
+ [BGX_QUAD] must only be set for the XAUI/DXAUI protocols.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t sata : 1; /**< [ 5: 5](R/W) When set, indicates the GSER is configured for SATA mode. [SATA] must not be set
+ when either of [BGX,PCIE] are set. [SATA] must only be set for DLM3 (i.e. GSER3). */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_gserx_cfg_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t sata : 1; /**< [ 5: 5](R/W) When set, indicates the GSER is configured for SATA mode. [SATA] must not be set
+ when either of [BGX,PCIE] are set. */
+ uint64_t bgx_quad : 1; /**< [ 4: 4](R/W) When set, indicates the QLM is in BGX quad aggregation mode. [BGX_QUAD] must only be
+ set when [BGX] is set and [BGX_DUAL] is clear.
+
+ When [BGX_QUAD] is set, GSER bundles all four lanes for one BGX controller.
+ [BGX_QUAD] must only be set for the XAUI/DXAUI and XLAUI protocols.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_dual : 1; /**< [ 3: 3](R/W) When set, indicates the QLM is in BGX dual aggregation mode. [BGX_DUAL] must only be
+ set when [BGX] is also set and [BGX_QUAD] is clear.
+
+ When [BGX_DUAL] is set, GSER bundles lanes 0 and 1 for one BGX controller and bundles
+ lanes 2 and 3 for another BGX controller. [BGX_DUAL] must only be set for the RXAUI
+ protocol.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx : 1; /**< [ 2: 2](R/W) When set, indicates the GSER is configured for BGX mode. [BGX] must not be set
+ when either of [PCIE,SATA] are set. For CCPI links, [BGX] must be clear.
+
+ When [BGX] is set and both [BGX_DUAL,BGX_QUAD] are clear, GSER exposes each lane to an
+ independent BGX controller.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t ila : 1; /**< [ 1: 1](R/W) Reserved. */
+ uint64_t pcie : 1; /**< [ 0: 0](R/W/H) When set, indicates the GSER is configured for PCIE mode. [PCIE] must not be
+ set when either of [BGX,SATA] is set. For CCPI QLMs, [PCIE] must be clear.
+
+ Internal:
+ Not used in CCPI QLMs. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie : 1; /**< [ 0: 0](R/W/H) When set, indicates the GSER is configured for PCIE mode. [PCIE] must not be
+ set when either of [BGX,SATA] is set. For CCPI QLMs, [PCIE] must be clear.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t ila : 1; /**< [ 1: 1](R/W) Reserved. */
+ uint64_t bgx : 1; /**< [ 2: 2](R/W) When set, indicates the GSER is configured for BGX mode. [BGX] must not be set
+ when either of [PCIE,SATA] are set. For CCPI links, [BGX] must be clear.
+
+ When [BGX] is set and both [BGX_DUAL,BGX_QUAD] are clear, GSER exposes each lane to an
+ independent BGX controller.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_dual : 1; /**< [ 3: 3](R/W) When set, indicates the QLM is in BGX dual aggregation mode. [BGX_DUAL] must only be
+ set when [BGX] is also set and [BGX_QUAD] is clear.
+
+ When [BGX_DUAL] is set, GSER bundles lanes 0 and 1 for one BGX controller and bundles
+ lanes 2 and 3 for another BGX controller. [BGX_DUAL] must only be set for the RXAUI
+ protocol.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t bgx_quad : 1; /**< [ 4: 4](R/W) When set, indicates the QLM is in BGX quad aggregation mode. [BGX_QUAD] must only be
+ set when [BGX] is set and [BGX_DUAL] is clear.
+
+ When [BGX_QUAD] is set, GSER bundles all four lanes for one BGX controller.
+ [BGX_QUAD] must only be set for the XAUI/DXAUI and XLAUI protocols.
+
+ Internal:
+ Not used in CCPI QLMs. */
+ uint64_t sata : 1; /**< [ 5: 5](R/W) When set, indicates the GSER is configured for SATA mode. [SATA] must not be set
+ when either of [BGX,PCIE] are set. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_cfg_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t sata : 1; /**< [ 5: 5](R/W) When set, indicates the GSER is configured for SATA mode. [SATA] must not be set
+ when either of [BGX,PCIE] are set. */
+ uint64_t bgx_quad : 1; /**< [ 4: 4](R/W) When set, indicates the QLM is in BGX quad aggregation mode. [BGX_QUAD] must only be
+ set when [BGX] is set and [BGX_DUAL] is clear.
+
+ When [BGX_QUAD] is set, GSER bundles all four lanes for one BGX controller.
+ [BGX_QUAD] must only be set for the XAUI/DXAUI and XLAUI protocols.
+
+ Internal:
+ There is hardware to pair DLM 5 and 6 together when [BGX_QUAD] is set in DLM5.
+ But we currently do not support XAUI/DXAUI/XLAUI on DLM's. */
+ uint64_t bgx_dual : 1; /**< [ 3: 3](R/W) When set, indicates the QLM is in BGX dual aggregation mode. [BGX_DUAL] must only be
+ set when [BGX] is also set and [BGX_QUAD] is clear.
+
+ When [BGX_DUAL] is set, GSER bundles lanes 0 and 1 for one BGX controller and bundles
+ lanes 2 and 3 for another BGX controller. [BGX_DUAL] must only be set for the RXAUI
+ protocol.
+
+ [BGX_DUAL] must not be set in a DLM.
+
+ Internal:
+ [BGX_DUAL] should work in a DLM (lanes 0 and 1 bundled for one BGX controller), but
+ we currently do not support RXAUI in a DLM. */
+ uint64_t bgx : 1; /**< [ 2: 2](R/W) When set, indicates the GSER is configured for BGX mode. [BGX] must not be set
+ when either of [PCIE,SATA] are set.
+
+ When [BGX] is set and both [BGX_DUAL,BGX_QUAD] are clear, GSER exposes each lane to an
+ independent BGX controller. */
+ uint64_t ila : 1; /**< [ 1: 1](R/W) Reserved. */
+ uint64_t pcie : 1; /**< [ 0: 0](R/W/H) When set, indicates the GSER is configured for PCIE mode. [PCIE] must not be
+ set when either of [BGX,SATA] is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie : 1; /**< [ 0: 0](R/W/H) When set, indicates the GSER is configured for PCIE mode. [PCIE] must not be
+ set when either of [BGX,SATA] is set. */
+ uint64_t ila : 1; /**< [ 1: 1](R/W) Reserved. */
+ uint64_t bgx : 1; /**< [ 2: 2](R/W) When set, indicates the GSER is configured for BGX mode. [BGX] must not be set
+ when either of [PCIE,SATA] are set.
+
+ When [BGX] is set and both [BGX_DUAL,BGX_QUAD] are clear, GSER exposes each lane to an
+ independent BGX controller. */
+ uint64_t bgx_dual : 1; /**< [ 3: 3](R/W) When set, indicates the QLM is in BGX dual aggregation mode. [BGX_DUAL] must only be
+ set when [BGX] is also set and [BGX_QUAD] is clear.
+
+ When [BGX_DUAL] is set, GSER bundles lanes 0 and 1 for one BGX controller and bundles
+ lanes 2 and 3 for another BGX controller. [BGX_DUAL] must only be set for the RXAUI
+ protocol.
+
+ [BGX_DUAL] must not be set in a DLM.
+
+ Internal:
+ [BGX_DUAL] should work in a DLM (lanes 0 and 1 bundled for one BGX controller), but
+ we currently do not support RXAUI in a DLM. */
+ uint64_t bgx_quad : 1; /**< [ 4: 4](R/W) When set, indicates the QLM is in BGX quad aggregation mode. [BGX_QUAD] must only be
+ set when [BGX] is set and [BGX_DUAL] is clear.
+
+ When [BGX_QUAD] is set, GSER bundles all four lanes for one BGX controller.
+ [BGX_QUAD] must only be set for the XAUI/DXAUI and XLAUI protocols.
+
+ Internal:
+ There is hardware to pair DLM 5 and 6 together when [BGX_QUAD] is set in DLM5.
+ But we currently do not support XAUI/DXAUI/XLAUI on DLM's. */
+ uint64_t sata : 1; /**< [ 5: 5](R/W) When set, indicates the GSER is configured for SATA mode. [SATA] must not be set
+ when either of [BGX,PCIE] are set. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_cfg bdk_gserx_cfg_t;
+
+/* Model-dependent physical address of GSER(a)_CFG; fatal error via
+ * __bdk_csr_fatal() when the QLM index a exceeds the model's range. */
+static inline uint64_t BDK_GSERX_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000080ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000080ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000080ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_CFG", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata used by the BDK_CSR_* macro machinery. */
+#define typedef_BDK_GSERX_CFG(a) bdk_gserx_cfg_t
+#define bustype_BDK_GSERX_CFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_CFG(a) "GSERX_CFG"
+#define device_bar_BDK_GSERX_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_CFG(a) (a)
+#define arguments_BDK_GSERX_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_dbg
+ *
+ * GSER Debug Control Register
+ * Only bit 0 ([RXQTM_ON]) is implemented; bits 63:1 are reserved.
+ */
+union bdk_gserx_dbg
+{
+ uint64_t u;
+ struct bdk_gserx_dbg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t rxqtm_on : 1; /**< [ 0: 0](R/W) For non-BGX configurations, setting this bit enables the RX FIFOs. This allows
+ received data to become visible to the RSL debug port. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxqtm_on : 1; /**< [ 0: 0](R/W) For non-BGX configurations, setting this bit enables the RX FIFOs. This allows
+ received data to become visible to the RSL debug port. For diagnostic use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_dbg_s cn; */
+};
+typedef union bdk_gserx_dbg bdk_gserx_dbg_t;
+
+/* Model-dependent physical address of GSER(a)_DBG; fatal error via
+ * __bdk_csr_fatal() when the QLM index a exceeds the model's range. */
+static inline uint64_t BDK_GSERX_DBG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_DBG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000098ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000098ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000098ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_DBG", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata used by the BDK_CSR_* macro machinery. */
+#define typedef_BDK_GSERX_DBG(a) bdk_gserx_dbg_t
+#define bustype_BDK_GSERX_DBG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_DBG(a) "GSERX_DBG"
+#define device_bar_BDK_GSERX_DBG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_DBG(a) (a)
+#define arguments_BDK_GSERX_DBG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_eq_wait_time
+ *
+ * GSER TX and RX Equalization Wait Times Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ * Layout: [RXEQ_WAIT_CNT]=7:4, [TXEQ_WAIT_CNT]=3:0.
+ */
+union bdk_gserx_eq_wait_time
+{
+ uint64_t u;
+ struct bdk_gserx_eq_wait_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t rxeq_wait_cnt : 4; /**< [ 7: 4](R/W) Determines the wait time after VMA RX-EQ completes and before sampling
+ tap1 and starting the precorrelation check. */
+ uint64_t txeq_wait_cnt : 4; /**< [ 3: 0](R/W) Determines the wait time from applying the TX-EQ controls (swing/pre/post)
+ to the sampling of the sds_pcs_tx_comp_out. */
+#else /* Word 0 - Little Endian */
+ uint64_t txeq_wait_cnt : 4; /**< [ 3: 0](R/W) Determines the wait time from applying the TX-EQ controls (swing/pre/post)
+ to the sampling of the sds_pcs_tx_comp_out. */
+ uint64_t rxeq_wait_cnt : 4; /**< [ 7: 4](R/W) Determines the wait time after VMA RX-EQ completes and before sampling
+ tap1 and starting the precorrelation check. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_eq_wait_time_s cn; */
+};
+typedef union bdk_gserx_eq_wait_time bdk_gserx_eq_wait_time_t;
+
+/* Model-dependent physical address of GSER(a)_EQ_WAIT_TIME; fatal error via
+ * __bdk_csr_fatal() when the QLM index a exceeds the model's range. */
+static inline uint64_t BDK_GSERX_EQ_WAIT_TIME(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_EQ_WAIT_TIME(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0904e0000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0904e0000ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0904e0000ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_EQ_WAIT_TIME", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata used by the BDK_CSR_* macro machinery. */
+#define typedef_BDK_GSERX_EQ_WAIT_TIME(a) bdk_gserx_eq_wait_time_t
+#define bustype_BDK_GSERX_EQ_WAIT_TIME(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_EQ_WAIT_TIME(a) "GSERX_EQ_WAIT_TIME"
+#define device_bar_BDK_GSERX_EQ_WAIT_TIME(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_EQ_WAIT_TIME(a) (a)
+#define arguments_BDK_GSERX_EQ_WAIT_TIME(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_glbl_misc_config_1
+ *
+ * GSER Global Miscellaneous Configuration 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ * Analog trim/override controls occupy bits 9:0; bits 63:10 are reserved.
+ */
+union bdk_gserx_glbl_misc_config_1
+{
+ uint64_t u;
+ struct bdk_gserx_glbl_misc_config_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t pcs_sds_vref_tr : 4; /**< [ 9: 6](R/W) Trim the BGR (band gap reference) reference (all external and internal currents
+ are affected).
+ For diagnostic use only. */
+ uint64_t pcs_sds_trim_chp_reg : 2; /**< [ 5: 4](R/W) Trim current going to CML-CMOS stage at output of VCO.
+ For diagnostic use only. */
+ uint64_t pcs_sds_vco_reg_tr : 2; /**< [ 3: 2](R/W) Trims regulator voltage.
+ For diagnostic use only. */
+ uint64_t pcs_sds_cvbg_en : 1; /**< [ 1: 1](R/W) Forces 0.6 V from VDDHV onto VBG node.
+ For diagnostic use only. */
+ uint64_t pcs_sds_extvbg_en : 1; /**< [ 0: 0](R/W) Force external VBG through AMON pin in TMA5 mode.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_extvbg_en : 1; /**< [ 0: 0](R/W) Force external VBG through AMON pin in TMA5 mode.
+ For diagnostic use only. */
+ uint64_t pcs_sds_cvbg_en : 1; /**< [ 1: 1](R/W) Forces 0.6 V from VDDHV onto VBG node.
+ For diagnostic use only. */
+ uint64_t pcs_sds_vco_reg_tr : 2; /**< [ 3: 2](R/W) Trims regulator voltage.
+ For diagnostic use only. */
+ uint64_t pcs_sds_trim_chp_reg : 2; /**< [ 5: 4](R/W) Trim current going to CML-CMOS stage at output of VCO.
+ For diagnostic use only. */
+ uint64_t pcs_sds_vref_tr : 4; /**< [ 9: 6](R/W) Trim the BGR (band gap reference) reference (all external and internal currents
+ are affected).
+ For diagnostic use only. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_glbl_misc_config_1_s cn; */
+};
+typedef union bdk_gserx_glbl_misc_config_1 bdk_gserx_glbl_misc_config_1_t;
+
+/* Model-dependent physical address of GSER(a)_GLBL_MISC_CONFIG_1; fatal error
+ * via __bdk_csr_fatal() when the QLM index a exceeds the model's range. */
+static inline uint64_t BDK_GSERX_GLBL_MISC_CONFIG_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_GLBL_MISC_CONFIG_1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460030ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460030ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460030ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_GLBL_MISC_CONFIG_1", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata used by the BDK_CSR_* macro machinery. */
+#define typedef_BDK_GSERX_GLBL_MISC_CONFIG_1(a) bdk_gserx_glbl_misc_config_1_t
+#define bustype_BDK_GSERX_GLBL_MISC_CONFIG_1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_GLBL_MISC_CONFIG_1(a) "GSERX_GLBL_MISC_CONFIG_1"
+#define device_bar_BDK_GSERX_GLBL_MISC_CONFIG_1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_GLBL_MISC_CONFIG_1(a) (a)
+#define arguments_BDK_GSERX_GLBL_MISC_CONFIG_1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_glbl_pll_cfg_0
+ *
+ * GSER Global PLL Configuration 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
union bdk_gserx_glbl_pll_cfg_0
{
    uint64_t u; /* Whole-register view. */
    /* NOTE: bitfield order encodes hardware bit positions (mirrored for each
       endianness below); do not reorder fields. */
    struct bdk_gserx_glbl_pll_cfg_0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_14_63 : 50;
        uint64_t pcs_sds_pll_vco_reset_b : 1;/**< [ 13: 13](R/W) VCO reset, active low.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_strt_cal_b : 1; /**< [ 12: 12](R/W) Start PLL calibration, active low.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_cripple : 1;    /**< [ 11: 11](R/W) Ripple capacitor tuning.
                                                 For diagnostic use only. */
        uint64_t reserved_8_10 : 3;
        uint64_t pcs_sds_pll_fthresh : 2;    /**< [  7:  6](R/W/H) PLL frequency comparison threshold.
                                                 For diagnostic use only. */
        uint64_t reserved_0_5 : 6;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0_5 : 6;
        uint64_t pcs_sds_pll_fthresh : 2;    /**< [  7:  6](R/W/H) PLL frequency comparison threshold.
                                                 For diagnostic use only. */
        uint64_t reserved_8_10 : 3;
        uint64_t pcs_sds_pll_cripple : 1;    /**< [ 11: 11](R/W) Ripple capacitor tuning.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_strt_cal_b : 1; /**< [ 12: 12](R/W) Start PLL calibration, active low.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_vco_reset_b : 1;/**< [ 13: 13](R/W) VCO reset, active low.
                                                 For diagnostic use only. */
        uint64_t reserved_14_63 : 50;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gserx_glbl_pll_cfg_0_s cn; */
};
typedef union bdk_gserx_glbl_pll_cfg_0 bdk_gserx_glbl_pll_cfg_0_t;
+
+static inline uint64_t BDK_GSERX_GLBL_PLL_CFG_0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_GLBL_PLL_CFG_0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460000ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460000ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_GLBL_PLL_CFG_0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_GLBL_PLL_CFG_0(a) bdk_gserx_glbl_pll_cfg_0_t
+#define bustype_BDK_GSERX_GLBL_PLL_CFG_0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_GLBL_PLL_CFG_0(a) "GSERX_GLBL_PLL_CFG_0"
+#define device_bar_BDK_GSERX_GLBL_PLL_CFG_0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_GLBL_PLL_CFG_0(a) (a)
+#define arguments_BDK_GSERX_GLBL_PLL_CFG_0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_glbl_pll_cfg_1
+ *
+ * GSER Global PLL Configuration 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
union bdk_gserx_glbl_pll_cfg_1
{
    uint64_t u; /* Whole-register view. */
    /* NOTE: bitfield order encodes hardware bit positions (mirrored for each
       endianness below); do not reorder fields. */
    struct bdk_gserx_glbl_pll_cfg_1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_10_63 : 54;
        uint64_t cfg_pll_ctrl_en : 1;        /**< [  9:  9](R/W) PLL reset control enable.
                                                 0 = PLL RESETs/cal start are not active.
                                                 1 = All PLL RESETs/cal start are enabled.

                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_calmode : 3;    /**< [  8:  6](R/W) PLL calibration mode.
                                                 0 = Force PLL loop into calibration mode.
                                                 1 = Normal operation.

                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_cal_ovrd_en : 1;/**< [  5:  5](R/W) Manual PLL coarse calibration override enable.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_cal_ovrd : 5;   /**< [  4:  0](R/W) Manual PLL coarse calibration override value.
                                                 For diagnostic use only. */
#else /* Word 0 - Little Endian */
        uint64_t pcs_sds_pll_cal_ovrd : 5;   /**< [  4:  0](R/W) Manual PLL coarse calibration override value.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_cal_ovrd_en : 1;/**< [  5:  5](R/W) Manual PLL coarse calibration override enable.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_calmode : 3;    /**< [  8:  6](R/W) PLL calibration mode.
                                                 0 = Force PLL loop into calibration mode.
                                                 1 = Normal operation.

                                                 For diagnostic use only. */
        uint64_t cfg_pll_ctrl_en : 1;        /**< [  9:  9](R/W) PLL reset control enable.
                                                 0 = PLL RESETs/cal start are not active.
                                                 1 = All PLL RESETs/cal start are enabled.

                                                 For diagnostic use only. */
        uint64_t reserved_10_63 : 54;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gserx_glbl_pll_cfg_1_s cn; */
};
typedef union bdk_gserx_glbl_pll_cfg_1 bdk_gserx_glbl_pll_cfg_1_t;
+
+static inline uint64_t BDK_GSERX_GLBL_PLL_CFG_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_GLBL_PLL_CFG_1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460008ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460008ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460008ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_GLBL_PLL_CFG_1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_GLBL_PLL_CFG_1(a) bdk_gserx_glbl_pll_cfg_1_t
+#define bustype_BDK_GSERX_GLBL_PLL_CFG_1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_GLBL_PLL_CFG_1(a) "GSERX_GLBL_PLL_CFG_1"
+#define device_bar_BDK_GSERX_GLBL_PLL_CFG_1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_GLBL_PLL_CFG_1(a) (a)
+#define arguments_BDK_GSERX_GLBL_PLL_CFG_1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_glbl_pll_cfg_2
+ *
+ * GSER Global PLL Configuration 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
union bdk_gserx_glbl_pll_cfg_2
{
    uint64_t u; /* Whole-register view. */
    /* NOTE: bitfield order encodes hardware bit positions (mirrored for each
       endianness below); do not reorder fields. */
    struct bdk_gserx_glbl_pll_cfg_2_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_15_63 : 49;
        uint64_t pll_div_ovrrd_en : 1;       /**< [ 14: 14](R/W) Override global power state machine and mac_pcs_pll_div control signal.
                                                 When asserted, pcs_sds_pll_div is specified from
                                                 GSER()_LANE()_PCS_PLL_CTLIFC_0[PLL_DIV_OVRRD_VAL],
                                                 global power state machine and mac_pcs_pll_div control signals are ignored.
                                                 For diagnostic use only. */
        uint64_t reserved_10_13 : 4;
        uint64_t pcs_sds_pll_lock_override : 1;/**< [  9:  9](R/W) Not used.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_counter_resetn : 1;/**< [  8:  8](R/W) Not used.
                                                 For diagnostic use only. */
        uint64_t pll_sdsck_pd_ovrrd_val : 1; /**< [  7:  7](R/W) Clock tree powerdown override value.
                                                 For diagnostic use only. */
        uint64_t pll_sdsck_pd_ovrrd_en : 1;  /**< [  6:  6](R/W) Clock tree powerdown override enable.
                                                 For diagnostic use only. */
        uint64_t pll_pd_ovrrd_val : 1;       /**< [  5:  5](R/W) PLL powerdown override value.
                                                 For diagnostic use only. */
        uint64_t pll_pd_ovrrd_en : 1;        /**< [  4:  4](R/W) When asserted, overrides PLL powerdown from state machine.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_div5_byp : 1;   /**< [  3:  3](R/W) Not used.
                                                 For diagnostic use only. */
        uint64_t pll_band_sel_ovrrd_val : 1; /**< [  2:  2](R/W) State machine override value for VCO band select.
                                                 0 = Low band VCO0 (RO-VCO).
                                                 1 = High band VCO1 (LC-VCO).

                                                 For diagnostic use only. */
        uint64_t pll_band_sel_ovrrd_en : 1;  /**< [  1:  1](R/W) PLL band select override enable.
                                                 For diagnostic use only. */
        uint64_t pll_pcs_div_ovrrd_en : 1;   /**< [  0:  0](R/W) Override global power state machine and mac_pcs_pll_div control signal.
                                                 When asserted, pcs_sds_pll_div is specified from
                                                 GSER()_LANE()_PCS_PLL_CTLIFC_1[PLL_PCS_DIV_OVRRD_VAL],
                                                 global power state machine and mac_pcs_pll_div control signals are ignored.
                                                 For diagnostic use only. */
#else /* Word 0 - Little Endian */
        uint64_t pll_pcs_div_ovrrd_en : 1;   /**< [  0:  0](R/W) Override global power state machine and mac_pcs_pll_div control signal.
                                                 When asserted, pcs_sds_pll_div is specified from
                                                 GSER()_LANE()_PCS_PLL_CTLIFC_1[PLL_PCS_DIV_OVRRD_VAL],
                                                 global power state machine and mac_pcs_pll_div control signals are ignored.
                                                 For diagnostic use only. */
        uint64_t pll_band_sel_ovrrd_en : 1;  /**< [  1:  1](R/W) PLL band select override enable.
                                                 For diagnostic use only. */
        uint64_t pll_band_sel_ovrrd_val : 1; /**< [  2:  2](R/W) State machine override value for VCO band select.
                                                 0 = Low band VCO0 (RO-VCO).
                                                 1 = High band VCO1 (LC-VCO).

                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_div5_byp : 1;   /**< [  3:  3](R/W) Not used.
                                                 For diagnostic use only. */
        uint64_t pll_pd_ovrrd_en : 1;        /**< [  4:  4](R/W) When asserted, overrides PLL powerdown from state machine.
                                                 For diagnostic use only. */
        uint64_t pll_pd_ovrrd_val : 1;       /**< [  5:  5](R/W) PLL powerdown override value.
                                                 For diagnostic use only. */
        uint64_t pll_sdsck_pd_ovrrd_en : 1;  /**< [  6:  6](R/W) Clock tree powerdown override enable.
                                                 For diagnostic use only. */
        uint64_t pll_sdsck_pd_ovrrd_val : 1; /**< [  7:  7](R/W) Clock tree powerdown override value.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_counter_resetn : 1;/**< [  8:  8](R/W) Not used.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_lock_override : 1;/**< [  9:  9](R/W) Not used.
                                                 For diagnostic use only. */
        uint64_t reserved_10_13 : 4;
        uint64_t pll_div_ovrrd_en : 1;       /**< [ 14: 14](R/W) Override global power state machine and mac_pcs_pll_div control signal.
                                                 When asserted, pcs_sds_pll_div is specified from
                                                 GSER()_LANE()_PCS_PLL_CTLIFC_0[PLL_DIV_OVRRD_VAL],
                                                 global power state machine and mac_pcs_pll_div control signals are ignored.
                                                 For diagnostic use only. */
        uint64_t reserved_15_63 : 49;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gserx_glbl_pll_cfg_2_s cn; */
};
typedef union bdk_gserx_glbl_pll_cfg_2 bdk_gserx_glbl_pll_cfg_2_t;
+
+static inline uint64_t BDK_GSERX_GLBL_PLL_CFG_2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_GLBL_PLL_CFG_2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460010ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460010ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460010ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_GLBL_PLL_CFG_2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_GLBL_PLL_CFG_2(a) bdk_gserx_glbl_pll_cfg_2_t
+#define bustype_BDK_GSERX_GLBL_PLL_CFG_2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_GLBL_PLL_CFG_2(a) "GSERX_GLBL_PLL_CFG_2"
+#define device_bar_BDK_GSERX_GLBL_PLL_CFG_2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_GLBL_PLL_CFG_2(a) (a)
+#define arguments_BDK_GSERX_GLBL_PLL_CFG_2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_glbl_pll_cfg_3
+ *
+ * GSER Global PLL Configuration 3 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
union bdk_gserx_glbl_pll_cfg_3
{
    uint64_t u; /* Whole-register view. */
    /* NOTE: bitfield order encodes hardware bit positions (mirrored for each
       endianness below); do not reorder fields. */
    struct bdk_gserx_glbl_pll_cfg_3_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_10_63 : 54;
        uint64_t pcs_sds_pll_vco_amp : 2;    /**< [  9:  8](R/W) Adjusts the VCO amplitude control current.
                                                 For diagnostic use only.
                                                 0x0 = Add 25 uA.
                                                 0x1 = OFF (default).
                                                 0x2 = Sink 25 uA.
                                                 0x3 = Sink 50 uA. */
        uint64_t pll_bypass_uq : 1;          /**< [  7:  7](R/W) PLL bypass enable. When asserted, multiplexes in the feedback divider clock.
                                                 For diagnostic use only. */
        uint64_t pll_vctrl_sel_ovrrd_en : 1; /**< [  6:  6](R/W) Override enable for selecting current for Vctrl in open loop operation.
                                                 For diagnostic use only. */
        uint64_t pll_vctrl_sel_ovrrd_val : 2;/**< [  5:  4](R/W) Override value for selecting current for Vctrl in open loop operation.
                                                 For diagnostic use only. */
        uint64_t pll_vctrl_sel_lcvco_val : 2;/**< [  3:  2](R/W) Selects current for Vctrl in open loop operation for LC-tank VCO.
                                                 For diagnostic use only. */
        uint64_t pll_vctrl_sel_rovco_val : 2;/**< [  1:  0](R/W) Selects current for Vctrl in open loop operation for ring oscillator VCO.
                                                 For diagnostic use only. */
#else /* Word 0 - Little Endian */
        uint64_t pll_vctrl_sel_rovco_val : 2;/**< [  1:  0](R/W) Selects current for Vctrl in open loop operation for ring oscillator VCO.
                                                 For diagnostic use only. */
        uint64_t pll_vctrl_sel_lcvco_val : 2;/**< [  3:  2](R/W) Selects current for Vctrl in open loop operation for LC-tank VCO.
                                                 For diagnostic use only. */
        uint64_t pll_vctrl_sel_ovrrd_val : 2;/**< [  5:  4](R/W) Override value for selecting current for Vctrl in open loop operation.
                                                 For diagnostic use only. */
        uint64_t pll_vctrl_sel_ovrrd_en : 1; /**< [  6:  6](R/W) Override enable for selecting current for Vctrl in open loop operation.
                                                 For diagnostic use only. */
        uint64_t pll_bypass_uq : 1;          /**< [  7:  7](R/W) PLL bypass enable. When asserted, multiplexes in the feedback divider clock.
                                                 For diagnostic use only. */
        uint64_t pcs_sds_pll_vco_amp : 2;    /**< [  9:  8](R/W) Adjusts the VCO amplitude control current.
                                                 For diagnostic use only.
                                                 0x0 = Add 25 uA.
                                                 0x1 = OFF (default).
                                                 0x2 = Sink 25 uA.
                                                 0x3 = Sink 50 uA. */
        uint64_t reserved_10_63 : 54;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gserx_glbl_pll_cfg_3_s cn; */
};
typedef union bdk_gserx_glbl_pll_cfg_3 bdk_gserx_glbl_pll_cfg_3_t;
+
+static inline uint64_t BDK_GSERX_GLBL_PLL_CFG_3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_GLBL_PLL_CFG_3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460018ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460018ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460018ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_GLBL_PLL_CFG_3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_GLBL_PLL_CFG_3(a) bdk_gserx_glbl_pll_cfg_3_t
+#define bustype_BDK_GSERX_GLBL_PLL_CFG_3(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_GLBL_PLL_CFG_3(a) "GSERX_GLBL_PLL_CFG_3"
+#define device_bar_BDK_GSERX_GLBL_PLL_CFG_3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_GLBL_PLL_CFG_3(a) (a)
+#define arguments_BDK_GSERX_GLBL_PLL_CFG_3(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_glbl_pll_monitor
+ *
+ * GSER Monitor for SerDes Global to Raw PCS Global interface Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
union bdk_gserx_glbl_pll_monitor
{
    uint64_t u; /* Whole-register view. */
    /* NOTE: bitfield order encodes hardware bit positions (mirrored for each
       endianness below); do not reorder fields. */
    struct bdk_gserx_glbl_pll_monitor_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_14_63 : 50;
        uint64_t sds_pcs_glbl_status : 6;    /**< [ 13:  8](RO/H) Spare reserved for future use. Read data should be ignored. */
        uint64_t sds_pcs_pll_lock : 1;       /**< [  7:  7](RO/H) Status signal from global indicates that PLL is locked. Not a true "lock" signal.
                                                 Used to debug/test the PLL. */
        uint64_t sds_pcs_clock_ready : 1;    /**< [  6:  6](RO/H) Clock status signal, can be overridden with (I_PLL_CTRL_EN == 1).
                                                 0 = Clock not ready.
                                                 1 = Clock ready. */
        uint64_t sds_pcs_pll_calstates : 5;  /**< [  5:  1](RO/H) PLL calibration code. */
        uint64_t sds_pcs_pll_caldone : 1;    /**< [  0:  0](RO/H) PLL calibration done signal. */
#else /* Word 0 - Little Endian */
        uint64_t sds_pcs_pll_caldone : 1;    /**< [  0:  0](RO/H) PLL calibration done signal. */
        uint64_t sds_pcs_pll_calstates : 5;  /**< [  5:  1](RO/H) PLL calibration code. */
        uint64_t sds_pcs_clock_ready : 1;    /**< [  6:  6](RO/H) Clock status signal, can be overridden with (I_PLL_CTRL_EN == 1).
                                                 0 = Clock not ready.
                                                 1 = Clock ready. */
        uint64_t sds_pcs_pll_lock : 1;       /**< [  7:  7](RO/H) Status signal from global indicates that PLL is locked. Not a true "lock" signal.
                                                 Used to debug/test the PLL. */
        uint64_t sds_pcs_glbl_status : 6;    /**< [ 13:  8](RO/H) Spare reserved for future use. Read data should be ignored. */
        uint64_t reserved_14_63 : 50;
#endif /* Word 0 - End */
    } s;
    /* Chip-specific variant: identical fields, but the top reserved range is
       split into reserved_14_15 / reserved_16_63. */
    struct bdk_gserx_glbl_pll_monitor_cn
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_16_63 : 48;
        uint64_t reserved_14_15 : 2;
        uint64_t sds_pcs_glbl_status : 6;    /**< [ 13:  8](RO/H) Spare reserved for future use. Read data should be ignored. */
        uint64_t sds_pcs_pll_lock : 1;       /**< [  7:  7](RO/H) Status signal from global indicates that PLL is locked. Not a true "lock" signal.
                                                 Used to debug/test the PLL. */
        uint64_t sds_pcs_clock_ready : 1;    /**< [  6:  6](RO/H) Clock status signal, can be overridden with (I_PLL_CTRL_EN == 1).
                                                 0 = Clock not ready.
                                                 1 = Clock ready. */
        uint64_t sds_pcs_pll_calstates : 5;  /**< [  5:  1](RO/H) PLL calibration code. */
        uint64_t sds_pcs_pll_caldone : 1;    /**< [  0:  0](RO/H) PLL calibration done signal. */
#else /* Word 0 - Little Endian */
        uint64_t sds_pcs_pll_caldone : 1;    /**< [  0:  0](RO/H) PLL calibration done signal. */
        uint64_t sds_pcs_pll_calstates : 5;  /**< [  5:  1](RO/H) PLL calibration code. */
        uint64_t sds_pcs_clock_ready : 1;    /**< [  6:  6](RO/H) Clock status signal, can be overridden with (I_PLL_CTRL_EN == 1).
                                                 0 = Clock not ready.
                                                 1 = Clock ready. */
        uint64_t sds_pcs_pll_lock : 1;       /**< [  7:  7](RO/H) Status signal from global indicates that PLL is locked. Not a true "lock" signal.
                                                 Used to debug/test the PLL. */
        uint64_t sds_pcs_glbl_status : 6;    /**< [ 13:  8](RO/H) Spare reserved for future use. Read data should be ignored. */
        uint64_t reserved_14_15 : 2;
        uint64_t reserved_16_63 : 48;
#endif /* Word 0 - End */
    } cn;
};
typedef union bdk_gserx_glbl_pll_monitor bdk_gserx_glbl_pll_monitor_t;
+
+static inline uint64_t BDK_GSERX_GLBL_PLL_MONITOR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_GLBL_PLL_MONITOR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460100ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460100ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460100ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_GLBL_PLL_MONITOR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_GLBL_PLL_MONITOR(a) bdk_gserx_glbl_pll_monitor_t
+#define bustype_BDK_GSERX_GLBL_PLL_MONITOR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_GLBL_PLL_MONITOR(a) "GSERX_GLBL_PLL_MONITOR"
+#define device_bar_BDK_GSERX_GLBL_PLL_MONITOR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_GLBL_PLL_MONITOR(a) (a)
+#define arguments_BDK_GSERX_GLBL_PLL_MONITOR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_glbl_tad
+ *
+ * GSER Global Test Analog and Digital Monitor Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
union bdk_gserx_glbl_tad
{
    uint64_t u; /* Whole-register view. */
    /* NOTE: bitfield order encodes hardware bit positions (mirrored for each
       endianness below); do not reorder fields. */
    struct bdk_gserx_glbl_tad_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_9_63 : 55;
        uint64_t pcs_sds_tad_8_5 : 4;        /**< [  8:  5](R/W) AMON specific mode selection.
                                                 Set GSER()_GLBL_TM_ADMON[AMON_ON].
                                                 Decodes 0x0 - 0x4 require GSER()_GLBL_TM_ADMON[LSEL] set.
                                                 Decodes 0x5 - 0x5 do not require GSER()_GLBL_TM_ADMON[LSEL] set.
                                                 In both cases, the resulting signals can be observed on the AMON pin.

                                                 0x0 = TX txdrv DAC 100ua sink current monitor.
                                                 0x1 = TX vcnt precision dcc.
                                                 0x2 = RX sdll topregout.
                                                 0x3 = RX ldll vctrl_i.
                                                 0x4 = RX RX term VCM voltage.
                                                 0x5 = Global bandgap voltage.
                                                 0x6 = Global CTAT voltage.
                                                 0x7 = Global internal 100ua reference current.
                                                 0x8 = Global external 100ua reference current.
                                                 0x9 = Global Rterm calibration reference voltage.
                                                 0xA = Global Rterm calibration comparator voltage.
                                                 0xB = Global force VCNT through DAC.
                                                 0xC = Global VDD voltage.
                                                 0xD = Global VDDCLK voltage.
                                                 0xE = Global PLL regulate VCO supply.
                                                 0xF = Global VCTRL for VCO varactor control. */
        uint64_t pcs_sds_tad_4_0 : 5;        /**< [  4:  0](R/W) DMON specific mode selection.
                                                 Set GSER()_GLBL_TM_ADMON[DMON_ON].
                                                 Decodes 0x0 - 0xe require GSER()_GLBL_TM_ADMON[LSEL] set.
                                                 Decodes 0xf - 0x1f do not require GSER()_GLBL_TM_ADMON[LSEL] set.
                                                 In both cases, the resulting signals can be observed on the DMON pin.

                                                 0x00 = DFE Data Q.
                                                 0x01 = DFE Edge I.
                                                 0x02 = DFE CK Q.
                                                 0x03 = DFE CK I.
                                                 0x04 = DLL use GSER()_SLICE()_RX_SDLL_CTRL.PCS_SDS_RX_SDLL_SWSEL to select signal
                                                 in the slice DLL.
                                                 0x05-0x7 = Reserved.
                                                 0x08 = RX ld_rx[0].
                                                 0x09 = RX rx_clk.
                                                 0x0A = RX q_error_stg.
                                                 0x0B = RX q_data_stg.
                                                 0x0C-0x0E = Reserved.
                                                 0x0F = Special case to observe supply in global. Sds_vdda and a internal regulated supply
                                                 can be observed on DMON and DMONB
                                                 respectively. sds_vss can be observed on AMON. GSER()_GLBL_TM_ADMON[AMON_ON]
                                                 must not be set.
                                                 0x10 = PLL_CLK 0 degree.
                                                 0x11 = Sds_tst_fb_clk.
                                                 0x12 = Buffered refclk.
                                                 0x13 = Div 8 of core clock (core_clk_out).
                                                 0x14-0x1F: Reserved. */
#else /* Word 0 - Little Endian */
        uint64_t pcs_sds_tad_4_0 : 5;        /**< [  4:  0](R/W) DMON specific mode selection.
                                                 Set GSER()_GLBL_TM_ADMON[DMON_ON].
                                                 Decodes 0x0 - 0xe require GSER()_GLBL_TM_ADMON[LSEL] set.
                                                 Decodes 0xf - 0x1f do not require GSER()_GLBL_TM_ADMON[LSEL] set.
                                                 In both cases, the resulting signals can be observed on the DMON pin.

                                                 0x00 = DFE Data Q.
                                                 0x01 = DFE Edge I.
                                                 0x02 = DFE CK Q.
                                                 0x03 = DFE CK I.
                                                 0x04 = DLL use GSER()_SLICE()_RX_SDLL_CTRL.PCS_SDS_RX_SDLL_SWSEL to select signal
                                                 in the slice DLL.
                                                 0x05-0x7 = Reserved.
                                                 0x08 = RX ld_rx[0].
                                                 0x09 = RX rx_clk.
                                                 0x0A = RX q_error_stg.
                                                 0x0B = RX q_data_stg.
                                                 0x0C-0x0E = Reserved.
                                                 0x0F = Special case to observe supply in global. Sds_vdda and a internal regulated supply
                                                 can be observed on DMON and DMONB
                                                 respectively. sds_vss can be observed on AMON. GSER()_GLBL_TM_ADMON[AMON_ON]
                                                 must not be set.
                                                 0x10 = PLL_CLK 0 degree.
                                                 0x11 = Sds_tst_fb_clk.
                                                 0x12 = Buffered refclk.
                                                 0x13 = Div 8 of core clock (core_clk_out).
                                                 0x14-0x1F: Reserved. */
        uint64_t pcs_sds_tad_8_5 : 4;        /**< [  8:  5](R/W) AMON specific mode selection.
                                                 Set GSER()_GLBL_TM_ADMON[AMON_ON].
                                                 Decodes 0x0 - 0x4 require GSER()_GLBL_TM_ADMON[LSEL] set.
                                                 Decodes 0x5 - 0x5 do not require GSER()_GLBL_TM_ADMON[LSEL] set.
                                                 In both cases, the resulting signals can be observed on the AMON pin.

                                                 0x0 = TX txdrv DAC 100ua sink current monitor.
                                                 0x1 = TX vcnt precision dcc.
                                                 0x2 = RX sdll topregout.
                                                 0x3 = RX ldll vctrl_i.
                                                 0x4 = RX RX term VCM voltage.
                                                 0x5 = Global bandgap voltage.
                                                 0x6 = Global CTAT voltage.
                                                 0x7 = Global internal 100ua reference current.
                                                 0x8 = Global external 100ua reference current.
                                                 0x9 = Global Rterm calibration reference voltage.
                                                 0xA = Global Rterm calibration comparator voltage.
                                                 0xB = Global force VCNT through DAC.
                                                 0xC = Global VDD voltage.
                                                 0xD = Global VDDCLK voltage.
                                                 0xE = Global PLL regulate VCO supply.
                                                 0xF = Global VCTRL for VCO varactor control. */
        uint64_t reserved_9_63 : 55;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gserx_glbl_tad_s cn; */
};
typedef union bdk_gserx_glbl_tad bdk_gserx_glbl_tad_t;
+
+static inline uint64_t BDK_GSERX_GLBL_TAD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_GLBL_TAD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460400ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460400ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460400ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_GLBL_TAD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_GLBL_TAD(a) bdk_gserx_glbl_tad_t
+#define bustype_BDK_GSERX_GLBL_TAD(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_GLBL_TAD(a) "GSERX_GLBL_TAD"
+#define device_bar_BDK_GSERX_GLBL_TAD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_GLBL_TAD(a) (a)
+#define arguments_BDK_GSERX_GLBL_TAD(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_glbl_tm_admon
+ *
+ * GSER Global Test Mode Analog/Digital Monitor Enable Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
union bdk_gserx_glbl_tm_admon
{
    uint64_t u; /* Whole-register view. */
    /* NOTE: bitfield order encodes hardware bit positions (mirrored for each
       endianness below); do not reorder fields. */
    struct bdk_gserx_glbl_tm_admon_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_8_63 : 56;
        uint64_t amon_on : 1;                /**< [  7:  7](R/W) When set, AMON test mode is enabled; see GSER()_GLBL_TAD. */
        uint64_t dmon_on : 1;                /**< [  6:  6](R/W) When set, DMON test mode is enabled; see GSER()_GLBL_TAD. */
        uint64_t reserved_3_5 : 3;
        uint64_t lsel : 3;                   /**< [  2:  0](R/W) Three bits to select 1 out of 4 lanes for AMON/DMON test.
                                                 0x0 = Selects lane 0.
                                                 0x1 = Selects lane 1.
                                                 0x2 = Selects lane 2.
                                                 0x3 = Selects lane 3.
                                                 0x4-0x7 = Reserved. */
#else /* Word 0 - Little Endian */
        uint64_t lsel : 3;                   /**< [  2:  0](R/W) Three bits to select 1 out of 4 lanes for AMON/DMON test.
                                                 0x0 = Selects lane 0.
                                                 0x1 = Selects lane 1.
                                                 0x2 = Selects lane 2.
                                                 0x3 = Selects lane 3.
                                                 0x4-0x7 = Reserved. */
        uint64_t reserved_3_5 : 3;
        uint64_t dmon_on : 1;                /**< [  6:  6](R/W) When set, DMON test mode is enabled; see GSER()_GLBL_TAD. */
        uint64_t amon_on : 1;                /**< [  7:  7](R/W) When set, AMON test mode is enabled; see GSER()_GLBL_TAD. */
        uint64_t reserved_8_63 : 56;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gserx_glbl_tm_admon_s cn; */
};
typedef union bdk_gserx_glbl_tm_admon bdk_gserx_glbl_tm_admon_t;
+
+static inline uint64_t BDK_GSERX_GLBL_TM_ADMON(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_GLBL_TM_ADMON(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460408ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460408ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460408ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_GLBL_TM_ADMON", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_GLBL_TM_ADMON(a) bdk_gserx_glbl_tm_admon_t
+#define bustype_BDK_GSERX_GLBL_TM_ADMON(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_GLBL_TM_ADMON(a) "GSERX_GLBL_TM_ADMON"
+#define device_bar_BDK_GSERX_GLBL_TM_ADMON(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_GLBL_TM_ADMON(a) (a)
+#define arguments_BDK_GSERX_GLBL_TM_ADMON(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_iddq_mode
+ *
+ * GSER IDDQ Mode Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
union bdk_gserx_iddq_mode
{
    uint64_t u; /* Whole-register view. */
    /* NOTE: bitfield order encodes hardware bit positions (mirrored for each
       endianness below); do not reorder fields. */
    struct bdk_gserx_iddq_mode_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_1_63 : 63;
        uint64_t phy_iddq_mode : 1;          /**< [  0:  0](R/W) When set, power downs all circuitry in PHY for IDDQ testing */
#else /* Word 0 - Little Endian */
        uint64_t phy_iddq_mode : 1;          /**< [  0:  0](R/W) When set, power downs all circuitry in PHY for IDDQ testing */
        uint64_t reserved_1_63 : 63;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gserx_iddq_mode_s cn; */
};
typedef union bdk_gserx_iddq_mode bdk_gserx_iddq_mode_t;
+
+static inline uint64_t BDK_GSERX_IDDQ_MODE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_IDDQ_MODE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000018ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000018ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000018ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_IDDQ_MODE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_IDDQ_MODE(a) bdk_gserx_iddq_mode_t
+#define bustype_BDK_GSERX_IDDQ_MODE(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_IDDQ_MODE(a) "GSERX_IDDQ_MODE"
+#define device_bar_BDK_GSERX_IDDQ_MODE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_IDDQ_MODE(a) (a)
+#define arguments_BDK_GSERX_IDDQ_MODE(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_lbert_cfg
+ *
+ * GSER Lane LBERT Configuration Registers
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_lbert_cfg
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_lbert_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t lbert_pg_err_insert : 1; /**< [ 15: 15](WO/H) Insert one bit error into the LSB of the LBERT generated
+ stream. A single write to this bit inserts a single bit
+ error. */
+ uint64_t lbert_pm_sync_start : 1; /**< [ 14: 14](WO/H) Synchronize the pattern matcher LFSR with the incoming
+ data. Writing this bit resets the error counter and
+ starts a synchronization of the PM. There is no need
+ to write this bit back to a zero to run normally. */
+ uint64_t lbert_pg_en : 1; /**< [ 13: 13](R/W) Enable the LBERT pattern generator. */
+ uint64_t lbert_pg_width : 2; /**< [ 12: 11](R/W) LBERT pattern generator data width:
+ 0x0 = 8-bit data.
+ 0x1 = 10-bit data.
+ 0x2 = 16-bit data.
+ 0x3 = 20-bit data. */
+ uint64_t lbert_pg_mode : 4; /**< [ 10: 7](R/W) LBERT pattern generator mode; when changing modes,
+ must be disabled first:
+ 0x0 = Disabled.
+ 0x1 = lfsr31 = X^31 + X^28 + 1.
+ 0x2 = lfsr23 = X^23 + X^18 + 1.
+ 0x3 = lfsr23 = X^23 + X^21 + X^16 + X^8 + X^5 + X^2 + 1.
+ 0x4 = lfsr16 = X^16 + X^5 + X^4 + X^3 + 1.
+ 0x5 = lfsr15 = X^15 + X^14 + 1.
+ 0x6 = lfsr11 = X^11 + X^9 + 1.
+ 0x7 = lfsr7 = X^7 + X^6 + 1.
+ 0x8 = Fixed word (PAT0).
+ 0x9 = DC-balanced word (PAT0, ~PAT0).
+ 0xA = Fixed Pattern (000, PAT0, 3ff, ~PAT0).
+ 0xB-F = Reserved. */
+ uint64_t lbert_pm_en : 1; /**< [ 6: 6](R/W) Enable LBERT pattern matcher. */
+ uint64_t lbert_pm_width : 2; /**< [ 5: 4](R/W) LBERT pattern matcher data width.
+ 0x0 = 8-bit data.
+ 0x1 = 10-bit data.
+ 0x2 = 16-bit data.
+ 0x3 = 20-bit data. */
+ uint64_t lbert_pm_mode : 4; /**< [ 3: 0](R/W) LBERT pattern matcher mode; when changing modes,
+ must be disabled first:
+ 0x0 = Disabled.
+ 0x1 = lfsr31 = X^31 + X^28 + 1.
+ 0x2 = lfsr23 = X^23 + X^18 + 1.
+ 0x3 = lfsr23 = X^23 + X^21 + X^16 + X^8 + X^5 + X^2 + 1.
+ 0x4 = lfsr16 = X^16 + X^5 + X^4 + X^3 + 1.
+ 0x5 = lfsr15 = X^15 + X^14 + 1.
+ 0x6 = lfsr11 = X^11 + X^9 + 1.
+ 0x7 = lfsr7 = X^7 + X^6 + 1.
+ 0x8 = Fixed word (PAT0).
+ 0x9 = DC-balanced word (PAT0, ~PAT0).
+ 0xA = Fixed Pattern: (000, PAT0, 3ff, ~PAT0).
+ 0xB-F = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t lbert_pm_mode : 4; /**< [ 3: 0](R/W) LBERT pattern matcher mode; when changing modes,
+ must be disabled first:
+ 0x0 = Disabled.
+ 0x1 = lfsr31 = X^31 + X^28 + 1.
+ 0x2 = lfsr23 = X^23 + X^18 + 1.
+ 0x3 = lfsr23 = X^23 + X^21 + X^16 + X^8 + X^5 + X^2 + 1.
+ 0x4 = lfsr16 = X^16 + X^5 + X^4 + X^3 + 1.
+ 0x5 = lfsr15 = X^15 + X^14 + 1.
+ 0x6 = lfsr11 = X^11 + X^9 + 1.
+ 0x7 = lfsr7 = X^7 + X^6 + 1.
+ 0x8 = Fixed word (PAT0).
+ 0x9 = DC-balanced word (PAT0, ~PAT0).
+ 0xA = Fixed Pattern: (000, PAT0, 3ff, ~PAT0).
+ 0xB-F = Reserved. */
+ uint64_t lbert_pm_width : 2; /**< [ 5: 4](R/W) LBERT pattern matcher data width.
+ 0x0 = 8-bit data.
+ 0x1 = 10-bit data.
+ 0x2 = 16-bit data.
+ 0x3 = 20-bit data. */
+ uint64_t lbert_pm_en : 1; /**< [ 6: 6](R/W) Enable LBERT pattern matcher. */
+ uint64_t lbert_pg_mode : 4; /**< [ 10: 7](R/W) LBERT pattern generator mode; when changing modes,
+ must be disabled first:
+ 0x0 = Disabled.
+ 0x1 = lfsr31 = X^31 + X^28 + 1.
+ 0x2 = lfsr23 = X^23 + X^18 + 1.
+ 0x3 = lfsr23 = X^23 + X^21 + X^16 + X^8 + X^5 + X^2 + 1.
+ 0x4 = lfsr16 = X^16 + X^5 + X^4 + X^3 + 1.
+ 0x5 = lfsr15 = X^15 + X^14 + 1.
+ 0x6 = lfsr11 = X^11 + X^9 + 1.
+ 0x7 = lfsr7 = X^7 + X^6 + 1.
+ 0x8 = Fixed word (PAT0).
+ 0x9 = DC-balanced word (PAT0, ~PAT0).
+ 0xA = Fixed Pattern (000, PAT0, 3ff, ~PAT0).
+ 0xB-F = Reserved. */
+ uint64_t lbert_pg_width : 2; /**< [ 12: 11](R/W) LBERT pattern generator data width:
+ 0x0 = 8-bit data.
+ 0x1 = 10-bit data.
+ 0x2 = 16-bit data.
+ 0x3 = 20-bit data. */
+ uint64_t lbert_pg_en : 1; /**< [ 13: 13](R/W) Enable the LBERT pattern generator. */
+ uint64_t lbert_pm_sync_start : 1; /**< [ 14: 14](WO/H) Synchronize the pattern matcher LFSR with the incoming
+ data. Writing this bit resets the error counter and
+ starts a synchronization of the PM. There is no need
+ to write this bit back to a zero to run normally. */
+ uint64_t lbert_pg_err_insert : 1; /**< [ 15: 15](WO/H) Insert one bit error into the LSB of the LBERT generated
+ stream. A single write to this bit inserts a single bit
+ error. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_lbert_cfg_s cn; */
+};
+typedef union bdk_gserx_lanex_lbert_cfg bdk_gserx_lanex_lbert_cfg_t;
+
+/* Return the physical address of the GSER(a)_LANE(b)_LBERT_CFG CSR.
+   [a] = GSER block index, [b] = lane index; the valid range of both indices
+   is model-dependent (CN81XX: a<=3,b<=1; CN83XX: a<=6,b<=3; CN88XX:
+   a<=13,b<=3). Arguments outside the range for the running chip model take
+   the __bdk_csr_fatal() error path. */
+static inline uint64_t BDK_GSERX_LANEX_LBERT_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_LBERT_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904c0020ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904c0020ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904c0020ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_LBERT_CFG", 2, a, b, 0, 0);
+}
+
+/* NOTE(review): per-register accessor metadata, presumably consumed by the
+   generic BDK_CSR_* access macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_GSERX_LANEX_LBERT_CFG(a,b) bdk_gserx_lanex_lbert_cfg_t
+#define bustype_BDK_GSERX_LANEX_LBERT_CFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_LBERT_CFG(a,b) "GSERX_LANEX_LBERT_CFG"
+#define device_bar_BDK_GSERX_LANEX_LBERT_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_LBERT_CFG(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_LBERT_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_lbert_ecnt
+ *
+ * GSER Lane LBERT Error Counter Registers
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ * The error registers are reset on a read-only when the pattern matcher is enabled.
+ * If the pattern matcher is disabled, the registers return the error count that was
+ * indicated when the pattern matcher was disabled and never reset.
+ */
+/* NOTE(review): the field list below appears twice so that bit positions
+   match the hardware layout on both big- and little-endian builds. */
+union bdk_gserx_lanex_lbert_ecnt
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_lbert_ecnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t lbert_err_ovbit14 : 1; /**< [ 15: 15](RO/H) If this bit is set, multiply [LBERT_ERR_CNT] by 128.
+ If this bit is set and [LBERT_ERR_CNT] = 2^15-1, signals
+ overflow of the counter. */
+ uint64_t lbert_err_cnt : 15; /**< [ 14: 0](RO/H) Current bit error count.
+ If [LBERT_ERR_OVBIT14] is active, then multiply
+ count by 128. */
+#else /* Word 0 - Little Endian */
+ uint64_t lbert_err_cnt : 15; /**< [ 14: 0](RO/H) Current bit error count.
+ If [LBERT_ERR_OVBIT14] is active, then multiply
+ count by 128. */
+ uint64_t lbert_err_ovbit14 : 1; /**< [ 15: 15](RO/H) If this bit is set, multiply [LBERT_ERR_CNT] by 128.
+ If this bit is set and [LBERT_ERR_CNT] = 2^15-1, signals
+ overflow of the counter. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_lbert_ecnt_s cn; */
+};
+typedef union bdk_gserx_lanex_lbert_ecnt bdk_gserx_lanex_lbert_ecnt_t;
+
+/* Address of GSER(a)_LANE(b)_LBERT_ECNT; index bounds are model-dependent,
+   out-of-range arguments take the __bdk_csr_fatal() error path. */
+static inline uint64_t BDK_GSERX_LANEX_LBERT_ECNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_LBERT_ECNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904c0028ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904c0028ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904c0028ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_LBERT_ECNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_LBERT_ECNT(a,b) bdk_gserx_lanex_lbert_ecnt_t
+#define bustype_BDK_GSERX_LANEX_LBERT_ECNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_LBERT_ECNT(a,b) "GSERX_LANEX_LBERT_ECNT"
+#define device_bar_BDK_GSERX_LANEX_LBERT_ECNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_LBERT_ECNT(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_LBERT_ECNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_lbert_pat_cfg
+ *
+ * GSER Lane LBERT Pattern Configuration Registers
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* NOTE(review): the field list below appears twice so that bit positions
+   match the hardware layout on both big- and little-endian builds. */
+union bdk_gserx_lanex_lbert_pat_cfg
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_lbert_pat_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t lbert_pg_pat : 10; /**< [ 9: 0](R/W) Programmable 10-bit pattern to be used in the LBERT pattern mode;
+ applies when GSER()_LANE()_LBERT_CFG[LBERT_PG_MODE]
+ is equal to 8, 9, or 10. */
+#else /* Word 0 - Little Endian */
+ uint64_t lbert_pg_pat : 10; /**< [ 9: 0](R/W) Programmable 10-bit pattern to be used in the LBERT pattern mode;
+ applies when GSER()_LANE()_LBERT_CFG[LBERT_PG_MODE]
+ is equal to 8, 9, or 10. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_lbert_pat_cfg_s cn; */
+};
+typedef union bdk_gserx_lanex_lbert_pat_cfg bdk_gserx_lanex_lbert_pat_cfg_t;
+
+/* Address of GSER(a)_LANE(b)_LBERT_PAT_CFG; index bounds are model-dependent,
+   out-of-range arguments take the __bdk_csr_fatal() error path. */
+static inline uint64_t BDK_GSERX_LANEX_LBERT_PAT_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_LBERT_PAT_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904c0018ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904c0018ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904c0018ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_LBERT_PAT_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_LBERT_PAT_CFG(a,b) bdk_gserx_lanex_lbert_pat_cfg_t
+#define bustype_BDK_GSERX_LANEX_LBERT_PAT_CFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_LBERT_PAT_CFG(a,b) "GSERX_LANEX_LBERT_PAT_CFG"
+#define device_bar_BDK_GSERX_LANEX_LBERT_PAT_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_LBERT_PAT_CFG(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_LBERT_PAT_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_misc_cfg_0
+ *
+ * GSER Lane Miscellaneous Configuration 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* NOTE(review): the field list below appears twice so that bit positions
+   match the hardware layout on both big- and little-endian builds. */
+union bdk_gserx_lanex_misc_cfg_0
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_misc_cfg_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t use_pma_polarity : 1; /**< [ 15: 15](R/W) If set, the PMA control is used to define the polarity.
+ If not set, GSER()_LANE()_RX_CFG_0[CFG_RX_POL_INVERT]
+ is used. */
+ uint64_t cfg_pcs_loopback : 1; /**< [ 14: 14](R/W) Assert for parallel loopback raw PCS TX to Raw PCS RX. */
+ uint64_t pcs_tx_mode_ovrrd_en : 1; /**< [ 13: 13](R/W) Override enable for raw PCS TX data width. */
+ uint64_t pcs_rx_mode_ovrrd_en : 1; /**< [ 12: 12](R/W) Override enable for raw PCS RX data width. */
+ uint64_t cfg_eie_det_cnt : 4; /**< [ 11: 8](R/W) EIE detect state machine required number of consecutive
+ PHY EIE status assertions to determine EIE and assert Raw
+ PCS output pcs_mac_rx_eie_det_sts. */
+ uint64_t eie_det_stl_on_time : 3; /**< [ 7: 5](R/W) EIE detect state machine "on" delay prior to sampling
+ PHY EIE status. Software needs to set this field to 0x4 if
+ in SATA mode (GSER()_CFG[SATA] is set). */
+ uint64_t eie_det_stl_off_time : 3; /**< [ 4: 2](R/W) EIE detect state machine "off" delay prior to sampling
+ PHY EIE status. */
+ uint64_t tx_bit_order : 1; /**< [ 1: 1](R/W) Specify transmit bit order.
+ 0 = Maintain bit order of parallel data to SerDes TX.
+ 1 = Reverse bit order of parallel data to SerDes TX. */
+ uint64_t rx_bit_order : 1; /**< [ 0: 0](R/W) Specify receive bit order:
+ 0 = Maintain bit order of parallel data to SerDes RX.
+ 1 = Reverse bit order of parallel data to SerDes RX. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_bit_order : 1; /**< [ 0: 0](R/W) Specify receive bit order:
+ 0 = Maintain bit order of parallel data to SerDes RX.
+ 1 = Reverse bit order of parallel data to SerDes RX. */
+ uint64_t tx_bit_order : 1; /**< [ 1: 1](R/W) Specify transmit bit order.
+ 0 = Maintain bit order of parallel data to SerDes TX.
+ 1 = Reverse bit order of parallel data to SerDes TX. */
+ uint64_t eie_det_stl_off_time : 3; /**< [ 4: 2](R/W) EIE detect state machine "off" delay prior to sampling
+ PHY EIE status. */
+ uint64_t eie_det_stl_on_time : 3; /**< [ 7: 5](R/W) EIE detect state machine "on" delay prior to sampling
+ PHY EIE status. Software needs to set this field to 0x4 if
+ in SATA mode (GSER()_CFG[SATA] is set). */
+ uint64_t cfg_eie_det_cnt : 4; /**< [ 11: 8](R/W) EIE detect state machine required number of consecutive
+ PHY EIE status assertions to determine EIE and assert Raw
+ PCS output pcs_mac_rx_eie_det_sts. */
+ uint64_t pcs_rx_mode_ovrrd_en : 1; /**< [ 12: 12](R/W) Override enable for raw PCS RX data width. */
+ uint64_t pcs_tx_mode_ovrrd_en : 1; /**< [ 13: 13](R/W) Override enable for raw PCS TX data width. */
+ uint64_t cfg_pcs_loopback : 1; /**< [ 14: 14](R/W) Assert for parallel loopback raw PCS TX to Raw PCS RX. */
+ uint64_t use_pma_polarity : 1; /**< [ 15: 15](R/W) If set, the PMA control is used to define the polarity.
+ If not set, GSER()_LANE()_RX_CFG_0[CFG_RX_POL_INVERT]
+ is used. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_misc_cfg_0_s cn; */
+};
+typedef union bdk_gserx_lanex_misc_cfg_0 bdk_gserx_lanex_misc_cfg_0_t;
+
+/* Address of GSER(a)_LANE(b)_MISC_CFG_0; index bounds are model-dependent,
+   out-of-range arguments take the __bdk_csr_fatal() error path. */
+static inline uint64_t BDK_GSERX_LANEX_MISC_CFG_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_MISC_CFG_0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904c0000ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904c0000ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904c0000ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_MISC_CFG_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_MISC_CFG_0(a,b) bdk_gserx_lanex_misc_cfg_0_t
+#define bustype_BDK_GSERX_LANEX_MISC_CFG_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_MISC_CFG_0(a,b) "GSERX_LANEX_MISC_CFG_0"
+#define device_bar_BDK_GSERX_LANEX_MISC_CFG_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_MISC_CFG_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_MISC_CFG_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_misc_cfg_1
+ *
+ * GSER Lane Miscellaneous Configuration 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* NOTE(review): the field list below appears twice so that bit positions
+   match the hardware layout on both big- and little-endian builds. */
+union bdk_gserx_lanex_misc_cfg_1
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_misc_cfg_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t par_tx_init : 1; /**< [ 12: 12](R/W) Performs parallel initialization of SerDes interface TX
+ FIFO pointers. */
+ uint64_t tx_polarity : 1; /**< [ 11: 11](R/W) Invert polarity of transmitted bit stream. Inversion is
+ performed in the SerDes interface transmit datapath. */
+ uint64_t rx_polarity_ovrrd_en : 1; /**< [ 10: 10](R/W) Override mac_pcs_rxX_polarity control pin values
+ When set, RX polarity inversion is specified from
+ RX_POLARITY_OVRRD_VAL, and mac_pcs_rxX_polarity is ignored. */
+ uint64_t rx_polarity_ovrrd_val : 1; /**< [ 9: 9](R/W) Controls RX polarity inversion when RX_POLARITY_OVRRD_EN
+ is set. Inversion is performed in the SerDes interface receive
+ datapath. */
+ uint64_t reserved_2_8 : 7;
+ uint64_t mac_tx_fifo_rd_ptr_ival : 2;/**< [ 1: 0](R/W/H) Initial value for MAC to PCS TX FIFO read pointer. */
+#else /* Word 0 - Little Endian */
+ uint64_t mac_tx_fifo_rd_ptr_ival : 2;/**< [ 1: 0](R/W/H) Initial value for MAC to PCS TX FIFO read pointer. */
+ uint64_t reserved_2_8 : 7;
+ uint64_t rx_polarity_ovrrd_val : 1; /**< [ 9: 9](R/W) Controls RX polarity inversion when RX_POLARITY_OVRRD_EN
+ is set. Inversion is performed in the SerDes interface receive
+ datapath. */
+ uint64_t rx_polarity_ovrrd_en : 1; /**< [ 10: 10](R/W) Override mac_pcs_rxX_polarity control pin values
+ When set, RX polarity inversion is specified from
+ RX_POLARITY_OVRRD_VAL, and mac_pcs_rxX_polarity is ignored. */
+ uint64_t tx_polarity : 1; /**< [ 11: 11](R/W) Invert polarity of transmitted bit stream. Inversion is
+ performed in the SerDes interface transmit datapath. */
+ uint64_t par_tx_init : 1; /**< [ 12: 12](R/W) Performs parallel initialization of SerDes interface TX
+ FIFO pointers. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_misc_cfg_1_s cn; */
+};
+typedef union bdk_gserx_lanex_misc_cfg_1 bdk_gserx_lanex_misc_cfg_1_t;
+
+/* Address of GSER(a)_LANE(b)_MISC_CFG_1; index bounds are model-dependent,
+   out-of-range arguments take the __bdk_csr_fatal() error path. */
+static inline uint64_t BDK_GSERX_LANEX_MISC_CFG_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_MISC_CFG_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904c0008ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904c0008ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904c0008ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_MISC_CFG_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_MISC_CFG_1(a,b) bdk_gserx_lanex_misc_cfg_1_t
+#define bustype_BDK_GSERX_LANEX_MISC_CFG_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_MISC_CFG_1(a,b) "GSERX_LANEX_MISC_CFG_1"
+#define device_bar_BDK_GSERX_LANEX_MISC_CFG_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_MISC_CFG_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_MISC_CFG_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_pcs_ctlifc_0
+ *
+ * GSER Lane Raw PCS Control Interface Configuration 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* NOTE(review): the field list below appears twice so that bit positions
+   match the hardware layout on both big- and little-endian builds. */
+union bdk_gserx_lanex_pcs_ctlifc_0
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_pcs_ctlifc_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t cfg_tx_vboost_en_ovrrd_val : 1;/**< [ 13: 13](R/W) Specifies TX VBOOST enable request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_VBOOST_EN_OVRRD_EN]. */
+ uint64_t cfg_tx_coeff_req_ovrrd_val : 1;/**< [ 12: 12](R/W) Specifies TX coefficient request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_COEFF_REQ_OVRRD_EN].
+ See GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+ uint64_t cfg_rx_cdr_coast_req_ovrrd_val : 1;/**< [ 11: 11](R/W) Specifies RX CDR coast request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_RX_COAST_REQ_OVRRD_EN]. */
+ uint64_t cfg_tx_detrx_en_req_ovrrd_val : 1;/**< [ 10: 10](R/W) Specifies TX detect RX request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_DETRX_EN_REQ_OVRRD_EN]. */
+ uint64_t cfg_soft_reset_req_ovrrd_val : 1;/**< [ 9: 9](R/W) Specifies Soft reset request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_SOFT_RESET_REQ_OVRRD_EN]. */
+ uint64_t cfg_lane_pwr_off_ovrrd_val : 1;/**< [ 8: 8](R/W) Specifies lane power off reset request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_LANE_PWR_OFF_OVRRD_EN]. */
+ uint64_t cfg_tx_mode_ovrrd_val : 2; /**< [ 7: 6](R/W) Override PCS TX mode (data width) when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_MODE_OVRRD_EN].
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (for PCIe Gen3 8Gb only).
+ 0x3 = 20-bit raw data. */
+ uint64_t cfg_tx_pstate_req_ovrrd_val : 2;/**< [ 5: 4](R/W) Override TX pstate request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN]. */
+ uint64_t cfg_lane_mode_req_ovrrd_val : 4;/**< [ 3: 0](R/W) Override lane mode request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_LANE_MODE_REQ_OVRRD_EN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cfg_lane_mode_req_ovrrd_val : 4;/**< [ 3: 0](R/W) Override lane mode request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_LANE_MODE_REQ_OVRRD_EN]. */
+ uint64_t cfg_tx_pstate_req_ovrrd_val : 2;/**< [ 5: 4](R/W) Override TX pstate request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN]. */
+ uint64_t cfg_tx_mode_ovrrd_val : 2; /**< [ 7: 6](R/W) Override PCS TX mode (data width) when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_MODE_OVRRD_EN].
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (for PCIe Gen3 8Gb only).
+ 0x3 = 20-bit raw data. */
+ uint64_t cfg_lane_pwr_off_ovrrd_val : 1;/**< [ 8: 8](R/W) Specifies lane power off reset request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_LANE_PWR_OFF_OVRRD_EN]. */
+ uint64_t cfg_soft_reset_req_ovrrd_val : 1;/**< [ 9: 9](R/W) Specifies Soft reset request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_SOFT_RESET_REQ_OVRRD_EN]. */
+ uint64_t cfg_tx_detrx_en_req_ovrrd_val : 1;/**< [ 10: 10](R/W) Specifies TX detect RX request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_DETRX_EN_REQ_OVRRD_EN]. */
+ uint64_t cfg_rx_cdr_coast_req_ovrrd_val : 1;/**< [ 11: 11](R/W) Specifies RX CDR coast request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_RX_COAST_REQ_OVRRD_EN]. */
+ uint64_t cfg_tx_coeff_req_ovrrd_val : 1;/**< [ 12: 12](R/W) Specifies TX coefficient request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_COEFF_REQ_OVRRD_EN].
+ See GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+ uint64_t cfg_tx_vboost_en_ovrrd_val : 1;/**< [ 13: 13](R/W) Specifies TX VBOOST enable request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_VBOOST_EN_OVRRD_EN]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_pcs_ctlifc_0_s cn; */
+};
+typedef union bdk_gserx_lanex_pcs_ctlifc_0 bdk_gserx_lanex_pcs_ctlifc_0_t;
+
+/* Address of GSER(a)_LANE(b)_PCS_CTLIFC_0; index bounds are model-dependent,
+   out-of-range arguments take the __bdk_csr_fatal() error path. */
+static inline uint64_t BDK_GSERX_LANEX_PCS_CTLIFC_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_PCS_CTLIFC_0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904c0060ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904c0060ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904c0060ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_PCS_CTLIFC_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_PCS_CTLIFC_0(a,b) bdk_gserx_lanex_pcs_ctlifc_0_t
+#define bustype_BDK_GSERX_LANEX_PCS_CTLIFC_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_PCS_CTLIFC_0(a,b) "GSERX_LANEX_PCS_CTLIFC_0"
+#define device_bar_BDK_GSERX_LANEX_PCS_CTLIFC_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_PCS_CTLIFC_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_PCS_CTLIFC_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_pcs_ctlifc_1
+ *
+ * GSER Lane Raw PCS Control Interface Configuration 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* NOTE(review): the field list below appears twice so that bit positions
+   match the hardware layout on both big- and little-endian builds. */
+union bdk_gserx_lanex_pcs_ctlifc_1
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_pcs_ctlifc_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t cfg_rx_pstate_req_ovrrd_val : 2;/**< [ 8: 7](R/W) Override RX pstate request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN]. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t cfg_rx_mode_ovrrd_val : 2; /**< [ 1: 0](R/W) Override PCS RX mode (data width) when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_RX_MODE_OVRRD_EN].
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (not supported).
+ 0x3 = 20-bit raw data. */
+#else /* Word 0 - Little Endian */
+ uint64_t cfg_rx_mode_ovrrd_val : 2; /**< [ 1: 0](R/W) Override PCS RX mode (data width) when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_RX_MODE_OVRRD_EN].
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (not supported).
+ 0x3 = 20-bit raw data. */
+ uint64_t reserved_2_6 : 5;
+ uint64_t cfg_rx_pstate_req_ovrrd_val : 2;/**< [ 8: 7](R/W) Override RX pstate request when its override bit
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_EN]. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_pcs_ctlifc_1_s cn; */
+};
+typedef union bdk_gserx_lanex_pcs_ctlifc_1 bdk_gserx_lanex_pcs_ctlifc_1_t;
+
+/* Address of GSER(a)_LANE(b)_PCS_CTLIFC_1; index bounds are model-dependent,
+   out-of-range arguments take the __bdk_csr_fatal() error path. */
+static inline uint64_t BDK_GSERX_LANEX_PCS_CTLIFC_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_PCS_CTLIFC_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904c0068ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904c0068ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904c0068ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_PCS_CTLIFC_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_PCS_CTLIFC_1(a,b) bdk_gserx_lanex_pcs_ctlifc_1_t
+#define bustype_BDK_GSERX_LANEX_PCS_CTLIFC_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_PCS_CTLIFC_1(a,b) "GSERX_LANEX_PCS_CTLIFC_1"
+#define device_bar_BDK_GSERX_LANEX_PCS_CTLIFC_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_PCS_CTLIFC_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_PCS_CTLIFC_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_pcs_ctlifc_2
+ *
+ * GSER Lane Raw PCS Control Interface Configuration 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* NOTE(review): the field list below appears twice so that bit positions
+   match the hardware layout on both big- and little-endian builds. */
+union bdk_gserx_lanex_pcs_ctlifc_2
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_pcs_ctlifc_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t ctlifc_ovrrd_req : 1; /**< [ 15: 15](WO) Writing to set this bit initiates a state machine interface request
+ for GSER()_LANE()_PCS_CTLIFC_0 and GSER()_LANE()_PCS_CTLIFC_1
+ override values.
+
+ [CTLIFC_OVRRD_REQ] should be written with a one (with
+ [CFG_TX_COEFF_REQ_OVRRD_EN]=1 and
+ GSER()_LANE()_PCS_CTLIFC_0[CFG_TX_COEFF_REQ_OVRRD_VAL]=1) to initiate
+ a control interface configuration over-ride after manually programming
+ transmitter settings. See GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP]
+ and GSER()_LANE()_TX_CFG_0[CFG_TX_SWING]. */
+ uint64_t reserved_9_14 : 6;
+ uint64_t cfg_tx_vboost_en_ovrrd_en : 1;/**< [ 8: 8](R/W) Override mac_pcs_txX vboost_en signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_VBOOST_EN_OVRRD_VAL]. */
+ uint64_t cfg_tx_coeff_req_ovrrd_en : 1;/**< [ 7: 7](R/W) Override mac_pcs_txX_coeff_req signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_0[CFG_TX_COEFF_REQ_OVRRD_VAL]. See
+ [CTLIFC_OVRRD_REQ]. */
+ uint64_t cfg_rx_cdr_coast_req_ovrrd_en : 1;/**< [ 6: 6](R/W) Override mac_pcs_rxX_cdr_coast signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_RX_COAST_REQ_OVRRD_VAL]. */
+ uint64_t cfg_tx_detrx_en_req_ovrrd_en : 1;/**< [ 5: 5](R/W) Override mac_pcs_txX_detrx_en signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_DETRX_EN_REQ_OVRRD_VAL]. */
+ uint64_t cfg_soft_reset_req_ovrrd_en : 1;/**< [ 4: 4](R/W) Override mac_pcs_laneX_soft_rst signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_SOFT_RESET_REQ_OVRRD_VAL]. */
+ uint64_t cfg_lane_pwr_off_ovrrd_en : 1;/**< [ 3: 3](R/W) Override mac_pcs_laneX_pwr_off signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_LANE_PWR_OFF_OVRRD_VAL]. */
+ uint64_t cfg_tx_pstate_req_ovrrd_en : 1;/**< [ 2: 2](R/W) Override mac_pcs_txX_pstate[1:0] signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_VAL].
+ When using this field to change the TX power state, you must also set
+ the override enable bits for the lane_mode, soft_reset and lane_pwr_off
+ fields. The corresponding orrd_val fields should be programmed so as
+ not to cause undesired changes. */
+ uint64_t cfg_rx_pstate_req_ovrrd_en : 1;/**< [ 1: 1](R/W) Override mac_pcs_rxX_pstate[1:0] signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_VAL].
+ When using this field to change the RX power state, you must also set
+ the override enable bits for the lane_mode, soft_reset and lane_pwr_off
+ fields. The corresponding orrd_val fields should be programmed so as
+ not to cause undesired changes. */
+ uint64_t cfg_lane_mode_req_ovrrd_en : 1;/**< [ 0: 0](R/W) Override mac_pcs_laneX_mode[3:0] signal with the value specified in
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_LANE_MODE_REQ_OVRRD_VAL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cfg_lane_mode_req_ovrrd_en : 1;/**< [ 0: 0](R/W) Override mac_pcs_laneX_mode[3:0] signal with the value specified in
+ is asserted GSER()_LANE()_PCS_CTLIFC_2[CFG_LANE_MODE_REQ_OVRRD_VAL]. */
+ uint64_t cfg_rx_pstate_req_ovrrd_en : 1;/**< [ 1: 1](R/W) Override mac_pcs_rxX_pstate[1:0] signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_RX_PSTATE_REQ_OVRRD_VAL].
+ When using this field to change the RX power state, you must also set
+ the override enable bits for the lane_mode, soft_reset and lane_pwr_off
+ fields. The corresponding orrd_val fields should be programmed so as
+ not to cause undesired changes. */
+ uint64_t cfg_tx_pstate_req_ovrrd_en : 1;/**< [ 2: 2](R/W) Override mac_pcs_txX_pstate[1:0] signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_PSTATE_REQ_OVRRD_VAL].
+ When using this field to change the TX power state, you must also set
+ the override enable bits for the lane_mode, soft_reset and lane_pwr_off
+ fields. The corresponding orrd_val fields should be programmed so as
+ not to cause undesired changes. */
+ uint64_t cfg_lane_pwr_off_ovrrd_en : 1;/**< [ 3: 3](R/W) Override mac_pcs_laneX_pwr_off signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_LANE_PWR_OFF_OVRRD_VAL]. */
+ uint64_t cfg_soft_reset_req_ovrrd_en : 1;/**< [ 4: 4](R/W) Override mac_pcs_laneX_soft_rst signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_SOFT_RESET_REQ_OVRRD_VAL]. */
+ uint64_t cfg_tx_detrx_en_req_ovrrd_en : 1;/**< [ 5: 5](R/W) Override mac_pcs_txX_detrx_en signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_DETRX_EN_REQ_OVRRD_VAL]. */
+ uint64_t cfg_rx_cdr_coast_req_ovrrd_en : 1;/**< [ 6: 6](R/W) Override mac_pcs_rxX_cdr_coast signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_RX_COAST_REQ_OVRRD_VAL]. */
+ uint64_t cfg_tx_coeff_req_ovrrd_en : 1;/**< [ 7: 7](R/W) Override mac_pcs_txX_coeff_req signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_0[CFG_TX_COEFF_REQ_OVRRD_VAL]. See
+ [CTLIFC_OVRRD_REQ]. */
+ uint64_t cfg_tx_vboost_en_ovrrd_en : 1;/**< [ 8: 8](R/W) Override mac_pcs_txX vboost_en signal with the value specified in
+ GSER()_LANE()_PCS_CTLIFC_2[CFG_TX_VBOOST_EN_OVRRD_VAL]. */
+ uint64_t reserved_9_14 : 6;
+ uint64_t ctlifc_ovrrd_req : 1; /**< [ 15: 15](WO) Writing to set this bit initiates a state machine interface request
+ for GSER()_LANE()_PCS_CTLIFC_0 and GSER()_LANE()_PCS_CTLIFC_1
+ override values.
+
+ [CTLIFC_OVRRD_REQ] should be written with a one (with
+ [CFG_TX_COEFF_REQ_OVRRD_EN]=1 and
+ GSER()_LANE()_PCS_CTLIFC_0[CFG_TX_COEFF_REQ_OVRRD_VAL]=1) to initiate
+ a control interface configuration over-ride after manually programming
+ transmitter settings. See GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP]
+ and GSER()_LANE()_TX_CFG_0[CFG_TX_SWING]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_pcs_ctlifc_2_s cn; */
+};
+typedef union bdk_gserx_lanex_pcs_ctlifc_2 bdk_gserx_lanex_pcs_ctlifc_2_t;
+
+/* Address of GSER(a)_LANE(b)_PCS_CTLIFC_2; index bounds are model-dependent,
+   out-of-range arguments take the __bdk_csr_fatal() error path. */
+static inline uint64_t BDK_GSERX_LANEX_PCS_CTLIFC_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_PCS_CTLIFC_2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904c0070ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904c0070ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904c0070ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_PCS_CTLIFC_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_PCS_CTLIFC_2(a,b) bdk_gserx_lanex_pcs_ctlifc_2_t
+#define bustype_BDK_GSERX_LANEX_PCS_CTLIFC_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_PCS_CTLIFC_2(a,b) "GSERX_LANEX_PCS_CTLIFC_2"
+#define device_bar_BDK_GSERX_LANEX_PCS_CTLIFC_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_PCS_CTLIFC_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_PCS_CTLIFC_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_pcs_macifc_mon_0
+ *
+ * GSER Lane MAC to Raw PCS Interface Monitor 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_pcs_macifc_mon_0
+{
+    uint64_t u;
+    /* Field list appears twice, mirrored, so every field keeps the same bit
+       position in the 64-bit word regardless of host endianness. */
+    struct bdk_gserx_lanex_pcs_macifc_mon_0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63 : 48;
+        uint64_t mac_pcs_tx_pstate : 2; /**< [ 15: 14](RO/H) Current state of the MAC to PCS TX power state\<2:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_pstate[2:0]. */
+        uint64_t mac_pcs_rx_pstate : 2; /**< [ 13: 12](RO/H) Current state of the MAC to PCS RX power state\<2:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_rxX_pstate[2:0]. */
+        uint64_t mac_pcs_lane_pwr_off : 1; /**< [ 11: 11](RO/H) Current state of the MAC to PCS lane power off input.
+                                                                 Internal:
+                                                                 mac_pcs_laneX_pwr_off. */
+        uint64_t reserved_10 : 1;
+        uint64_t mac_pcs_lane_soft_reset : 1;/**< [  9:  9](RO/H) Current state of the MAC to PCS soft reset input.
+                                                                 Internal:
+                                                                 mac_pcs_laneX_soft_reset. */
+        uint64_t mac_pcs_lane_loopbk_en : 1; /**< [  8:  8](RO/H) Current state of the MAC to PCS lane loopback enable input.
+                                                                 Internal:
+                                                                 mac_pcs_laneX_loopbk_en. */
+        uint64_t mac_pcs_rx_eie_det_en : 1; /**< [  7:  7](RO/H) Current state of the MAC to PCS receiver electrical idle exit
+                                                                 detect enable input.
+
+                                                                 Internal:
+                                                                 mac_pcs_rxX_eie_det_en. */
+        uint64_t mac_pcs_rx_cdr_coast : 1; /**< [  6:  6](RO/H) Current state of the MAC to PCS lane receiver CDR coast input.
+                                                                 Internal:
+                                                                 mac_pcs_rxX_cdr_coast. */
+        uint64_t mac_pcs_tx_detrx_en : 1; /**< [  5:  5](RO/H) Current state of the MAC to PCS transmitter receiver detect
+                                                                 enable input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_detrx_en. */
+        uint64_t mac_pcs_rx_eq_eval : 1; /**< [  4:  4](RO/H) Current state of the MAC to PCS receiver equalizer evaluation
+                                                                 request input.
+
+                                                                 Internal:
+                                                                 mac_pcs_rxX_eq_eval. */
+        uint64_t mac_pcs_lane_mode : 4; /**< [  3:  0](RO/H) Current state of the MAC to PCS lane mode input.
+                                                                 Internal:
+                                                                 mac_pcs_laneX_mode[3:0]. */
+#else /* Word 0 - Little Endian */
+        uint64_t mac_pcs_lane_mode : 4; /**< [  3:  0](RO/H) Current state of the MAC to PCS lane mode input.
+                                                                 Internal:
+                                                                 mac_pcs_laneX_mode[3:0]. */
+        uint64_t mac_pcs_rx_eq_eval : 1; /**< [  4:  4](RO/H) Current state of the MAC to PCS receiver equalizer evaluation
+                                                                 request input.
+
+                                                                 Internal:
+                                                                 mac_pcs_rxX_eq_eval. */
+        uint64_t mac_pcs_tx_detrx_en : 1; /**< [  5:  5](RO/H) Current state of the MAC to PCS transmitter receiver detect
+                                                                 enable input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_detrx_en. */
+        uint64_t mac_pcs_rx_cdr_coast : 1; /**< [  6:  6](RO/H) Current state of the MAC to PCS lane receiver CDR coast input.
+                                                                 Internal:
+                                                                 mac_pcs_rxX_cdr_coast. */
+        uint64_t mac_pcs_rx_eie_det_en : 1; /**< [  7:  7](RO/H) Current state of the MAC to PCS receiver electrical idle exit
+                                                                 detect enable input.
+
+                                                                 Internal:
+                                                                 mac_pcs_rxX_eie_det_en. */
+        uint64_t mac_pcs_lane_loopbk_en : 1; /**< [  8:  8](RO/H) Current state of the MAC to PCS lane loopback enable input.
+                                                                 Internal:
+                                                                 mac_pcs_laneX_loopbk_en. */
+        uint64_t mac_pcs_lane_soft_reset : 1;/**< [  9:  9](RO/H) Current state of the MAC to PCS soft reset input.
+                                                                 Internal:
+                                                                 mac_pcs_laneX_soft_reset. */
+        uint64_t reserved_10 : 1;
+        uint64_t mac_pcs_lane_pwr_off : 1; /**< [ 11: 11](RO/H) Current state of the MAC to PCS lane power off input.
+                                                                 Internal:
+                                                                 mac_pcs_laneX_pwr_off. */
+        uint64_t mac_pcs_rx_pstate : 2; /**< [ 13: 12](RO/H) Current state of the MAC to PCS RX power state\<2:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_rxX_pstate[2:0]. */
+        uint64_t mac_pcs_tx_pstate : 2; /**< [ 15: 14](RO/H) Current state of the MAC to PCS TX power state\<2:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_pstate[2:0]. */
+        uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_pcs_macifc_mon_0_s cn; */
+};
+typedef union bdk_gserx_lanex_pcs_macifc_mon_0 bdk_gserx_lanex_pcs_macifc_mon_0_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_PCS_MACIFC_MON_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_PCS_MACIFC_MON_0(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904c0108ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904c0108ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904c0108ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_PCS_MACIFC_MON_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_PCS_MACIFC_MON_0(a,b) bdk_gserx_lanex_pcs_macifc_mon_0_t
+#define bustype_BDK_GSERX_LANEX_PCS_MACIFC_MON_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_PCS_MACIFC_MON_0(a,b) "GSERX_LANEX_PCS_MACIFC_MON_0"
+#define device_bar_BDK_GSERX_LANEX_PCS_MACIFC_MON_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_PCS_MACIFC_MON_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_PCS_MACIFC_MON_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_pcs_macifc_mon_2
+ *
+ * GSER Lane MAC to Raw PCS Interface Monitor 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_pcs_macifc_mon_2
+{
+    uint64_t u;
+    /* Mirrored field list keeps bit positions endian-independent. */
+    struct bdk_gserx_lanex_pcs_macifc_mon_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63 : 48;
+        uint64_t tx_coeff_req : 1; /**< [ 15: 15](RO/H) Current state of the MAC to PCS TX coefficient request input.
+                                                                 Internal:
+                                                                 mac_pcs_txX_coeff_req. */
+        uint64_t tx_vboost_en : 1; /**< [ 14: 14](RO/H) Current state of the MAC to PCS TX Vboost enable input.
+                                                                 Internal:
+                                                                 mac_pcs_txX_vboost_en. */
+        uint64_t tx_swing : 5; /**< [ 13:  9](RO/H) Current state of the MAC to PCS TX equalizer swing\<4:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_swing[4:0]. */
+        uint64_t tx_pre : 4; /**< [  8:  5](RO/H) Current state of the MAC to PCS TX equalizer preemphasis\<3:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_pre[3:0]. */
+        uint64_t tx_post : 5; /**< [  4:  0](RO/H) Current state of the MAC to PCS TX equalizer postemphasis\<4:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_post[4:0]. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_post : 5; /**< [  4:  0](RO/H) Current state of the MAC to PCS TX equalizer postemphasis\<4:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_post[4:0]. */
+        uint64_t tx_pre : 4; /**< [  8:  5](RO/H) Current state of the MAC to PCS TX equalizer preemphasis\<3:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_pre[3:0]. */
+        uint64_t tx_swing : 5; /**< [ 13:  9](RO/H) Current state of the MAC to PCS TX equalizer swing\<4:0\> input.
+
+                                                                 Internal:
+                                                                 mac_pcs_txX_swing[4:0]. */
+        uint64_t tx_vboost_en : 1; /**< [ 14: 14](RO/H) Current state of the MAC to PCS TX Vboost enable input.
+                                                                 Internal:
+                                                                 mac_pcs_txX_vboost_en. */
+        uint64_t tx_coeff_req : 1; /**< [ 15: 15](RO/H) Current state of the MAC to PCS TX coefficient request input.
+                                                                 Internal:
+                                                                 mac_pcs_txX_coeff_req. */
+        uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_pcs_macifc_mon_2_s cn; */
+};
+typedef union bdk_gserx_lanex_pcs_macifc_mon_2 bdk_gserx_lanex_pcs_macifc_mon_2_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_PCS_MACIFC_MON_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_PCS_MACIFC_MON_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904c0118ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904c0118ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904c0118ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_PCS_MACIFC_MON_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_PCS_MACIFC_MON_2(a,b) bdk_gserx_lanex_pcs_macifc_mon_2_t
+#define bustype_BDK_GSERX_LANEX_PCS_MACIFC_MON_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_PCS_MACIFC_MON_2(a,b) "GSERX_LANEX_PCS_MACIFC_MON_2"
+#define device_bar_BDK_GSERX_LANEX_PCS_MACIFC_MON_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_PCS_MACIFC_MON_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_PCS_MACIFC_MON_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_pma_loopback_ctrl
+ *
+ * GSER Lane PMA Loopback Control Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_pma_loopback_ctrl
+{
+    uint64_t u;
+    /* Mirrored field list keeps bit positions endian-independent. */
+    struct bdk_gserx_lanex_pma_loopback_ctrl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_2_63 : 62;
+        uint64_t cfg_ln_lpbk_mode_ovrrd_en : 1;/**< [  1:  1](R/W) Enable override mac_pcs_loopbk_mode[3:0] with value of CFG_LN_LPBK_MODE. */
+        uint64_t cfg_ln_lpbk_mode : 1; /**< [  0:  0](R/W) Override value when CFG_LN_LPBK_MODE_OVRRD_EN is set. */
+#else /* Word 0 - Little Endian */
+        uint64_t cfg_ln_lpbk_mode : 1; /**< [  0:  0](R/W) Override value when CFG_LN_LPBK_MODE_OVRRD_EN is set. */
+        uint64_t cfg_ln_lpbk_mode_ovrrd_en : 1;/**< [  1:  1](R/W) Enable override mac_pcs_loopbk_mode[3:0] with value of CFG_LN_LPBK_MODE. */
+        uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_pma_loopback_ctrl_s cn; */
+};
+typedef union bdk_gserx_lanex_pma_loopback_ctrl bdk_gserx_lanex_pma_loopback_ctrl_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_PMA_LOOPBACK_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_PMA_LOOPBACK_CTRL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904400d0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904400d0ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904400d0ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_PMA_LOOPBACK_CTRL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_PMA_LOOPBACK_CTRL(a,b) bdk_gserx_lanex_pma_loopback_ctrl_t
+#define bustype_BDK_GSERX_LANEX_PMA_LOOPBACK_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_PMA_LOOPBACK_CTRL(a,b) "GSERX_LANEX_PMA_LOOPBACK_CTRL"
+#define device_bar_BDK_GSERX_LANEX_PMA_LOOPBACK_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_PMA_LOOPBACK_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_PMA_LOOPBACK_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_pwr_ctrl
+ *
+ * GSER Lane Power Control Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_pwr_ctrl
+{
+    uint64_t u;
+    /* Mirrored field list keeps bit positions endian-independent. */
+    struct bdk_gserx_lanex_pwr_ctrl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t tx_sds_fifo_reset_ovrrd_en : 1;/**< [ 14: 14](R/W) When asserted, TX_SDS_FIFO_RESET_OVRRD_VAL is used to specify the value of the reset
+                                                                 signal for the TX FIFO supplying data to the SerDes p2s interface. */
+        uint64_t tx_sds_fifo_reset_ovrrd_val : 1;/**< [ 13: 13](R/W) When TX_SDS_FIFO_RESET_OVRRD_EN is asserted, this field is
+                                                                 used to specify the value of the reset
+                                                                 signal for the TX FIFO supplying data to the SerDes p2s interface. */
+        uint64_t tx_pcs_reset_ovrrd_val : 1; /**< [ 12: 12](R/W) When TX_PCS_RESET_OVRRD_EN is
+                                                                 asserted, this field is used to specify the value of
+                                                                 the reset signal for PCS TX logic. */
+        uint64_t rx_pcs_reset_ovrrd_val : 1; /**< [ 11: 11](R/W) When RX_PCS_RESET_OVRRD_EN is
+                                                                 asserted, this field is used to specify the value of
+                                                                 the reset signal for PCS RX logic. */
+        uint64_t reserved_9_10 : 2;
+        uint64_t rx_resetn_ovrrd_en : 1; /**< [  8:  8](R/W) Override RX power state machine rx_resetn
+                                                                 control signal. When set, the rx_resetn control signal is taken
+                                                                 from the GSER()_LANE()_RX_CFG_0[RX_RESETN_OVRRD_VAL]
+                                                                 control bit. */
+        uint64_t rx_resetn_ovrrd_val : 1; /**< [  7:  7](R/W) Override RX power state machine reset control
+                                                                 signal. When set, reset control signals are specified in
+                                                                 [RX_PCS_RESET_OVRRD_VAL]. */
+        uint64_t rx_lctrl_ovrrd_en : 1; /**< [  6:  6](R/W) Override RX power state machine loop control
+                                                                 signals. When set, the loop control settings are
+                                                                 specified in the GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL] field. */
+        uint64_t rx_lctrl_ovrrd_val : 1; /**< [  5:  5](R/W) Override RX power state machine power down
+                                                                 control signal. When set, the power down control signal is
+                                                                 specified by GSER()_LANE()_RX_CFG_1[RX_CHPD_OVRRD_VAL]. */
+        uint64_t tx_tristate_en_ovrrd_en : 1;/**< [  4:  4](R/W) Override TX power state machine TX tristate
+                                                                 control signal. When set, TX tristate control signal is specified
+                                                                 in GSER()_LANE()_TX_CFG_0[TX_TRISTATE_EN_OVRRD_VAL]. */
+        uint64_t tx_pcs_reset_ovrrd_en : 1; /**< [  3:  3](R/W) Override TX power state machine reset control
+                                                                 signal. When set, reset control signals is specified in
+                                                                 [TX_PCS_RESET_OVRRD_VAL]. */
+        uint64_t tx_elec_idle_ovrrd_en : 1; /**< [  2:  2](R/W) Override mac_pcs_txX_elec_idle signal
+                                                                 When set, TX electrical idle is controlled from
+                                                                 GSER()_LANE()_TX_CFG_1[TX_ELEC_IDLE_OVRRD_VAL]
+                                                                 mac_pcs_txX_elec_idle signal is ignored. */
+        uint64_t tx_pd_ovrrd_en : 1; /**< [  1:  1](R/W) Override TX power state machine TX lane
+                                                                 power-down control signal
+                                                                 When set, TX lane power down is controlled by
+                                                                 GSER()_LANE()_TX_CFG_0[TX_CHPD_OVRRD_VAL]. */
+        uint64_t tx_p2s_resetn_ovrrd_en : 1; /**< [  0:  0](R/W) Override TX power state machine TX reset
+                                                                 control signal
+                                                                 When set, TX reset is controlled by
+                                                                 GSER()_LANE()_TX_CFG_0[TX_RESETN_OVRRD_VAL]. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_p2s_resetn_ovrrd_en : 1; /**< [  0:  0](R/W) Override TX power state machine TX reset
+                                                                 control signal
+                                                                 When set, TX reset is controlled by
+                                                                 GSER()_LANE()_TX_CFG_0[TX_RESETN_OVRRD_VAL]. */
+        uint64_t tx_pd_ovrrd_en : 1; /**< [  1:  1](R/W) Override TX power state machine TX lane
+                                                                 power-down control signal
+                                                                 When set, TX lane power down is controlled by
+                                                                 GSER()_LANE()_TX_CFG_0[TX_CHPD_OVRRD_VAL]. */
+        uint64_t tx_elec_idle_ovrrd_en : 1; /**< [  2:  2](R/W) Override mac_pcs_txX_elec_idle signal
+                                                                 When set, TX electrical idle is controlled from
+                                                                 GSER()_LANE()_TX_CFG_1[TX_ELEC_IDLE_OVRRD_VAL]
+                                                                 mac_pcs_txX_elec_idle signal is ignored. */
+        uint64_t tx_pcs_reset_ovrrd_en : 1; /**< [  3:  3](R/W) Override TX power state machine reset control
+                                                                 signal. When set, reset control signals is specified in
+                                                                 [TX_PCS_RESET_OVRRD_VAL]. */
+        uint64_t tx_tristate_en_ovrrd_en : 1;/**< [  4:  4](R/W) Override TX power state machine TX tristate
+                                                                 control signal. When set, TX tristate control signal is specified
+                                                                 in GSER()_LANE()_TX_CFG_0[TX_TRISTATE_EN_OVRRD_VAL]. */
+        uint64_t rx_lctrl_ovrrd_val : 1; /**< [  5:  5](R/W) Override RX power state machine power down
+                                                                 control signal. When set, the power down control signal is
+                                                                 specified by GSER()_LANE()_RX_CFG_1[RX_CHPD_OVRRD_VAL]. */
+        uint64_t rx_lctrl_ovrrd_en : 1; /**< [  6:  6](R/W) Override RX power state machine loop control
+                                                                 signals. When set, the loop control settings are
+                                                                 specified in the GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL] field. */
+        uint64_t rx_resetn_ovrrd_val : 1; /**< [  7:  7](R/W) Override RX power state machine reset control
+                                                                 signal. When set, reset control signals are specified in
+                                                                 [RX_PCS_RESET_OVRRD_VAL]. */
+        uint64_t rx_resetn_ovrrd_en : 1; /**< [  8:  8](R/W) Override RX power state machine rx_resetn
+                                                                 control signal. When set, the rx_resetn control signal is taken
+                                                                 from the GSER()_LANE()_RX_CFG_0[RX_RESETN_OVRRD_VAL]
+                                                                 control bit. */
+        uint64_t reserved_9_10 : 2;
+        uint64_t rx_pcs_reset_ovrrd_val : 1; /**< [ 11: 11](R/W) When RX_PCS_RESET_OVRRD_EN is
+                                                                 asserted, this field is used to specify the value of
+                                                                 the reset signal for PCS RX logic. */
+        uint64_t tx_pcs_reset_ovrrd_val : 1; /**< [ 12: 12](R/W) When TX_PCS_RESET_OVRRD_EN is
+                                                                 asserted, this field is used to specify the value of
+                                                                 the reset signal for PCS TX logic. */
+        uint64_t tx_sds_fifo_reset_ovrrd_val : 1;/**< [ 13: 13](R/W) When TX_SDS_FIFO_RESET_OVRRD_EN is asserted, this field is
+                                                                 used to specify the value of the reset
+                                                                 signal for the TX FIFO supplying data to the SerDes p2s interface. */
+        uint64_t tx_sds_fifo_reset_ovrrd_en : 1;/**< [ 14: 14](R/W) When asserted, TX_SDS_FIFO_RESET_OVRRD_VAL is used to specify the value of the reset
+                                                                 signal for the TX FIFO supplying data to the SerDes p2s interface. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_pwr_ctrl_s cn; */
+};
+typedef union bdk_gserx_lanex_pwr_ctrl bdk_gserx_lanex_pwr_ctrl_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_PWR_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_PWR_CTRL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904400d8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904400d8ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904400d8ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_PWR_CTRL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_PWR_CTRL(a,b) bdk_gserx_lanex_pwr_ctrl_t
+#define bustype_BDK_GSERX_LANEX_PWR_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_PWR_CTRL(a,b) "GSERX_LANEX_PWR_CTRL"
+#define device_bar_BDK_GSERX_LANEX_PWR_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_PWR_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_PWR_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_aeq_out_0
+ *
+ * GSER Lane SerDes RX Adaptive Equalizer 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_aeq_out_0
+{
+    uint64_t u;
+    /* Mirrored field list keeps bit positions endian-independent. */
+    struct bdk_gserx_lanex_rx_aeq_out_0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_10_63 : 54;
+        uint64_t sds_pcs_rx_aeq_out : 10; /**< [  9:  0](RO/H) \<9:5\>: DFE TAP5.
+                                                                 \<4:0\>: DFE TAP4. */
+#else /* Word 0 - Little Endian */
+        uint64_t sds_pcs_rx_aeq_out : 10; /**< [  9:  0](RO/H) \<9:5\>: DFE TAP5.
+                                                                 \<4:0\>: DFE TAP4. */
+        uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_aeq_out_0_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_aeq_out_0 bdk_gserx_lanex_rx_aeq_out_0_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_RX_AEQ_OUT_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_AEQ_OUT_0(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440280ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440280ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440280ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_AEQ_OUT_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_AEQ_OUT_0(a,b) bdk_gserx_lanex_rx_aeq_out_0_t
+#define bustype_BDK_GSERX_LANEX_RX_AEQ_OUT_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_AEQ_OUT_0(a,b) "GSERX_LANEX_RX_AEQ_OUT_0"
+#define device_bar_BDK_GSERX_LANEX_RX_AEQ_OUT_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_AEQ_OUT_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_AEQ_OUT_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_aeq_out_1
+ *
+ * GSER Lane SerDes RX Adaptive Equalizer 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_aeq_out_1
+{
+    uint64_t u;
+    /* Mirrored field list keeps bit positions endian-independent. */
+    struct bdk_gserx_lanex_rx_aeq_out_1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t sds_pcs_rx_aeq_out : 15; /**< [ 14:  0](RO/H) \<14:10\> = DFE TAP3.
+                                                                 \<9:5\> = DFE TAP2.
+                                                                 \<4:0\> = DFE TAP1. */
+#else /* Word 0 - Little Endian */
+        uint64_t sds_pcs_rx_aeq_out : 15; /**< [ 14:  0](RO/H) \<14:10\> = DFE TAP3.
+                                                                 \<9:5\> = DFE TAP2.
+                                                                 \<4:0\> = DFE TAP1. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_aeq_out_1_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_aeq_out_1 bdk_gserx_lanex_rx_aeq_out_1_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_RX_AEQ_OUT_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_AEQ_OUT_1(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440288ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440288ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440288ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_AEQ_OUT_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_AEQ_OUT_1(a,b) bdk_gserx_lanex_rx_aeq_out_1_t
+#define bustype_BDK_GSERX_LANEX_RX_AEQ_OUT_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_AEQ_OUT_1(a,b) "GSERX_LANEX_RX_AEQ_OUT_1"
+#define device_bar_BDK_GSERX_LANEX_RX_AEQ_OUT_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_AEQ_OUT_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_AEQ_OUT_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_aeq_out_2
+ *
+ * GSER Lane SerDes RX Adaptive Equalizer 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_aeq_out_2
+{
+    uint64_t u;
+    /* Mirrored field list keeps bit positions endian-independent.
+       NOTE(review): the field is declared 15 bits wide but only \<9:0\> are
+       described below; \<14:10\> are presumably unused — confirm against the HRM. */
+    struct bdk_gserx_lanex_rx_aeq_out_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t sds_pcs_rx_aeq_out : 15; /**< [ 14:  0](RO/H) \<9:8\> = Reserved.
+                                                                 \<7:4\> = Pre-CTLE gain.
+                                                                 \<3:0\> = Post-CTLE gain. */
+#else /* Word 0 - Little Endian */
+        uint64_t sds_pcs_rx_aeq_out : 15; /**< [ 14:  0](RO/H) \<9:8\> = Reserved.
+                                                                 \<7:4\> = Pre-CTLE gain.
+                                                                 \<3:0\> = Post-CTLE gain. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_aeq_out_2_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_aeq_out_2 bdk_gserx_lanex_rx_aeq_out_2_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_RX_AEQ_OUT_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_AEQ_OUT_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440290ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440290ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440290ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_AEQ_OUT_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_AEQ_OUT_2(a,b) bdk_gserx_lanex_rx_aeq_out_2_t
+#define bustype_BDK_GSERX_LANEX_RX_AEQ_OUT_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_AEQ_OUT_2(a,b) "GSERX_LANEX_RX_AEQ_OUT_2"
+#define device_bar_BDK_GSERX_LANEX_RX_AEQ_OUT_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_AEQ_OUT_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_AEQ_OUT_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cdr_ctrl_1
+ *
+ * GSER Lane SerDes RX CDR Control 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cdr_ctrl_1
+{
+    uint64_t u;
+    /* Mirrored field list keeps bit positions endian-independent. */
+    struct bdk_gserx_lanex_rx_cdr_ctrl_1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63 : 48;
+        uint64_t cfg_rx_cdr_ctrl_ovrrd_val : 16;/**< [ 15:  0](R/W) Set CFG_RX_CDR_CTRL_OVRRD_EN in register
+                                                                 GSER()_LANE()_RX_MISC_OVRRD to override pcs_sds_rx_cdr_ctrl.
+                                                                 \<15:13\> = CDR frequency gain.
+                                                                 \<12\> = Frequency accumulator manual enable.
+                                                                 \<11:5\> = Frequency accumulator manual value.
+                                                                 \<4\> = CDR phase offset override enable.
+                                                                 \<3:0\> = CDR phase offset override, DLL IQ. */
+#else /* Word 0 - Little Endian */
+        uint64_t cfg_rx_cdr_ctrl_ovrrd_val : 16;/**< [ 15:  0](R/W) Set CFG_RX_CDR_CTRL_OVRRD_EN in register
+                                                                 GSER()_LANE()_RX_MISC_OVRRD to override pcs_sds_rx_cdr_ctrl.
+                                                                 \<15:13\> = CDR frequency gain.
+                                                                 \<12\> = Frequency accumulator manual enable.
+                                                                 \<11:5\> = Frequency accumulator manual value.
+                                                                 \<4\> = CDR phase offset override enable.
+                                                                 \<3:0\> = CDR phase offset override, DLL IQ. */
+        uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cdr_ctrl_1_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cdr_ctrl_1 bdk_gserx_lanex_rx_cdr_ctrl_1_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_CTRL_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_CTRL_1(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440038ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440038ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440038ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CDR_CTRL_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CDR_CTRL_1(a,b) bdk_gserx_lanex_rx_cdr_ctrl_1_t
+#define bustype_BDK_GSERX_LANEX_RX_CDR_CTRL_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CDR_CTRL_1(a,b) "GSERX_LANEX_RX_CDR_CTRL_1"
+#define device_bar_BDK_GSERX_LANEX_RX_CDR_CTRL_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CDR_CTRL_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CDR_CTRL_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cdr_ctrl_2
+ *
+ * GSER Lane SerDes RX CDR Control 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cdr_ctrl_2
+{
+    uint64_t u;
+    /* Mirrored field list keeps bit positions endian-independent. */
+    struct bdk_gserx_lanex_rx_cdr_ctrl_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63 : 48;
+        uint64_t cfg_rx_cdr_ctrl_ovrrd_val : 16;/**< [ 15:  0](R/W) Set CFG_RX_CDR_CTRL_OVRRD_EN in register
+                                                                 GSER()_LANE()_RX_MISC_OVRRD to override pcs_sds_rx_cdr_ctrl.
+                                                                 \<15\> = Shadow PI phase enable.
+                                                                 \<14:8\> = Shadow PI phase value.
+                                                                 \<7\> = CDR manual phase enable.
+                                                                 \<6:0\> = CDR manual phase value. */
+#else /* Word 0 - Little Endian */
+        uint64_t cfg_rx_cdr_ctrl_ovrrd_val : 16;/**< [ 15:  0](R/W) Set CFG_RX_CDR_CTRL_OVRRD_EN in register
+                                                                 GSER()_LANE()_RX_MISC_OVRRD to override pcs_sds_rx_cdr_ctrl.
+                                                                 \<15\> = Shadow PI phase enable.
+                                                                 \<14:8\> = Shadow PI phase value.
+                                                                 \<7\> = CDR manual phase enable.
+                                                                 \<6:0\> = CDR manual phase value. */
+        uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cdr_ctrl_2_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cdr_ctrl_2 bdk_gserx_lanex_rx_cdr_ctrl_2_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_CTRL_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_CTRL_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440040ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440040ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440040ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CDR_CTRL_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CDR_CTRL_2(a,b) bdk_gserx_lanex_rx_cdr_ctrl_2_t
+#define bustype_BDK_GSERX_LANEX_RX_CDR_CTRL_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CDR_CTRL_2(a,b) "GSERX_LANEX_RX_CDR_CTRL_2"
+#define device_bar_BDK_GSERX_LANEX_RX_CDR_CTRL_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CDR_CTRL_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CDR_CTRL_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cdr_misc_ctrl_0
+ *
+ * GSER Lane SerDes RX CDR Miscellaneous Control 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cdr_misc_ctrl_0
+{
+    uint64_t u;
+    /* Generic layout; the chip-specific variant below only differs in how the
+       upper reserved bits are named. Field lists are mirrored so bit positions
+       are endian-independent. */
+    struct bdk_gserx_lanex_rx_cdr_misc_ctrl_0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t pcs_sds_rx_cdr_misc_ctrl : 8;/**< [  7:  0](R/W) Per lane RX miscellaneous CDR control:
+                                                                 \<7\> = RT-Eyemon counter enable, will start counting 5.4e9 bits.
+                                                                 \<6\> = RT-Eyemon shadow PI control enable.
+                                                                 \<5:4\> = RT-Eyemon error counter byte selection observable on
+                                                                 SDS_OCS_RX_CDR_STATUS[14:7] in register GSER_LANE_RX_CDR_STATUS_1.
+                                                                 \<3:0\> = LBW adjustment thresholds. */
+#else /* Word 0 - Little Endian */
+        uint64_t pcs_sds_rx_cdr_misc_ctrl : 8;/**< [  7:  0](R/W) Per lane RX miscellaneous CDR control:
+                                                                 \<7\> = RT-Eyemon counter enable, will start counting 5.4e9 bits.
+                                                                 \<6\> = RT-Eyemon shadow PI control enable.
+                                                                 \<5:4\> = RT-Eyemon error counter byte selection observable on
+                                                                 SDS_OCS_RX_CDR_STATUS[14:7] in register GSER_LANE_RX_CDR_STATUS_1.
+                                                                 \<3:0\> = LBW adjustment thresholds. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* Chip variant: identical field, reserved space split into two ranges. */
+    struct bdk_gserx_lanex_rx_cdr_misc_ctrl_0_cn
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63 : 48;
+        uint64_t reserved_8_15 : 8;
+        uint64_t pcs_sds_rx_cdr_misc_ctrl : 8;/**< [  7:  0](R/W) Per lane RX miscellaneous CDR control:
+                                                                 \<7\> = RT-Eyemon counter enable, will start counting 5.4e9 bits.
+                                                                 \<6\> = RT-Eyemon shadow PI control enable.
+                                                                 \<5:4\> = RT-Eyemon error counter byte selection observable on
+                                                                 SDS_OCS_RX_CDR_STATUS[14:7] in register GSER_LANE_RX_CDR_STATUS_1.
+                                                                 \<3:0\> = LBW adjustment thresholds. */
+#else /* Word 0 - Little Endian */
+        uint64_t pcs_sds_rx_cdr_misc_ctrl : 8;/**< [  7:  0](R/W) Per lane RX miscellaneous CDR control:
+                                                                 \<7\> = RT-Eyemon counter enable, will start counting 5.4e9 bits.
+                                                                 \<6\> = RT-Eyemon shadow PI control enable.
+                                                                 \<5:4\> = RT-Eyemon error counter byte selection observable on
+                                                                 SDS_OCS_RX_CDR_STATUS[14:7] in register GSER_LANE_RX_CDR_STATUS_1.
+                                                                 \<3:0\> = LBW adjustment thresholds. */
+        uint64_t reserved_8_15 : 8;
+        uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+    } cn;
+};
+typedef union bdk_gserx_lanex_rx_cdr_misc_ctrl_0 bdk_gserx_lanex_rx_cdr_misc_ctrl_0_t;
+
+/* CSR address: base + 0x1000000 per GSER (a) + 0x100000 per lane (b);
+   per-model range check, fatal on out-of-range indices. */
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_MISC_CTRL_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_MISC_CTRL_0(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440208ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440208ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440208ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CDR_MISC_CTRL_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CDR_MISC_CTRL_0(a,b) bdk_gserx_lanex_rx_cdr_misc_ctrl_0_t
+#define bustype_BDK_GSERX_LANEX_RX_CDR_MISC_CTRL_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CDR_MISC_CTRL_0(a,b) "GSERX_LANEX_RX_CDR_MISC_CTRL_0"
+#define device_bar_BDK_GSERX_LANEX_RX_CDR_MISC_CTRL_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CDR_MISC_CTRL_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CDR_MISC_CTRL_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cdr_status_1
+ *
+ * GSER Lane SerDes RX CDR Status 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cdr_status_1
+{
+    uint64_t u;
+    struct bdk_gserx_lanex_rx_cdr_status_1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t sds_pcs_rx_cdr_status : 15; /**< [ 14:  0](RO/H) Per lane RX CDR status:
+                                                                 \<14:7\> = RT-Eyemon error counter.
+                                                                 \<6:4\> = LBW adjustment value.
+                                                                 \<3:0\> = LBW adjustment state. */
+#else /* Word 0 - Little Endian */
+        uint64_t sds_pcs_rx_cdr_status : 15; /**< [ 14:  0](RO/H) Per lane RX CDR status:
+                                                                 \<14:7\> = RT-Eyemon error counter.
+                                                                 \<6:4\> = LBW adjustment value.
+                                                                 \<3:0\> = LBW adjustment state. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gserx_lanex_rx_cdr_status_1_cn
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t reserved_15           : 1;
+        uint64_t sds_pcs_rx_cdr_status : 15; /**< [ 14:  0](RO/H) Per lane RX CDR status:
+                                                                 \<14:7\> = RT-Eyemon error counter.
+                                                                 \<6:4\> = LBW adjustment value.
+                                                                 \<3:0\> = LBW adjustment state. */
+#else /* Word 0 - Little Endian */
+        uint64_t sds_pcs_rx_cdr_status : 15; /**< [ 14:  0](RO/H) Per lane RX CDR status:
+                                                                 \<14:7\> = RT-Eyemon error counter.
+                                                                 \<6:4\> = LBW adjustment value.
+                                                                 \<3:0\> = LBW adjustment state. */
+        uint64_t reserved_15           : 1;
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } cn;
+};
+typedef union bdk_gserx_lanex_rx_cdr_status_1 bdk_gserx_lanex_rx_cdr_status_1_t;
+
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_STATUS_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_STATUS_1(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904402d0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904402d0ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904402d0ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CDR_STATUS_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CDR_STATUS_1(a,b) bdk_gserx_lanex_rx_cdr_status_1_t
+#define bustype_BDK_GSERX_LANEX_RX_CDR_STATUS_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CDR_STATUS_1(a,b) "GSERX_LANEX_RX_CDR_STATUS_1"
+#define device_bar_BDK_GSERX_LANEX_RX_CDR_STATUS_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CDR_STATUS_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CDR_STATUS_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cdr_status_2
+ *
+ * GSER Lane SerDes RX CDR Status 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cdr_status_2
+{
+    uint64_t u;
+    struct bdk_gserx_lanex_rx_cdr_status_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_14_63        : 50;
+        uint64_t sds_pcs_rx_cdr_status : 14; /**< [ 13:  0](RO/H) CDR status.
+                                                                 \<13:7\> = CDR phase control output.
+                                                                 \<6:0\> = CDR frequency accumulator output. */
+#else /* Word 0 - Little Endian */
+        uint64_t sds_pcs_rx_cdr_status : 14; /**< [ 13:  0](RO/H) CDR status.
+                                                                 \<13:7\> = CDR phase control output.
+                                                                 \<6:0\> = CDR frequency accumulator output. */
+        uint64_t reserved_14_63        : 50;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cdr_status_2_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cdr_status_2 bdk_gserx_lanex_rx_cdr_status_2_t;
+
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_STATUS_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CDR_STATUS_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904402d8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904402d8ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904402d8ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CDR_STATUS_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CDR_STATUS_2(a,b) bdk_gserx_lanex_rx_cdr_status_2_t
+#define bustype_BDK_GSERX_LANEX_RX_CDR_STATUS_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CDR_STATUS_2(a,b) "GSERX_LANEX_RX_CDR_STATUS_2"
+#define device_bar_BDK_GSERX_LANEX_RX_CDR_STATUS_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CDR_STATUS_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CDR_STATUS_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cfg_0
+ *
+ * GSER Lane SerDes RX Configuration 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cfg_0
+{
+    uint64_t u;
+    struct bdk_gserx_lanex_rx_cfg_0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t rx_datarate_ovrrd_en : 1;  /**< [ 15: 15](R/W) Override enable for RX power state machine data rate signal. */
+        uint64_t pcs_sds_rx_tristate_enable : 1;/**< [ 14: 14](R/W) RX termination high-Z enable. */
+        uint64_t rx_resetn_ovrrd_val : 1;    /**< [ 13: 13](R/W) This value overrides the RX power state machine rx_resetn control
+                                                                 signal when GSER()_LANE()_PWR_CTRL[RX_RESETN_OVRRD_EN] is set. */
+        uint64_t pcs_sds_rx_eyemon_en : 1;   /**< [ 12: 12](R/W) RX eyemon test enable. */
+        uint64_t pcs_sds_rx_pcm_ctrl : 4;    /**< [ 11:  8](R/W) \<11\>: Reserved.
+                                                                 \<10-8\>:
+                                                                 0x0 = 540mV.
+                                                                 0x1 = 540mV + 20mV.
+                                                                 0x2-0x3 = Reserved.
+                                                                 0x4 = 100-620mV (default).
+                                                                 0x5-0x7 = Reserved. */
+        uint64_t rx_datarate_ovrrd_val : 2;  /**< [  7:  6](R/W) Specifies the data rate when RX_DATARATE_OVRRD_EN is asserted:
+                                                                 0x0 = Full rate.
+                                                                 0x1 = 1/2 data rate.
+                                                                 0x2 = 1/4 data rate.
+                                                                 0x3 = 1/8 data rate. */
+        uint64_t cfg_rx_pol_invert : 1;      /**< [  5:  5](R/W) Invert the receive data. Applies when GSER()_LANE()_MISC_CFG_0[USE_PMA_POLARITY]
+                                                                 is deasserted. */
+        uint64_t rx_subblk_pd_ovrrd_val : 5; /**< [  4:  0](R/W) Not supported. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_subblk_pd_ovrrd_val : 5; /**< [  4:  0](R/W) Not supported. */
+        uint64_t cfg_rx_pol_invert : 1;      /**< [  5:  5](R/W) Invert the receive data. Applies when GSER()_LANE()_MISC_CFG_0[USE_PMA_POLARITY]
+                                                                 is deasserted. */
+        uint64_t rx_datarate_ovrrd_val : 2;  /**< [  7:  6](R/W) Specifies the data rate when RX_DATARATE_OVRRD_EN is asserted:
+                                                                 0x0 = Full rate.
+                                                                 0x1 = 1/2 data rate.
+                                                                 0x2 = 1/4 data rate.
+                                                                 0x3 = 1/8 data rate. */
+        uint64_t pcs_sds_rx_pcm_ctrl : 4;    /**< [ 11:  8](R/W) \<11\>: Reserved.
+                                                                 \<10-8\>:
+                                                                 0x0 = 540mV.
+                                                                 0x1 = 540mV + 20mV.
+                                                                 0x2-0x3 = Reserved.
+                                                                 0x4 = 100-620mV (default).
+                                                                 0x5-0x7 = Reserved. */
+        uint64_t pcs_sds_rx_eyemon_en : 1;   /**< [ 12: 12](R/W) RX eyemon test enable. */
+        uint64_t rx_resetn_ovrrd_val : 1;    /**< [ 13: 13](R/W) This value overrides the RX power state machine rx_resetn control
+                                                                 signal when GSER()_LANE()_PWR_CTRL[RX_RESETN_OVRRD_EN] is set. */
+        uint64_t pcs_sds_rx_tristate_enable : 1;/**< [ 14: 14](R/W) RX termination high-Z enable. */
+        uint64_t rx_datarate_ovrrd_en : 1;  /**< [ 15: 15](R/W) Override enable for RX power state machine data rate signal. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cfg_0_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cfg_0 bdk_gserx_lanex_rx_cfg_0_t;
+
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_0(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440000ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440000ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440000ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CFG_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CFG_0(a,b) bdk_gserx_lanex_rx_cfg_0_t
+#define bustype_BDK_GSERX_LANEX_RX_CFG_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CFG_0(a,b) "GSERX_LANEX_RX_CFG_0"
+#define device_bar_BDK_GSERX_LANEX_RX_CFG_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CFG_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CFG_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cfg_1
+ *
+ * GSER Lane SerDes RX Configuration 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cfg_1
+{
+    uint64_t u;
+    struct bdk_gserx_lanex_rx_cfg_1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t rx_chpd_ovrrd_val : 1;      /**< [ 15: 15](R/W) Not supported. */
+        uint64_t pcs_sds_rx_os_men : 1;      /**< [ 14: 14](R/W) RX offset manual enable. */
+        uint64_t eie_en_ovrrd_en : 1;        /**< [ 13: 13](R/W) Override enable for electrical-idle-exit circuit. */
+        uint64_t eie_en_ovrrd_val : 1;       /**< [ 12: 12](R/W) Override value for electrical-idle-exit circuit. */
+        uint64_t reserved_11           : 1;
+        uint64_t rx_pcie_mode_ovrrd_en : 1;  /**< [ 10: 10](R/W) Override enable for RX_PCIE_MODE_OVRRD_VAL. */
+        uint64_t rx_pcie_mode_ovrrd_val : 1; /**< [  9:  9](R/W) Override value for RX_PCIE_MODE_OVRRD_VAL;
+                                                                 selects between RX terminations.
+                                                                 0x0 = pcs_sds_rx_terminate_to_vdda.
+                                                                 0x1 = VDDA. */
+        uint64_t cfg_rx_dll_locken : 1;      /**< [  8:  8](R/W) Enable DLL lock when GSER()_LANE()_RX_MISC_OVRRD[CFG_RX_DLL_LOCKEN_OVRRD_EN] is asserted. */
+        uint64_t pcs_sds_rx_cdr_ssc_mode : 8;/**< [  7:  0](R/W) Per-lane RX CDR SSC control:
+                                                                 \<7:4\> = Reserved.
+                                                                 \<3\> = Clean SSC error flag.
+                                                                 \<2\> = Disable SSC filter.
+                                                                 \<1\> = Enable SSC value usage.
+                                                                 \<0\> = Reserved. */
+#else /* Word 0 - Little Endian */
+        uint64_t pcs_sds_rx_cdr_ssc_mode : 8;/**< [  7:  0](R/W) Per-lane RX CDR SSC control:
+                                                                 \<7:4\> = Reserved.
+                                                                 \<3\> = Clean SSC error flag.
+                                                                 \<2\> = Disable SSC filter.
+                                                                 \<1\> = Enable SSC value usage.
+                                                                 \<0\> = Reserved. */
+        uint64_t cfg_rx_dll_locken : 1;      /**< [  8:  8](R/W) Enable DLL lock when GSER()_LANE()_RX_MISC_OVRRD[CFG_RX_DLL_LOCKEN_OVRRD_EN] is asserted. */
+        uint64_t rx_pcie_mode_ovrrd_val : 1; /**< [  9:  9](R/W) Override value for RX_PCIE_MODE_OVRRD_VAL;
+                                                                 selects between RX terminations.
+                                                                 0x0 = pcs_sds_rx_terminate_to_vdda.
+                                                                 0x1 = VDDA. */
+        uint64_t rx_pcie_mode_ovrrd_en : 1;  /**< [ 10: 10](R/W) Override enable for RX_PCIE_MODE_OVRRD_VAL. */
+        uint64_t reserved_11           : 1;
+        uint64_t eie_en_ovrrd_val : 1;       /**< [ 12: 12](R/W) Override value for electrical-idle-exit circuit. */
+        uint64_t eie_en_ovrrd_en : 1;        /**< [ 13: 13](R/W) Override enable for electrical-idle-exit circuit. */
+        uint64_t pcs_sds_rx_os_men : 1;      /**< [ 14: 14](R/W) RX offset manual enable. */
+        uint64_t rx_chpd_ovrrd_val : 1;      /**< [ 15: 15](R/W) Not supported. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cfg_1_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cfg_1 bdk_gserx_lanex_rx_cfg_1_t;
+
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_1(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440008ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440008ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440008ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CFG_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CFG_1(a,b) bdk_gserx_lanex_rx_cfg_1_t
+#define bustype_BDK_GSERX_LANEX_RX_CFG_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CFG_1(a,b) "GSERX_LANEX_RX_CFG_1"
+#define device_bar_BDK_GSERX_LANEX_RX_CFG_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CFG_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CFG_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cfg_2
+ *
+ * GSER Lane SerDes RX Configuration 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cfg_2
+{
+    uint64_t u;
+    struct bdk_gserx_lanex_rx_cfg_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t pcs_sds_rx_terminate_to_vdda : 1;/**< [ 14: 14](R/W) RX termination control:
+                                                                 0 = Floating.
+                                                                 1 = Terminate to sds_vdda. */
+        uint64_t pcs_sds_rx_sampler_boost : 2;/**< [ 13: 12](R/W) Controls amount of boost.
+                                                                 Note that this control can negatively impact reliability. */
+        uint64_t pcs_sds_rx_sampler_boost_en : 1;/**< [ 11: 11](R/W) Faster sampler c2q.
+                                                                 For diagnostic use only. */
+        uint64_t reserved_10           : 1;
+        uint64_t rx_sds_rx_agc_mval : 10;    /**< [  9:  0](R/W) AGC manual value used when
+                                                                 GSER()_LANE()_RX_CFG_5[RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL]
+                                                                 are set.
+
+                                                                 \<9:8\>: Reserved.
+
+                                                                 \<7:4\>: Pre-CTLE (continuous time linear equalizer) gain (steps of approximately 0.75dB):
+                                                                 _ 0x0 = -6dB.
+                                                                 _ 0x1 = -5dB.
+                                                                 _ 0xF = +5dB.
+
+                                                                 \<3:0\>: Post-CTLE gain (steps of 0.0875):
+                                                                 _ 0x0 = lowest.
+                                                                 _ 0xF = lowest * 2.3125.
+
+                                                                 Recommended settings:
+
+                                                                 When auto-negotiated link training is not present (e.g. BGX) and link speed \<
+                                                                 5 Gbaud, pre-CTLE, post-CTLE, and peaking control settings should be manually
+                                                                 configured. GSER()_LANE()_RX_CFG_5[RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL]
+                                                                 should both be set, [RX_SDS_RX_AGC_MVAL] has the pre and post settings,
+                                                                 and GSER()_LANE()_RX_CTLE_CTRL[PCS_SDS_RX_CTLE_ZERO] controls equalizer
+                                                                 peaking.
+
+                                                                 The [RX_SDS_RX_AGC_MVAL] settings should be derived from signal integrity
+                                                                 simulations with the IBIS-AMI model supplied by Cavium when
+                                                                 GSER()_LANE()_RX_CFG_5[RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL] are set.
+
+                                                                 Internal:
+                                                                 reset value may be reasonable default settings. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sds_rx_agc_mval : 10;    /**< [  9:  0](R/W) AGC manual value used when
+                                                                 GSER()_LANE()_RX_CFG_5[RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL]
+                                                                 are set.
+
+                                                                 \<9:8\>: Reserved.
+
+                                                                 \<7:4\>: Pre-CTLE (continuous time linear equalizer) gain (steps of approximately 0.75dB):
+                                                                 _ 0x0 = -6dB.
+                                                                 _ 0x1 = -5dB.
+                                                                 _ 0xF = +5dB.
+
+                                                                 \<3:0\>: Post-CTLE gain (steps of 0.0875):
+                                                                 _ 0x0 = lowest.
+                                                                 _ 0xF = lowest * 2.3125.
+
+                                                                 Recommended settings:
+
+                                                                 When auto-negotiated link training is not present (e.g. BGX) and link speed \<
+                                                                 5 Gbaud, pre-CTLE, post-CTLE, and peaking control settings should be manually
+                                                                 configured. GSER()_LANE()_RX_CFG_5[RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL]
+                                                                 should both be set, [RX_SDS_RX_AGC_MVAL] has the pre and post settings,
+                                                                 and GSER()_LANE()_RX_CTLE_CTRL[PCS_SDS_RX_CTLE_ZERO] controls equalizer
+                                                                 peaking.
+
+                                                                 The [RX_SDS_RX_AGC_MVAL] settings should be derived from signal integrity
+                                                                 simulations with the IBIS-AMI model supplied by Cavium when
+                                                                 GSER()_LANE()_RX_CFG_5[RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL] are set.
+
+                                                                 Internal:
+                                                                 reset value may be reasonable default settings. */
+        uint64_t reserved_10           : 1;
+        uint64_t pcs_sds_rx_sampler_boost_en : 1;/**< [ 11: 11](R/W) Faster sampler c2q.
+                                                                 For diagnostic use only. */
+        uint64_t pcs_sds_rx_sampler_boost : 2;/**< [ 13: 12](R/W) Controls amount of boost.
+                                                                 Note that this control can negatively impact reliability. */
+        uint64_t pcs_sds_rx_terminate_to_vdda : 1;/**< [ 14: 14](R/W) RX termination control:
+                                                                 0 = Floating.
+                                                                 1 = Terminate to sds_vdda. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cfg_2_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cfg_2 bdk_gserx_lanex_rx_cfg_2_t;
+
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440010ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440010ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440010ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CFG_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CFG_2(a,b) bdk_gserx_lanex_rx_cfg_2_t
+#define bustype_BDK_GSERX_LANEX_RX_CFG_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CFG_2(a,b) "GSERX_LANEX_RX_CFG_2"
+#define device_bar_BDK_GSERX_LANEX_RX_CFG_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CFG_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CFG_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cfg_3
+ *
+ * GSER Lane SerDes RX Configuration 3 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cfg_3
+{
+    uint64_t u;
+    struct bdk_gserx_lanex_rx_cfg_3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t cfg_rx_errdet_ctrl : 16;    /**< [ 15:  0](R/W) RX adaptive equalizer control.
+                                                                 Value of pcs_sds_rx_err_det_ctrl when
+                                                                 GSER()_LANE()_RX_MISC_OVRRD[CFG_RS_ERRDET_CTRL_OVRRD_EN]
+                                                                 is set.
+
+                                                                 \<15:13\>: Starting delta (6.7mV/step, 13.4mV + 6.7mV*N).
+
+                                                                 \<12:10\>: Minimum delta to adapt to (6.7mV/step, 13.4mV + 6.7mV*N).
+
+                                                                 \<9:7\>: Window mode (PM) delta (6.7mV/step, 13.4mV + 6.7mV*N).
+
+                                                                 \<6\>: Enable DFE for edge samplers.
+
+                                                                 \<5:4\>: Edge sampler DFE alpha:
+                                                                 0x0 = 1/4.
+                                                                 0x1 = 1/2.
+                                                                 0x2 = 3/4.
+                                                                 0x3 = 1.
+
+                                                                 \<3:0\>: Q/QB error sampler 1 threshold, 6.7mV/step. */
+#else /* Word 0 - Little Endian */
+        uint64_t cfg_rx_errdet_ctrl : 16;    /**< [ 15:  0](R/W) RX adaptive equalizer control.
+                                                                 Value of pcs_sds_rx_err_det_ctrl when
+                                                                 GSER()_LANE()_RX_MISC_OVRRD[CFG_RS_ERRDET_CTRL_OVRRD_EN]
+                                                                 is set.
+
+                                                                 \<15:13\>: Starting delta (6.7mV/step, 13.4mV + 6.7mV*N).
+
+                                                                 \<12:10\>: Minimum delta to adapt to (6.7mV/step, 13.4mV + 6.7mV*N).
+
+                                                                 \<9:7\>: Window mode (PM) delta (6.7mV/step, 13.4mV + 6.7mV*N).
+
+                                                                 \<6\>: Enable DFE for edge samplers.
+
+                                                                 \<5:4\>: Edge sampler DFE alpha:
+                                                                 0x0 = 1/4.
+                                                                 0x1 = 1/2.
+                                                                 0x2 = 3/4.
+                                                                 0x3 = 1.
+
+                                                                 \<3:0\>: Q/QB error sampler 1 threshold, 6.7mV/step. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cfg_3_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cfg_3 bdk_gserx_lanex_rx_cfg_3_t;
+
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_3(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_3(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440018ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440018ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440018ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CFG_3", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CFG_3(a,b) bdk_gserx_lanex_rx_cfg_3_t
+#define bustype_BDK_GSERX_LANEX_RX_CFG_3(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CFG_3(a,b) "GSERX_LANEX_RX_CFG_3"
+#define device_bar_BDK_GSERX_LANEX_RX_CFG_3(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CFG_3(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CFG_3(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cfg_4
+ *
+ * GSER Lane SerDes RX Configuration 4 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cfg_4
+{
+    uint64_t u;
+    struct bdk_gserx_lanex_rx_cfg_4_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t cfg_rx_errdet_ctrl : 16;    /**< [ 15:  0](R/W) RX adaptive equalizer control.
+                                                                 Value of pcs_sds_rx_err_det_ctrl when
+                                                                 GSER()_LANE()_RX_MISC_OVRRD[CFG_RS_ERRDET_CTRL_OVRRD_EN] is set.
+
+                                                                 \<15:14\>: Reserved.
+
+                                                                 \<13:8\>: Q/QB error sampler 0 threshold, 6.7mV/step, used for training/LMS.
+
+                                                                 \<7\>: Enable Window mode, after training has finished.
+
+                                                                 \<6:5\>: Control sds_pcs_rx_vma_status[15:8].
+
+                                                                 0x0 = window counter[19:12] (FOM).
+                                                                 0x1 = window counter[11:4].
+                                                                 0x2 = CTLE pole, SDLL_IQ.
+                                                                 0x3 = pre-CTLE gain, CTLE peak.
+
+                                                                 \<4\>: Offset cancellation enable.
+
+                                                                 \<3:0\>: Max CTLE peak setting during training when pcs_sds_rx_vma_ctl[7] is set in
+                                                                 GSER()_LANE()_RX_VMA_CTRL. */
+#else /* Word 0 - Little Endian */
+        uint64_t cfg_rx_errdet_ctrl : 16;    /**< [ 15:  0](R/W) RX adaptive equalizer control.
+                                                                 Value of pcs_sds_rx_err_det_ctrl when
+                                                                 GSER()_LANE()_RX_MISC_OVRRD[CFG_RS_ERRDET_CTRL_OVRRD_EN] is set.
+
+                                                                 \<15:14\>: Reserved.
+
+                                                                 \<13:8\>: Q/QB error sampler 0 threshold, 6.7mV/step, used for training/LMS.
+
+                                                                 \<7\>: Enable Window mode, after training has finished.
+
+                                                                 \<6:5\>: Control sds_pcs_rx_vma_status[15:8].
+
+                                                                 0x0 = window counter[19:12] (FOM).
+                                                                 0x1 = window counter[11:4].
+                                                                 0x2 = CTLE pole, SDLL_IQ.
+                                                                 0x3 = pre-CTLE gain, CTLE peak.
+
+                                                                 \<4\>: Offset cancellation enable.
+
+                                                                 \<3:0\>: Max CTLE peak setting during training when pcs_sds_rx_vma_ctl[7] is set in
+                                                                 GSER()_LANE()_RX_VMA_CTRL. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cfg_4_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cfg_4 bdk_gserx_lanex_rx_cfg_4_t;
+
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_4(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_4(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440020ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440020ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440020ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CFG_4", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CFG_4(a,b) bdk_gserx_lanex_rx_cfg_4_t
+#define bustype_BDK_GSERX_LANEX_RX_CFG_4(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CFG_4(a,b) "GSERX_LANEX_RX_CFG_4"
+#define device_bar_BDK_GSERX_LANEX_RX_CFG_4(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CFG_4(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CFG_4(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_cfg_5
+ *
+ * GSER Lane SerDes RX Configuration 5 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_cfg_5
+{
+    uint64_t u;
+    struct bdk_gserx_lanex_rx_cfg_5_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_5_63         : 59;
+        uint64_t rx_agc_men_ovrrd_en : 1;    /**< [  4:  4](R/W) Override enable for AGC manual mode.
+
+                                                                 Recommended settings:
+
+                                                                 When auto-negotiated link training is not present (e.g. BGX) and link speed \<
+                                                                 5 Gbaud, pre-CTLE, post-CTLE, and peaking control settings should be manually
+                                                                 configured. [RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL] should both be set,
+                                                                 GSER()_LANE()_RX_CFG_2[RX_SDS_RX_AGC_MVAL] has the pre and post settings,
+                                                                 and GSER()_LANE()_RX_CTLE_CTRL[PCS_SDS_RX_CTLE_ZERO] controls equalizer
+                                                                 peaking. */
+        uint64_t rx_agc_men_ovrrd_val : 1;   /**< [  3:  3](R/W) Override value for AGC manual mode.
+
+                                                                 Recommended settings:
+
+                                                                 When auto-negotiated link training is not present (e.g. BGX) and link speed \<
+                                                                 5 Gbaud, pre-CTLE, post-CTLE, and peaking control settings should be manually
+                                                                 configured. [RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL] should both be set,
+                                                                 GSER()_LANE()_RX_CFG_2[RX_SDS_RX_AGC_MVAL] has the pre and post settings,
+                                                                 and GSER()_LANE()_RX_CTLE_CTRL[PCS_SDS_RX_CTLE_ZERO] controls equalizer
+                                                                 peaking. */
+        uint64_t rx_widthsel_ovrrd_en : 1;   /**< [  2:  2](R/W) Override enable for RX width select to the SerDes pcs_sds_rx_widthsel. */
+        uint64_t rx_widthsel_ovrrd_val : 2;  /**< [  1:  0](R/W) Override value for RX width select to the SerDes pcs_sds_rx_widthsel.
+                                                                 0x0 = 8-bit raw data.
+                                                                 0x1 = 10-bit raw data.
+                                                                 0x2 = 16-bit raw data.
+                                                                 0x3 = 20-bit raw data. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_widthsel_ovrrd_val : 2;  /**< [  1:  0](R/W) Override value for RX width select to the SerDes pcs_sds_rx_widthsel.
+                                                                 0x0 = 8-bit raw data.
+                                                                 0x1 = 10-bit raw data.
+                                                                 0x2 = 16-bit raw data.
+                                                                 0x3 = 20-bit raw data. */
+        uint64_t rx_widthsel_ovrrd_en : 1;   /**< [  2:  2](R/W) Override enable for RX width select to the SerDes pcs_sds_rx_widthsel. */
+        uint64_t rx_agc_men_ovrrd_val : 1;   /**< [  3:  3](R/W) Override value for AGC manual mode.
+
+                                                                 Recommended settings:
+
+                                                                 When auto-negotiated link training is not present (e.g. BGX) and link speed \<
+                                                                 5 Gbaud, pre-CTLE, post-CTLE, and peaking control settings should be manually
+                                                                 configured. [RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL] should both be set,
+                                                                 GSER()_LANE()_RX_CFG_2[RX_SDS_RX_AGC_MVAL] has the pre and post settings,
+                                                                 and GSER()_LANE()_RX_CTLE_CTRL[PCS_SDS_RX_CTLE_ZERO] controls equalizer
+                                                                 peaking. */
+        uint64_t rx_agc_men_ovrrd_en : 1;    /**< [  4:  4](R/W) Override enable for AGC manual mode.
+
+                                                                 Recommended settings:
+
+                                                                 When auto-negotiated link training is not present (e.g. BGX) and link speed \<
+                                                                 5 Gbaud, pre-CTLE, post-CTLE, and peaking control settings should be manually
+                                                                 configured. [RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL] should both be set,
+                                                                 GSER()_LANE()_RX_CFG_2[RX_SDS_RX_AGC_MVAL] has the pre and post settings,
+                                                                 and GSER()_LANE()_RX_CTLE_CTRL[PCS_SDS_RX_CTLE_ZERO] controls equalizer
+                                                                 peaking. */
+        uint64_t reserved_5_63         : 59;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_rx_cfg_5_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_cfg_5 bdk_gserx_lanex_rx_cfg_5_t;
+
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_5(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CFG_5(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090440028ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e090440028ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e090440028ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_RX_CFG_5", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CFG_5(a,b) bdk_gserx_lanex_rx_cfg_5_t
+#define bustype_BDK_GSERX_LANEX_RX_CFG_5(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CFG_5(a,b) "GSERX_LANEX_RX_CFG_5"
+#define device_bar_BDK_GSERX_LANEX_RX_CFG_5(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CFG_5(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CFG_5(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_ctle_ctrl
+ *
+ * GSER Lane RX Precorrelation Control Register
+ * NOTE(review): the title above says "Precorrelation" but the fields below are
+ * CTLE controls; this looks copied from gser#_lane#_rx_precorr_ctrl - verify
+ * against the hardware reference manual.
+ * These are the RAW PCS per-lane RX CTLE control registers.
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_ctle_ctrl
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_ctle_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pcs_sds_rx_ctle_bias_ctrl : 2;/**< [ 15: 14](R/W) CTLE bias trim bits.
+ 0x0 = -10%.
+ 0x1 = 0%.
+ 0x2 = +5%.
+ 0x3 = +10%. */
+ uint64_t pcs_sds_rx_ctle_zero : 4; /**< [ 13: 10](R/W) Equalizer peaking control.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<
+ 5 Gbaud,
+ pre-CTLE, post-CTLE, and peaking control settings should be manually
+ configured. GSER()_LANE()_RX_CFG_5[RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL]
+ should both be set, GSER()_LANE()_RX_CFG_2[RX_SDS_RX_AGC_MVAL] has the
+ pre and post settings, and [PCS_SDS_RX_CTLE_ZERO] controls equalizer
+ peaking.
+
+ The [PCS_SDS_RX_CTLE_ZERO] setting should be derived from signal integrity
+ simulations with the IBIS-AMI model supplied by Cavium when auto-negotiated
+ link training is not present and link speed \< 5 Gbaud. */
+ uint64_t rx_ctle_pole_ovrrd_en : 1; /**< [ 9: 9](R/W) Equalizer pole adjustment override enable. */
+ uint64_t rx_ctle_pole_ovrrd_val : 4; /**< [ 8: 5](R/W) Equalizer pole adjustment override value.
+ RX precorrelation sample counter control
+ bit 3: Optimize CTLE during training.
+ bit 2: Turn off DFE1 for edge samplers.
+ bits 1:0:
+ 0x0 = ~ 5dB of peaking at 4.0 GHz.
+ 0x1 = ~10dB of peaking at 5.0 GHz.
+ 0x2 = ~15dB of peaking at 5.5 GHz.
+ 0x3 = ~20dB of peaking at 6.0 GHz. */
+ uint64_t pcs_sds_rx_ctle_pole_max : 2;/**< [ 4: 3](R/W) Maximum pole value (for VMA adaption, not applicable in manual mode). */
+ uint64_t pcs_sds_rx_ctle_pole_min : 2;/**< [ 2: 1](R/W) Minimum pole value (for VMA adaption, not applicable in manual mode). */
+ uint64_t pcs_sds_rx_ctle_pole_step : 1;/**< [ 0: 0](R/W) Step pole value (for VMA adaption, not applicable in manual mode). */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_rx_ctle_pole_step : 1;/**< [ 0: 0](R/W) Step pole value (for VMA adaption, not applicable in manual mode). */
+ uint64_t pcs_sds_rx_ctle_pole_min : 2;/**< [ 2: 1](R/W) Minimum pole value (for VMA adaption, not applicable in manual mode). */
+ uint64_t pcs_sds_rx_ctle_pole_max : 2;/**< [ 4: 3](R/W) Maximum pole value (for VMA adaption, not applicable in manual mode). */
+ uint64_t rx_ctle_pole_ovrrd_val : 4; /**< [ 8: 5](R/W) Equalizer pole adjustment override value.
+ RX precorrelation sample counter control
+ bit 3: Optimize CTLE during training.
+ bit 2: Turn off DFE1 for edge samplers.
+ bits 1:0:
+ 0x0 = ~ 5dB of peaking at 4.0 GHz.
+ 0x1 = ~10dB of peaking at 5.0 GHz.
+ 0x2 = ~15dB of peaking at 5.5 GHz.
+ 0x3 = ~20dB of peaking at 6.0 GHz. */
+ uint64_t rx_ctle_pole_ovrrd_en : 1; /**< [ 9: 9](R/W) Equalizer pole adjustment override enable. */
+ uint64_t pcs_sds_rx_ctle_zero : 4; /**< [ 13: 10](R/W) Equalizer peaking control.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<
+ 5 Gbaud,
+ pre-CTLE, post-CTLE, and peaking control settings should be manually
+ configured. GSER()_LANE()_RX_CFG_5[RX_AGC_MEN_OVRRD_EN,RX_AGC_MEN_OVRRD_VAL]
+ should both be set, GSER()_LANE()_RX_CFG_2[RX_SDS_RX_AGC_MVAL] has the
+ pre and post settings, and [PCS_SDS_RX_CTLE_ZERO] controls equalizer
+ peaking.
+
+ The [PCS_SDS_RX_CTLE_ZERO] setting should be derived from signal integrity
+ simulations with the IBIS-AMI model supplied by Cavium when auto-negotiated
+ link training is not present and link speed \< 5 Gbaud. */
+ uint64_t pcs_sds_rx_ctle_bias_ctrl : 2;/**< [ 15: 14](R/W) CTLE bias trim bits.
+ 0x0 = -10%.
+ 0x1 = 0%.
+ 0x2 = +5%.
+ 0x3 = +10%. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_ctle_ctrl_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_ctle_ctrl bdk_gserx_lanex_rx_ctle_ctrl_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_CTLE_CTRL.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_CTLE_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_CTLE_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440058ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440058ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440058ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_CTLE_CTRL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_CTLE_CTRL(a,b) bdk_gserx_lanex_rx_ctle_ctrl_t
+#define bustype_BDK_GSERX_LANEX_RX_CTLE_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_CTLE_CTRL(a,b) "GSERX_LANEX_RX_CTLE_CTRL"
+#define device_bar_BDK_GSERX_LANEX_RX_CTLE_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_CTLE_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_CTLE_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_loop_ctrl
+ *
+ * GSER Lane RX Loop Control Registers
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ * NOTE(review): the cn88xx/cn83xx layout below is bit-identical to the common
+ * layout; only the documented lane range in the [CFG_RX_LCTRL] description
+ * differs (LANE(0..1) vs LANE(0..3)).
+ */
+union bdk_gserx_lanex_rx_loop_ctrl
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_loop_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t fast_dll_lock : 1; /**< [ 11: 11](R/W/H) Assert to enable fast DLL lock (for simulation purposes only). */
+ uint64_t fast_ofst_cncl : 1; /**< [ 10: 10](R/W/H) Assert to enable fast Offset cancellation (for simulation purposes only). */
+ uint64_t cfg_rx_lctrl : 10; /**< [ 9: 0](R/W) Loop control settings.
+
+ \<0\> = cdr_en_byp.
+ \<1\> = dfe_en_byp.
+ \<2\> = agc_en_byp.
+ \<3\> = ofst_cncl_en_byp.
+ \<4\> = CDR resetn.
+ \<5\> = CTLE resetn.
+ \<6\> = VMA resetn.
+ \<7\> = ofst_cncl_rstn_byp.
+ \<8\> = lctrl_men.
+ \<9\> = Reserved.
+
+ GSER()_LANE()_PWR_CTRL[RX_LCTRL_OVRRD_EN] controls \<9:7\> and \<3:0\>.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present, non-SATA/PCIe, and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL],
+ setting [CFG_RX_LCTRL\<8\>], clearing [CFG_RX_LCTRL\<1\>], clearing all of
+ GSER(0..6)_LANE(0..1)_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,
+ DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cfg_rx_lctrl : 10; /**< [ 9: 0](R/W) Loop control settings.
+
+ \<0\> = cdr_en_byp.
+ \<1\> = dfe_en_byp.
+ \<2\> = agc_en_byp.
+ \<3\> = ofst_cncl_en_byp.
+ \<4\> = CDR resetn.
+ \<5\> = CTLE resetn.
+ \<6\> = VMA resetn.
+ \<7\> = ofst_cncl_rstn_byp.
+ \<8\> = lctrl_men.
+ \<9\> = Reserved.
+
+ GSER()_LANE()_PWR_CTRL[RX_LCTRL_OVRRD_EN] controls \<9:7\> and \<3:0\>.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present, non-SATA/PCIe, and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL],
+ setting [CFG_RX_LCTRL\<8\>], clearing [CFG_RX_LCTRL\<1\>], clearing all of
+ GSER(0..6)_LANE(0..1)_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,
+ DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t fast_ofst_cncl : 1; /**< [ 10: 10](R/W/H) Assert to enable fast Offset cancellation (for simulation purposes only). */
+ uint64_t fast_dll_lock : 1; /**< [ 11: 11](R/W/H) Assert to enable fast DLL lock (for simulation purposes only). */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_loop_ctrl_s cn81xx; */
+ struct bdk_gserx_lanex_rx_loop_ctrl_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t fast_dll_lock : 1; /**< [ 11: 11](R/W/H) Assert to enable fast DLL lock (for simulation purposes only). */
+ uint64_t fast_ofst_cncl : 1; /**< [ 10: 10](R/W/H) Assert to enable fast Offset cancellation (for simulation purposes only). */
+ uint64_t cfg_rx_lctrl : 10; /**< [ 9: 0](R/W) Loop control settings.
+
+ \<0\> = cdr_en_byp.
+ \<1\> = dfe_en_byp.
+ \<2\> = agc_en_byp.
+ \<3\> = ofst_cncl_en_byp.
+ \<4\> = CDR resetn.
+ \<5\> = CTLE resetn.
+ \<6\> = VMA resetn.
+ \<7\> = ofst_cncl_rstn_byp.
+ \<8\> = lctrl_men.
+ \<9\> = Reserved.
+
+ GSER()_LANE()_PWR_CTRL[RX_LCTRL_OVRRD_EN] controls \<9:7\> and \<3:0\>.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present, non-SATA/PCIe, and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL],
+ setting [CFG_RX_LCTRL\<8\>], clearing [CFG_RX_LCTRL\<1\>], clearing all of
+ GSER(0..6)_LANE(0..3)_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,
+ DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cfg_rx_lctrl : 10; /**< [ 9: 0](R/W) Loop control settings.
+
+ \<0\> = cdr_en_byp.
+ \<1\> = dfe_en_byp.
+ \<2\> = agc_en_byp.
+ \<3\> = ofst_cncl_en_byp.
+ \<4\> = CDR resetn.
+ \<5\> = CTLE resetn.
+ \<6\> = VMA resetn.
+ \<7\> = ofst_cncl_rstn_byp.
+ \<8\> = lctrl_men.
+ \<9\> = Reserved.
+
+ GSER()_LANE()_PWR_CTRL[RX_LCTRL_OVRRD_EN] controls \<9:7\> and \<3:0\>.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present, non-SATA/PCIe, and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL],
+ setting [CFG_RX_LCTRL\<8\>], clearing [CFG_RX_LCTRL\<1\>], clearing all of
+ GSER(0..6)_LANE(0..3)_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,
+ DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t fast_ofst_cncl : 1; /**< [ 10: 10](R/W/H) Assert to enable fast Offset cancellation (for simulation purposes only). */
+ uint64_t fast_dll_lock : 1; /**< [ 11: 11](R/W/H) Assert to enable fast DLL lock (for simulation purposes only). */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gserx_lanex_rx_loop_ctrl_cn88xx cn83xx; */
+};
+typedef union bdk_gserx_lanex_rx_loop_ctrl bdk_gserx_lanex_rx_loop_ctrl_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_LOOP_CTRL.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_LOOP_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_LOOP_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440048ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440048ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440048ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_LOOP_CTRL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_LOOP_CTRL(a,b) bdk_gserx_lanex_rx_loop_ctrl_t
+#define bustype_BDK_GSERX_LANEX_RX_LOOP_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_LOOP_CTRL(a,b) "GSERX_LANEX_RX_LOOP_CTRL"
+#define device_bar_BDK_GSERX_LANEX_RX_LOOP_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_LOOP_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_LOOP_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_misc_ctrl
+ *
+ * GSER Lane RX Miscellaneous Control Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_misc_ctrl
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_misc_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t pcs_sds_rx_misc_ctrl : 8; /**< [ 7: 0](R/W/H) Miscellaneous receive control settings.
+
+ \<0\> = Shadow PI control. Must set when using the RX internal eye monitor.
+ \<1\> = Reserved.
+ \<3:2\> = Offset cal.
+ \<4\> = Reserved.
+ \<5\> = Reserved.
+ \<6\> = 1149 hysteresis control.
+ \<7\> = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_rx_misc_ctrl : 8; /**< [ 7: 0](R/W/H) Miscellaneous receive control settings.
+
+ \<0\> = Shadow PI control. Must set when using the RX internal eye monitor.
+ \<1\> = Reserved.
+ \<3:2\> = Offset cal.
+ \<4\> = Reserved.
+ \<5\> = Reserved.
+ \<6\> = 1149 hysteresis control.
+ \<7\> = Reserved. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_misc_ctrl_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_misc_ctrl bdk_gserx_lanex_rx_misc_ctrl_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_MISC_CTRL.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_MISC_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_MISC_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440050ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440050ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440050ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_MISC_CTRL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_MISC_CTRL(a,b) bdk_gserx_lanex_rx_misc_ctrl_t
+#define bustype_BDK_GSERX_LANEX_RX_MISC_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_MISC_CTRL(a,b) "GSERX_LANEX_RX_MISC_CTRL"
+#define device_bar_BDK_GSERX_LANEX_RX_MISC_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_MISC_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_MISC_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_misc_ovrrd
+ *
+ * GSER Lane RX Miscellaneous Override Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ * NOTE(review): on CN88XX pass 1 bit 0 is reserved (see the cn88xxp1 layout
+ * below); CN81XX, CN83XX and CN88XX pass 2 expose [CFG_RXEQ_EVAL_RESTORE_EN]
+ * in that position.
+ */
+union bdk_gserx_lanex_rx_misc_ovrrd
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_misc_ovrrd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t cfg_rx_oob_clk_en_ovrrd_val : 1;/**< [ 13: 13](R/W) Override value for RX OOB clock enable. */
+ uint64_t cfg_rx_oob_clk_en_ovrrd_en : 1;/**< [ 12: 12](R/W) Override enable for RX OOB clock enable. */
+ uint64_t cfg_rx_eie_det_ovrrd_val : 1;/**< [ 11: 11](R/W) Override value for RX electrical-idle-exit
+ detect enable. */
+ uint64_t cfg_rx_eie_det_ovrrd_en : 1;/**< [ 10: 10](R/W) Override enable for RX electrical-idle-exit
+ detect enable. */
+ uint64_t cfg_rx_cdr_ctrl_ovrrd_en : 1;/**< [ 9: 9](R/W) Not supported. */
+ uint64_t cfg_rx_eq_eval_ovrrd_val : 1;/**< [ 8: 8](R/W) Training mode control in override mode. */
+ uint64_t cfg_rx_eq_eval_ovrrd_en : 1;/**< [ 7: 7](R/W) Override enable for RX-EQ eval.
+ When asserted, training mode is controlled by
+ CFG_RX_EQ_EVAL_OVRRD_VAL. */
+ uint64_t reserved_6 : 1;
+ uint64_t cfg_rx_dll_locken_ovrrd_en : 1;/**< [ 5: 5](R/W) When asserted, override DLL lock enable
+ signal from the RX power state machine with
+ CFG_RX_DLL_LOCKEN in register
+ GSER()_LANE()_RX_CFG_1. */
+ uint64_t cfg_rx_errdet_ctrl_ovrrd_en : 1;/**< [ 4: 4](R/W) When asserted, pcs_sds_rx_err_det_ctrl is set
+ to cfg_rx_errdet_ctrl in registers
+ GSER()_LANE()_RX_CFG_3 and GSER()_LANE()_RX_CFG_4. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t cfg_rxeq_eval_restore_en : 1;/**< [ 0: 0](R/W) When asserted, AGC and CTLE use the RX EQ settings determined from RX EQ
+ evaluation process when VMA is not in manual mode. Otherwise, default settings are used. */
+#else /* Word 0 - Little Endian */
+ uint64_t cfg_rxeq_eval_restore_en : 1;/**< [ 0: 0](R/W) When asserted, AGC and CTLE use the RX EQ settings determined from RX EQ
+ evaluation process when VMA is not in manual mode. Otherwise, default settings are used. */
+ uint64_t reserved_1_3 : 3;
+ uint64_t cfg_rx_errdet_ctrl_ovrrd_en : 1;/**< [ 4: 4](R/W) When asserted, pcs_sds_rx_err_det_ctrl is set
+ to cfg_rx_errdet_ctrl in registers
+ GSER()_LANE()_RX_CFG_3 and GSER()_LANE()_RX_CFG_4. */
+ uint64_t cfg_rx_dll_locken_ovrrd_en : 1;/**< [ 5: 5](R/W) When asserted, override DLL lock enable
+ signal from the RX power state machine with
+ CFG_RX_DLL_LOCKEN in register
+ GSER()_LANE()_RX_CFG_1. */
+ uint64_t reserved_6 : 1;
+ uint64_t cfg_rx_eq_eval_ovrrd_en : 1;/**< [ 7: 7](R/W) Override enable for RX-EQ eval.
+ When asserted, training mode is controlled by
+ CFG_RX_EQ_EVAL_OVRRD_VAL. */
+ uint64_t cfg_rx_eq_eval_ovrrd_val : 1;/**< [ 8: 8](R/W) Training mode control in override mode. */
+ uint64_t cfg_rx_cdr_ctrl_ovrrd_en : 1;/**< [ 9: 9](R/W) Not supported. */
+ uint64_t cfg_rx_eie_det_ovrrd_en : 1;/**< [ 10: 10](R/W) Override enable for RX electrical-idle-exit
+ detect enable. */
+ uint64_t cfg_rx_eie_det_ovrrd_val : 1;/**< [ 11: 11](R/W) Override value for RX electrical-idle-exit
+ detect enable. */
+ uint64_t cfg_rx_oob_clk_en_ovrrd_en : 1;/**< [ 12: 12](R/W) Override enable for RX OOB clock enable. */
+ uint64_t cfg_rx_oob_clk_en_ovrrd_val : 1;/**< [ 13: 13](R/W) Override value for RX OOB clock enable. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_gserx_lanex_rx_misc_ovrrd_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t cfg_rx_oob_clk_en_ovrrd_val : 1;/**< [ 13: 13](R/W) Override value for RX OOB clock enable. */
+ uint64_t cfg_rx_oob_clk_en_ovrrd_en : 1;/**< [ 12: 12](R/W) Override enable for RX OOB clock enable. */
+ uint64_t cfg_rx_eie_det_ovrrd_val : 1;/**< [ 11: 11](R/W) Override value for RX electrical-idle-exit
+ detect enable. */
+ uint64_t cfg_rx_eie_det_ovrrd_en : 1;/**< [ 10: 10](R/W) Override enable for RX electrical-idle-exit
+ detect enable. */
+ uint64_t cfg_rx_cdr_ctrl_ovrrd_en : 1;/**< [ 9: 9](R/W) Not supported. */
+ uint64_t cfg_rx_eq_eval_ovrrd_val : 1;/**< [ 8: 8](R/W) Training mode control in override mode. */
+ uint64_t cfg_rx_eq_eval_ovrrd_en : 1;/**< [ 7: 7](R/W) Override enable for RX-EQ eval.
+ When asserted, training mode is controlled by
+ CFG_RX_EQ_EVAL_OVRRD_VAL. */
+ uint64_t reserved_6 : 1;
+ uint64_t cfg_rx_dll_locken_ovrrd_en : 1;/**< [ 5: 5](R/W) When asserted, override DLL lock enable
+ signal from the RX power state machine with
+ CFG_RX_DLL_LOCKEN in register
+ GSER()_LANE()_RX_CFG_1. */
+ uint64_t cfg_rx_errdet_ctrl_ovrrd_en : 1;/**< [ 4: 4](R/W) When asserted, pcs_sds_rx_err_det_ctrl is set
+ to cfg_rx_errdet_ctrl in registers
+ GSER()_LANE()_RX_CFG_3 and GSER()_LANE()_RX_CFG_4. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t cfg_rx_errdet_ctrl_ovrrd_en : 1;/**< [ 4: 4](R/W) When asserted, pcs_sds_rx_err_det_ctrl is set
+ to cfg_rx_errdet_ctrl in registers
+ GSER()_LANE()_RX_CFG_3 and GSER()_LANE()_RX_CFG_4. */
+ uint64_t cfg_rx_dll_locken_ovrrd_en : 1;/**< [ 5: 5](R/W) When asserted, override DLL lock enable
+ signal from the RX power state machine with
+ CFG_RX_DLL_LOCKEN in register
+ GSER()_LANE()_RX_CFG_1. */
+ uint64_t reserved_6 : 1;
+ uint64_t cfg_rx_eq_eval_ovrrd_en : 1;/**< [ 7: 7](R/W) Override enable for RX-EQ eval.
+ When asserted, training mode is controlled by
+ CFG_RX_EQ_EVAL_OVRRD_VAL. */
+ uint64_t cfg_rx_eq_eval_ovrrd_val : 1;/**< [ 8: 8](R/W) Training mode control in override mode. */
+ uint64_t cfg_rx_cdr_ctrl_ovrrd_en : 1;/**< [ 9: 9](R/W) Not supported. */
+ uint64_t cfg_rx_eie_det_ovrrd_en : 1;/**< [ 10: 10](R/W) Override enable for RX electrical-idle-exit
+ detect enable. */
+ uint64_t cfg_rx_eie_det_ovrrd_val : 1;/**< [ 11: 11](R/W) Override value for RX electrical-idle-exit
+ detect enable. */
+ uint64_t cfg_rx_oob_clk_en_ovrrd_en : 1;/**< [ 12: 12](R/W) Override enable for RX OOB clock enable. */
+ uint64_t cfg_rx_oob_clk_en_ovrrd_val : 1;/**< [ 13: 13](R/W) Override value for RX OOB clock enable. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_gserx_lanex_rx_misc_ovrrd_s cn81xx; */
+ /* struct bdk_gserx_lanex_rx_misc_ovrrd_s cn83xx; */
+ /* struct bdk_gserx_lanex_rx_misc_ovrrd_s cn88xxp2; */
+};
+typedef union bdk_gserx_lanex_rx_misc_ovrrd bdk_gserx_lanex_rx_misc_ovrrd_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_MISC_OVRRD.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_MISC_OVRRD(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_MISC_OVRRD(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440258ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440258ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440258ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_MISC_OVRRD", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_MISC_OVRRD(a,b) bdk_gserx_lanex_rx_misc_ovrrd_t
+#define bustype_BDK_GSERX_LANEX_RX_MISC_OVRRD(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_MISC_OVRRD(a,b) "GSERX_LANEX_RX_MISC_OVRRD"
+#define device_bar_BDK_GSERX_LANEX_RX_MISC_OVRRD(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_MISC_OVRRD(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_MISC_OVRRD(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_os_mvalbbd_1
+ *
+ * GSER Lane SerDes RX Offset Calibration Manual Control 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ * NOTE(review): the [PCS_SDS_RX_OS_MVAL] description below lists overlapping
+ * bit ranges (\<7:2\> and \<3:0\>); \<1:0\> for Ib would be consistent with
+ * GSER()_LANE()_RX_OS_MVALBBD_2 - verify against the hardware reference manual.
+ */
+union bdk_gserx_lanex_rx_os_mvalbbd_1
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_os_mvalbbd_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pcs_sds_rx_os_mval : 16; /**< [ 15: 0](R/W/H) Offset calibration override value when GSER()_LANE()_RX_CFG_1[PCS_SDS_RX_OS_MEN] is set.
+ Requires SIGN-MAG format.
+ \<15:14\> = Not used.
+ \<13:8\> = Qerr0.
+ \<7:2\> = I.
+ \<3:0\> = Ib. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_rx_os_mval : 16; /**< [ 15: 0](R/W/H) Offset calibration override value when GSER()_LANE()_RX_CFG_1[PCS_SDS_RX_OS_MEN] is set.
+ Requires SIGN-MAG format.
+ \<15:14\> = Not used.
+ \<13:8\> = Qerr0.
+ \<7:2\> = I.
+ \<3:0\> = Ib. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_os_mvalbbd_1_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_os_mvalbbd_1 bdk_gserx_lanex_rx_os_mvalbbd_1_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_OS_MVALBBD_1.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_MVALBBD_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_MVALBBD_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440230ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440230ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440230ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_OS_MVALBBD_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_OS_MVALBBD_1(a,b) bdk_gserx_lanex_rx_os_mvalbbd_1_t
+#define bustype_BDK_GSERX_LANEX_RX_OS_MVALBBD_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_OS_MVALBBD_1(a,b) "GSERX_LANEX_RX_OS_MVALBBD_1"
+#define device_bar_BDK_GSERX_LANEX_RX_OS_MVALBBD_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_OS_MVALBBD_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_OS_MVALBBD_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_os_mvalbbd_2
+ *
+ * GSER Lane SerDes RX Offset Calibration Manual Control 2 Register
+ * (title said "Control 1" - apparent copy-paste; this is the mvalbbd_2 register)
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_os_mvalbbd_2
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_os_mvalbbd_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pcs_sds_rx_os_mval : 16; /**< [ 15: 0](R/W/H) Offset calibration override value when GSER()_LANE()_RX_CFG_1[PCS_SDS_RX_OS_MEN] is set.
+ Requires SIGN-MAG format.
+ \<15:12\> = Ib.
+ \<11:6\> = Q.
+ \<5:0\> = Qb. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_rx_os_mval : 16; /**< [ 15: 0](R/W/H) Offset calibration override value when GSER()_LANE()_RX_CFG_1[PCS_SDS_RX_OS_MEN] is set.
+ Requires SIGN-MAG format.
+ \<15:12\> = Ib.
+ \<11:6\> = Q.
+ \<5:0\> = Qb. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_os_mvalbbd_2_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_os_mvalbbd_2 bdk_gserx_lanex_rx_os_mvalbbd_2_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_OS_MVALBBD_2.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_MVALBBD_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_MVALBBD_2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440238ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440238ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440238ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_OS_MVALBBD_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_OS_MVALBBD_2(a,b) bdk_gserx_lanex_rx_os_mvalbbd_2_t
+#define bustype_BDK_GSERX_LANEX_RX_OS_MVALBBD_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_OS_MVALBBD_2(a,b) "GSERX_LANEX_RX_OS_MVALBBD_2"
+#define device_bar_BDK_GSERX_LANEX_RX_OS_MVALBBD_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_OS_MVALBBD_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_OS_MVALBBD_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_os_out_1
+ *
+ * GSER Lane SerDes RX Calibration Status 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_os_out_1
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_os_out_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t sds_pcs_rx_os_out : 12; /**< [ 11: 0](RO/H) Offset calibration code for readout, 2's complement.
+ \<11:6\> = Not used.
+ \<5:0\> = Qerr0. */
+#else /* Word 0 - Little Endian */
+ uint64_t sds_pcs_rx_os_out : 12; /**< [ 11: 0](RO/H) Offset calibration code for readout, 2's complement.
+ \<11:6\> = Not used.
+ \<5:0\> = Qerr0. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_os_out_1_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_os_out_1 bdk_gserx_lanex_rx_os_out_1_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_OS_OUT_1.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_OUT_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_OUT_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904402a0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904402a0ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904402a0ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_OS_OUT_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_OS_OUT_1(a,b) bdk_gserx_lanex_rx_os_out_1_t
+#define bustype_BDK_GSERX_LANEX_RX_OS_OUT_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_OS_OUT_1(a,b) "GSERX_LANEX_RX_OS_OUT_1"
+#define device_bar_BDK_GSERX_LANEX_RX_OS_OUT_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_OS_OUT_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_OS_OUT_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_os_out_2
+ *
+ * GSER Lane SerDes RX Calibration Status 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_os_out_2
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_os_out_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t sds_pcs_rx_os_out : 12; /**< [ 11: 0](RO/H) Offset calibration code for readout, 2's complement.
+ \<11:6\> = I.
+ \<5:0\> = Ib. */
+#else /* Word 0 - Little Endian */
+ uint64_t sds_pcs_rx_os_out : 12; /**< [ 11: 0](RO/H) Offset calibration code for readout, 2's complement.
+ \<11:6\> = I.
+ \<5:0\> = Ib. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_os_out_2_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_os_out_2 bdk_gserx_lanex_rx_os_out_2_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_OS_OUT_2.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_OUT_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_OUT_2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904402a8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904402a8ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904402a8ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_OS_OUT_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_OS_OUT_2(a,b) bdk_gserx_lanex_rx_os_out_2_t
+#define bustype_BDK_GSERX_LANEX_RX_OS_OUT_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_OS_OUT_2(a,b) "GSERX_LANEX_RX_OS_OUT_2"
+#define device_bar_BDK_GSERX_LANEX_RX_OS_OUT_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_OS_OUT_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_OS_OUT_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_os_out_3
+ *
+ * GSER Lane SerDes RX Calibration Status 3 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_os_out_3
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_os_out_3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t sds_pcs_rx_os_out : 12; /**< [ 11: 0](RO/H) Offset calibration code for readout, 2's complement.
+ \<11:6\> = Q.
+ \<5:0\> = Qb. */
+#else /* Word 0 - Little Endian */
+ uint64_t sds_pcs_rx_os_out : 12; /**< [ 11: 0](RO/H) Offset calibration code for readout, 2's complement.
+ \<11:6\> = Q.
+ \<5:0\> = Qb. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_os_out_3_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_os_out_3 bdk_gserx_lanex_rx_os_out_3_t;
+
+/* Compute the physical CSR address for GSER(a)_LANE(b)_RX_OS_OUT_3.
+ * (a) = GSER block index, (b) = lane index; accepted ranges depend on the
+ * detected chip model, and out-of-range indices are reported via
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_OUT_3(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_OS_OUT_3(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904402b0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904402b0ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904402b0ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_OS_OUT_3", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_OS_OUT_3(a,b) bdk_gserx_lanex_rx_os_out_3_t
+#define bustype_BDK_GSERX_LANEX_RX_OS_OUT_3(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_OS_OUT_3(a,b) "GSERX_LANEX_RX_OS_OUT_3"
+#define device_bar_BDK_GSERX_LANEX_RX_OS_OUT_3(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_OS_OUT_3(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_OS_OUT_3(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_precorr_ctrl
+ *
+ * GSER Lane RX Precorrelation Control Register
+ * These are the RAW PCS per-lane RX precorrelation control registers. These registers are for
+ * diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_precorr_ctrl
+{
+ uint64_t u; /* Whole register as a single 64-bit value */
+ struct bdk_gserx_lanex_rx_precorr_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t rx_precorr_disable : 1; /**< [ 4: 4](R/W) Disable RX precorrelation calculation. */
+ uint64_t rx_precorr_en_ovrrd_en : 1; /**< [ 3: 3](R/W) Override enable for RX precorrelation calculation enable. */
+ uint64_t rx_precorr_en_ovrrd_val : 1;/**< [ 2: 2](R/W) Override value for RX precorrelation calculation enable. */
+ uint64_t pcs_sds_rx_precorr_scnt_ctrl : 2;/**< [ 1: 0](R/W) RX precorrelation sample counter control.
+ 0x0 = Load max sample counter with 0x1FF.
+ 0x1 = Load max sample counter with 0x3FF.
+ 0x2 = Load max sample counter with 0x7FF.
+ 0x3 = Load max sample counter with 0xFFF. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_rx_precorr_scnt_ctrl : 2;/**< [ 1: 0](R/W) RX precorrelation sample counter control.
+ 0x0 = Load max sample counter with 0x1FF.
+ 0x1 = Load max sample counter with 0x3FF.
+ 0x2 = Load max sample counter with 0x7FF.
+ 0x3 = Load max sample counter with 0xFFF. */
+ uint64_t rx_precorr_en_ovrrd_val : 1;/**< [ 2: 2](R/W) Override value for RX precorrelation calculation enable. */
+ uint64_t rx_precorr_en_ovrrd_en : 1; /**< [ 3: 3](R/W) Override enable for RX precorrelation calculation enable. */
+ uint64_t rx_precorr_disable : 1; /**< [ 4: 4](R/W) Disable RX precorrelation calculation. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_precorr_ctrl_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_precorr_ctrl bdk_gserx_lanex_rx_precorr_ctrl_t;
+
+/* Returns the GSER(a)_LANE(b)_RX_PRECORR_CTRL CSR address for the detected
+   chip model.  Valid (a, b) bounds and the index masks differ per model;
+   indices outside the detected model's range go to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_PRECORR_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_PRECORR_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440060ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440060ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440060ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_PRECORR_CTRL", 2, a, b, 0, 0);
+}
+
+/* NOTE(review): per-register metadata apparently consumed by the BDK's
+   generic CSR accessor macros -- verify against bdk-csr.h. */
+#define typedef_BDK_GSERX_LANEX_RX_PRECORR_CTRL(a,b) bdk_gserx_lanex_rx_precorr_ctrl_t
+#define bustype_BDK_GSERX_LANEX_RX_PRECORR_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_PRECORR_CTRL(a,b) "GSERX_LANEX_RX_PRECORR_CTRL"
+#define device_bar_BDK_GSERX_LANEX_RX_PRECORR_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_PRECORR_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_PRECORR_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_precorr_val
+ *
+ * GSER Lane RX Precorrelation Count Register
+ * These are the RAW PCS per-lane RX precorrelation control registers. These registers are for
+ * diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_precorr_val
+{
+ uint64_t u; /* Whole register as a single 64-bit value */
+ struct bdk_gserx_lanex_rx_precorr_val_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t sds_pcs_rx_precorr_vld : 1; /**< [ 12: 12](RO/H) RX precorrelation count is valid. */
+ uint64_t sds_pcs_rx_precorr_cnt : 12;/**< [ 11: 0](RO/H) RX precorrelation count. */
+#else /* Word 0 - Little Endian */
+ uint64_t sds_pcs_rx_precorr_cnt : 12;/**< [ 11: 0](RO/H) RX precorrelation count. */
+ uint64_t sds_pcs_rx_precorr_vld : 1; /**< [ 12: 12](RO/H) RX precorrelation count is valid. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_precorr_val_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_precorr_val bdk_gserx_lanex_rx_precorr_val_t;
+
+/* Returns the GSER(a)_LANE(b)_RX_PRECORR_VAL CSR address for the detected
+   chip model.  Valid (a, b) bounds and the index masks differ per model;
+   indices outside the detected model's range go to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_PRECORR_VAL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_PRECORR_VAL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440078ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440078ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440078ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_PRECORR_VAL", 2, a, b, 0, 0);
+}
+
+/* NOTE(review): per-register metadata apparently consumed by the BDK's
+   generic CSR accessor macros -- verify against bdk-csr.h. */
+#define typedef_BDK_GSERX_LANEX_RX_PRECORR_VAL(a,b) bdk_gserx_lanex_rx_precorr_val_t
+#define bustype_BDK_GSERX_LANEX_RX_PRECORR_VAL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_PRECORR_VAL(a,b) "GSERX_LANEX_RX_PRECORR_VAL"
+#define device_bar_BDK_GSERX_LANEX_RX_PRECORR_VAL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_PRECORR_VAL(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_PRECORR_VAL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_valbbd_ctrl_0
+ *
+ * GSER Lane RX Adaptive Equalizer Control Register 0
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_valbbd_ctrl_0
+{
+ uint64_t u; /* Whole register as a single 64-bit value */
+ struct bdk_gserx_lanex_rx_valbbd_ctrl_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t agc_gain : 2; /**< [ 13: 12](R/W) AGC gain. */
+ uint64_t dfe_gain : 2; /**< [ 11: 10](R/W) DFE gain. */
+ uint64_t dfe_c5_mval : 4; /**< [ 9: 6](R/W) DFE Tap5 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ [DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c5_msgn : 1; /**< [ 5: 5](R/W) DFE Tap5 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ [DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c4_mval : 4; /**< [ 4: 1](R/W) DFE Tap4 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ [DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c4_msgn : 1; /**< [ 0: 0](R/W) DFE Tap4 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ [DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_c4_msgn : 1; /**< [ 0: 0](R/W) DFE Tap4 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ [DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c4_mval : 4; /**< [ 4: 1](R/W) DFE Tap4 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ [DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c5_msgn : 1; /**< [ 5: 5](R/W) DFE Tap5 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ [DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c5_mval : 4; /**< [ 9: 6](R/W) DFE Tap5 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ [DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,DFE_C4_MSGN], and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_gain : 2; /**< [ 11: 10](R/W) DFE gain. */
+ uint64_t agc_gain : 2; /**< [ 13: 12](R/W) AGC gain. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_valbbd_ctrl_0_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_valbbd_ctrl_0 bdk_gserx_lanex_rx_valbbd_ctrl_0_t;
+
+/* Returns the GSER(a)_LANE(b)_RX_VALBBD_CTRL_0 CSR address for the detected
+   chip model.  Valid (a, b) bounds and the index masks differ per model;
+   indices outside the detected model's range go to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440240ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440240ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440240ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_VALBBD_CTRL_0", 2, a, b, 0, 0);
+}
+
+/* NOTE(review): per-register metadata apparently consumed by the BDK's
+   generic CSR accessor macros -- verify against bdk-csr.h. */
+#define typedef_BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(a,b) bdk_gserx_lanex_rx_valbbd_ctrl_0_t
+#define bustype_BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(a,b) "GSERX_LANEX_RX_VALBBD_CTRL_0"
+#define device_bar_BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_VALBBD_CTRL_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_valbbd_ctrl_1
+ *
+ * GSER Lane RX Adaptive Equalizer Control Register 1
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_valbbd_ctrl_1
+{
+ uint64_t u; /* Whole register as a single 64-bit value */
+ struct bdk_gserx_lanex_rx_valbbd_ctrl_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t dfe_c3_mval : 4; /**< [ 14: 11](R/W) DFE Tap3 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c3_msgn : 1; /**< [ 10: 10](R/W) DFE Tap3 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c2_mval : 4; /**< [ 9: 6](R/W) DFE Tap2 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c2_msgn : 1; /**< [ 5: 5](R/W) DFE Tap2 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c1_mval : 4; /**< [ 4: 1](R/W) DFE Tap1 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c1_msgn : 1; /**< [ 0: 0](R/W) DFE Tap1 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_c1_msgn : 1; /**< [ 0: 0](R/W) DFE Tap1 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c1_mval : 4; /**< [ 4: 1](R/W) DFE Tap1 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c2_msgn : 1; /**< [ 5: 5](R/W) DFE Tap2 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c2_mval : 4; /**< [ 9: 6](R/W) DFE Tap2 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c3_msgn : 1; /**< [ 10: 10](R/W) DFE Tap3 manual sign when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c3_mval : 4; /**< [ 14: 11](R/W) DFE Tap3 manual value when GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN] and
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_C5_OVRD_VAL] are both set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ GSER()_LANE()_RX_VALBBD_CTRL_2[DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,
+ DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL,FE_C4_MSGN],
+ and clearing all of [DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_valbbd_ctrl_1_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_valbbd_ctrl_1 bdk_gserx_lanex_rx_valbbd_ctrl_1_t;
+
+/* Returns the GSER(a)_LANE(b)_RX_VALBBD_CTRL_1 CSR address for the detected
+   chip model.  Valid (a, b) bounds and the index masks differ per model;
+   indices outside the detected model's range go to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440248ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440248ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440248ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_VALBBD_CTRL_1", 2, a, b, 0, 0);
+}
+
+/* NOTE(review): per-register metadata apparently consumed by the BDK's
+   generic CSR accessor macros -- verify against bdk-csr.h. */
+#define typedef_BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(a,b) bdk_gserx_lanex_rx_valbbd_ctrl_1_t
+#define bustype_BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(a,b) "GSERX_LANEX_RX_VALBBD_CTRL_1"
+#define device_bar_BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_VALBBD_CTRL_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_valbbd_ctrl_2
+ *
+ * GSER Lane RX Adaptive Equalizer Control Register 2
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lanex_rx_valbbd_ctrl_2
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_valbbd_ctrl_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t dfe_ovrd_en : 1; /**< [ 5: 5](R/W) Override enable for DFE tap controls. When asserted, the register bits in
+ GSER()_LANE()_RX_VALBBD_CTRL_0 and GSER()_LANE()_RX_VALBBD_CTRL_1 are
+ used for controlling the DFE tap manual mode, instead the manual mode signal indexed by
+ GSER()_LANE_MODE[LMODE].
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c5_ovrd_val : 1; /**< [ 4: 4](R/W) Override value for DFE Tap5 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c4_ovrd_val : 1; /**< [ 3: 3](R/W) Override value for DFE Tap4 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c3_ovrd_val : 1; /**< [ 2: 2](R/W) Override value for DFE Tap3 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c2_ovrd_val : 1; /**< [ 1: 1](R/W) Override value for DFE Tap2 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c1_ovrd_val : 1; /**< [ 0: 0](R/W) Override value for DFE Tap1 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_c1_ovrd_val : 1; /**< [ 0: 0](R/W) Override value for DFE Tap1 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c2_ovrd_val : 1; /**< [ 1: 1](R/W) Override value for DFE Tap2 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c3_ovrd_val : 1; /**< [ 2: 2](R/W) Override value for DFE Tap3 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c4_ovrd_val : 1; /**< [ 3: 3](R/W) Override value for DFE Tap4 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_c5_ovrd_val : 1; /**< [ 4: 4](R/W) Override value for DFE Tap5 manual enable. Used when [DFE_OVRD_EN] is set.
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t dfe_ovrd_en : 1; /**< [ 5: 5](R/W) Override enable for DFE tap controls. When asserted, the register bits in
+ GSER()_LANE()_RX_VALBBD_CTRL_0 and GSER()_LANE()_RX_VALBBD_CTRL_1 are
+ used for controlling the DFE tap manual mode, instead the manual mode signal indexed by
+ GSER()_LANE_MODE[LMODE].
+
+ Recommended settings:
+
+ When auto-negotiated link training is not present (e.g. BGX) and link speed \<=
+ 5 Gbaud, the DFE should be completely disabled by setting all of
+ [DFE_OVRD_EN,DFE_C5_OVRD_VAL,DFE_C4_OVRD_VAL,DFE_C3_OVRD_VAL,DFE_C2_OVRD_VAL,
+ DFE_C1_OVRD_VAL], setting
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<8\>], clearing
+ GSER()_LANE()_RX_LOOP_CTRL[CFG_RX_LCTRL\<1\>], clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_0[DFE_C5_MVAL,DFE_C5_MSGN,DFE_C4_MVAL, FE_C4_MSGN],
+ and clearing all of
+ GSER()_LANE()_RX_VALBBD_CTRL_1[DFE_C3_MVAL,DFE_C3_MSGN,DFE_C2_MVAL,DFE_C2_MSGN,
+ DFE_C1_MVAL,DFE_C1_MSGN]. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_valbbd_ctrl_2_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_valbbd_ctrl_2 bdk_gserx_lanex_rx_valbbd_ctrl_2_t;
+
+/* Returns the physical CSR address of GSERX_LANEX_RX_VALBBD_CTRL_2 for GSER
+   block (a), lane (b). Valid index ranges depend on the detected chip model;
+   out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440250ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440250ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440250ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_VALBBD_CTRL_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(a,b) bdk_gserx_lanex_rx_valbbd_ctrl_2_t
+#define bustype_BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(a,b) "GSERX_LANEX_RX_VALBBD_CTRL_2"
+#define device_bar_BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_VALBBD_CTRL_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_vma_ctrl
+ *
+ * GSER Lane RX VMA Control Register
+ * These are the RAW PCS per-lane RX VMA control registers. These registers are for diagnostic
+ * use only.
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+/* Access the whole 64-bit CSR via .u, or decode individual fields via .s
+   (bitfield declaration order below depends on host endianness). */
+union bdk_gserx_lanex_rx_vma_ctrl
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_vma_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t vma_fine_cfg_sel_ovrrd_en : 1;/**< [ 15: 15](R/W) Enable override of VMA fine configuration selection. */
+ uint64_t vma_fine_cfg_sel_ovrrd_val : 1;/**< [ 14: 14](R/W) Override value of VMA fine configuration selection.
+ 0 = Coarse mode.
+ 1 = Fine mode. */
+ uint64_t rx_fom_div_delta : 1; /**< [ 13: 13](R/W) TX figure of merit delta division-mode enable. */
+ /* NOTE(review): field name says RX FOM but the description above says "TX
+    figure of merit" - possibly a copy/paste slip in the generated text;
+    confirm against the hardware reference manual. */
+ uint64_t rx_vna_ctrl_18_16 : 3; /**< [ 12: 10](R/W) RX VMA loop control. */
+ uint64_t rx_vna_ctrl_9_0 : 10; /**< [ 9: 0](R/W) RX VMA loop control.
+ \<9:8\> = Parameter settling wait time.
+ \<7\> = Limit CTLE peak to max value.
+ \<6\> = Long reach enabled.
+ \<5\> = Short reach enabled.
+ \<4\> = Training done override enable.
+ \<3\> = Training done override value.
+ \<2:0\> = VMA clock modulation. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_vna_ctrl_9_0 : 10; /**< [ 9: 0](R/W) RX VMA loop control.
+ \<9:8\> = Parameter settling wait time.
+ \<7\> = Limit CTLE peak to max value.
+ \<6\> = Long reach enabled.
+ \<5\> = Short reach enabled.
+ \<4\> = Training done override enable.
+ \<3\> = Training done override value.
+ \<2:0\> = VMA clock modulation. */
+ uint64_t rx_vna_ctrl_18_16 : 3; /**< [ 12: 10](R/W) RX VMA loop control. */
+ uint64_t rx_fom_div_delta : 1; /**< [ 13: 13](R/W) TX figure of merit delta division-mode enable. */
+ uint64_t vma_fine_cfg_sel_ovrrd_val : 1;/**< [ 14: 14](R/W) Override value of VMA fine configuration selection.
+ 0 = Coarse mode.
+ 1 = Fine mode. */
+ uint64_t vma_fine_cfg_sel_ovrrd_en : 1;/**< [ 15: 15](R/W) Enable override of VMA fine configuration selection. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_vma_ctrl_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_vma_ctrl bdk_gserx_lanex_rx_vma_ctrl_t;
+
+/* Returns the physical CSR address of GSERX_LANEX_RX_VMA_CTRL for GSER
+   block (a), lane (b); out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_VMA_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_VMA_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440200ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440200ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440200ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_VMA_CTRL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_VMA_CTRL(a,b) bdk_gserx_lanex_rx_vma_ctrl_t
+#define bustype_BDK_GSERX_LANEX_RX_VMA_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_VMA_CTRL(a,b) "GSERX_LANEX_RX_VMA_CTRL"
+#define device_bar_BDK_GSERX_LANEX_RX_VMA_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_VMA_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_VMA_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_vma_status_0
+ *
+ * GSER Lane SerDes RX CDR Status 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* Access the whole 64-bit CSR via .u, or decode individual fields via .s. */
+union bdk_gserx_lanex_rx_vma_status_0
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_vma_status_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t sds_pcs_rx_vma_status : 8; /**< [ 7: 0](RO/H) \<7\> = DFE powerdown.
+ \<6\> = Reserved.
+ \<5:2\> = CTLE Peak.
+ \<1:0\> = CTLE Pole. */
+#else /* Word 0 - Little Endian */
+ uint64_t sds_pcs_rx_vma_status : 8; /**< [ 7: 0](RO/H) \<7\> = DFE powerdown.
+ \<6\> = Reserved.
+ \<5:2\> = CTLE Peak.
+ \<1:0\> = CTLE Pole. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_vma_status_0_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_vma_status_0 bdk_gserx_lanex_rx_vma_status_0_t;
+
+/* Returns the physical CSR address of GSERX_LANEX_RX_VMA_STATUS_0 for GSER
+   block (a), lane (b); out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_VMA_STATUS_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_VMA_STATUS_0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904402b8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904402b8ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904402b8ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_VMA_STATUS_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_VMA_STATUS_0(a,b) bdk_gserx_lanex_rx_vma_status_0_t
+#define bustype_BDK_GSERX_LANEX_RX_VMA_STATUS_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_VMA_STATUS_0(a,b) "GSERX_LANEX_RX_VMA_STATUS_0"
+#define device_bar_BDK_GSERX_LANEX_RX_VMA_STATUS_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_VMA_STATUS_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_VMA_STATUS_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_rx_vma_status_1
+ *
+ * GSER Lane SerDes RX CDR Status 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* Access the whole 64-bit CSR via .u, or decode individual fields via .s. */
+union bdk_gserx_lanex_rx_vma_status_1
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_rx_vma_status_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t sds_pcs_rx_vma_status : 16; /**< [ 15: 0](RO/H) \<15:8\>: Output is controlled by GSER()_LANE()_RX_CFG_4[CFG_RX_ERRDET_CTRL]\<6:5\>:
+ 0x0 = Window counter\<19:12\> (VMA RAW FOM).
+ 0x1 = Window counter\<11:4\>.
+ 0x2 = CTLE (continuous time linear equalizer) pole, SDLL_IQ.
+ 0x3 = Pre-CTLE gain, CTLE Peak.
+
+ \<7\>: Training done.
+
+ \<6:4\>: Internal state machine delta.
+
+ \<3:0\>: Output is controlled by GSER()_LANE()_RX_CDR_CTRL_1[CDR phase offset override
+ enable]\<4\>:
+ 0x0 = DLL IQ Training value.
+ 0x1 = CDR Phase Offset. */
+#else /* Word 0 - Little Endian */
+ uint64_t sds_pcs_rx_vma_status : 16; /**< [ 15: 0](RO/H) \<15:8\>: Output is controlled by GSER()_LANE()_RX_CFG_4[CFG_RX_ERRDET_CTRL]\<6:5\>:
+ 0x0 = Window counter\<19:12\> (VMA RAW FOM).
+ 0x1 = Window counter\<11:4\>.
+ 0x2 = CTLE (continuous time linear equalizer) pole, SDLL_IQ.
+ 0x3 = Pre-CTLE gain, CTLE Peak.
+
+ \<7\>: Training done.
+
+ \<6:4\>: Internal state machine delta.
+
+ \<3:0\>: Output is controlled by GSER()_LANE()_RX_CDR_CTRL_1[CDR phase offset override
+ enable]\<4\>:
+ 0x0 = DLL IQ Training value.
+ 0x1 = CDR Phase Offset. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_rx_vma_status_1_s cn; */
+};
+typedef union bdk_gserx_lanex_rx_vma_status_1 bdk_gserx_lanex_rx_vma_status_1_t;
+
+/* Returns the physical CSR address of GSERX_LANEX_RX_VMA_STATUS_1 for GSER
+   block (a), lane (b); out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_RX_VMA_STATUS_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_RX_VMA_STATUS_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904402c0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904402c0ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904402c0ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_RX_VMA_STATUS_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_RX_VMA_STATUS_1(a,b) bdk_gserx_lanex_rx_vma_status_1_t
+#define bustype_BDK_GSERX_LANEX_RX_VMA_STATUS_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_RX_VMA_STATUS_1(a,b) "GSERX_LANEX_RX_VMA_STATUS_1"
+#define device_bar_BDK_GSERX_LANEX_RX_VMA_STATUS_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_RX_VMA_STATUS_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_RX_VMA_STATUS_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_sds_pin_mon_0
+ *
+ * GSER Lane SerDes Pin Monitor 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* Access the whole 64-bit CSR via .u, or decode individual fields via .s. */
+union bdk_gserx_lanex_sds_pin_mon_0
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_sds_pin_mon_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t pcs_sds_tx_widthsel : 2; /**< [ 9: 8](RO/H) TX parallel interface width settings (RAW PCS to
+ SerDes TX).
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (not supported).
+ 0x3 = 20-bit raw data. */
+ uint64_t pcs_sds_rx_pcie_mode : 1; /**< [ 7: 7](RO/H) Selects between RX terminations:
+ 0x0 = pcs_sds_rx_terminate_to_vdda.
+ 0x1 = VSS. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t pcs_sds_rx_misc_ctrl_5 : 1; /**< [ 4: 4](RO/H) Not used. */
+ uint64_t tx_detrx_state : 2; /**< [ 3: 2](RO/H) RX detection state:
+ 0x0 = IDLE.
+ 0x1 = Charge Up.
+ 0x2 = Detection.
+ 0x3 = Restore common mode. */
+ uint64_t pcs_sds_tx_rx_detect_dis : 1;/**< [ 1: 1](RO/H) TX detect RX, mode disable. */
+ uint64_t pcs_sds_tx_detect_pulsen : 1;/**< [ 0: 0](RO/H) TX detect RX, pulse enable. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_tx_detect_pulsen : 1;/**< [ 0: 0](RO/H) TX detect RX, pulse enable. */
+ uint64_t pcs_sds_tx_rx_detect_dis : 1;/**< [ 1: 1](RO/H) TX detect RX, mode disable. */
+ uint64_t tx_detrx_state : 2; /**< [ 3: 2](RO/H) RX detection state:
+ 0x0 = IDLE.
+ 0x1 = Charge Up.
+ 0x2 = Detection.
+ 0x3 = Restore common mode. */
+ uint64_t pcs_sds_rx_misc_ctrl_5 : 1; /**< [ 4: 4](RO/H) Not used. */
+ uint64_t reserved_5_6 : 2;
+ uint64_t pcs_sds_rx_pcie_mode : 1; /**< [ 7: 7](RO/H) Selects between RX terminations:
+ 0x0 = pcs_sds_rx_terminate_to_vdda.
+ 0x1 = VSS. */
+ uint64_t pcs_sds_tx_widthsel : 2; /**< [ 9: 8](RO/H) TX parallel interface width settings (RAW PCS to
+ SerDes TX).
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (not supported).
+ 0x3 = 20-bit raw data. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_sds_pin_mon_0_s cn; */
+};
+typedef union bdk_gserx_lanex_sds_pin_mon_0 bdk_gserx_lanex_sds_pin_mon_0_t;
+
+/* Returns the physical CSR address of GSERX_LANEX_SDS_PIN_MON_0 for GSER
+   block (a), lane (b); out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_SDS_PIN_MON_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_SDS_PIN_MON_0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440130ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440130ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440130ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_SDS_PIN_MON_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_SDS_PIN_MON_0(a,b) bdk_gserx_lanex_sds_pin_mon_0_t
+#define bustype_BDK_GSERX_LANEX_SDS_PIN_MON_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_SDS_PIN_MON_0(a,b) "GSERX_LANEX_SDS_PIN_MON_0"
+#define device_bar_BDK_GSERX_LANEX_SDS_PIN_MON_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_SDS_PIN_MON_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_SDS_PIN_MON_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_sds_pin_mon_1
+ *
+ * GSER Lane SerDes Pin Monitor 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* Access the whole 64-bit CSR via .u, or decode individual fields via .s. */
+union bdk_gserx_lanex_sds_pin_mon_1
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_sds_pin_mon_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pcs_sds_rx_chpd : 1; /**< [ 15: 15](RO/H) RX channel powerdown signal. */
+ uint64_t pcs_sds_rx_eie_en : 1; /**< [ 14: 14](RO/H) Enable for electrical idle detection circuit
+ in SerDes RX. */
+ uint64_t reserved_13 : 1;
+ uint64_t pcs_sds_ln_loopback_mode : 1;/**< [ 12: 12](RO/H) TX to RX on chip loopback control signal. */
+ uint64_t pcs_sds_tx_chpd : 1; /**< [ 11: 11](RO/H) TX channel powerdown signal. */
+ uint64_t pcs_sds_rx_widthsel : 2; /**< [ 10: 9](RO/H) Width select.
+ 0x0 = 8-bit raw data.
+ 0x1 = 10-bit raw data.
+ 0x2 = 16-bit raw data.
+ 0x3 = 20-bit raw data. */
+ uint64_t reserved_8 : 1;
+ uint64_t pcs_sds_tx_resetn : 1; /**< [ 7: 7](RO/H) TX reset, active low (RAW PCS output to lane TX). */
+ uint64_t pcs_sds_tx_tristate_en : 1; /**< [ 6: 6](RO/H) TX driver tristate enable (RAW PCS output to lane TX). */
+ uint64_t pcs_sds_tx_swing : 5; /**< [ 5: 1](RO/H) TX swing (RAW PCS output to lane TX). */
+ uint64_t pcs_sds_tx_elec_idle : 1; /**< [ 0: 0](RO/H) TX electrical idle control (RAW PCS output to lane TX). */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_tx_elec_idle : 1; /**< [ 0: 0](RO/H) TX electrical idle control (RAW PCS output to lane TX). */
+ uint64_t pcs_sds_tx_swing : 5; /**< [ 5: 1](RO/H) TX swing (RAW PCS output to lane TX). */
+ uint64_t pcs_sds_tx_tristate_en : 1; /**< [ 6: 6](RO/H) TX driver tristate enable (RAW PCS output to lane TX). */
+ uint64_t pcs_sds_tx_resetn : 1; /**< [ 7: 7](RO/H) TX reset, active low (RAW PCS output to lane TX). */
+ uint64_t reserved_8 : 1;
+ uint64_t pcs_sds_rx_widthsel : 2; /**< [ 10: 9](RO/H) Width select.
+ 0x0 = 8-bit raw data.
+ 0x1 = 10-bit raw data.
+ 0x2 = 16-bit raw data.
+ 0x3 = 20-bit raw data. */
+ uint64_t pcs_sds_tx_chpd : 1; /**< [ 11: 11](RO/H) TX channel powerdown signal. */
+ uint64_t pcs_sds_ln_loopback_mode : 1;/**< [ 12: 12](RO/H) TX to RX on chip loopback control signal. */
+ uint64_t reserved_13 : 1;
+ uint64_t pcs_sds_rx_eie_en : 1; /**< [ 14: 14](RO/H) Enable for electrical idle detection circuit
+ in SerDes RX. */
+ uint64_t pcs_sds_rx_chpd : 1; /**< [ 15: 15](RO/H) RX channel powerdown signal. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_sds_pin_mon_1_s cn; */
+};
+typedef union bdk_gserx_lanex_sds_pin_mon_1 bdk_gserx_lanex_sds_pin_mon_1_t;
+
+/* Returns the physical CSR address of GSERX_LANEX_SDS_PIN_MON_1 for GSER
+   block (a), lane (b); out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_SDS_PIN_MON_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_SDS_PIN_MON_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440138ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440138ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440138ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_SDS_PIN_MON_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_SDS_PIN_MON_1(a,b) bdk_gserx_lanex_sds_pin_mon_1_t
+#define bustype_BDK_GSERX_LANEX_SDS_PIN_MON_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_SDS_PIN_MON_1(a,b) "GSERX_LANEX_SDS_PIN_MON_1"
+#define device_bar_BDK_GSERX_LANEX_SDS_PIN_MON_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_SDS_PIN_MON_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_SDS_PIN_MON_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_sds_pin_mon_2
+ *
+ * GSER Lane SerDes Pin Monitor 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+/* Access the whole 64-bit CSR via .u, or decode individual fields via .s. */
+union bdk_gserx_lanex_sds_pin_mon_2
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_sds_pin_mon_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t pcs_sds_tx_vboost_en : 1; /**< [ 10: 10](RO/H) TX boost enable. */
+ uint64_t pcs_sds_tx_turbos_en : 1; /**< [ 9: 9](RO/H) TX turbo mode enable signal, increases swing of TX
+ through current mode. */
+ uint64_t pcs_sds_premptap : 9; /**< [ 8: 0](RO/H) Preemphasis control.
+ \<8:4\> = Postcursor.
+ \<3:0\> = Precursor. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_premptap : 9; /**< [ 8: 0](RO/H) Preemphasis control.
+ \<8:4\> = Postcursor.
+ \<3:0\> = Precursor. */
+ uint64_t pcs_sds_tx_turbos_en : 1; /**< [ 9: 9](RO/H) TX turbo mode enable signal, increases swing of TX
+ through current mode. */
+ uint64_t pcs_sds_tx_vboost_en : 1; /**< [ 10: 10](RO/H) TX boost enable. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_sds_pin_mon_2_s cn; */
+};
+typedef union bdk_gserx_lanex_sds_pin_mon_2 bdk_gserx_lanex_sds_pin_mon_2_t;
+
+/* Returns the physical CSR address of GSERX_LANEX_SDS_PIN_MON_2 for GSER
+   block (a), lane (b); out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_SDS_PIN_MON_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_SDS_PIN_MON_2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090440140ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e090440140ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e090440140ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_SDS_PIN_MON_2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_SDS_PIN_MON_2(a,b) bdk_gserx_lanex_sds_pin_mon_2_t
+#define bustype_BDK_GSERX_LANEX_SDS_PIN_MON_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_SDS_PIN_MON_2(a,b) "GSERX_LANEX_SDS_PIN_MON_2"
+#define device_bar_BDK_GSERX_LANEX_SDS_PIN_MON_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_SDS_PIN_MON_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_SDS_PIN_MON_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_tx_cfg_0
+ *
+ * GSER Lane TX Configuration 0 Register
+ * These registers are reset by hardware only during chip cold reset. The
+ * values of the CSR fields in these registers do not change during chip
+ * warm or soft resets.
+ */
+/* Access the whole 64-bit CSR via .u, or decode individual fields via .s. */
+union bdk_gserx_lanex_tx_cfg_0
+{
+ uint64_t u;
+ struct bdk_gserx_lanex_tx_cfg_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t tx_tristate_en_ovrrd_val : 1;/**< [ 15: 15](R/W) TX termination high-Z enable. Override value when
+ GSER()_LANE()_PWR_CTRL[TX_TRISTATE_EN_OVRRD_EN] is set. */
+ uint64_t tx_chpd_ovrrd_val : 1; /**< [ 14: 14](R/W) TX lane power down. Active high. Override value when
+ GSER()_LANE()_PWR_CTRL[TX_PD_OVRRD_EN] is set. */
+ uint64_t reserved_10_13 : 4;
+ uint64_t tx_resetn_ovrrd_val : 1; /**< [ 9: 9](R/W) TX P2S reset. Active high. Override value when
+ GSER()_LANE()_PWR_CTRL[TX_P2S_RESET_OVRRD_EN] is set. */
+ uint64_t tx_cm_mode : 1; /**< [ 8: 8](R/W/H) Assert to enable fast common-mode charge up. For simulation purposes only. */
+ uint64_t cfg_tx_swing : 5; /**< [ 7: 3](R/W) TX output swing control.
+ Default swing encoding when GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN] is
+ asserted.
+
+ It is recommended to not use the GSER()_LANE()_TX_CFG_0[CFG_TX_SWING],
+ GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN,TX_PREMPTAP_OVRRD_VAL], or
+ GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] override registers for 10BASE-KR
+ or PCIe links in which the transmitter is adapted by the respective
+ hardware-controlled link training protocols.
+
+ The [CFG_TX_SWING] value for transmitter swing should be derived from
+ signal integrity simulations with IBIS-AMI models supplied by Cavium.
+
+ A transmit swing change should be followed by a control interface configuration
+ over-ride to force the new setting - see
+ GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+ uint64_t fast_rdet_mode : 1; /**< [ 2: 2](R/W/H) Assert to enable fast RX detection. For simulation purposes only. */
+ uint64_t fast_tristate_mode : 1; /**< [ 1: 1](R/W/H) Assert to enable fast tristate power up. For simulation purposes only. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t fast_tristate_mode : 1; /**< [ 1: 1](R/W/H) Assert to enable fast tristate power up. For simulation purposes only. */
+ uint64_t fast_rdet_mode : 1; /**< [ 2: 2](R/W/H) Assert to enable fast RX detection. For simulation purposes only. */
+ uint64_t cfg_tx_swing : 5; /**< [ 7: 3](R/W) TX output swing control.
+ Default swing encoding when GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN] is
+ asserted.
+
+ It is recommended to not use the GSER()_LANE()_TX_CFG_0[CFG_TX_SWING],
+ GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN,TX_PREMPTAP_OVRRD_VAL], or
+ GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] override registers for 10BASE-KR
+ or PCIe links in which the transmitter is adapted by the respective
+ hardware-controlled link training protocols.
+
+ The [CFG_TX_SWING] value for transmitter swing should be derived from
+ signal integrity simulations with IBIS-AMI models supplied by Cavium.
+
+ A transmit swing change should be followed by a control interface configuration
+ over-ride to force the new setting - see
+ GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+ uint64_t tx_cm_mode : 1; /**< [ 8: 8](R/W/H) Assert to enable fast common-mode charge up. For simulation purposes only. */
+ uint64_t tx_resetn_ovrrd_val : 1; /**< [ 9: 9](R/W) TX P2S reset. Active high. Override value when
+ GSER()_LANE()_PWR_CTRL[TX_P2S_RESET_OVRRD_EN] is set. */
+ uint64_t reserved_10_13 : 4;
+ uint64_t tx_chpd_ovrrd_val : 1; /**< [ 14: 14](R/W) TX lane power down. Active high. Override value when
+ GSER()_LANE()_PWR_CTRL[TX_PD_OVRRD_EN] is set. */
+ uint64_t tx_tristate_en_ovrrd_val : 1;/**< [ 15: 15](R/W) TX termination high-Z enable. Override value when
+ GSER()_LANE()_PWR_CTRL[TX_TRISTATE_EN_OVRRD_EN] is set. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lanex_tx_cfg_0_s cn; */
+};
+typedef union bdk_gserx_lanex_tx_cfg_0 bdk_gserx_lanex_tx_cfg_0_t;
+
+/* Returns the physical CSR address of GSERX_LANEX_TX_CFG_0 for GSER
+   block (a), lane (b); out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANEX_TX_CFG_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_TX_CFG_0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e0904400a8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+ return 0x87e0904400a8ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+ return 0x87e0904400a8ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+ __bdk_csr_fatal("GSERX_LANEX_TX_CFG_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANEX_TX_CFG_0(a,b) bdk_gserx_lanex_tx_cfg_0_t
+#define bustype_BDK_GSERX_LANEX_TX_CFG_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_TX_CFG_0(a,b) "GSERX_LANEX_TX_CFG_0"
+#define device_bar_BDK_GSERX_LANEX_TX_CFG_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_TX_CFG_0(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_TX_CFG_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_tx_cfg_1
+ *
+ * GSER Lane TX Configuration 1 Register
+ * These registers are reset by hardware only during chip cold reset. The
+ * values of the CSR fields in these registers do not change during chip
+ * warm or soft resets.
+ *
+ * The field list appears twice below (big- and little-endian bit order);
+ * both describe the same 64-bit layout.
+ */
+union bdk_gserx_lanex_tx_cfg_1
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gserx_lanex_tx_cfg_1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t tx_widthsel_ovrrd_en  : 1;  /**< [ 14: 14](R/W) Override enable for pcs_sds_txX_widthsel, TX parallel interface width setting. */
+        uint64_t tx_widthsel_ovrrd_val : 2;  /**< [ 13: 12](R/W) Override value for pcs_sds_widthsel, TX parallel interface width setting.
+                                                                 0x0 = 8-bit (not supported).
+                                                                 0x1 = 10-bit (not supported).
+                                                                 0x2 = 16-bit (for PCIe Gen3 8Gb only).
+                                                                 0x3 = 20-bit. */
+        uint64_t tx_vboost_en_ovrrd_en : 1;  /**< [ 11: 11](R/W) Override enable for pcs_sds_txX_vboost_en, TX vboost mode enable. */
+        uint64_t tx_turbo_en_ovrrd_en  : 1;  /**< [ 10: 10](R/W) Override enable for pcs_sds_txX_turbo_en, Turbo mode enable. */
+        uint64_t tx_swing_ovrrd_en     : 1;  /**< [  9:  9](R/W) Override enable for pcs_sds_txX_swing, TX swing. See
+                                                                 GSER()_LANE()_TX_CFG_0[CFG_TX_SWING].
+
+                                                                 It is recommended to not use the GSER()_LANE()_TX_CFG_0[CFG_TX_SWING],
+                                                                 GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN,TX_PREMPTAP_OVRRD_VAL], or
+                                                                 GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] override registers for 10BASE-KR
+                                                                 or PCIe links in which the transmitter is adapted by the respective
+                                                                 hardware-controlled link training protocols.
+
+                                                                 A transmit swing change should be followed by a control interface
+                                                                 configuration over-ride to force the new setting - see
+                                                                 GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+        uint64_t tx_premptap_ovrrd_val : 1;  /**< [  8:  8](R/W) Override enable for pcs_sds_txX_preemptap, preemphasis control. When
+                                                                 over-riding, [TX_PREMPTAP_OVRRD_VAL] should be set and
+                                                                 GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] has the precursor and
+                                                                 postcursor values.
+
+                                                                 It is recommended to not use the GSER()_LANE()_TX_CFG_0[CFG_TX_SWING],
+                                                                 GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN,TX_PREMPTAP_OVRRD_VAL], or
+                                                                 GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] override registers for 10BASE-KR
+                                                                 or PCIe links in which the transmitter is adapted by the respective
+                                                                 hardware-controlled link training protocols.
+
+                                                                 A preemphasis control change should be followed by a control
+                                                                 interface configuration override to force the new setting - see
+                                                                 GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+        uint64_t tx_elec_idle_ovrrd_en : 1;  /**< [  7:  7](R/W) Override enable for pcs_sds_txX_elec_idle, TX electrical idle. */
+        uint64_t smpl_rate_ovrrd_en    : 1;  /**< [  6:  6](R/W) Override enable for TX power state machine sample rate. When asserted, the TX sample is
+                                                                 specified from SMPL_RATE_OVRRD_VAL and the TX Power state machine control signal is
+                                                                 ignored. */
+        uint64_t smpl_rate_ovrrd_val   : 3;  /**< [  5:  3](R/W) Specifies the sample rate (strobe assertion) relative to mac_pcs_txX_clk when
+                                                                 SMPL_RATE_OVRRD_EN is asserted.
+                                                                 0x0 = full rate.
+                                                                 0x1 = 1/2 data rate.
+                                                                 0x2 = 1/4 data rate.
+                                                                 0x3 = 1/8 data rate.
+                                                                 0x4 = 1/16 data rate.
+                                                                 0x5-7 = Reserved. */
+        uint64_t tx_datarate_ovrrd_en  : 1;  /**< [  2:  2](R/W) Override enable for RX power state machine data rate signal. When set, rx_datarate is
+                                                                 specified from [TX_DATARATE_OVRRD_VAL] and the RX power state machine control signal is
+                                                                 ignored. */
+        uint64_t tx_datarate_ovrrd_val : 2;  /**< [  1:  0](R/W) Specifies the TX data rate when TX_DATARATE_OVRRD_EN is asserted.
+                                                                 0x0 = full rate.
+                                                                 0x1 = 1/2 data rate.
+                                                                 0x2 = 1/4 data rate.
+                                                                 0x3 = 1/8 data rate. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_datarate_ovrrd_val : 2;  /**< [  1:  0](R/W) Specifies the TX data rate when TX_DATARATE_OVRRD_EN is asserted.
+                                                                 0x0 = full rate.
+                                                                 0x1 = 1/2 data rate.
+                                                                 0x2 = 1/4 data rate.
+                                                                 0x3 = 1/8 data rate. */
+        uint64_t tx_datarate_ovrrd_en  : 1;  /**< [  2:  2](R/W) Override enable for RX power state machine data rate signal. When set, rx_datarate is
+                                                                 specified from [TX_DATARATE_OVRRD_VAL] and the RX power state machine control signal is
+                                                                 ignored. */
+        uint64_t smpl_rate_ovrrd_val   : 3;  /**< [  5:  3](R/W) Specifies the sample rate (strobe assertion) relative to mac_pcs_txX_clk when
+                                                                 SMPL_RATE_OVRRD_EN is asserted.
+                                                                 0x0 = full rate.
+                                                                 0x1 = 1/2 data rate.
+                                                                 0x2 = 1/4 data rate.
+                                                                 0x3 = 1/8 data rate.
+                                                                 0x4 = 1/16 data rate.
+                                                                 0x5-7 = Reserved. */
+        uint64_t smpl_rate_ovrrd_en    : 1;  /**< [  6:  6](R/W) Override enable for TX power state machine sample rate. When asserted, the TX sample is
+                                                                 specified from SMPL_RATE_OVRRD_VAL and the TX Power state machine control signal is
+                                                                 ignored. */
+        uint64_t tx_elec_idle_ovrrd_en : 1;  /**< [  7:  7](R/W) Override enable for pcs_sds_txX_elec_idle, TX electrical idle. */
+        uint64_t tx_premptap_ovrrd_val : 1;  /**< [  8:  8](R/W) Override enable for pcs_sds_txX_preemptap, preemphasis control. When
+                                                                 over-riding, [TX_PREMPTAP_OVRRD_VAL] should be set and
+                                                                 GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] has the precursor and
+                                                                 postcursor values.
+
+                                                                 It is recommended to not use the GSER()_LANE()_TX_CFG_0[CFG_TX_SWING],
+                                                                 GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN,TX_PREMPTAP_OVRRD_VAL], or
+                                                                 GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] override registers for 10BASE-KR
+                                                                 or PCIe links in which the transmitter is adapted by the respective
+                                                                 hardware-controlled link training protocols.
+
+                                                                 A preemphasis control change should be followed by a control
+                                                                 interface configuration override to force the new setting - see
+                                                                 GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+        uint64_t tx_swing_ovrrd_en     : 1;  /**< [  9:  9](R/W) Override enable for pcs_sds_txX_swing, TX swing. See
+                                                                 GSER()_LANE()_TX_CFG_0[CFG_TX_SWING].
+
+                                                                 It is recommended to not use the GSER()_LANE()_TX_CFG_0[CFG_TX_SWING],
+                                                                 GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN,TX_PREMPTAP_OVRRD_VAL], or
+                                                                 GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] override registers for 10BASE-KR
+                                                                 or PCIe links in which the transmitter is adapted by the respective
+                                                                 hardware-controlled link training protocols.
+
+                                                                 A transmit swing change should be followed by a control interface
+                                                                 configuration over-ride to force the new setting - see
+                                                                 GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+        uint64_t tx_turbo_en_ovrrd_en  : 1;  /**< [ 10: 10](R/W) Override enable for pcs_sds_txX_turbo_en, Turbo mode enable. */
+        uint64_t tx_vboost_en_ovrrd_en : 1;  /**< [ 11: 11](R/W) Override enable for pcs_sds_txX_vboost_en, TX vboost mode enable. */
+        uint64_t tx_widthsel_ovrrd_val : 2;  /**< [ 13: 12](R/W) Override value for pcs_sds_widthsel, TX parallel interface width setting.
+                                                                 0x0 = 8-bit (not supported).
+                                                                 0x1 = 10-bit (not supported).
+                                                                 0x2 = 16-bit (for PCIe Gen3 8Gb only).
+                                                                 0x3 = 20-bit. */
+        uint64_t tx_widthsel_ovrrd_en  : 1;  /**< [ 14: 14](R/W) Override enable for pcs_sds_txX_widthsel, TX parallel interface width setting. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_tx_cfg_1_s cn; */
+};
+typedef union bdk_gserx_lanex_tx_cfg_1 bdk_gserx_lanex_tx_cfg_1_t;
+
+/*
+ * Returns the physical CSR address of GSER(a)_LANE(b)_TX_CFG_1 for the
+ * running chip model. Each model checks its own valid (a, b) range, then
+ * masks the indices into the 0x1000000-per-GSER / 0x100000-per-lane strides.
+ * NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm, since
+ * control would otherwise fall off the end of a non-void function.
+ */
+static inline uint64_t BDK_GSERX_LANEX_TX_CFG_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_TX_CFG_1(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904400b0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904400b0ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904400b0ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_TX_CFG_1", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros: CSR C type, bus type, printable name, PCI BAR,
+   bus number and argument list for this register. */
+#define typedef_BDK_GSERX_LANEX_TX_CFG_1(a,b) bdk_gserx_lanex_tx_cfg_1_t
+#define bustype_BDK_GSERX_LANEX_TX_CFG_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_TX_CFG_1(a,b) "GSERX_LANEX_TX_CFG_1"
+#define device_bar_BDK_GSERX_LANEX_TX_CFG_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_TX_CFG_1(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_TX_CFG_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_tx_cfg_2
+ *
+ * GSER Lane TX Configuration 2 Register
+ * These registers are for diagnostic use only. These registers are reset by hardware only during
+ * chip cold reset. The values of the CSR fields in these registers do not change during chip
+ * warm or soft resets.
+ *
+ * The field list appears twice below (big- and little-endian bit order);
+ * both describe the same 64-bit layout.
+ */
+union bdk_gserx_lanex_tx_cfg_2
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gserx_lanex_tx_cfg_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t pcs_sds_tx_dcc_en     : 1;  /**< [ 15: 15](R/W) DCC enable. */
+        uint64_t reserved_3_14         : 12;
+        uint64_t rcvr_test_ovrrd_en    : 1;  /**< [  2:  2](R/W) Override RX detect disable and test pulse. */
+        uint64_t rcvr_test_ovrrd_val   : 1;  /**< [  1:  1](R/W) Override value for RX detect test pulse; used to create a pulse during which the receiver
+                                                                 detect test operation is performed. */
+        uint64_t tx_rx_detect_dis_ovrrd_val : 1;/**< [  0:  0](R/W) Override value of RX detect disable. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_rx_detect_dis_ovrrd_val : 1;/**< [  0:  0](R/W) Override value of RX detect disable. */
+        uint64_t rcvr_test_ovrrd_val   : 1;  /**< [  1:  1](R/W) Override value for RX detect test pulse; used to create a pulse during which the receiver
+                                                                 detect test operation is performed. */
+        uint64_t rcvr_test_ovrrd_en    : 1;  /**< [  2:  2](R/W) Override RX detect disable and test pulse. */
+        uint64_t reserved_3_14         : 12;
+        uint64_t pcs_sds_tx_dcc_en     : 1;  /**< [ 15: 15](R/W) DCC enable. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_tx_cfg_2_s cn; */
+};
+typedef union bdk_gserx_lanex_tx_cfg_2 bdk_gserx_lanex_tx_cfg_2_t;
+
+/*
+ * Returns the physical CSR address of GSER(a)_LANE(b)_TX_CFG_2 for the
+ * running chip model. Each model checks its own valid (a, b) range, then
+ * masks the indices into the 0x1000000-per-GSER / 0x100000-per-lane strides.
+ * NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm, since
+ * control would otherwise fall off the end of a non-void function.
+ */
+static inline uint64_t BDK_GSERX_LANEX_TX_CFG_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_TX_CFG_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904400b8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904400b8ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904400b8ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_TX_CFG_2", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros: CSR C type, bus type, printable name, PCI BAR,
+   bus number and argument list for this register. */
+#define typedef_BDK_GSERX_LANEX_TX_CFG_2(a,b) bdk_gserx_lanex_tx_cfg_2_t
+#define bustype_BDK_GSERX_LANEX_TX_CFG_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_TX_CFG_2(a,b) "GSERX_LANEX_TX_CFG_2"
+#define device_bar_BDK_GSERX_LANEX_TX_CFG_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_TX_CFG_2(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_TX_CFG_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_tx_cfg_3
+ *
+ * GSER Lane TX Configuration 3 Register
+ * These registers are for diagnostic use only. These registers are reset by hardware only during
+ * chip cold reset. The values of the CSR fields in these registers do not change during chip
+ * warm or soft resets.
+ *
+ * The field list appears twice below (big- and little-endian bit order);
+ * both describe the same 64-bit layout.
+ */
+union bdk_gserx_lanex_tx_cfg_3
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gserx_lanex_tx_cfg_3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t cfg_tx_vboost_en      : 1;  /**< [ 14: 14](R/W) Specifies the value of TX VBoost enable when
+                                                                 GSER()_LANE()_TX_CFG_1[TX_VBOOST_EN_OVRRD_EN] is asserted. */
+        uint64_t reserved_7_13         : 7;
+        uint64_t pcs_sds_tx_gain       : 3;  /**< [  6:  4](R/W/H) TX gain. For debug use only. */
+        uint64_t pcs_sds_tx_srate_sel  : 3;  /**< [  3:  1](R/W/H) Reserved. */
+        uint64_t cfg_tx_turbo_en       : 1;  /**< [  0:  0](R/W) Specifies value of TX turbo enable when GSER()_LANE()_TX_CFG_1[TX_TURBO_EN] is set. */
+        /* NOTE(review): TX_CFG_1 above defines TX_TURBO_EN_OVRRD_EN, not
+           TX_TURBO_EN -- the vendor text likely means that field; confirm
+           against the hardware manual. */
+#else /* Word 0 - Little Endian */
+        uint64_t cfg_tx_turbo_en       : 1;  /**< [  0:  0](R/W) Specifies value of TX turbo enable when GSER()_LANE()_TX_CFG_1[TX_TURBO_EN] is set. */
+        uint64_t pcs_sds_tx_srate_sel  : 3;  /**< [  3:  1](R/W/H) Reserved. */
+        uint64_t pcs_sds_tx_gain       : 3;  /**< [  6:  4](R/W/H) TX gain. For debug use only. */
+        uint64_t reserved_7_13         : 7;
+        uint64_t cfg_tx_vboost_en      : 1;  /**< [ 14: 14](R/W) Specifies the value of TX VBoost enable when
+                                                                 GSER()_LANE()_TX_CFG_1[TX_VBOOST_EN_OVRRD_EN] is asserted. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_tx_cfg_3_s cn; */
+};
+typedef union bdk_gserx_lanex_tx_cfg_3 bdk_gserx_lanex_tx_cfg_3_t;
+
+/*
+ * Returns the physical CSR address of GSER(a)_LANE(b)_TX_CFG_3 for the
+ * running chip model. Each model checks its own valid (a, b) range, then
+ * masks the indices into the 0x1000000-per-GSER / 0x100000-per-lane strides.
+ * NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm, since
+ * control would otherwise fall off the end of a non-void function.
+ */
+static inline uint64_t BDK_GSERX_LANEX_TX_CFG_3(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_TX_CFG_3(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904400c0ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904400c0ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904400c0ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_TX_CFG_3", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros: CSR C type, bus type, printable name, PCI BAR,
+   bus number and argument list for this register. */
+#define typedef_BDK_GSERX_LANEX_TX_CFG_3(a,b) bdk_gserx_lanex_tx_cfg_3_t
+#define bustype_BDK_GSERX_LANEX_TX_CFG_3(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_TX_CFG_3(a,b) "GSERX_LANEX_TX_CFG_3"
+#define device_bar_BDK_GSERX_LANEX_TX_CFG_3(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_TX_CFG_3(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_TX_CFG_3(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane#_tx_pre_emphasis
+ *
+ * GSER Lane TX Configuration Preemphasis Register
+ * These registers are reset by hardware only during chip cold reset. The
+ * values of the CSR fields in these registers do not change during chip
+ * warm or soft resets.
+ *
+ * The field list appears twice below (big- and little-endian bit order);
+ * both describe the same 64-bit layout.
+ */
+union bdk_gserx_lanex_tx_pre_emphasis
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gserx_lanex_tx_pre_emphasis_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_9_63         : 55;
+        uint64_t cfg_tx_premptap       : 9;  /**< [  8:  0](R/W) Override preemphasis control. Applies when
+                                                                 GSER()_LANE()_TX_CFG_1[TX_PREMPTAP_OVRRD_VAL] is asserted.
+                                                                 \<8:4\> = Postcursor.
+                                                                 \<3:0\> = Precursor.
+
+                                                                 It is recommended to not use the GSER()_LANE()_TX_CFG_0[CFG_TX_SWING],
+                                                                 GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN,TX_PREMPTAP_OVRRD_VAL], or
+                                                                 GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] override registers for 10BASE-KR
+                                                                 or PCIe links in which the transmitter is adapted by the respective
+                                                                 hardware-controlled link training protocols.
+
+                                                                 The [CFG_TX_PREEMPTAP] value for transmitter preemphasis and
+                                                                 postemphasis should be derived from signal integrity simulations
+                                                                 with IBIS-AMI models supplied by Cavium.
+
+                                                                 A preemphasis control change should be followed by a control interface
+                                                                 configuration over-ride to force the new setting - see
+                                                                 GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+#else /* Word 0 - Little Endian */
+        uint64_t cfg_tx_premptap       : 9;  /**< [  8:  0](R/W) Override preemphasis control. Applies when
+                                                                 GSER()_LANE()_TX_CFG_1[TX_PREMPTAP_OVRRD_VAL] is asserted.
+                                                                 \<8:4\> = Postcursor.
+                                                                 \<3:0\> = Precursor.
+
+                                                                 It is recommended to not use the GSER()_LANE()_TX_CFG_0[CFG_TX_SWING],
+                                                                 GSER()_LANE()_TX_CFG_1[TX_SWING_OVRRD_EN,TX_PREMPTAP_OVRRD_VAL], or
+                                                                 GSER()_LANE()_TX_PRE_EMPHASIS[CFG_TX_PREMPTAP] override registers for 10BASE-KR
+                                                                 or PCIe links in which the transmitter is adapted by the respective
+                                                                 hardware-controlled link training protocols.
+
+                                                                 The [CFG_TX_PREEMPTAP] value for transmitter preemphasis and
+                                                                 postemphasis should be derived from signal integrity simulations
+                                                                 with IBIS-AMI models supplied by Cavium.
+
+                                                                 A preemphasis control change should be followed by a control interface
+                                                                 configuration over-ride to force the new setting - see
+                                                                 GSER()_LANE()_PCS_CTLIFC_2[CTLIFC_OVRRD_REQ]. */
+        uint64_t reserved_9_63         : 55;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lanex_tx_pre_emphasis_s cn; */
+};
+typedef union bdk_gserx_lanex_tx_pre_emphasis bdk_gserx_lanex_tx_pre_emphasis_t;
+
+/*
+ * Returns the physical CSR address of GSER(a)_LANE(b)_TX_PRE_EMPHASIS for the
+ * running chip model. Each model checks its own valid (a, b) range, then
+ * masks the indices into the 0x1000000-per-GSER / 0x100000-per-lane strides.
+ * NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm, since
+ * control would otherwise fall off the end of a non-void function.
+ */
+static inline uint64_t BDK_GSERX_LANEX_TX_PRE_EMPHASIS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANEX_TX_PRE_EMPHASIS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e0904400c8ll + 0x1000000ll * ((a) & 0x3) + 0x100000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=3)))
+        return 0x87e0904400c8ll + 0x1000000ll * ((a) & 0x7) + 0x100000ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=3)))
+        return 0x87e0904400c8ll + 0x1000000ll * ((a) & 0xf) + 0x100000ll * ((b) & 0x3);
+    __bdk_csr_fatal("GSERX_LANEX_TX_PRE_EMPHASIS", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros: CSR C type, bus type, printable name, PCI BAR,
+   bus number and argument list for this register. */
+#define typedef_BDK_GSERX_LANEX_TX_PRE_EMPHASIS(a,b) bdk_gserx_lanex_tx_pre_emphasis_t
+#define bustype_BDK_GSERX_LANEX_TX_PRE_EMPHASIS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANEX_TX_PRE_EMPHASIS(a,b) "GSERX_LANEX_TX_PRE_EMPHASIS"
+#define device_bar_BDK_GSERX_LANEX_TX_PRE_EMPHASIS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANEX_TX_PRE_EMPHASIS(a,b) (a)
+#define arguments_BDK_GSERX_LANEX_TX_PRE_EMPHASIS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane_lpbken
+ *
+ * GSER Lane Loopback Enable Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ *
+ * The generic view "s" is shared with CN81XX; CN88XX and CN83XX carry
+ * model-specific field descriptions in the "cn88xx"/"cn83xx" views below.
+ * Each view lists its fields twice (big- and little-endian bit order).
+ */
+union bdk_gserx_lane_lpbken
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gserx_lane_lpbken_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lpbken                : 4;  /**< [  3:  0](R/W) For links that are not in PCIE or SATA mode. When asserted in P0 state,
+                                                                 allows per-lane TX-to-RX serial loopback activation.
+                                                                 \<3\>: Lane 3.  Reserved.
+                                                                 \<2\>: Lane 2.  Reserved.
+                                                                 \<1\>: Lane 1.
+                                                                 \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t lpbken                : 4;  /**< [  3:  0](R/W) For links that are not in PCIE or SATA mode. When asserted in P0 state,
+                                                                 allows per-lane TX-to-RX serial loopback activation.
+                                                                 \<3\>: Lane 3.  Reserved.
+                                                                 \<2\>: Lane 2.  Reserved.
+                                                                 \<1\>: Lane 1.
+                                                                 \<0\>: Lane 0. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lane_lpbken_s cn81xx; */
+    struct bdk_gserx_lane_lpbken_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lpbken                : 4;  /**< [  3:  0](R/W) For links that are not in PCIE or SATA mode (including all CCPI links). When asserted in
+                                                                 P0
+                                                                 state,
+                                                                 allows per-lane TX-to-RX serial loopback activation.
+                                                                 \<3\>: Lane 3.
+                                                                 \<2\>: Lane 2.
+                                                                 \<1\>: Lane 1.
+                                                                 \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t lpbken                : 4;  /**< [  3:  0](R/W) For links that are not in PCIE or SATA mode (including all CCPI links). When asserted in
+                                                                 P0
+                                                                 state,
+                                                                 allows per-lane TX-to-RX serial loopback activation.
+                                                                 \<3\>: Lane 3.
+                                                                 \<2\>: Lane 2.
+                                                                 \<1\>: Lane 1.
+                                                                 \<0\>: Lane 0. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_gserx_lane_lpbken_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lpbken                : 4;  /**< [  3:  0](R/W) For links that are not in PCIE or SATA mode. When asserted in
+                                                                 P0 state, allows per lane TX-to-RX serial loopback activation.
+                                                                 \<3\>: Lane 3.  Not supported in GSER4, GSER5, or GSER6.
+                                                                 \<2\>: Lane 2.  Not supported in GSER4, GSER5, or GSER6.
+                                                                 \<1\>: Lane 1.
+                                                                 \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t lpbken                : 4;  /**< [  3:  0](R/W) For links that are not in PCIE or SATA mode. When asserted in
+                                                                 P0 state, allows per lane TX-to-RX serial loopback activation.
+                                                                 \<3\>: Lane 3.  Not supported in GSER4, GSER5, or GSER6.
+                                                                 \<2\>: Lane 2.  Not supported in GSER4, GSER5, or GSER6.
+                                                                 \<1\>: Lane 1.
+                                                                 \<0\>: Lane 0. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_gserx_lane_lpbken bdk_gserx_lane_lpbken_t;
+
+/*
+ * Returns the physical CSR address of GSER(a)_LANE_LPBKEN for the running
+ * chip model. Each model checks its valid GSER index range, then masks the
+ * index into the 0x1000000-per-GSER address stride.
+ * NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm, since
+ * control would otherwise fall off the end of a non-void function.
+ */
+static inline uint64_t BDK_GSERX_LANE_LPBKEN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_LPBKEN(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x87e090000110ll + 0x1000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+        return 0x87e090000110ll + 0x1000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+        return 0x87e090000110ll + 0x1000000ll * ((a) & 0xf);
+    __bdk_csr_fatal("GSERX_LANE_LPBKEN", 1, a, 0, 0, 0);
+}
+
+/* Companion metadata macros: CSR C type, bus type, printable name, PCI BAR,
+   bus number and argument list for this register. */
+#define typedef_BDK_GSERX_LANE_LPBKEN(a) bdk_gserx_lane_lpbken_t
+#define bustype_BDK_GSERX_LANE_LPBKEN(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_LPBKEN(a) "GSERX_LANE_LPBKEN"
+#define device_bar_BDK_GSERX_LANE_LPBKEN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_LPBKEN(a) (a)
+#define arguments_BDK_GSERX_LANE_LPBKEN(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_mode
+ *
+ * GSER Lane Mode Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ *
+ * The generic view "s" is shared with CN81XX; CN88XX and CN83XX carry
+ * model-specific field descriptions in the "cn88xx"/"cn83xx" views below.
+ * Each view lists its fields twice (big- and little-endian bit order).
+ */
+union bdk_gserx_lane_mode
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    struct bdk_gserx_lane_mode_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lmode                 : 4;  /**< [  3:  0](R/W/H) For links that are not in PCIE or SATA mode, used to index into the PHY
+                                                                 table to select electrical specs and link rate. Note that the PHY table can be modified
+                                                                 such that any supported link rate can be derived regardless of the configured LMODE.
+
+                                                                 0x0: R_25G_REFCLK100.
+                                                                 0x1: R_5G_REFCLK100.
+                                                                 0x2: R_8G_REFCLK100.
+                                                                 0x3: R_125G_REFCLK15625_KX (not supported).
+                                                                 0x4: R_3125G_REFCLK15625_XAUI.
+                                                                 0x5: R_103125G_REFCLK15625_KR.
+                                                                 0x6: R_125G_REFCLK15625_SGMII.
+                                                                 0x7: R_5G_REFCLK15625_QSGMII.
+                                                                 0x8: R_625G_REFCLK15625_RXAUI.
+                                                                 0x9: R_25G_REFCLK125.
+                                                                 0xA: R_5G_REFCLK125.
+                                                                 0xB: R_8G_REFCLK125.
+                                                                 0xC - 0xF: Reserved.
+
+                                                                 This register is not used for PCIE configurations. For BGX links, this register
+                                                                 defaults to R_625G_REFCLK15625_RXAUI.
+
+                                                                 It is recommended that the PHY be in reset when reconfiguring the [LMODE]
+                                                                 (GSER()_PHY_CTL[PHY_RESET] is set).
+
+                                                                 Once the [LMODE] has been configured, and the PHY is out of reset, the table entries for
+                                                                 the
+                                                                 selected [LMODE] must be updated to reflect the reference clock speed. Refer to the
+                                                                 register
+                                                                 description and index into the table using the rate and reference speed to obtain the
+                                                                 recommended values.
+
+                                                                 _ Write GSER()_PLL_P()_MODE_0.
+                                                                 _ Write GSER()_PLL_P()_MODE_1.
+                                                                 _ Write GSER()_LANE_P()_MODE_0.
+                                                                 _ Write GSER()_LANE_P()_MODE_1.
+
+                                                                 where in "P(z)", z equals [LMODE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t lmode                 : 4;  /**< [  3:  0](R/W/H) For links that are not in PCIE or SATA mode, used to index into the PHY
+                                                                 table to select electrical specs and link rate. Note that the PHY table can be modified
+                                                                 such that any supported link rate can be derived regardless of the configured LMODE.
+
+                                                                 0x0: R_25G_REFCLK100.
+                                                                 0x1: R_5G_REFCLK100.
+                                                                 0x2: R_8G_REFCLK100.
+                                                                 0x3: R_125G_REFCLK15625_KX (not supported).
+                                                                 0x4: R_3125G_REFCLK15625_XAUI.
+                                                                 0x5: R_103125G_REFCLK15625_KR.
+                                                                 0x6: R_125G_REFCLK15625_SGMII.
+                                                                 0x7: R_5G_REFCLK15625_QSGMII.
+                                                                 0x8: R_625G_REFCLK15625_RXAUI.
+                                                                 0x9: R_25G_REFCLK125.
+                                                                 0xA: R_5G_REFCLK125.
+                                                                 0xB: R_8G_REFCLK125.
+                                                                 0xC - 0xF: Reserved.
+
+                                                                 This register is not used for PCIE configurations. For BGX links, this register
+                                                                 defaults to R_625G_REFCLK15625_RXAUI.
+
+                                                                 It is recommended that the PHY be in reset when reconfiguring the [LMODE]
+                                                                 (GSER()_PHY_CTL[PHY_RESET] is set).
+
+                                                                 Once the [LMODE] has been configured, and the PHY is out of reset, the table entries for
+                                                                 the
+                                                                 selected [LMODE] must be updated to reflect the reference clock speed. Refer to the
+                                                                 register
+                                                                 description and index into the table using the rate and reference speed to obtain the
+                                                                 recommended values.
+
+                                                                 _ Write GSER()_PLL_P()_MODE_0.
+                                                                 _ Write GSER()_PLL_P()_MODE_1.
+                                                                 _ Write GSER()_LANE_P()_MODE_0.
+                                                                 _ Write GSER()_LANE_P()_MODE_1.
+
+                                                                 where in "P(z)", z equals [LMODE]. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_lane_mode_s cn81xx; */
+    struct bdk_gserx_lane_mode_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lmode                 : 4;  /**< [  3:  0](R/W/H) For links that are not in PCIE or SATA mode (including all CCPI links), used to index into
+                                                                 the PHY
+                                                                 table to select electrical specs and link rate. Note that the PHY table can be modified
+                                                                 such that any supported link rate can be derived regardless of the configured LMODE.
+
+                                                                 0x0: R_25G_REFCLK100.
+                                                                 0x1: R_5G_REFCLK100.
+                                                                 0x2: R_8G_REFCLK100.
+                                                                 0x3: R_125G_REFCLK15625_KX (not supported).
+                                                                 0x4: R_3125G_REFCLK15625_XAUI.
+                                                                 0x5: R_103125G_REFCLK15625_KR.
+                                                                 0x6: R_125G_REFCLK15625_SGMII.
+                                                                 0x7: R_5G_REFCLK15625_QSGMII (not supported).
+                                                                 0x8: R_625G_REFCLK15625_RXAUI.
+                                                                 0x9: R_25G_REFCLK125.
+                                                                 0xA: R_5G_REFCLK125.
+                                                                 0xB: R_8G_REFCLK125.
+                                                                 0xC - 0xF: Reserved.
+
+                                                                 This register is not used for PCIE configurations. For non-CCPI links, this register
+                                                                 defaults to R_625G_REFCLK15625_RXAUI. For CCPI links, the value is mapped at reset from
+                                                                 the
+                                                                 GSER()_SPD and the appropriate table updates are performed so the rate is obtained for the
+                                                                 particular reference clock.
+
+                                                                 It is recommended that the PHY be in reset when reconfiguring the [LMODE]
+                                                                 (GSER()_PHY_CTL[PHY_RESET] is set).
+
+                                                                 Once the [LMODE] has been configured, and the PHY is out of reset, the table entries for
+                                                                 the
+                                                                 selected [LMODE] must be updated to reflect the reference clock speed. Refer to the
+                                                                 register
+                                                                 description and index into the table using the rate and reference speed to obtain the
+                                                                 recommended values.
+
+                                                                 _ Write GSER()_PLL_P()_MODE_0.
+                                                                 _ Write GSER()_PLL_P()_MODE_1.
+                                                                 _ Write GSER()_LANE_P()_MODE_0.
+                                                                 _ Write GSER()_LANE_P()_MODE_1.
+
+                                                                 where in "P(z)", z equals [LMODE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t lmode                 : 4;  /**< [  3:  0](R/W/H) For links that are not in PCIE or SATA mode (including all CCPI links), used to index into
+                                                                 the PHY
+                                                                 table to select electrical specs and link rate. Note that the PHY table can be modified
+                                                                 such that any supported link rate can be derived regardless of the configured LMODE.
+
+                                                                 0x0: R_25G_REFCLK100.
+                                                                 0x1: R_5G_REFCLK100.
+                                                                 0x2: R_8G_REFCLK100.
+                                                                 0x3: R_125G_REFCLK15625_KX (not supported).
+                                                                 0x4: R_3125G_REFCLK15625_XAUI.
+                                                                 0x5: R_103125G_REFCLK15625_KR.
+                                                                 0x6: R_125G_REFCLK15625_SGMII.
+                                                                 0x7: R_5G_REFCLK15625_QSGMII (not supported).
+                                                                 0x8: R_625G_REFCLK15625_RXAUI.
+                                                                 0x9: R_25G_REFCLK125.
+                                                                 0xA: R_5G_REFCLK125.
+                                                                 0xB: R_8G_REFCLK125.
+                                                                 0xC - 0xF: Reserved.
+
+                                                                 This register is not used for PCIE configurations. For non-CCPI links, this register
+                                                                 defaults to R_625G_REFCLK15625_RXAUI. For CCPI links, the value is mapped at reset from
+                                                                 the
+                                                                 GSER()_SPD and the appropriate table updates are performed so the rate is obtained for the
+                                                                 particular reference clock.
+
+                                                                 It is recommended that the PHY be in reset when reconfiguring the [LMODE]
+                                                                 (GSER()_PHY_CTL[PHY_RESET] is set).
+
+                                                                 Once the [LMODE] has been configured, and the PHY is out of reset, the table entries for
+                                                                 the
+                                                                 selected [LMODE] must be updated to reflect the reference clock speed. Refer to the
+                                                                 register
+                                                                 description and index into the table using the rate and reference speed to obtain the
+                                                                 recommended values.
+
+                                                                 _ Write GSER()_PLL_P()_MODE_0.
+                                                                 _ Write GSER()_PLL_P()_MODE_1.
+                                                                 _ Write GSER()_LANE_P()_MODE_0.
+                                                                 _ Write GSER()_LANE_P()_MODE_1.
+
+                                                                 where in "P(z)", z equals [LMODE]. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_gserx_lane_mode_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lmode                 : 4;  /**< [  3:  0](R/W/H) For links that are not in PCIE or SATA mode, used to index into the PHY
+                                                                 table to select electrical specs and link rate. Note that the PHY table can be modified
+                                                                 such that any supported link rate can be derived regardless of the configured LMODE.
+
+                                                                 0x0: R_25G_REFCLK100.
+                                                                 0x1: R_5G_REFCLK100.
+                                                                 0x2: R_8G_REFCLK100.
+                                                                 0x3: R_125G_REFCLK15625_KX (not supported).
+                                                                 0x4: R_3125G_REFCLK15625_XAUI.
+                                                                 0x5: R_103125G_REFCLK15625_KR.
+                                                                 0x6: R_125G_REFCLK15625_SGMII.
+                                                                 0x7: R_5G_REFCLK15625_QSGMII.
+                                                                 0x8: R_625G_REFCLK15625_RXAUI.
+                                                                 0x9: R_25G_REFCLK125.
+                                                                 0xA: R_5G_REFCLK125.
+                                                                 0xB: R_8G_REFCLK125.
+                                                                 0xC - 0xF: Reserved.
+
+                                                                 This register is not used for PCIE configurations. This register
+                                                                 defaults to R_625G_REFCLK15625_RXAUI.
+
+                                                                 It is recommended that the PHY be in reset when reconfiguring the [LMODE]
+                                                                 (GSER()_PHY_CTL[PHY_RESET] is set).
+
+                                                                 Once the [LMODE] has been configured, and the PHY is out of reset, the table entries for
+                                                                 the
+                                                                 selected [LMODE] must be updated to reflect the reference clock speed. Refer to the
+                                                                 register
+                                                                 description and index into the table using the rate and reference speed to obtain the
+                                                                 recommended values.
+
+                                                                 _ Write GSER()_PLL_P()_MODE_0.
+                                                                 _ Write GSER()_PLL_P()_MODE_1.
+                                                                 _ Write GSER()_LANE_P()_MODE_0.
+                                                                 _ Write GSER()_LANE_P()_MODE_1.
+
+                                                                 where in "P(z)", z equals [LMODE]. */
+#else /* Word 0 - Little Endian */
+        uint64_t lmode                 : 4;  /**< [  3:  0](R/W/H) For links that are not in PCIE or SATA mode, used to index into the PHY
+                                                                 table to select electrical specs and link rate. Note that the PHY table can be modified
+                                                                 such that any supported link rate can be derived regardless of the configured LMODE.
+
+                                                                 0x0: R_25G_REFCLK100.
+                                                                 0x1: R_5G_REFCLK100.
+                                                                 0x2: R_8G_REFCLK100.
+                                                                 0x3: R_125G_REFCLK15625_KX (not supported).
+                                                                 0x4: R_3125G_REFCLK15625_XAUI.
+                                                                 0x5: R_103125G_REFCLK15625_KR.
+                                                                 0x6: R_125G_REFCLK15625_SGMII.
+                                                                 0x7: R_5G_REFCLK15625_QSGMII.
+                                                                 0x8: R_625G_REFCLK15625_RXAUI.
+                                                                 0x9: R_25G_REFCLK125.
+                                                                 0xA: R_5G_REFCLK125.
+                                                                 0xB: R_8G_REFCLK125.
+                                                                 0xC - 0xF: Reserved.
+
+                                                                 This register is not used for PCIE configurations. This register
+                                                                 defaults to R_625G_REFCLK15625_RXAUI.
+
+                                                                 It is recommended that the PHY be in reset when reconfiguring the [LMODE]
+                                                                 (GSER()_PHY_CTL[PHY_RESET] is set).
+
+                                                                 Once the [LMODE] has been configured, and the PHY is out of reset, the table entries for
+                                                                 the
+                                                                 selected [LMODE] must be updated to reflect the reference clock speed. Refer to the
+                                                                 register
+                                                                 description and index into the table using the rate and reference speed to obtain the
+                                                                 recommended values.
+
+                                                                 _ Write GSER()_PLL_P()_MODE_0.
+                                                                 _ Write GSER()_PLL_P()_MODE_1.
+                                                                 _ Write GSER()_LANE_P()_MODE_0.
+                                                                 _ Write GSER()_LANE_P()_MODE_1.
+
+                                                                 where in "P(z)", z equals [LMODE]. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_gserx_lane_mode bdk_gserx_lane_mode_t;
+
+/*
+ * Returns the physical CSR address of GSER(a)_LANE_MODE for the running
+ * chip model. Each model checks its valid GSER index range, then masks the
+ * index into the 0x1000000-per-GSER address stride.
+ * NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm, since
+ * control would otherwise fall off the end of a non-void function.
+ */
+static inline uint64_t BDK_GSERX_LANE_MODE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_MODE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+        return 0x87e090000118ll + 0x1000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+        return 0x87e090000118ll + 0x1000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+        return 0x87e090000118ll + 0x1000000ll * ((a) & 0xf);
+    __bdk_csr_fatal("GSERX_LANE_MODE", 1, a, 0, 0, 0);
+}
+
+/* Companion metadata macros: CSR C type, bus type, printable name, PCI BAR,
+   bus number and argument list for this register. */
+#define typedef_BDK_GSERX_LANE_MODE(a) bdk_gserx_lane_mode_t
+#define bustype_BDK_GSERX_LANE_MODE(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_MODE(a) "GSERX_LANE_MODE"
+#define device_bar_BDK_GSERX_LANE_MODE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_MODE(a) (a)
+#define arguments_BDK_GSERX_LANE_MODE(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_p#_mode_0
+ *
+ * GSER Lane Protocol Mode 0 Register
+ * These are the RAW PCS lane settings mode 0 registers. There is one register per
+ * 4 lanes per GSER per GSER_LMODE_E value (0..11). Only one entry is used at any given time in a
+ * given GSER lane - the one selected by the corresponding GSER()_LANE_MODE[LMODE].
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_px_mode_0
+{
+ uint64_t u;
+ struct bdk_gserx_lane_px_mode_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t ctle : 2; /**< [ 14: 13](R/W/H) Continuous time linear equalizer pole configuration.
+ 0x0 = ~5dB of peaking at 4 GHz (Minimum bandwidth).
+ 0x1 = ~10dB of peaking at 5 GHz.
+ 0x2 = ~15dB of peaking at 5.5 GHz.
+ 0x3 = ~20dB of peaking at 6 GHz (Maximum bandwidth).
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x0
+ _ R_5G_REFCLK100: 0x0
+ _ R_8G_REFCLK100: 0x3
+ _ R_125G_REFCLK15625_KX: 0x0
+ _ R_3125G_REFCLK15625_XAUI: 0x0
+ _ R_103125G_REFCLK15625_KR: 0x3
+ _ R_125G_REFCLK15625_SGMII: 0x0
+ _ R_5G_REFCLK15625_QSGMII: 0x0
+ _ R_625G_REFCLK15625_RXAUI: 0x0
+ _ R_25G_REFCLK125: 0x0
+ _ R_5G_REFCLK125: 0x0
+ _ R_8G_REFCLK125: 0x3
+ \</pre\>
+
+ For SATA, [CTLE] should always be 0. */
+ uint64_t pcie : 1; /**< [ 12: 12](R/W/H) Selects between RX terminations.
+ 0 = Differential termination.
+ 1 = Termination between pad and SDS_VDDS.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x1
+ _ R_5G_REFCLK100: 0x1
+ _ R_8G_REFCLK100: 0x0
+ _ R_125G_REFCLK15625_KX: 0x0
+ _ R_3125G_REFCLK15625_XAUI: 0x0
+ _ R_103125G_REFCLK15625_KR: 0x0
+ _ R_125G_REFCLK15625_SGMII: 0x0
+ _ R_5G_REFCLK15625_QSGMII: 0x0
+ _ R_625G_REFCLK15625_RXAUI: 0x0
+ _ R_25G_REFCLK125: 0x1
+ _ R_5G_REFCLK125: 0x1
+ _ R_8G_REFCLK125: 0x0
+ \</pre\>
+
+ For SATA, [PCIE] should always be 0. */
+ uint64_t tx_ldiv : 2; /**< [ 11: 10](R/W/H) Configures clock divider used to determine the transmit rate.
+ 0x0 = full data rate.
+ 0x1 = 1/2 data rate.
+ 0x2 = 1/4 data rate.
+ 0x3 = 1/8 data rate.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x1
+ _ R_5G_REFCLK100: 0x0
+ _ R_8G_REFCLK100: 0x0
+ _ R_125G_REFCLK15625_KX: 0x2
+ _ R_3125G_REFCLK15625_XAUI: 0x1
+ _ R_103125G_REFCLK15625_KR: 0x0
+ _ R_125G_REFCLK15625_SGMII: 0x2
+ _ R_5G_REFCLK15625_QSGMII: 0x0
+ _ R_625G_REFCLK15625_RXAUI: 0x0
+ _ R_25G_REFCLK125: 0x1
+ _ R_5G_REFCLK125: 0x0
+ _ R_8G_REFCLK125: 0x0
+ \</pre\>
+
+ For SATA, [TX_LDIV] should always be 0. */
+ uint64_t rx_ldiv : 2; /**< [ 9: 8](R/W/H) Configures clock divider used to determine the receive rate.
+ 0x0 = full data rate.
+ 0x1 = 1/2 data rate.
+ 0x2 = 1/4 data rate.
+ 0x3 = 1/8 data rate.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x1
+ _ R_5G_REFCLK100: 0x0
+ _ R_8G_REFCLK100: 0x0
+ _ R_125G_REFCLK15625_KX: 0x2
+ _ R_3125G_REFCLK15625_XAUI: 0x1
+ _ R_103125G_REFCLK15625_KR: 0x0
+ _ R_125G_REFCLK15625_SGMII: 0x2
+ _ R_5G_REFCLK15625_QSGMII: 0x0
+ _ R_625G_REFCLK15625_RXAUI: 0x0
+ _ R_25G_REFCLK125: 0x1
+ _ R_5G_REFCLK125: 0x0
+ _ R_8G_REFCLK125: 0x0
+ \</pre\>
+
+ For SATA, [RX_LDIV] should be 2 for R_25G_REFCLK100 (position 0, 1.5 Gbaud),
+ 1 for R_5G_REFCLK100 (position 1, 3 Gbaud), and 0 for R_8G_REFCLK100
+ (position 2, 6 Gbaud). */
+ uint64_t srate : 3; /**< [ 7: 5](R/W) Sample rate, used to generate strobe to effectively divide the clock down to a slower
+ rate.
+
+ 0x0 = Full rate.
+ 0x1 = 1/2 data rate.
+ 0x2 = 1/4 data rate.
+ 0x3 = 1/8 data rate.
+ 0x4 = 1/16 data rate.
+ else = Reserved.
+
+ This field should always be cleared to zero (i.e. full rate selected). */
+ uint64_t reserved_4 : 1;
+ uint64_t tx_mode : 2; /**< [ 3: 2](R/W/H) TX data width:
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (for PCIe Gen3 8 Gb only - software should normally not select
+ this).
+ 0x3 = 20-bit raw data (anything software-configured). */
+ uint64_t rx_mode : 2; /**< [ 1: 0](R/W/H) RX data width:
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (for PCIe Gen3 8 Gb only - software should normally not select
+ this).
+ 0x3 = 20-bit raw data (anything software-configured). */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_mode : 2; /**< [ 1: 0](R/W/H) RX data width:
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (for PCIe Gen3 8 Gb only - software should normally not select
+ this).
+ 0x3 = 20-bit raw data (anything software-configured). */
+ uint64_t tx_mode : 2; /**< [ 3: 2](R/W/H) TX data width:
+ 0x0 = 8-bit raw data (not supported).
+ 0x1 = 10-bit raw data (not supported).
+ 0x2 = 16-bit raw data (for PCIe Gen3 8 Gb only - software should normally not select
+ this).
+ 0x3 = 20-bit raw data (anything software-configured). */
+ uint64_t reserved_4 : 1;
+ uint64_t srate : 3; /**< [ 7: 5](R/W) Sample rate, used to generate strobe to effectively divide the clock down to a slower
+ rate.
+
+ 0x0 = Full rate.
+ 0x1 = 1/2 data rate.
+ 0x2 = 1/4 data rate.
+ 0x3 = 1/8 data rate.
+ 0x4 = 1/16 data rate.
+ else = Reserved.
+
+ This field should always be cleared to zero (i.e. full rate selected). */
+ uint64_t rx_ldiv : 2; /**< [ 9: 8](R/W/H) Configures clock divider used to determine the receive rate.
+ 0x0 = full data rate.
+ 0x1 = 1/2 data rate.
+ 0x2 = 1/4 data rate.
+ 0x3 = 1/8 data rate.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x1
+ _ R_5G_REFCLK100: 0x0
+ _ R_8G_REFCLK100: 0x0
+ _ R_125G_REFCLK15625_KX: 0x2
+ _ R_3125G_REFCLK15625_XAUI: 0x1
+ _ R_103125G_REFCLK15625_KR: 0x0
+ _ R_125G_REFCLK15625_SGMII: 0x2
+ _ R_5G_REFCLK15625_QSGMII: 0x0
+ _ R_625G_REFCLK15625_RXAUI: 0x0
+ _ R_25G_REFCLK125: 0x1
+ _ R_5G_REFCLK125: 0x0
+ _ R_8G_REFCLK125: 0x0
+ \</pre\>
+
+ For SATA, [RX_LDIV] should be 2 for R_25G_REFCLK100 (position 0, 1.5 Gbaud),
+ 1 for R_5G_REFCLK100 (position 1, 3 Gbaud), and 0 for R_8G_REFCLK100
+ (position 2, 6 Gbaud). */
+ uint64_t tx_ldiv : 2; /**< [ 11: 10](R/W/H) Configures clock divider used to determine the transmit rate.
+ 0x0 = full data rate.
+ 0x1 = 1/2 data rate.
+ 0x2 = 1/4 data rate.
+ 0x3 = 1/8 data rate.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x1
+ _ R_5G_REFCLK100: 0x0
+ _ R_8G_REFCLK100: 0x0
+ _ R_125G_REFCLK15625_KX: 0x2
+ _ R_3125G_REFCLK15625_XAUI: 0x1
+ _ R_103125G_REFCLK15625_KR: 0x0
+ _ R_125G_REFCLK15625_SGMII: 0x2
+ _ R_5G_REFCLK15625_QSGMII: 0x0
+ _ R_625G_REFCLK15625_RXAUI: 0x0
+ _ R_25G_REFCLK125: 0x1
+ _ R_5G_REFCLK125: 0x0
+ _ R_8G_REFCLK125: 0x0
+ \</pre\>
+
+ For SATA, [TX_LDIV] should always be 0. */
+ uint64_t pcie : 1; /**< [ 12: 12](R/W/H) Selects between RX terminations.
+ 0 = Differential termination.
+ 1 = Termination between pad and SDS_VDDS.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x1
+ _ R_5G_REFCLK100: 0x1
+ _ R_8G_REFCLK100: 0x0
+ _ R_125G_REFCLK15625_KX: 0x0
+ _ R_3125G_REFCLK15625_XAUI: 0x0
+ _ R_103125G_REFCLK15625_KR: 0x0
+ _ R_125G_REFCLK15625_SGMII: 0x0
+ _ R_5G_REFCLK15625_QSGMII: 0x0
+ _ R_625G_REFCLK15625_RXAUI: 0x0
+ _ R_25G_REFCLK125: 0x1
+ _ R_5G_REFCLK125: 0x1
+ _ R_8G_REFCLK125: 0x0
+ \</pre\>
+
+ For SATA, [PCIE] should always be 0. */
+ uint64_t ctle : 2; /**< [ 14: 13](R/W/H) Continuous time linear equalizer pole configuration.
+ 0x0 = ~5dB of peaking at 4 GHz (Minimum bandwidth).
+ 0x1 = ~10dB of peaking at 5 GHz.
+ 0x2 = ~15dB of peaking at 5.5 GHz.
+ 0x3 = ~20dB of peaking at 6 GHz (Maximum bandwidth).
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x0
+ _ R_5G_REFCLK100: 0x0
+ _ R_8G_REFCLK100: 0x3
+ _ R_125G_REFCLK15625_KX: 0x0
+ _ R_3125G_REFCLK15625_XAUI: 0x0
+ _ R_103125G_REFCLK15625_KR: 0x3
+ _ R_125G_REFCLK15625_SGMII: 0x0
+ _ R_5G_REFCLK15625_QSGMII: 0x0
+ _ R_625G_REFCLK15625_RXAUI: 0x0
+ _ R_25G_REFCLK125: 0x0
+ _ R_5G_REFCLK125: 0x0
+ _ R_8G_REFCLK125: 0x3
+ \</pre\>
+
+ For SATA, [CTLE] should always be 0. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_px_mode_0_s cn; */
+};
+typedef union bdk_gserx_lane_px_mode_0 bdk_gserx_lane_px_mode_0_t;
+
+/* Return the CSR address of GSER(a)_LANE_P(b)_MODE_0 for the running chip
+   model; calls __bdk_csr_fatal() when (a,b) is out of range for that model. */
+static inline uint64_t BDK_GSERX_LANE_PX_MODE_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_PX_MODE_0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=11)))
+ return 0x87e0904e0040ll + 0x1000000ll * ((a) & 0x3) + 0x20ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=11)))
+ return 0x87e0904e0040ll + 0x1000000ll * ((a) & 0x7) + 0x20ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=11)))
+ return 0x87e0904e0040ll + 0x1000000ll * ((a) & 0xf) + 0x20ll * ((b) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_PX_MODE_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_PX_MODE_0(a,b) bdk_gserx_lane_px_mode_0_t
+#define bustype_BDK_GSERX_LANE_PX_MODE_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_PX_MODE_0(a,b) "GSERX_LANE_PX_MODE_0"
+#define device_bar_BDK_GSERX_LANE_PX_MODE_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_PX_MODE_0(a,b) (a)
+#define arguments_BDK_GSERX_LANE_PX_MODE_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane_p#_mode_1
+ *
+ * GSER Lane Protocol Mode 1 Register
+ * These are the RAW PCS lane settings mode 1 registers. There is one register per 4 lanes,
+ * (0..3) per GSER per GSER_LMODE_E value (0..11). Only one entry is used at any given time in a
+ * given
+ * GSER lane - the one selected by the corresponding GSER()_LANE_MODE[LMODE].
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_px_mode_1
+{
+ uint64_t u;
+ struct bdk_gserx_lane_px_mode_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t vma_fine_cfg_sel : 1; /**< [ 15: 15](R/W/H) Recommended settings:
+ 0 = Disabled. Coarse step adaptation selected (rates lower than 10.3125 Gbaud).
+ 1 = Enabled. Fine step adaptation selected (10.3125 Gbaud rate).
+
+ For SATA, [VMA_FINE_CFG_SEL] should always be 0. */
+ uint64_t vma_mm : 1; /**< [ 14: 14](R/W/H) Manual DFE versus adaptive DFE mode.
+
+ Recommended settings:
+ 0 = Adaptive DFE (5 Gbaud and higher).
+ 1 = Manual DFE, fixed tap (3.125 Gbaud and lower).
+
+ For SATA, [VMA_MM] should always be 1. */
+ uint64_t cdr_fgain : 4; /**< [ 13: 10](R/W/H) CDR frequency gain.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0xA
+ _ R_5G_REFCLK100: 0xA
+ _ R_8G_REFCLK100: 0xB
+ _ R_125G_REFCLK15625_KX: 0xC
+ _ R_3125G_REFCLK15625_XAUI: 0xC
+ _ R_103125G_REFCLK15625_KR: 0xA
+ _ R_125G_REFCLK15625_SGMII: 0xC
+ _ R_5G_REFCLK15625_QSGMII: 0xC
+ _ R_625G_REFCLK15625_RXAUI: 0xA
+ _ R_25G_REFCLK125: 0xA
+ _ R_5G_REFCLK125: 0xA
+ _ R_8G_REFCLK125: 0xB
+ \</pre\>
+
+ For SATA, [CDR_FGAIN] should always be 0xA. */
+ uint64_t ph_acc_adj : 10; /**< [ 9: 0](R/W/H) Phase accumulator adjust.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x14
+ _ R_5G_REFCLK100: 0x14
+ _ R_8G_REFCLK100: 0x23
+ _ R_125G_REFCLK15625_KX: 0x1E
+ _ R_3125G_REFCLK15625_XAUI: 0x1E
+ _ R_103125G_REFCLK15625_KR: 0xF
+ _ R_125G_REFCLK15625_SGMII: 0x1E
+ _ R_5G_REFCLK15625_QSGMII: 0x1E
+ _ R_625G_REFCLK15625_RXAUI: 0x14
+ _ R_25G_REFCLK125: 0x14
+ _ R_5G_REFCLK125: 0x14
+ _ R_8G_REFCLK125: 0x23
+ \</pre\>
+
+ For SATA, [PH_ACC_ADJ] should always be 0x15.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t ph_acc_adj : 10; /**< [ 9: 0](R/W/H) Phase accumulator adjust.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0x14
+ _ R_5G_REFCLK100: 0x14
+ _ R_8G_REFCLK100: 0x23
+ _ R_125G_REFCLK15625_KX: 0x1E
+ _ R_3125G_REFCLK15625_XAUI: 0x1E
+ _ R_103125G_REFCLK15625_KR: 0xF
+ _ R_125G_REFCLK15625_SGMII: 0x1E
+ _ R_5G_REFCLK15625_QSGMII: 0x1E
+ _ R_625G_REFCLK15625_RXAUI: 0x14
+ _ R_25G_REFCLK125: 0x14
+ _ R_5G_REFCLK125: 0x14
+ _ R_8G_REFCLK125: 0x23
+ \</pre\>
+
+ For SATA, [PH_ACC_ADJ] should always be 0x15.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t cdr_fgain : 4; /**< [ 13: 10](R/W/H) CDR frequency gain.
+
+ Recommended settings:
+
+ \<pre\>
+ _ R_25G_REFCLK100: 0xA
+ _ R_5G_REFCLK100: 0xA
+ _ R_8G_REFCLK100: 0xB
+ _ R_125G_REFCLK15625_KX: 0xC
+ _ R_3125G_REFCLK15625_XAUI: 0xC
+ _ R_103125G_REFCLK15625_KR: 0xA
+ _ R_125G_REFCLK15625_SGMII: 0xC
+ _ R_5G_REFCLK15625_QSGMII: 0xC
+ _ R_625G_REFCLK15625_RXAUI: 0xA
+ _ R_25G_REFCLK125: 0xA
+ _ R_5G_REFCLK125: 0xA
+ _ R_8G_REFCLK125: 0xB
+ \</pre\>
+
+ For SATA, [CDR_FGAIN] should always be 0xA. */
+ uint64_t vma_mm : 1; /**< [ 14: 14](R/W/H) Manual DFE versus adaptive DFE mode.
+
+ Recommended settings:
+ 0 = Adaptive DFE (5 Gbaud and higher).
+ 1 = Manual DFE, fixed tap (3.125 Gbaud and lower).
+
+ For SATA, [VMA_MM] should always be 1. */
+ uint64_t vma_fine_cfg_sel : 1; /**< [ 15: 15](R/W/H) Recommended settings:
+ 0 = Disabled. Coarse step adaptation selected (rates lower than 10.3125 Gbaud).
+ 1 = Enabled. Fine step adaptation selected (10.3125 Gbaud rate).
+
+ For SATA, [VMA_FINE_CFG_SEL] should always be 0. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_px_mode_1_s cn; */
+};
+typedef union bdk_gserx_lane_px_mode_1 bdk_gserx_lane_px_mode_1_t;
+
+/* Return the CSR address of GSER(a)_LANE_P(b)_MODE_1 for the running chip
+   model; calls __bdk_csr_fatal() when (a,b) is out of range for that model. */
+static inline uint64_t BDK_GSERX_LANE_PX_MODE_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_PX_MODE_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=11)))
+ return 0x87e0904e0048ll + 0x1000000ll * ((a) & 0x3) + 0x20ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=11)))
+ return 0x87e0904e0048ll + 0x1000000ll * ((a) & 0x7) + 0x20ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=11)))
+ return 0x87e0904e0048ll + 0x1000000ll * ((a) & 0xf) + 0x20ll * ((b) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_PX_MODE_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_PX_MODE_1(a,b) bdk_gserx_lane_px_mode_1_t
+#define bustype_BDK_GSERX_LANE_PX_MODE_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_PX_MODE_1(a,b) "GSERX_LANE_PX_MODE_1"
+#define device_bar_BDK_GSERX_LANE_PX_MODE_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_PX_MODE_1(a,b) (a)
+#define arguments_BDK_GSERX_LANE_PX_MODE_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_lane_poff
+ *
+ * GSER Lane Power Off Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_poff
+{
+ uint64_t u;
+ struct bdk_gserx_lane_poff_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t lpoff : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, allows for per-lane power down.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t lpoff : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, allows for per-lane power down.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_poff_s cn81xx; */
+ struct bdk_gserx_lane_poff_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t lpoff : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode (including all CCPI links), allows for per-lane power
+ down.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t lpoff : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode (including all CCPI links), allows for per-lane power
+ down.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_lane_poff_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t lpoff : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, allows for per-lane power
+ down.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t lpoff : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, allows for per-lane power
+ down.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_lane_poff bdk_gserx_lane_poff_t;
+
+/* Return the CSR address of GSER(a)_LANE_POFF for the running chip model;
+   calls __bdk_csr_fatal() when the GSER index is out of range. */
+static inline uint64_t BDK_GSERX_LANE_POFF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_POFF(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000108ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000108ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000108ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_POFF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_POFF(a) bdk_gserx_lane_poff_t
+#define bustype_BDK_GSERX_LANE_POFF(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_POFF(a) "GSERX_LANE_POFF"
+#define device_bar_BDK_GSERX_LANE_POFF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_POFF(a) (a)
+#define arguments_BDK_GSERX_LANE_POFF(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_srst
+ *
+ * GSER Lane Soft Reset Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_srst
+{
+ uint64_t u;
+ struct bdk_gserx_lane_srst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t lsrst : 1; /**< [ 0: 0](R/W) For links that are not in PCIE or SATA mode, resets all lanes
+ (equivalent to the P2 power state) after any pending requests (power state change, rate
+ change) are complete. The lanes remain in reset state while this signal is asserted. When
+ the signal deasserts, the lanes exit the reset state and the PHY returns to the power
+ state the PHY was in prior to the reset. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t lsrst : 1; /**< [ 0: 0](R/W) For links that are not in PCIE or SATA mode, resets all lanes
+ (equivalent to the P2 power state) after any pending requests (power state change, rate
+ change) are complete. The lanes remain in reset state while this signal is asserted. When
+ the signal deasserts, the lanes exit the reset state and the PHY returns to the power
+ state the PHY was in prior to the reset. For diagnostic use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_srst_s cn81xx; */
+ struct bdk_gserx_lane_srst_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t lsrst : 1; /**< [ 0: 0](R/W) For links that are not in PCIE or SATA mode (including all CCPI links), resets all lanes
+ (equivalent to the P2 power state) after any pending requests (power state change, rate
+ change) are complete. The lanes remain in reset state while this signal is asserted. When
+ the signal deasserts, the lanes exit the reset state and the PHY returns to the power
+ state the PHY was in prior to the reset. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t lsrst : 1; /**< [ 0: 0](R/W) For links that are not in PCIE or SATA mode (including all CCPI links), resets all lanes
+ (equivalent to the P2 power state) after any pending requests (power state change, rate
+ change) are complete. The lanes remain in reset state while this signal is asserted. When
+ the signal deasserts, the lanes exit the reset state and the PHY returns to the power
+ state the PHY was in prior to the reset. For diagnostic use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gserx_lane_srst_s cn83xx; */
+};
+typedef union bdk_gserx_lane_srst bdk_gserx_lane_srst_t;
+
+/* Return the CSR address of GSER(a)_LANE_SRST for the running chip model;
+   calls __bdk_csr_fatal() when the GSER index is out of range. */
+static inline uint64_t BDK_GSERX_LANE_SRST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_SRST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000100ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000100ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000100ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_SRST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_SRST(a) bdk_gserx_lane_srst_t
+#define bustype_BDK_GSERX_LANE_SRST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_SRST(a) "GSERX_LANE_SRST"
+#define device_bar_BDK_GSERX_LANE_SRST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_SRST(a) (a)
+#define arguments_BDK_GSERX_LANE_SRST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_vma_coarse_ctrl_0
+ *
+ * GSER Lane VMA Coarse Control 0 Register
+ * Sets the slice DLL IQ search range/step and the wait times used by the VMA
+ * coarse-mode adaptation state machine.
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_vma_coarse_ctrl_0
+{
+ uint64_t u;
+ struct bdk_gserx_lane_vma_coarse_ctrl_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t iq_max : 4; /**< [ 15: 12](R/W) Slice DLL IQ maximum value in VMA coarse mode. */
+ uint64_t iq_min : 4; /**< [ 11: 8](R/W) Slice DLL IQ minimum value in VMA coarse mode. */
+ uint64_t iq_step : 2; /**< [ 7: 6](R/W) Slice DLL IQ step size in VMA coarse mode. */
+ uint64_t window_wait : 3; /**< [ 5: 3](R/W) Adaptation window wait setting in VMA coarse mode. */
+ uint64_t lms_wait : 3; /**< [ 2: 0](R/W/H) LMS wait time setting used to control the number of samples taken during the collection of
+ statistics in VMA coarse mode. */
+#else /* Word 0 - Little Endian */
+ uint64_t lms_wait : 3; /**< [ 2: 0](R/W/H) LMS wait time setting used to control the number of samples taken during the collection of
+ statistics in VMA coarse mode. */
+ uint64_t window_wait : 3; /**< [ 5: 3](R/W) Adaptation window wait setting in VMA coarse mode. */
+ uint64_t iq_step : 2; /**< [ 7: 6](R/W) Slice DLL IQ step size in VMA coarse mode. */
+ uint64_t iq_min : 4; /**< [ 11: 8](R/W) Slice DLL IQ minimum value in VMA coarse mode. */
+ uint64_t iq_max : 4; /**< [ 15: 12](R/W) Slice DLL IQ maximum value in VMA coarse mode. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_vma_coarse_ctrl_0_s cn; */
+};
+typedef union bdk_gserx_lane_vma_coarse_ctrl_0 bdk_gserx_lane_vma_coarse_ctrl_0_t;
+
+/* Return the CSR address of GSER(a)_LANE_VMA_COARSE_CTRL_0 for the running
+   chip model; calls __bdk_csr_fatal() when the GSER index is out of range. */
+static inline uint64_t BDK_GSERX_LANE_VMA_COARSE_CTRL_0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_VMA_COARSE_CTRL_0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0904e01b0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0904e01b0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0904e01b0ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_VMA_COARSE_CTRL_0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_VMA_COARSE_CTRL_0(a) bdk_gserx_lane_vma_coarse_ctrl_0_t
+#define bustype_BDK_GSERX_LANE_VMA_COARSE_CTRL_0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_VMA_COARSE_CTRL_0(a) "GSERX_LANE_VMA_COARSE_CTRL_0"
+#define device_bar_BDK_GSERX_LANE_VMA_COARSE_CTRL_0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_VMA_COARSE_CTRL_0(a) (a)
+#define arguments_BDK_GSERX_LANE_VMA_COARSE_CTRL_0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_vma_coarse_ctrl_1
+ *
+ * GSER Lane VMA Coarse Control 1 Register
+ * Sets the RX CTLE peak search range/step used by the VMA coarse-mode adaptation.
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_vma_coarse_ctrl_1
+{
+ uint64_t u;
+ struct bdk_gserx_lane_vma_coarse_ctrl_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t ctle_pmax : 4; /**< [ 9: 6](R/W) RX CTLE peak maximum value in VMA coarse mode. */
+ uint64_t ctle_pmin : 4; /**< [ 5: 2](R/W) RX CTLE peak minimum value in VMA coarse mode. */
+ uint64_t ctle_pstep : 2; /**< [ 1: 0](R/W) CTLE peak step size in VMA coarse mode. */
+#else /* Word 0 - Little Endian */
+ uint64_t ctle_pstep : 2; /**< [ 1: 0](R/W) CTLE peak step size in VMA coarse mode. */
+ uint64_t ctle_pmin : 4; /**< [ 5: 2](R/W) RX CTLE peak minimum value in VMA coarse mode. */
+ uint64_t ctle_pmax : 4; /**< [ 9: 6](R/W) RX CTLE peak maximum value in VMA coarse mode. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_vma_coarse_ctrl_1_s cn; */
+};
+typedef union bdk_gserx_lane_vma_coarse_ctrl_1 bdk_gserx_lane_vma_coarse_ctrl_1_t;
+
+/* Return the CSR address of GSER(a)_LANE_VMA_COARSE_CTRL_1 for the running
+   chip model; calls __bdk_csr_fatal() when the GSER index is out of range. */
+static inline uint64_t BDK_GSERX_LANE_VMA_COARSE_CTRL_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_VMA_COARSE_CTRL_1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0904e01b8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0904e01b8ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0904e01b8ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_VMA_COARSE_CTRL_1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_VMA_COARSE_CTRL_1(a) bdk_gserx_lane_vma_coarse_ctrl_1_t
+#define bustype_BDK_GSERX_LANE_VMA_COARSE_CTRL_1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_VMA_COARSE_CTRL_1(a) "GSERX_LANE_VMA_COARSE_CTRL_1"
+#define device_bar_BDK_GSERX_LANE_VMA_COARSE_CTRL_1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_VMA_COARSE_CTRL_1(a) (a)
+#define arguments_BDK_GSERX_LANE_VMA_COARSE_CTRL_1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_vma_coarse_ctrl_2
+ *
+ * GSER Lane VMA Coarse Control 2 Register
+ * (The original title said "Fine"; this is the coarse control 2 register,
+ * matching the register name and the "coarse mode" field descriptions below.)
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_vma_coarse_ctrl_2
+{
+ uint64_t u;
+ struct bdk_gserx_lane_vma_coarse_ctrl_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t pctle_gmax : 4; /**< [ 9: 6](R/W) RX PRE-CTLE gain maximum value in VMA coarse mode. */
+ uint64_t pctle_gmin : 4; /**< [ 5: 2](R/W) RX PRE-CTLE gain minimum value in VMA coarse mode. */
+ uint64_t pctle_gstep : 2; /**< [ 1: 0](R/W) CTLE PRE-peak gain step size in VMA coarse mode. */
+#else /* Word 0 - Little Endian */
+ uint64_t pctle_gstep : 2; /**< [ 1: 0](R/W) CTLE PRE-peak gain step size in VMA coarse mode. */
+ uint64_t pctle_gmin : 4; /**< [ 5: 2](R/W) RX PRE-CTLE gain minimum value in VMA coarse mode. */
+ uint64_t pctle_gmax : 4; /**< [ 9: 6](R/W) RX PRE-CTLE gain maximum value in VMA coarse mode. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_vma_coarse_ctrl_2_s cn; */
+};
+typedef union bdk_gserx_lane_vma_coarse_ctrl_2 bdk_gserx_lane_vma_coarse_ctrl_2_t;
+
+/* Return the CSR address of GSER(a)_LANE_VMA_COARSE_CTRL_2 for the running
+   chip model; calls __bdk_csr_fatal() when the GSER index is out of range. */
+static inline uint64_t BDK_GSERX_LANE_VMA_COARSE_CTRL_2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_VMA_COARSE_CTRL_2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0904e01c0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0904e01c0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0904e01c0ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_VMA_COARSE_CTRL_2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_VMA_COARSE_CTRL_2(a) bdk_gserx_lane_vma_coarse_ctrl_2_t
+#define bustype_BDK_GSERX_LANE_VMA_COARSE_CTRL_2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_VMA_COARSE_CTRL_2(a) "GSERX_LANE_VMA_COARSE_CTRL_2"
+#define device_bar_BDK_GSERX_LANE_VMA_COARSE_CTRL_2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_VMA_COARSE_CTRL_2(a) (a)
+#define arguments_BDK_GSERX_LANE_VMA_COARSE_CTRL_2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_vma_fine_ctrl_0
+ *
+ * GSER Lane VMA Fine Control 0 Register
+ * Sets the slice DLL IQ search range/step and the wait times used by the VMA
+ * fine-mode adaptation (active when GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1).
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_vma_fine_ctrl_0
+{
+ uint64_t u;
+ struct bdk_gserx_lane_vma_fine_ctrl_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t rx_sdll_iq_max_fine : 4; /**< [ 15: 12](R/W) RX slice DLL IQ maximum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and
+ GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_sdll_iq_min_fine : 4; /**< [ 11: 8](R/W) RX slice DLL IQ minimum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and
+ GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_sdll_iq_step_fine : 2; /**< [ 7: 6](R/W) RX slice DLL IQ step size in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and
+ GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t vma_window_wait_fine : 3; /**< [ 5: 3](R/W) Adaptation window wait setting (in VMA fine mode); used to control the number of samples
+ taken during the collection of statistics (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t lms_wait_time_fine : 3; /**< [ 2: 0](R/W) LMS wait time setting (in VMA fine mode); used to control the number of samples taken
+ during the collection of statistics (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+#else /* Word 0 - Little Endian */
+ uint64_t lms_wait_time_fine : 3; /**< [ 2: 0](R/W) LMS wait time setting (in VMA fine mode); used to control the number of samples taken
+ during the collection of statistics (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t vma_window_wait_fine : 3; /**< [ 5: 3](R/W) Adaptation window wait setting (in VMA fine mode); used to control the number of samples
+ taken during the collection of statistics (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_sdll_iq_step_fine : 2; /**< [ 7: 6](R/W) RX slice DLL IQ step size in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and
+ GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_sdll_iq_min_fine : 4; /**< [ 11: 8](R/W) RX slice DLL IQ minimum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and
+ GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_sdll_iq_max_fine : 4; /**< [ 15: 12](R/W) RX slice DLL IQ maximum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and
+ GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_vma_fine_ctrl_0_s cn; */
+};
+typedef union bdk_gserx_lane_vma_fine_ctrl_0 bdk_gserx_lane_vma_fine_ctrl_0_t;
+
+/* Return the CSR address of GSER(a)_LANE_VMA_FINE_CTRL_0 for the running
+   chip model; calls __bdk_csr_fatal() when the GSER index is out of range. */
+static inline uint64_t BDK_GSERX_LANE_VMA_FINE_CTRL_0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_VMA_FINE_CTRL_0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0904e01c8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0904e01c8ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0904e01c8ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_VMA_FINE_CTRL_0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_VMA_FINE_CTRL_0(a) bdk_gserx_lane_vma_fine_ctrl_0_t
+#define bustype_BDK_GSERX_LANE_VMA_FINE_CTRL_0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_VMA_FINE_CTRL_0(a) "GSERX_LANE_VMA_FINE_CTRL_0"
+#define device_bar_BDK_GSERX_LANE_VMA_FINE_CTRL_0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_VMA_FINE_CTRL_0(a) (a)
+#define arguments_BDK_GSERX_LANE_VMA_FINE_CTRL_0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_vma_fine_ctrl_1
+ *
+ * GSER Lane VMA Fine Control 1 Register
+ * Sets the RX CTLE peak search range/step used by the VMA fine-mode adaptation
+ * (active when GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1).
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_vma_fine_ctrl_1
+{
+ uint64_t u;
+ struct bdk_gserx_lane_vma_fine_ctrl_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t rx_ctle_peak_max_fine : 4; /**< [ 9: 6](R/W) RX CTLE peak maximum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_ctle_peak_min_fine : 4; /**< [ 5: 2](R/W) RX CTLE peak minimum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_ctle_peak_step_fine : 2; /**< [ 1: 0](R/W) RX CTLE peak step size in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_ctle_peak_step_fine : 2; /**< [ 1: 0](R/W) RX CTLE peak step size in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_ctle_peak_min_fine : 4; /**< [ 5: 2](R/W) RX CTLE peak minimum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_ctle_peak_max_fine : 4; /**< [ 9: 6](R/W) RX CTLE peak maximum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_vma_fine_ctrl_1_s cn; */
+};
+typedef union bdk_gserx_lane_vma_fine_ctrl_1 bdk_gserx_lane_vma_fine_ctrl_1_t;
+
+/* Return the CSR address of GSER(a)_LANE_VMA_FINE_CTRL_1 for the running
+   chip model; calls __bdk_csr_fatal() when the GSER index is out of range. */
+static inline uint64_t BDK_GSERX_LANE_VMA_FINE_CTRL_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_VMA_FINE_CTRL_1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0904e01d0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0904e01d0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0904e01d0ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_VMA_FINE_CTRL_1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_VMA_FINE_CTRL_1(a) bdk_gserx_lane_vma_fine_ctrl_1_t
+#define bustype_BDK_GSERX_LANE_VMA_FINE_CTRL_1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_VMA_FINE_CTRL_1(a) "GSERX_LANE_VMA_FINE_CTRL_1"
+#define device_bar_BDK_GSERX_LANE_VMA_FINE_CTRL_1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_VMA_FINE_CTRL_1(a) (a)
+#define arguments_BDK_GSERX_LANE_VMA_FINE_CTRL_1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_lane_vma_fine_ctrl_2
+ *
+ * GSER Lane VMA Fine Control 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_lane_vma_fine_ctrl_2
+{
+ uint64_t u;
+ struct bdk_gserx_lane_vma_fine_ctrl_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t rx_prectle_gain_max_fine : 4;/**< [ 9: 6](R/W) RX PRE-CTLE gain maximum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_prectle_gain_min_fine : 4;/**< [ 5: 2](R/W) RX PRE-CTLE gain minimum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_prectle_gain_step_fine : 2;/**< [ 1: 0](R/W) RX PRE-CTLE gain step size in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_prectle_gain_step_fine : 2;/**< [ 1: 0](R/W) RX PRE-CTLE gain step size in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_prectle_gain_min_fine : 4;/**< [ 5: 2](R/W) RX PRE-CTLE gain minimum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t rx_prectle_gain_max_fine : 4;/**< [ 9: 6](R/W) RX PRE-CTLE gain maximum value in VMA fine mode (valid when
+ GSER()_LANE_P()_MODE_1[VMA_FINE_CFG_SEL]=1 and GSER()_LANE_P()_MODE_1[VMA_MM]=0). */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_lane_vma_fine_ctrl_2_s cn; */
+};
+typedef union bdk_gserx_lane_vma_fine_ctrl_2 bdk_gserx_lane_vma_fine_ctrl_2_t;
+
+/* Compute the RSL address of GSER(a)_LANE_VMA_FINE_CTRL_2. Valid GSER
+   instance ranges are model dependent (CN81XX: a<=3, CN83XX: a<=6,
+   CN88XX: a<=13); any other index falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_LANE_VMA_FINE_CTRL_2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_LANE_VMA_FINE_CTRL_2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0904e01d8ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0904e01d8ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0904e01d8ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_LANE_VMA_FINE_CTRL_2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_LANE_VMA_FINE_CTRL_2(a) bdk_gserx_lane_vma_fine_ctrl_2_t
+#define bustype_BDK_GSERX_LANE_VMA_FINE_CTRL_2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_LANE_VMA_FINE_CTRL_2(a) "GSERX_LANE_VMA_FINE_CTRL_2"
+#define device_bar_BDK_GSERX_LANE_VMA_FINE_CTRL_2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_LANE_VMA_FINE_CTRL_2(a) (a)
+#define arguments_BDK_GSERX_LANE_VMA_FINE_CTRL_2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_phy_ctl
+ *
+ * GSER PHY Control Register
+ * This register contains general PHY/PLL control of the RAW PCS.
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_phy_ctl
+{
+ uint64_t u;
+ struct bdk_gserx_phy_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t phy_reset : 1; /**< [ 1: 1](R/W/H) When asserted, the PHY is held in reset. This bit is initialized as follows:
+ 0 = (not reset) = Bootable PCIe.
+ 1 = (reset) = Non-bootable PCIe, BGX, or SATA. */
+ uint64_t phy_pd : 1; /**< [ 0: 0](R/W) When asserted, the PHY is powered down. */
+#else /* Word 0 - Little Endian */
+ uint64_t phy_pd : 1; /**< [ 0: 0](R/W) When asserted, the PHY is powered down. */
+ uint64_t phy_reset : 1; /**< [ 1: 1](R/W/H) When asserted, the PHY is held in reset. This bit is initialized as follows:
+ 0 = (not reset) = Bootable PCIe.
+ 1 = (reset) = Non-bootable PCIe, BGX, or SATA. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_phy_ctl_s cn81xx; */
+ /* CN88XX adds CCPI wording to [PHY_RESET]; the bit layout is identical to the
+    generic "s" view, only the field descriptions differ. */
+ struct bdk_gserx_phy_ctl_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t phy_reset : 1; /**< [ 1: 1](R/W/H) When asserted, the PHY is held in reset. This bit is initialized as follows:
+ 0 = (not reset) = Bootable PCIe, or CCPI when GSER(8..13)_SPD[SPD] comes up in a bootable
+ mode.
+ 1 = (reset) = Non-bootable PCIe, BGX, SATA or CCPI when GSER(8..13)_SPD[SPD] comes up in
+ SW_MODE. */
+ uint64_t phy_pd : 1; /**< [ 0: 0](R/W) When asserted, the PHY is powered down. */
+#else /* Word 0 - Little Endian */
+ uint64_t phy_pd : 1; /**< [ 0: 0](R/W) When asserted, the PHY is powered down. */
+ uint64_t phy_reset : 1; /**< [ 1: 1](R/W/H) When asserted, the PHY is held in reset. This bit is initialized as follows:
+ 0 = (not reset) = Bootable PCIe, or CCPI when GSER(8..13)_SPD[SPD] comes up in a bootable
+ mode.
+ 1 = (reset) = Non-bootable PCIe, BGX, SATA or CCPI when GSER(8..13)_SPD[SPD] comes up in
+ SW_MODE. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gserx_phy_ctl_s cn83xx; */
+};
+typedef union bdk_gserx_phy_ctl bdk_gserx_phy_ctl_t;
+
+/* Compute the RSL address of GSER(a)_PHY_CTL. Valid GSER instance ranges are
+   model dependent (CN81XX: a<=3, CN83XX: a<=6, CN88XX: a<=13); any other
+   index falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_PHY_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_PHY_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000000ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000000ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000000ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_PHY_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_PHY_CTL(a) bdk_gserx_phy_ctl_t
+#define bustype_BDK_GSERX_PHY_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_PHY_CTL(a) "GSERX_PHY_CTL"
+#define device_bar_BDK_GSERX_PHY_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_PHY_CTL(a) (a)
+#define arguments_BDK_GSERX_PHY_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_pipe_lpbk
+ *
+ * GSER PCIE PCS PIPE Loopback Register
+ */
+union bdk_gserx_pipe_lpbk
+{
+ uint64_t u;
+ struct bdk_gserx_pipe_lpbk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t pcie_lpbk : 1; /**< [ 0: 0](R/W) For links that are in PCIE mode, places the PHY in serial loopback mode, where the
+ QLMn_TXN/QLMn_TXP data are looped back to the QLMn_RXN/QLMn_RXP.
+
+ This register has no meaning for links that don't support PCIe. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_lpbk : 1; /**< [ 0: 0](R/W) For links that are in PCIE mode, places the PHY in serial loopback mode, where the
+ QLMn_TXN/QLMn_TXP data are looped back to the QLMn_RXN/QLMn_RXP.
+
+ This register has no meaning for links that don't support PCIe. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_pipe_lpbk_s cn; */
+};
+typedef union bdk_gserx_pipe_lpbk bdk_gserx_pipe_lpbk_t;
+
+/* Compute the RSL address of GSER(a)_PIPE_LPBK. Valid GSER instance ranges
+   are model dependent (CN81XX: a<=3, CN83XX: a<=6, CN88XX: a<=13); any other
+   index falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_PIPE_LPBK(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_PIPE_LPBK(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000200ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000200ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000200ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_PIPE_LPBK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_PIPE_LPBK(a) bdk_gserx_pipe_lpbk_t
+#define bustype_BDK_GSERX_PIPE_LPBK(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_PIPE_LPBK(a) "GSERX_PIPE_LPBK"
+#define device_bar_BDK_GSERX_PIPE_LPBK(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_PIPE_LPBK(a) (a)
+#define arguments_BDK_GSERX_PIPE_LPBK(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_pll_p#_mode_0
+ *
+ * GSER PLL Protocol Mode 0 Register
+ * These are the RAW PCS PLL global settings mode 0 registers. There is one register per GSER per
+ * GSER_LMODE_E value (0..11). Only one entry is used at any given time in a given GSER - the one
+ * selected by the corresponding GSER()_LANE_MODE[LMODE].
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during subsequent chip warm or
+ * soft resets.
+ */
+union bdk_gserx_pll_px_mode_0
+{
+ uint64_t u;
+ struct bdk_gserx_pll_px_mode_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pll_icp : 4; /**< [ 15: 12](R/W/H) PLL charge pump enable.
+
+ Recommended settings, which are based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x1 0x1 0x1
+ 2.5G: 0x4 0x3 0x3
+ 3.125G: NS 0x1 0x1
+ 5.0G: 0x1 0x1 0x1
+ 6.25G: NS 0x1 0x1
+ 8.0G: 0x3 0x2 NS
+ 10.3125G: NS NS 0x1
+ \</pre\>
+
+ For SATA, [PLL_ICP] should always be 1.
+ For PCIE 1.1 @100 MHz, [PLL_ICP] should be 4.
+ For PCIE 2.1 @100 MHz, [PLL_ICP] should be 4.
+ For PCIE 1.1 @125 MHz, [PLL_ICP] should be 3.
+ For PCIE 2.1 @125 MHz, [PLL_ICP] should be 3.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_rloop : 3; /**< [ 11: 9](R/W/H) Loop resistor tuning.
+
+ Recommended settings:
+
+ \<pre\>
+ _ 1.25G: 0x3
+ _ 2.5G: 0x3
+ _ 3.125G: 0x3
+ _ 5.0G: 0x3
+ _ 6.25G: 0x3
+ _ 8.0G: 0x5
+ _ 10.3125G: 0x5
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_RLOOP] should always be 3. */
+ uint64_t pll_pcs_div : 9; /**< [ 8: 0](R/W/H) The divider that generates PCS_MAC_TX_CLK. The frequency of the clock is (pll_frequency /
+ PLL_PCS_DIV).
+
+ Recommended settings:
+
+ \<pre\>
+ PCIE Other
+ _ 1.25G: NS 0x28
+ _ 2.5G: 0x5 0x5
+ _ 3.125G: NS 0x14
+ _ 5.0G: 0x5 0xA
+ _ 6.25G: NS 0xA
+ _ 8.0G: 0x8 0xA
+ _ 10.3125G: NS 0xA
+ \</pre\>
+
+ For SATA, [PLL_PCS_DIV] should always be 5.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t pll_pcs_div : 9; /**< [ 8: 0](R/W/H) The divider that generates PCS_MAC_TX_CLK. The frequency of the clock is (pll_frequency /
+ PLL_PCS_DIV).
+
+ Recommended settings:
+
+ \<pre\>
+ PCIE Other
+ _ 1.25G: NS 0x28
+ _ 2.5G: 0x5 0x5
+ _ 3.125G: NS 0x14
+ _ 5.0G: 0x5 0xA
+ _ 6.25G: NS 0xA
+ _ 8.0G: 0x8 0xA
+ _ 10.3125G: NS 0xA
+ \</pre\>
+
+ For SATA, [PLL_PCS_DIV] should always be 5.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_rloop : 3; /**< [ 11: 9](R/W/H) Loop resistor tuning.
+
+ Recommended settings:
+
+ \<pre\>
+ _ 1.25G: 0x3
+ _ 2.5G: 0x3
+ _ 3.125G: 0x3
+ _ 5.0G: 0x3
+ _ 6.25G: 0x3
+ _ 8.0G: 0x5
+ _ 10.3125G: 0x5
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_RLOOP] should always be 3. */
+ uint64_t pll_icp : 4; /**< [ 15: 12](R/W/H) PLL charge pump enable.
+
+ Recommended settings, which are based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x1 0x1 0x1
+ 2.5G: 0x4 0x3 0x3
+ 3.125G: NS 0x1 0x1
+ 5.0G: 0x1 0x1 0x1
+ 6.25G: NS 0x1 0x1
+ 8.0G: 0x3 0x2 NS
+ 10.3125G: NS NS 0x1
+ \</pre\>
+
+ For SATA, [PLL_ICP] should always be 1.
+ For PCIE 1.1 @100 MHz, [PLL_ICP] should be 4.
+ For PCIE 2.1 @100 MHz, [PLL_ICP] should be 4.
+ For PCIE 1.1 @125 MHz, [PLL_ICP] should be 3.
+ For PCIE 2.1 @125 MHz, [PLL_ICP] should be 3.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_pll_px_mode_0_s cn81xx; */
+ /* struct bdk_gserx_pll_px_mode_0_s cn88xx; */
+ /* CN83XX view: same bit layout as the generic "s" view; only the field
+    description text differs (spacing of "@100MHz"/"@125MHz"). */
+ struct bdk_gserx_pll_px_mode_0_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pll_icp : 4; /**< [ 15: 12](R/W/H) PLL charge pump enable.
+
+ Recommended settings, which are based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x1 0x1 0x1
+ 2.5G: 0x4 0x3 0x3
+ 3.125G: NS 0x1 0x1
+ 5.0G: 0x1 0x1 0x1
+ 6.25G: NS 0x1 0x1
+ 8.0G: 0x3 0x2 NS
+ 10.3125G: NS NS 0x1
+ \</pre\>
+
+ For SATA, [PLL_ICP] should always be 1.
+ For PCIE 1.1 @100MHz, [PLL_ICP] should be 4.
+ For PCIE 2.1 @100MHz, [PLL_ICP] should be 4.
+ For PCIE 1.1 @125MHz, [PLL_ICP] should be 3.
+ For PCIE 2.1 @125MHz, [PLL_ICP] should be 3.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_rloop : 3; /**< [ 11: 9](R/W/H) Loop resistor tuning.
+
+ Recommended settings:
+
+ \<pre\>
+ _ 1.25G: 0x3
+ _ 2.5G: 0x3
+ _ 3.125G: 0x3
+ _ 5.0G: 0x3
+ _ 6.25G: 0x3
+ _ 8.0G: 0x5
+ _ 10.3125G: 0x5
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_RLOOP] should always be 3. */
+ uint64_t pll_pcs_div : 9; /**< [ 8: 0](R/W/H) The divider that generates PCS_MAC_TX_CLK. The frequency of the clock is (pll_frequency /
+ PLL_PCS_DIV).
+
+ Recommended settings:
+
+ \<pre\>
+ PCIE Other
+ _ 1.25G: NS 0x28
+ _ 2.5G: 0x5 0x5
+ _ 3.125G: NS 0x14
+ _ 5.0G: 0x5 0xA
+ _ 6.25G: NS 0xA
+ _ 8.0G: 0x8 0xA
+ _ 10.3125G: NS 0xA
+ \</pre\>
+
+ For SATA, [PLL_PCS_DIV] should always be 5.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t pll_pcs_div : 9; /**< [ 8: 0](R/W/H) The divider that generates PCS_MAC_TX_CLK. The frequency of the clock is (pll_frequency /
+ PLL_PCS_DIV).
+
+ Recommended settings:
+
+ \<pre\>
+ PCIE Other
+ _ 1.25G: NS 0x28
+ _ 2.5G: 0x5 0x5
+ _ 3.125G: NS 0x14
+ _ 5.0G: 0x5 0xA
+ _ 6.25G: NS 0xA
+ _ 8.0G: 0x8 0xA
+ _ 10.3125G: NS 0xA
+ \</pre\>
+
+ For SATA, [PLL_PCS_DIV] should always be 5.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_rloop : 3; /**< [ 11: 9](R/W/H) Loop resistor tuning.
+
+ Recommended settings:
+
+ \<pre\>
+ _ 1.25G: 0x3
+ _ 2.5G: 0x3
+ _ 3.125G: 0x3
+ _ 5.0G: 0x3
+ _ 6.25G: 0x3
+ _ 8.0G: 0x5
+ _ 10.3125G: 0x5
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_RLOOP] should always be 3. */
+ uint64_t pll_icp : 4; /**< [ 15: 12](R/W/H) PLL charge pump enable.
+
+ Recommended settings, which are based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x1 0x1 0x1
+ 2.5G: 0x4 0x3 0x3
+ 3.125G: NS 0x1 0x1
+ 5.0G: 0x1 0x1 0x1
+ 6.25G: NS 0x1 0x1
+ 8.0G: 0x3 0x2 NS
+ 10.3125G: NS NS 0x1
+ \</pre\>
+
+ For SATA, [PLL_ICP] should always be 1.
+ For PCIE 1.1 @100MHz, [PLL_ICP] should be 4.
+ For PCIE 2.1 @100MHz, [PLL_ICP] should be 4.
+ For PCIE 1.1 @125MHz, [PLL_ICP] should be 3.
+ For PCIE 2.1 @125MHz, [PLL_ICP] should be 3.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_pll_px_mode_0 bdk_gserx_pll_px_mode_0_t;
+
+/* Compute the RSL address of GSER(a)_PLL_P(b)_MODE_0, where b selects the
+   GSER_LMODE_E protocol entry (0..11). Valid GSER instance ranges are model
+   dependent (CN81XX: a<=3, CN83XX: a<=6, CN88XX: a<=13); invalid (a,b) pairs
+   fall through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_PLL_PX_MODE_0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_PLL_PX_MODE_0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=11)))
+ return 0x87e0904e0030ll + 0x1000000ll * ((a) & 0x3) + 0x20ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=11)))
+ return 0x87e0904e0030ll + 0x1000000ll * ((a) & 0x7) + 0x20ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=11)))
+ return 0x87e0904e0030ll + 0x1000000ll * ((a) & 0xf) + 0x20ll * ((b) & 0xf);
+ __bdk_csr_fatal("GSERX_PLL_PX_MODE_0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_PLL_PX_MODE_0(a,b) bdk_gserx_pll_px_mode_0_t
+#define bustype_BDK_GSERX_PLL_PX_MODE_0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_PLL_PX_MODE_0(a,b) "GSERX_PLL_PX_MODE_0"
+#define device_bar_BDK_GSERX_PLL_PX_MODE_0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_PLL_PX_MODE_0(a,b) (a)
+#define arguments_BDK_GSERX_PLL_PX_MODE_0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_pll_p#_mode_1
+ *
+ * GSER PLL Protocol Mode 1 Register
+ * These are the RAW PCS PLL global settings mode 1 registers. There is one register per GSER per
+ * GSER_LMODE_E value (0..11). Only one entry is used at any given time in a given GSER - the one
+ * selected by the corresponding GSER()_LANE_MODE[LMODE].
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in this register do not change during subsequent chip warm or
+ * soft resets.
+ */
+union bdk_gserx_pll_px_mode_1
+{
+ uint64_t u;
+ struct bdk_gserx_pll_px_mode_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t pll_16p5en : 1; /**< [ 13: 13](R/W/H) Enable for the DIV 16.5 divided down clock.
+
+ Recommended settings, based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x1 0x1 0x1
+ 2.5G: 0x0 0x0 0x0
+ 3.125G: NS 0x1 0x1
+ 5.0G: 0x0 0x0 0x0
+ 6.25G: NS 0x0 0x0
+ 8.0G: 0x0 0x0 NS
+ 10.3125G: NS NS 0x1
+ \</pre\>
+
+ For SATA, [PLL_16P5EN] should always be 0.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_cpadj : 2; /**< [ 12: 11](R/W/H) PLL charge adjust.
+
+ Recommended settings, based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x2 0x2 0x3
+ 2.5G: 0x2 0x1 0x2
+ 3.125G: NS 0x2 0x2
+ 5.0G: 0x2 0x2 0x2
+ 6.25G: NS 0x2 0x2
+ 8.0G: 0x2 0x1 NS
+ 10.3125G: NS NS 0x2
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_CPADJ] should always be 2.
+ For PCIE 1.1 @100MHz, [PLL_CPADJ] should be 2.
+ For PCIE 2.1 @100MHz, [PLL_CPADJ] should be 2.
+ For PCIE 1.1 @125MHz, [PLL_CPADJ] should be 1.
+ For PCIE 2.1 @125MHz, [PLL_CPADJ] should be 1.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_pcie3en : 1; /**< [ 10: 10](R/W/H) Enable PCIE3 mode.
+
+ Recommended settings:
+ 0 = Any rate other than 8 Gbaud.
+ 1 = Rate is equal to 8 Gbaud.
+
+ For SATA, [PLL_PCIE3EN] should always be 0. */
+ uint64_t pll_opr : 1; /**< [ 9: 9](R/W/H) PLL op range:
+ 0 = Use ring oscillator VCO. Recommended for rates 6.25 Gbaud and lower.
+ 1 = Use LC-tank VCO. Recommended for rates 8 Gbaud and higher.
+
+ For SATA, [PLL_OPR] should always be 0. */
+ uint64_t pll_div : 9; /**< [ 8: 0](R/W/H) PLL divider in feedback path which sets the PLL frequency.
+
+ Recommended settings:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x19 0x14 0x10
+ 2.5G: 0x19 0x14 0x10
+ 3.125G: NS 0x19 0x14
+ 5.0G: 0x19 0x14 0x10
+ 6.25G: NS 0x19 0x14
+ 8.0G: 0x28 0x20 NS
+ 10.3125G: NS NS 0x21
+ \</pre\>
+
+ For SATA with 100MHz reference clock, [PLL_DIV] should always be 0x1E.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t pll_div : 9; /**< [ 8: 0](R/W/H) PLL divider in feedback path which sets the PLL frequency.
+
+ Recommended settings:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x19 0x14 0x10
+ 2.5G: 0x19 0x14 0x10
+ 3.125G: NS 0x19 0x14
+ 5.0G: 0x19 0x14 0x10
+ 6.25G: NS 0x19 0x14
+ 8.0G: 0x28 0x20 NS
+ 10.3125G: NS NS 0x21
+ \</pre\>
+
+ For SATA with 100MHz reference clock, [PLL_DIV] should always be 0x1E.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_opr : 1; /**< [ 9: 9](R/W/H) PLL op range:
+ 0 = Use ring oscillator VCO. Recommended for rates 6.25 Gbaud and lower.
+ 1 = Use LC-tank VCO. Recommended for rates 8 Gbaud and higher.
+
+ For SATA, [PLL_OPR] should always be 0. */
+ uint64_t pll_pcie3en : 1; /**< [ 10: 10](R/W/H) Enable PCIE3 mode.
+
+ Recommended settings:
+ 0 = Any rate other than 8 Gbaud.
+ 1 = Rate is equal to 8 Gbaud.
+
+ For SATA, [PLL_PCIE3EN] should always be 0. */
+ uint64_t pll_cpadj : 2; /**< [ 12: 11](R/W/H) PLL charge adjust.
+
+ Recommended settings, based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x2 0x2 0x3
+ 2.5G: 0x2 0x1 0x2
+ 3.125G: NS 0x2 0x2
+ 5.0G: 0x2 0x2 0x2
+ 6.25G: NS 0x2 0x2
+ 8.0G: 0x2 0x1 NS
+ 10.3125G: NS NS 0x2
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_CPADJ] should always be 2.
+ For PCIE 1.1 @100MHz, [PLL_CPADJ] should be 2.
+ For PCIE 2.1 @100MHz, [PLL_CPADJ] should be 2.
+ For PCIE 1.1 @125MHz, [PLL_CPADJ] should be 1.
+ For PCIE 2.1 @125MHz, [PLL_CPADJ] should be 1.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_16p5en : 1; /**< [ 13: 13](R/W/H) Enable for the DIV 16.5 divided down clock.
+
+ Recommended settings, based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x1 0x1 0x1
+ 2.5G: 0x0 0x0 0x0
+ 3.125G: NS 0x1 0x1
+ 5.0G: 0x0 0x0 0x0
+ 6.25G: NS 0x0 0x0
+ 8.0G: 0x0 0x0 NS
+ 10.3125G: NS NS 0x1
+ \</pre\>
+
+ For SATA, [PLL_16P5EN] should always be 0.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_pll_px_mode_1_s cn81xx; */
+ /* struct bdk_gserx_pll_px_mode_1_s cn88xx; */
+ /* CN83XX view: same bit layout as the generic "s" view; only the field
+    description text differs (spacing of "100 MHz" in the [PLL_DIV] note). */
+ struct bdk_gserx_pll_px_mode_1_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t pll_16p5en : 1; /**< [ 13: 13](R/W/H) Enable for the DIV 16.5 divided down clock.
+
+ Recommended settings, based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x1 0x1 0x1
+ 2.5G: 0x0 0x0 0x0
+ 3.125G: NS 0x1 0x1
+ 5.0G: 0x0 0x0 0x0
+ 6.25G: NS 0x0 0x0
+ 8.0G: 0x0 0x0 NS
+ 10.3125G: NS NS 0x1
+ \</pre\>
+
+ For SATA, [PLL_16P5EN] should always be 0.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_cpadj : 2; /**< [ 12: 11](R/W/H) PLL charge adjust.
+
+ Recommended settings, based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x2 0x2 0x3
+ 2.5G: 0x2 0x1 0x2
+ 3.125G: NS 0x2 0x2
+ 5.0G: 0x2 0x2 0x2
+ 6.25G: NS 0x2 0x2
+ 8.0G: 0x2 0x1 NS
+ 10.3125G: NS NS 0x2
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_CPADJ] should always be 2.
+ For PCIE 1.1 @100MHz, [PLL_CPADJ] should be 2.
+ For PCIE 2.1 @100MHz, [PLL_CPADJ] should be 2.
+ For PCIE 1.1 @125MHz, [PLL_CPADJ] should be 1.
+ For PCIE 2.1 @125MHz, [PLL_CPADJ] should be 1.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_pcie3en : 1; /**< [ 10: 10](R/W/H) Enable PCIE3 mode.
+
+ Recommended settings:
+ 0 = Any rate other than 8 Gbaud.
+ 1 = Rate is equal to 8 Gbaud.
+
+ For SATA, [PLL_PCIE3EN] should always be 0. */
+ uint64_t pll_opr : 1; /**< [ 9: 9](R/W/H) PLL op range:
+ 0 = Use ring oscillator VCO. Recommended for rates 6.25 Gbaud and lower.
+ 1 = Use LC-tank VCO. Recommended for rates 8 Gbaud and higher.
+
+ For SATA, [PLL_OPR] should always be 0. */
+ uint64_t pll_div : 9; /**< [ 8: 0](R/W/H) PLL divider in feedback path which sets the PLL frequency.
+
+ Recommended settings:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x19 0x14 0x10
+ 2.5G: 0x19 0x14 0x10
+ 3.125G: NS 0x19 0x14
+ 5.0G: 0x19 0x14 0x10
+ 6.25G: NS 0x19 0x14
+ 8.0G: 0x28 0x20 NS
+ 10.3125G: NS NS 0x21
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_DIV] should always be 0x1E.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t pll_div : 9; /**< [ 8: 0](R/W/H) PLL divider in feedback path which sets the PLL frequency.
+
+ Recommended settings:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x19 0x14 0x10
+ 2.5G: 0x19 0x14 0x10
+ 3.125G: NS 0x19 0x14
+ 5.0G: 0x19 0x14 0x10
+ 6.25G: NS 0x19 0x14
+ 8.0G: 0x28 0x20 NS
+ 10.3125G: NS NS 0x21
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_DIV] should always be 0x1E.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_opr : 1; /**< [ 9: 9](R/W/H) PLL op range:
+ 0 = Use ring oscillator VCO. Recommended for rates 6.25 Gbaud and lower.
+ 1 = Use LC-tank VCO. Recommended for rates 8 Gbaud and higher.
+
+ For SATA, [PLL_OPR] should always be 0. */
+ uint64_t pll_pcie3en : 1; /**< [ 10: 10](R/W/H) Enable PCIE3 mode.
+
+ Recommended settings:
+ 0 = Any rate other than 8 Gbaud.
+ 1 = Rate is equal to 8 Gbaud.
+
+ For SATA, [PLL_PCIE3EN] should always be 0. */
+ uint64_t pll_cpadj : 2; /**< [ 12: 11](R/W/H) PLL charge adjust.
+
+ Recommended settings, based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x2 0x2 0x3
+ 2.5G: 0x2 0x1 0x2
+ 3.125G: NS 0x2 0x2
+ 5.0G: 0x2 0x2 0x2
+ 6.25G: NS 0x2 0x2
+ 8.0G: 0x2 0x1 NS
+ 10.3125G: NS NS 0x2
+ \</pre\>
+
+ For SATA with 100 MHz reference clock, [PLL_CPADJ] should always be 2.
+ For PCIE 1.1 @100MHz, [PLL_CPADJ] should be 2.
+ For PCIE 2.1 @100MHz, [PLL_CPADJ] should be 2.
+ For PCIE 1.1 @125MHz, [PLL_CPADJ] should be 1.
+ For PCIE 2.1 @125MHz, [PLL_CPADJ] should be 1.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t pll_16p5en : 1; /**< [ 13: 13](R/W/H) Enable for the DIV 16.5 divided down clock.
+
+ Recommended settings, based on the reference clock speed:
+
+ \<pre\>
+ 100MHz 125MHz 156.25MHz
+ 1.25G: 0x1 0x1 0x1
+ 2.5G: 0x0 0x0 0x0
+ 3.125G: NS 0x1 0x1
+ 5.0G: 0x0 0x0 0x0
+ 6.25G: NS 0x0 0x0
+ 8.0G: 0x0 0x0 NS
+ 10.3125G: NS NS 0x1
+ \</pre\>
+
+ For SATA, [PLL_16P5EN] should always be 0.
+
+ A 'NS' indicates that the rate is not supported at the specified reference clock. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_pll_px_mode_1 bdk_gserx_pll_px_mode_1_t;
+
+/* Compute the RSL address of GSER(a)_PLL_P(b)_MODE_1, where b selects the
+   GSER_LMODE_E protocol entry (0..11). Valid GSER instance ranges are model
+   dependent (CN81XX: a<=3, CN83XX: a<=6, CN88XX: a<=13); invalid (a,b) pairs
+   fall through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_PLL_PX_MODE_1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_PLL_PX_MODE_1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=11)))
+ return 0x87e0904e0038ll + 0x1000000ll * ((a) & 0x3) + 0x20ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=11)))
+ return 0x87e0904e0038ll + 0x1000000ll * ((a) & 0x7) + 0x20ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=11)))
+ return 0x87e0904e0038ll + 0x1000000ll * ((a) & 0xf) + 0x20ll * ((b) & 0xf);
+ __bdk_csr_fatal("GSERX_PLL_PX_MODE_1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_PLL_PX_MODE_1(a,b) bdk_gserx_pll_px_mode_1_t
+#define bustype_BDK_GSERX_PLL_PX_MODE_1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_PLL_PX_MODE_1(a,b) "GSERX_PLL_PX_MODE_1"
+#define device_bar_BDK_GSERX_PLL_PX_MODE_1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_PLL_PX_MODE_1(a,b) (a)
+#define arguments_BDK_GSERX_PLL_PX_MODE_1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_pll_stat
+ *
+ * GSER PLL Status Register
+ */
+union bdk_gserx_pll_stat
+{
+ uint64_t u;
+ struct bdk_gserx_pll_stat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t pll_lock : 1; /**< [ 0: 0](RO/H) When set, indicates that the PHY PLL is locked. */
+#else /* Word 0 - Little Endian */
+ uint64_t pll_lock : 1; /**< [ 0: 0](RO/H) When set, indicates that the PHY PLL is locked. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_pll_stat_s cn; */
+};
+typedef union bdk_gserx_pll_stat bdk_gserx_pll_stat_t;
+
+/* Compute the RSL address of GSER(a)_PLL_STAT. Valid GSER instance ranges
+   are model dependent (CN81XX: a<=3, CN83XX: a<=6, CN88XX: a<=13); any other
+   index falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_PLL_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_PLL_STAT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000010ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000010ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000010ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_PLL_STAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_PLL_STAT(a) bdk_gserx_pll_stat_t
+#define bustype_BDK_GSERX_PLL_STAT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_PLL_STAT(a) "GSERX_PLL_STAT"
+#define device_bar_BDK_GSERX_PLL_STAT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_PLL_STAT(a) (a)
+#define arguments_BDK_GSERX_PLL_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_qlm_stat
+ *
+ * GSER QLM Status Register
+ */
+union bdk_gserx_qlm_stat
+{
+ uint64_t u;
+ struct bdk_gserx_qlm_stat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t rst_rdy : 1; /**< [ 1: 1](RO/H) When asserted, the QLM is configured and the PLLs are stable. The GSER
+ is ready to accept TX traffic from the MAC. */
+ uint64_t dcok : 1; /**< [ 0: 0](RO) When asserted, there is a PLL reference clock indicating there is power to the QLM. */
+#else /* Word 0 - Little Endian */
+ uint64_t dcok : 1; /**< [ 0: 0](RO) When asserted, there is a PLL reference clock indicating there is power to the QLM. */
+ uint64_t rst_rdy : 1; /**< [ 1: 1](RO/H) When asserted, the QLM is configured and the PLLs are stable. The GSER
+ is ready to accept TX traffic from the MAC. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_qlm_stat_s cn; */
+};
+typedef union bdk_gserx_qlm_stat bdk_gserx_qlm_stat_t;
+
+/* Compute the RSL address of GSER(a)_QLM_STAT. Valid GSER instance ranges
+   are model dependent (CN81XX: a<=3, CN83XX: a<=6, CN88XX: a<=13); any other
+   index falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_QLM_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_QLM_STAT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0900000a0ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0900000a0ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0900000a0ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_QLM_STAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_QLM_STAT(a) bdk_gserx_qlm_stat_t
+#define bustype_BDK_GSERX_QLM_STAT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_QLM_STAT(a) "GSERX_QLM_STAT"
+#define device_bar_BDK_GSERX_QLM_STAT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_QLM_STAT(a) (a)
+#define arguments_BDK_GSERX_QLM_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rdet_time
+ *
+ * GSER Receiver Detect Wait Times Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_rdet_time
+{
+ uint64_t u;
+ struct bdk_gserx_rdet_time_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t rdet_time_3 : 4; /**< [ 15: 12](R/W) Determines the time allocated for disabling the RX detect
+ circuit, and returning to common mode. */
+ uint64_t rdet_time_2 : 4; /**< [ 11: 8](R/W) Determines the time allocated for the RX detect circuit to
+ detect a receiver. */
+ uint64_t rdet_time_1 : 8; /**< [ 7: 0](R/W) Determines the time allocated for enabling the RX detect circuit. */
+#else /* Word 0 - Little Endian */
+ uint64_t rdet_time_1 : 8; /**< [ 7: 0](R/W) Determines the time allocated for enabling the RX detect circuit. */
+ uint64_t rdet_time_2 : 4; /**< [ 11: 8](R/W) Determines the time allocated for the RX detect circuit to
+ detect a receiver. */
+ uint64_t rdet_time_3 : 4; /**< [ 15: 12](R/W) Determines the time allocated for disabling the RX detect
+ circuit, and returning to common mode. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rdet_time_s cn; */
+};
+typedef union bdk_gserx_rdet_time bdk_gserx_rdet_time_t;
+
+/* Address of GSER(a)_RDET_TIME: base 0x87e0904e0008, 0x1000000 stride per
+ * GSER block; valid index range is model dependent (CN81XX: 0-3,
+ * CN83XX: 0-6, CN88XX: 0-13). Out-of-range -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_RDET_TIME(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RDET_TIME(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e0904e0008ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e0904e0008ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e0904e0008ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_RDET_TIME", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_RDET_TIME(a) bdk_gserx_rdet_time_t
+#define bustype_BDK_GSERX_RDET_TIME(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RDET_TIME(a) "GSERX_RDET_TIME"
+#define device_bar_BDK_GSERX_RDET_TIME(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RDET_TIME(a) (a)
+#define arguments_BDK_GSERX_RDET_TIME(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_refclk_evt_cntr
+ *
+ * GSER QLM Reference Clock Event Counter Register
+ */
+union bdk_gserx_refclk_evt_cntr
+{
+ uint64_t u;
+ struct bdk_gserx_refclk_evt_cntr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t count : 32; /**< [ 31: 0](RO) This register can only be reliably read when GSER()_REFCLK_EVT_CTRL[ENB]
+ is clear.
+
+ When GSER()_REFCLK_EVT_CTRL[CLR] is set, [COUNT] goes to zero.
+
+ When GSER()_REFCLK_EVT_CTRL[ENB] is set, [COUNT] is incremented
+ in positive edges of the QLM reference clock.
+
+ When GSER()_REFCLK_EVT_CTRL[ENB] is not set, [COUNT] is held; this must
+ be used when [COUNT] is being read for reliable results. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 32; /**< [ 31: 0](RO) This register can only be reliably read when GSER()_REFCLK_EVT_CTRL[ENB]
+ is clear.
+
+ When GSER()_REFCLK_EVT_CTRL[CLR] is set, [COUNT] goes to zero.
+
+ When GSER()_REFCLK_EVT_CTRL[ENB] is set, [COUNT] is incremented
+ in positive edges of the QLM reference clock.
+
+ When GSER()_REFCLK_EVT_CTRL[ENB] is not set, [COUNT] is held; this must
+ be used when [COUNT] is being read for reliable results. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_refclk_evt_cntr_s cn; */
+};
+typedef union bdk_gserx_refclk_evt_cntr bdk_gserx_refclk_evt_cntr_t;
+
+/* Address of GSER(a)_REFCLK_EVT_CNTR: base 0x87e090000178, 0x1000000 stride
+ * per GSER block; valid index range is model dependent (CN81XX: 0-3,
+ * CN83XX: 0-6, CN88XX: 0-13). Out-of-range -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_REFCLK_EVT_CNTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_REFCLK_EVT_CNTR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000178ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000178ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000178ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_REFCLK_EVT_CNTR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_REFCLK_EVT_CNTR(a) bdk_gserx_refclk_evt_cntr_t
+#define bustype_BDK_GSERX_REFCLK_EVT_CNTR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_REFCLK_EVT_CNTR(a) "GSERX_REFCLK_EVT_CNTR"
+#define device_bar_BDK_GSERX_REFCLK_EVT_CNTR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_REFCLK_EVT_CNTR(a) (a)
+#define arguments_BDK_GSERX_REFCLK_EVT_CNTR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_refclk_evt_ctrl
+ *
+ * GSER QLM Reference Clock Event Counter Control Register
+ */
+union bdk_gserx_refclk_evt_ctrl
+{
+ uint64_t u;
+ struct bdk_gserx_refclk_evt_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t clr : 1; /**< [ 1: 1](R/W) When set, clears GSER()_REFCLK_EVT_CNTR[COUNT]. */
+ uint64_t enb : 1; /**< [ 0: 0](R/W) When set, enables the GSER()_REFCLK_EVT_CNTR[COUNT] to increment
+ on positive edges of the QLM reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t enb : 1; /**< [ 0: 0](R/W) When set, enables the GSER()_REFCLK_EVT_CNTR[COUNT] to increment
+ on positive edges of the QLM reference clock. */
+ uint64_t clr : 1; /**< [ 1: 1](R/W) When set, clears GSER()_REFCLK_EVT_CNTR[COUNT]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_refclk_evt_ctrl_s cn; */
+};
+typedef union bdk_gserx_refclk_evt_ctrl bdk_gserx_refclk_evt_ctrl_t;
+
+/* Address of GSER(a)_REFCLK_EVT_CTRL: base 0x87e090000170, 0x1000000 stride
+ * per GSER block; valid index range is model dependent (CN81XX: 0-3,
+ * CN83XX: 0-6, CN88XX: 0-13). Out-of-range -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_REFCLK_EVT_CTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_REFCLK_EVT_CTRL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000170ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000170ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000170ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_REFCLK_EVT_CTRL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_REFCLK_EVT_CTRL(a) bdk_gserx_refclk_evt_ctrl_t
+#define bustype_BDK_GSERX_REFCLK_EVT_CTRL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_REFCLK_EVT_CTRL(a) "GSERX_REFCLK_EVT_CTRL"
+#define device_bar_BDK_GSERX_REFCLK_EVT_CTRL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_REFCLK_EVT_CTRL(a) (a)
+#define arguments_BDK_GSERX_REFCLK_EVT_CTRL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_refclk_sel
+ *
+ * GSER Reference Clock Select Register
+ * This register selects the reference clock.
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_refclk_sel
+{
+ uint64_t u;
+ struct bdk_gserx_refclk_sel_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t pcie_refclk125 : 1; /**< [ 2: 2](R/W/H) For bootable PCIe links, this is loaded with
+ PCIE0/2_REFCLK_125 at cold reset and indicates a 125 MHz reference clock when set. For
+ non-bootable PCIe links, this bit is set to zero at cold reset and indicates a 100 MHz
+ reference clock. It is not used for non-PCIe links. */
+ uint64_t com_clk_sel : 1; /**< [ 1: 1](R/W/H) When set, the reference clock is sourced from the external clock mux. For bootable PCIe
+ links, this bit is loaded with the PCIEn_COM0_CLK_EN pin at cold reset.
+
+ For CN80XX, this field must be set. */
+ uint64_t use_com1 : 1; /**< [ 0: 0](R/W) This bit controls the external mux select. When set, DLMC_REF_CLK1_N/P
+ are selected as the reference clock. When clear, DLMC_REF_CLK0_N/P are selected as the
+ reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t use_com1 : 1; /**< [ 0: 0](R/W) This bit controls the external mux select. When set, DLMC_REF_CLK1_N/P
+ are selected as the reference clock. When clear, DLMC_REF_CLK0_N/P are selected as the
+ reference clock. */
+ uint64_t com_clk_sel : 1; /**< [ 1: 1](R/W/H) When set, the reference clock is sourced from the external clock mux. For bootable PCIe
+ links, this bit is loaded with the PCIEn_COM0_CLK_EN pin at cold reset.
+
+ For CN80XX, this field must be set. */
+ uint64_t pcie_refclk125 : 1; /**< [ 2: 2](R/W/H) For bootable PCIe links, this is loaded with
+ PCIE0/2_REFCLK_125 at cold reset and indicates a 125 MHz reference clock when set. For
+ non-bootable PCIe links, this bit is set to zero at cold reset and indicates a 100 MHz
+ reference clock. It is not used for non-PCIe links. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_refclk_sel_s cn81xx; */
+ /* Per-model layout variants: the field positions are identical; only the
+ field descriptions (clock pin naming, CN80XX note) differ by chip. */
+ struct bdk_gserx_refclk_sel_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t pcie_refclk125 : 1; /**< [ 2: 2](R/W/H) For bootable PCIe links, this is loaded with
+ PCIE0/2_REFCLK_125 at cold reset and indicates a 125 MHz reference clock when set. For
+ non-bootable PCIe links, this bit is set to zero at cold reset and indicates a 100 MHz
+ reference clock. It is not used for non-PCIe links. */
+ uint64_t com_clk_sel : 1; /**< [ 1: 1](R/W/H) When set, the reference clock is sourced from the external clock mux. For bootable PCIe
+ links, this bit is loaded with the PCIEn_COM0_CLK_EN pin at cold reset. */
+ uint64_t use_com1 : 1; /**< [ 0: 0](R/W) For non-CCPI links, this bit controls the external mux select. When set, QLMC_REF_CLK1_N/P
+ are selected as the reference clock. When clear, QLMC_REF_CLK0_N/P are selected as the
+ reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t use_com1 : 1; /**< [ 0: 0](R/W) For non-CCPI links, this bit controls the external mux select. When set, QLMC_REF_CLK1_N/P
+ are selected as the reference clock. When clear, QLMC_REF_CLK0_N/P are selected as the
+ reference clock. */
+ uint64_t com_clk_sel : 1; /**< [ 1: 1](R/W/H) When set, the reference clock is sourced from the external clock mux. For bootable PCIe
+ links, this bit is loaded with the PCIEn_COM0_CLK_EN pin at cold reset. */
+ uint64_t pcie_refclk125 : 1; /**< [ 2: 2](R/W/H) For bootable PCIe links, this is loaded with
+ PCIE0/2_REFCLK_125 at cold reset and indicates a 125 MHz reference clock when set. For
+ non-bootable PCIe links, this bit is set to zero at cold reset and indicates a 100 MHz
+ reference clock. It is not used for non-PCIe links. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_refclk_sel_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t pcie_refclk125 : 1; /**< [ 2: 2](R/W/H) For bootable PCIe links, this is loaded with
+ PCIE0/2_REFCLK_125 at cold reset and indicates a 125 MHz reference clock when set. For
+ non-bootable PCIe links, this bit is set to zero at cold reset and indicates a 100 MHz
+ reference clock. It is not used for non-PCIe links. */
+ uint64_t com_clk_sel : 1; /**< [ 1: 1](R/W/H) When set, the reference clock is sourced from the external clock mux. For bootable PCIe
+ links, this bit is loaded with the PCIEn_COM0_CLK_EN pin at cold reset. */
+ uint64_t use_com1 : 1; /**< [ 0: 0](R/W) This bit controls the external mux select. When set, QLMC_REF_CLK1_N/P
+ are selected as the reference clock. When clear, QLMC_REF_CLK0_N/P are selected as the
+ reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t use_com1 : 1; /**< [ 0: 0](R/W) This bit controls the external mux select. When set, QLMC_REF_CLK1_N/P
+ are selected as the reference clock. When clear, QLMC_REF_CLK0_N/P are selected as the
+ reference clock. */
+ uint64_t com_clk_sel : 1; /**< [ 1: 1](R/W/H) When set, the reference clock is sourced from the external clock mux. For bootable PCIe
+ links, this bit is loaded with the PCIEn_COM0_CLK_EN pin at cold reset. */
+ uint64_t pcie_refclk125 : 1; /**< [ 2: 2](R/W/H) For bootable PCIe links, this is loaded with
+ PCIE0/2_REFCLK_125 at cold reset and indicates a 125 MHz reference clock when set. For
+ non-bootable PCIe links, this bit is set to zero at cold reset and indicates a 100 MHz
+ reference clock. It is not used for non-PCIe links. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_refclk_sel bdk_gserx_refclk_sel_t;
+
+/* Address of GSER(a)_REFCLK_SEL: base 0x87e090000008, 0x1000000 stride per
+ * GSER block; valid index range is model dependent (CN81XX: 0-3,
+ * CN83XX: 0-6, CN88XX: 0-13). Out-of-range -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_REFCLK_SEL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_REFCLK_SEL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000008ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000008ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000008ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_REFCLK_SEL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_REFCLK_SEL(a) bdk_gserx_refclk_sel_t
+#define bustype_BDK_GSERX_REFCLK_SEL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_REFCLK_SEL(a) "GSERX_REFCLK_SEL"
+#define device_bar_BDK_GSERX_REFCLK_SEL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_REFCLK_SEL(a) (a)
+#define arguments_BDK_GSERX_REFCLK_SEL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_coast
+ *
+ * GSER RX Coast Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_rx_coast
+{
+ uint64_t u;
+ struct bdk_gserx_rx_coast_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t coast : 4; /**< [ 3: 0](R/W/H) For links that are not in PCIE or SATA mode, control signals to freeze
+ the frequency of the per lane CDR in the PHY. The COAST signals are only valid in P0
+ state, come up asserted and are deasserted in hardware after detecting the electrical idle
+ exit (GSER()_RX_EIE_DETSTS[EIESTS]). Once the COAST signal deasserts, the CDR is
+ allowed to lock. In BGX mode, the BGX MAC can also control the COAST inputs to the PHY to
+ allow Auto-Negotiation for backplane Ethernet. For diagnostic use only.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t coast : 4; /**< [ 3: 0](R/W/H) For links that are not in PCIE or SATA mode, control signals to freeze
+ the frequency of the per lane CDR in the PHY. The COAST signals are only valid in P0
+ state, come up asserted and are deasserted in hardware after detecting the electrical idle
+ exit (GSER()_RX_EIE_DETSTS[EIESTS]). Once the COAST signal deasserts, the CDR is
+ allowed to lock. In BGX mode, the BGX MAC can also control the COAST inputs to the PHY to
+ allow Auto-Negotiation for backplane Ethernet. For diagnostic use only.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_coast_s cn81xx; */
+ /* Per-model variants: bit layout is identical; only which lanes are
+ usable differs (see the per-lane notes in each field description). */
+ struct bdk_gserx_rx_coast_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t coast : 4; /**< [ 3: 0](R/W/H) For links that are not in PCIE or SATA mode (including all CCPI links), control signals to
+ freeze
+ the frequency of the per lane CDR in the PHY. The COAST signals are only valid in P0
+ state, come up asserted and are deasserted in hardware after detecting the electrical idle
+ exit (GSER()_RX_EIE_DETSTS[EIESTS]). Once the COAST signal deasserts, the CDR is
+ allowed to lock. In BGX mode, the BGX MAC can also control the COAST inputs to the PHY to
+ allow Auto-Negotiation for backplane Ethernet. For diagnostic use only.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t coast : 4; /**< [ 3: 0](R/W/H) For links that are not in PCIE or SATA mode (including all CCPI links), control signals to
+ freeze
+ the frequency of the per lane CDR in the PHY. The COAST signals are only valid in P0
+ state, come up asserted and are deasserted in hardware after detecting the electrical idle
+ exit (GSER()_RX_EIE_DETSTS[EIESTS]). Once the COAST signal deasserts, the CDR is
+ allowed to lock. In BGX mode, the BGX MAC can also control the COAST inputs to the PHY to
+ allow Auto-Negotiation for backplane Ethernet. For diagnostic use only.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_rx_coast_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t coast : 4; /**< [ 3: 0](R/W/H) For links that are not in PCIE or SATA mode, control signals to freeze
+ the frequency of the per lane CDR in the PHY. The COAST signals are only valid in P0
+ state, come up asserted and are deasserted in hardware after detecting the electrical idle
+ exit (GSER()_RX_EIE_DETSTS[EIESTS]). Once the COAST signal deasserts, the CDR is
+ allowed to lock. In BGX mode, the BGX MAC can also control the COAST inputs to the PHY to
+ allow Auto-Negotiation for backplane Ethernet. For diagnostic use only.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t coast : 4; /**< [ 3: 0](R/W/H) For links that are not in PCIE or SATA mode, control signals to freeze
+ the frequency of the per lane CDR in the PHY. The COAST signals are only valid in P0
+ state, come up asserted and are deasserted in hardware after detecting the electrical idle
+ exit (GSER()_RX_EIE_DETSTS[EIESTS]). Once the COAST signal deasserts, the CDR is
+ allowed to lock. In BGX mode, the BGX MAC can also control the COAST inputs to the PHY to
+ allow Auto-Negotiation for backplane Ethernet. For diagnostic use only.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_rx_coast bdk_gserx_rx_coast_t;
+
+/* Address of GSER(a)_RX_COAST: base 0x87e090000138, 0x1000000 stride per
+ * GSER block; valid index range is model dependent (CN81XX: 0-3,
+ * CN83XX: 0-6, CN88XX: 0-13). Out-of-range -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_RX_COAST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_COAST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000138ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000138ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000138ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_RX_COAST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_RX_COAST(a) bdk_gserx_rx_coast_t
+#define bustype_BDK_GSERX_RX_COAST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_COAST(a) "GSERX_RX_COAST"
+#define device_bar_BDK_GSERX_RX_COAST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_COAST(a) (a)
+#define arguments_BDK_GSERX_RX_COAST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_eie_deten
+ *
+ * GSER RX Electrical Idle Detect Enable Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_rx_eie_deten
+{
+ uint64_t u;
+ struct bdk_gserx_rx_eie_deten_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t eiede : 4; /**< [ 3: 0](R/W) For links that are not in PCIE or SATA mode, these bits enable per lane
+ electrical idle exit (EIE) detection. When EIE is detected,
+ GSER()_RX_EIE_DETSTS[EIELTCH] is asserted. [EIEDE] defaults to the enabled state. Once
+ EIE has been detected, [EIEDE] must be disabled, and then enabled again to perform another
+ EIE detection.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t eiede : 4; /**< [ 3: 0](R/W) For links that are not in PCIE or SATA mode, these bits enable per lane
+ electrical idle exit (EIE) detection. When EIE is detected,
+ GSER()_RX_EIE_DETSTS[EIELTCH] is asserted. [EIEDE] defaults to the enabled state. Once
+ EIE has been detected, [EIEDE] must be disabled, and then enabled again to perform another
+ EIE detection.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_eie_deten_s cn81xx; */
+ /* Per-model variants: bit layout is identical; only which lanes are
+ usable differs (see the per-lane notes in each field description). */
+ struct bdk_gserx_rx_eie_deten_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t eiede : 4; /**< [ 3: 0](R/W) For links that are not in PCIE or SATA mode (including all CCPI links), these bits enable
+ per lane
+ electrical idle exit (EIE) detection. When EIE is detected,
+ GSER()_RX_EIE_DETSTS[EIELTCH] is asserted. [EIEDE] defaults to the enabled state. Once
+ EIE has been detected, [EIEDE] must be disabled, and then enabled again to perform another
+ EIE detection.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t eiede : 4; /**< [ 3: 0](R/W) For links that are not in PCIE or SATA mode (including all CCPI links), these bits enable
+ per lane
+ electrical idle exit (EIE) detection. When EIE is detected,
+ GSER()_RX_EIE_DETSTS[EIELTCH] is asserted. [EIEDE] defaults to the enabled state. Once
+ EIE has been detected, [EIEDE] must be disabled, and then enabled again to perform another
+ EIE detection.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_rx_eie_deten_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t eiede : 4; /**< [ 3: 0](R/W) For links that are not in PCIE or SATA mode, these bits enable per lane
+ electrical idle exit (EIE) detection. When EIE is detected,
+ GSER()_RX_EIE_DETSTS[EIELTCH] is asserted. [EIEDE] defaults to the enabled state. Once
+ EIE has been detected, [EIEDE] must be disabled, and then enabled again to perform another
+ EIE detection.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t eiede : 4; /**< [ 3: 0](R/W) For links that are not in PCIE or SATA mode, these bits enable per lane
+ electrical idle exit (EIE) detection. When EIE is detected,
+ GSER()_RX_EIE_DETSTS[EIELTCH] is asserted. [EIEDE] defaults to the enabled state. Once
+ EIE has been detected, [EIEDE] must be disabled, and then enabled again to perform another
+ EIE detection.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_rx_eie_deten bdk_gserx_rx_eie_deten_t;
+
+/* Address of GSER(a)_RX_EIE_DETEN: base 0x87e090000148, 0x1000000 stride per
+ * GSER block; valid index range is model dependent (CN81XX: 0-3,
+ * CN83XX: 0-6, CN88XX: 0-13). Out-of-range -> __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_RX_EIE_DETEN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_EIE_DETEN(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000148ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000148ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000148ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_RX_EIE_DETEN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_RX_EIE_DETEN(a) bdk_gserx_rx_eie_deten_t
+#define bustype_BDK_GSERX_RX_EIE_DETEN(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_EIE_DETEN(a) "GSERX_RX_EIE_DETEN"
+#define device_bar_BDK_GSERX_RX_EIE_DETEN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_EIE_DETEN(a) (a)
+#define arguments_BDK_GSERX_RX_EIE_DETEN(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_eie_detsts
+ *
+ * GSER RX Electrical Idle Detect Status Register
+ */
+union bdk_gserx_rx_eie_detsts
+{
+ uint64_t u;
+ struct bdk_gserx_rx_eie_detsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t cdrlock : 4; /**< [ 11: 8](RO/H) After an electrical idle exit condition (EIE) has been detected, the CDR needs 10000 UI to
+ lock. During this time, there may be RX bit errors. These bits will set when the CDR is
+ guaranteed to be locked. Note that link training can't start until the lane CDRLOCK is
+ set. Software can use CDRLOCK to determine when to expect error free RX data.
+ \<11\>: Lane 3. Reserved.
+ \<10\>: Lane 2. Reserved.
+ \<9\>: Lane 1.
+ \<8\>: Lane 0. */
+ uint64_t eiests : 4; /**< [ 7: 4](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. For higher
+ data rates, the received data needs to have sufficient low frequency content (for example,
+ idle symbols) for data transitions to be detected and for [EIESTS] to stay set
+ accordingly.
+ Under most conditions, [EIESTS]
+ will stay asserted until GSER()_RX_EIE_DETEN[EIEDE] is deasserted.
+ \<7\>: Lane 3. Reserved.
+ \<6\>: Lane 2. Reserved.
+ \<5\>: Lane 1.
+ \<4\>: Lane 0. */
+ uint64_t eieltch : 4; /**< [ 3: 0](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. Once an
+ EIE condition has been detected, the per-lane [EIELTCH] will stay set until
+ GSER()_RX_EIE_DETEN[EIEDE] is deasserted. Note that there may be RX bit errors until
+ CDRLOCK
+ is set.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t eieltch : 4; /**< [ 3: 0](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. Once an
+ EIE condition has been detected, the per-lane [EIELTCH] will stay set until
+ GSER()_RX_EIE_DETEN[EIEDE] is deasserted. Note that there may be RX bit errors until
+ CDRLOCK
+ is set.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t eiests : 4; /**< [ 7: 4](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. For higher
+ data rates, the received data needs to have sufficient low frequency content (for example,
+ idle symbols) for data transitions to be detected and for [EIESTS] to stay set
+ accordingly.
+ Under most conditions, [EIESTS]
+ will stay asserted until GSER()_RX_EIE_DETEN[EIEDE] is deasserted.
+ \<7\>: Lane 3. Reserved.
+ \<6\>: Lane 2. Reserved.
+ \<5\>: Lane 1.
+ \<4\>: Lane 0. */
+ uint64_t cdrlock : 4; /**< [ 11: 8](RO/H) After an electrical idle exit condition (EIE) has been detected, the CDR needs 10000 UI to
+ lock. During this time, there may be RX bit errors. These bits will set when the CDR is
+ guaranteed to be locked. Note that link training can't start until the lane CDRLOCK is
+ set. Software can use CDRLOCK to determine when to expect error free RX data.
+ \<11\>: Lane 3. Reserved.
+ \<10\>: Lane 2. Reserved.
+ \<9\>: Lane 1.
+ \<8\>: Lane 0. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_eie_detsts_s cn81xx; */
+ struct bdk_gserx_rx_eie_detsts_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t cdrlock : 4; /**< [ 11: 8](RO/H) After an electrical idle exit condition (EIE) has been detected, the CDR needs 10000 UI to
+ lock. During this time, there may be RX bit errors. These bits will set when the CDR is
+ guaranteed to be locked. Note that link training can't start until the lane CDRLOCK is
+ set. Software can use CDRLOCK to determine when to expect error free RX data.
+ \<11\>: Lane 3.
+ \<10\>: Lane 2.
+ \<9\>: Lane 1.
+ \<8\>: Lane 0. */
+ uint64_t eiests : 4; /**< [ 7: 4](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. For higher
+ data rates, the received data needs to have sufficient low frequency content (for example,
+ idle symbols) for data transitions to be detected and for [EIESTS] to stay set
+ accordingly.
+ Under most conditions, [EIESTS]
+ will stay asserted until GSER()_RX_EIE_DETEN[EIEDE] is deasserted.
+ \<7\>: Lane 3.
+ \<6\>: Lane 2.
+ \<5\>: Lane 1.
+ \<4\>: Lane 0. */
+ uint64_t eieltch : 4; /**< [ 3: 0](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. Once an
+ EIE condition has been detected, the per-lane [EIELTCH] will stay set until
+ GSER()_RX_EIE_DETEN[EIEDE] is deasserted. Note that there may be RX bit errors until
+ CDRLOCK
+ is set.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t eieltch : 4; /**< [ 3: 0](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. Once an
+ EIE condition has been detected, the per-lane [EIELTCH] will stay set until
+ GSER()_RX_EIE_DETEN[EIEDE] is deasserted. Note that there may be RX bit errors until
+ CDRLOCK
+ is set.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t eiests : 4; /**< [ 7: 4](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. For higher
+ data rates, the received data needs to have sufficient low frequency content (for example,
+ idle symbols) for data transitions to be detected and for [EIESTS] to stay set
+ accordingly.
+ Under most conditions, [EIESTS]
+ will stay asserted until GSER()_RX_EIE_DETEN[EIEDE] is deasserted.
+ \<7\>: Lane 3.
+ \<6\>: Lane 2.
+ \<5\>: Lane 1.
+ \<4\>: Lane 0. */
+ uint64_t cdrlock : 4; /**< [ 11: 8](RO/H) After an electrical idle exit condition (EIE) has been detected, the CDR needs 10000 UI to
+ lock. During this time, there may be RX bit errors. These bits will set when the CDR is
+ guaranteed to be locked. Note that link training can't start until the lane CDRLOCK is
+ set. Software can use CDRLOCK to determine when to expect error free RX data.
+ \<11\>: Lane 3.
+ \<10\>: Lane 2.
+ \<9\>: Lane 1.
+ \<8\>: Lane 0. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_rx_eie_detsts_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t cdrlock : 4; /**< [ 11: 8](RO/H) After an electrical idle exit condition (EIE) has been detected, the CDR needs 10000 UI to
+ lock. During this time, there may be RX bit errors. These bits will set when the CDR is
+ guaranteed to be locked. Note that link training can't start until the lane CDRLOCK is
+ set. Software can use CDRLOCK to determine when to expect error free RX data.
+ \<11\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<10\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<9\>: Lane 1.
+ \<8\>: Lane 0. */
+ uint64_t eiests : 4; /**< [ 7: 4](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. For higher
+ data rates, the received data needs to have sufficient low frequency content (for example,
+ idle symbols) for data transitions to be detected and for [EIESTS] to stay set
+ accordingly.
+ Under most conditions, [EIESTS]
+ will stay asserted until GSER()_RX_EIE_DETEN[EIEDE] is deasserted.
+ \<7\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<6\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<5\>: Lane 1.
+ \<4\>: Lane 0. */
+ uint64_t eieltch : 4; /**< [ 3: 0](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. Once an
+ EIE condition has been detected, the per-lane [EIELTCH] will stay set until
+ GSER()_RX_EIE_DETEN[EIEDE] is deasserted. Note that there may be RX bit errors until
+ CDRLOCK
+ is set.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t eieltch : 4; /**< [ 3: 0](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. Once an
+ EIE condition has been detected, the per-lane [EIELTCH] will stay set until
+ GSER()_RX_EIE_DETEN[EIEDE] is deasserted. Note that there may be RX bit errors until
+ CDRLOCK
+ is set.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t eiests : 4; /**< [ 7: 4](RO/H) When electrical idle exit detection is enabled (GSER()_RX_EIE_DETEN[EIEDE] is
+ asserted), indicates that an electrical idle exit condition (EIE) was detected. For higher
+ data rates, the received data needs to have sufficient low frequency content (for example,
+ idle symbols) for data transitions to be detected and for [EIESTS] to stay set
+ accordingly.
+ Under most conditions, [EIESTS]
+ will stay asserted until GSER()_RX_EIE_DETEN[EIEDE] is deasserted.
+ \<7\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<6\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<5\>: Lane 1.
+ \<4\>: Lane 0. */
+ uint64_t cdrlock : 4; /**< [ 11: 8](RO/H) After an electrical idle exit condition (EIE) has been detected, the CDR needs 10000 UI to
+ lock. During this time, there may be RX bit errors. These bits will set when the CDR is
+ guaranteed to be locked. Note that link training can't start until the lane CDRLOCK is
+ set. Software can use CDRLOCK to determine when to expect error free RX data.
+ \<11\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<10\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<9\>: Lane 1.
+ \<8\>: Lane 0. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_rx_eie_detsts bdk_gserx_rx_eie_detsts_t;
+
+static inline uint64_t BDK_GSERX_RX_EIE_DETSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_EIE_DETSTS(unsigned long a)
+{
+    /*
+     * Physical address of GSER(a)_RX_EIE_DETSTS.  The base offset and the
+     * per-GSER stride are the same on every supported model; only the number
+     * of GSER blocks (valid range of 'a') and the index mask differ.
+     */
+    const uint64_t base = 0x87e090000150ll;
+    const uint64_t step = 0x1000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 3))
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 6))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 13))
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_RX_EIE_DETSTS", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata macros for GSERX_RX_EIE_DETSTS: C type, bus type, printable
+   name, device BAR, bus number, and the CSR argument tuple. */
+#define typedef_BDK_GSERX_RX_EIE_DETSTS(a) bdk_gserx_rx_eie_detsts_t
+#define bustype_BDK_GSERX_RX_EIE_DETSTS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_EIE_DETSTS(a) "GSERX_RX_EIE_DETSTS"
+#define device_bar_BDK_GSERX_RX_EIE_DETSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_EIE_DETSTS(a) (a)
+#define arguments_BDK_GSERX_RX_EIE_DETSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_eie_filter
+ *
+ * GSER RX Electrical Idle Detect Filter Settings Register
+ *
+ * The two #if/#else halves below describe the same 64-bit word; only the
+ * bit-field declaration order differs between big- and little-endian builds.
+ */
+union bdk_gserx_rx_eie_filter
+{
+ uint64_t u;
+ struct bdk_gserx_rx_eie_filter_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t eii_filt : 16; /**< [ 15: 0](R/W) The GSER uses electrical idle inference to determine when a RX lane has reentered
+ electrical idle (EI). The PHY electrical idle exit detection supports a minimum pulse
+ width of 400 ps, therefore configurations that run faster than 2.5 G can indicate EI when
+ the serial lines are still driven. For rates faster than 2.5 G, it takes 16 K * 8 UI of
+ consecutive deasserted GSER()_RX_EIE_DETSTS[EIESTS] for the GSER to infer EI. In the
+ event of electrical idle inference, the following happens:
+ * GSER()_RX_EIE_DETSTS[CDRLOCK]\<lane\> is zeroed.
+ * GSER()_RX_EIE_DETSTS[EIELTCH]\<lane\> is zeroed.
+ * GSER()_RX_EIE_DETSTS[EIESTS]\<lane\> is zeroed.
+ * GSER()_RX_COAST[COAST]\<lane\> is asserted to prevent the CDR from trying to lock on
+ the incoming data stream.
+ * GSER()_RX_EIE_DETEN[EIEDE]\<lane\> deasserts for a short period of time, and then is
+ asserted to begin looking for the Electrical idle Exit condition.
+
+ Writing this register to a nonzero value causes the electrical idle inference to use the
+ [EII_FILT] count instead of the default settings. Each [EII_FILT] count represents 20 ns
+ of
+ incremental EI inference time.
+
+ It is not expected that software will need to use the Electrical Idle Inference logic. */
+#else /* Word 0 - Little Endian */
+ uint64_t eii_filt : 16; /**< [ 15: 0](R/W) The GSER uses electrical idle inference to determine when a RX lane has reentered
+ electrical idle (EI). The PHY electrical idle exit detection supports a minimum pulse
+ width of 400 ps, therefore configurations that run faster than 2.5 G can indicate EI when
+ the serial lines are still driven. For rates faster than 2.5 G, it takes 16 K * 8 UI of
+ consecutive deasserted GSER()_RX_EIE_DETSTS[EIESTS] for the GSER to infer EI. In the
+ event of electrical idle inference, the following happens:
+ * GSER()_RX_EIE_DETSTS[CDRLOCK]\<lane\> is zeroed.
+ * GSER()_RX_EIE_DETSTS[EIELTCH]\<lane\> is zeroed.
+ * GSER()_RX_EIE_DETSTS[EIESTS]\<lane\> is zeroed.
+ * GSER()_RX_COAST[COAST]\<lane\> is asserted to prevent the CDR from trying to lock on
+ the incoming data stream.
+ * GSER()_RX_EIE_DETEN[EIEDE]\<lane\> deasserts for a short period of time, and then is
+ asserted to begin looking for the Electrical idle Exit condition.
+
+ Writing this register to a nonzero value causes the electrical idle inference to use the
+ [EII_FILT] count instead of the default settings. Each [EII_FILT] count represents 20 ns
+ of
+ incremental EI inference time.
+
+ It is not expected that software will need to use the Electrical Idle Inference logic. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_eie_filter_s cn; */
+};
+typedef union bdk_gserx_rx_eie_filter bdk_gserx_rx_eie_filter_t;
+
+static inline uint64_t BDK_GSERX_RX_EIE_FILTER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_EIE_FILTER(unsigned long a)
+{
+    /*
+     * Physical address of GSER(a)_RX_EIE_FILTER.  Same base offset and
+     * per-GSER stride on all models; only the valid range and mask for the
+     * GSER index 'a' are model dependent.
+     */
+    const uint64_t base = 0x87e090000158ll;
+    const uint64_t step = 0x1000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 3))
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 6))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 13))
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_RX_EIE_FILTER", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata macros for GSERX_RX_EIE_FILTER: C type, bus type, printable
+   name, device BAR, bus number, and the CSR argument tuple. */
+#define typedef_BDK_GSERX_RX_EIE_FILTER(a) bdk_gserx_rx_eie_filter_t
+#define bustype_BDK_GSERX_RX_EIE_FILTER(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_EIE_FILTER(a) "GSERX_RX_EIE_FILTER"
+#define device_bar_BDK_GSERX_RX_EIE_FILTER(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_EIE_FILTER(a) (a)
+#define arguments_BDK_GSERX_RX_EIE_FILTER(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_polarity
+ *
+ * GSER RX Polarity Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ *
+ * Model-specific variants (cn88xx/cn83xx) differ only in per-lane comment
+ * text; the bit layout of [RX_INV] is identical in all of them.  Each
+ * #if/#else pair describes the same 64-bit word for big/little endian.
+ */
+union bdk_gserx_rx_polarity
+{
+ uint64_t u;
+ struct bdk_gserx_rx_polarity_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t rx_inv : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, control signal to invert
+ the polarity of received data. When asserted, the polarity of the received data is
+ inverted.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_inv : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, control signal to invert
+ the polarity of received data. When asserted, the polarity of the received data is
+ inverted.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_polarity_s cn81xx; */
+ struct bdk_gserx_rx_polarity_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t rx_inv : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode (including all CCPI links), control signal to invert
+ the polarity of received data. When asserted, the polarity of the received data is
+ inverted.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_inv : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode (including all CCPI links), control signal to invert
+ the polarity of received data. When asserted, the polarity of the received data is
+ inverted.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_rx_polarity_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t rx_inv : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, control signal to invert
+ the polarity of received data. When asserted, the polarity of the received data is
+ inverted.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_inv : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, control signal to invert
+ the polarity of received data. When asserted, the polarity of the received data is
+ inverted.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_rx_polarity bdk_gserx_rx_polarity_t;
+
+static inline uint64_t BDK_GSERX_RX_POLARITY(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_POLARITY(unsigned long a)
+{
+    /*
+     * Physical address of GSER(a)_RX_POLARITY.  Base offset and per-GSER
+     * stride are model independent; only the legal GSER count and the index
+     * mask vary by chip.
+     */
+    const uint64_t base = 0x87e090000160ll;
+    const uint64_t step = 0x1000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 3))
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 6))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 13))
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_RX_POLARITY", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata macros for GSERX_RX_POLARITY: C type, bus type, printable
+   name, device BAR, bus number, and the CSR argument tuple. */
+#define typedef_BDK_GSERX_RX_POLARITY(a) bdk_gserx_rx_polarity_t
+#define bustype_BDK_GSERX_RX_POLARITY(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_POLARITY(a) "GSERX_RX_POLARITY"
+#define device_bar_BDK_GSERX_RX_POLARITY(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_POLARITY(a) (a)
+#define arguments_BDK_GSERX_RX_POLARITY(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_pwr_ctrl_p1
+ *
+ * GSER RX Power Control P1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ *
+ * The #if/#else halves describe the same 64-bit word; only the bit-field
+ * declaration order differs between big- and little-endian builds.
+ */
+union bdk_gserx_rx_pwr_ctrl_p1
+{
+ uint64_t u;
+ struct bdk_gserx_rx_pwr_ctrl_p1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t p1_rx_resetn : 1; /**< [ 13: 13](R/W) Place the receiver in reset (active low). */
+ uint64_t pq_rx_allow_pll_pd : 1; /**< [ 12: 12](R/W) When asserted, permit PLL powerdown (PLL is powered
+ down if all other factors permit). */
+ uint64_t pq_rx_pcs_reset : 1; /**< [ 11: 11](R/W) When asserted, the RX power state machine puts the raw PCS RX logic
+ in reset state to save power. */
+ uint64_t p1_rx_agc_en : 1; /**< [ 10: 10](R/W) AGC enable. */
+ uint64_t p1_rx_dfe_en : 1; /**< [ 9: 9](R/W) DFE enable. */
+ uint64_t p1_rx_cdr_en : 1; /**< [ 8: 8](R/W) CDR enable. */
+ uint64_t p1_rx_cdr_coast : 1; /**< [ 7: 7](R/W) CDR coast; freezes the frequency of the CDR. */
+ uint64_t p1_rx_cdr_clr : 1; /**< [ 6: 6](R/W) CDR clear; clears the frequency of the CDR. */
+ uint64_t p1_rx_subblk_pd : 5; /**< [ 5: 1](R/W) RX sub-block powerdown controls to RX:
+ \<4\> = CTLE.
+ \<3\> = Reserved.
+ \<2\> = Lane DLL.
+ \<1\> = DFE/samplers.
+ \<0\> = Termination. */
+ uint64_t p1_rx_chpd : 1; /**< [ 0: 0](R/W) RX lane powerdown. */
+#else /* Word 0 - Little Endian */
+ uint64_t p1_rx_chpd : 1; /**< [ 0: 0](R/W) RX lane powerdown. */
+ uint64_t p1_rx_subblk_pd : 5; /**< [ 5: 1](R/W) RX sub-block powerdown controls to RX:
+ \<4\> = CTLE.
+ \<3\> = Reserved.
+ \<2\> = Lane DLL.
+ \<1\> = DFE/samplers.
+ \<0\> = Termination. */
+ uint64_t p1_rx_cdr_clr : 1; /**< [ 6: 6](R/W) CDR clear; clears the frequency of the CDR. */
+ uint64_t p1_rx_cdr_coast : 1; /**< [ 7: 7](R/W) CDR coast; freezes the frequency of the CDR. */
+ uint64_t p1_rx_cdr_en : 1; /**< [ 8: 8](R/W) CDR enable. */
+ uint64_t p1_rx_dfe_en : 1; /**< [ 9: 9](R/W) DFE enable. */
+ uint64_t p1_rx_agc_en : 1; /**< [ 10: 10](R/W) AGC enable. */
+ uint64_t pq_rx_pcs_reset : 1; /**< [ 11: 11](R/W) When asserted, the RX power state machine puts the raw PCS RX logic
+ in reset state to save power. */
+ uint64_t pq_rx_allow_pll_pd : 1; /**< [ 12: 12](R/W) When asserted, permit PLL powerdown (PLL is powered
+ down if all other factors permit). */
+ uint64_t p1_rx_resetn : 1; /**< [ 13: 13](R/W) Place the receiver in reset (active low). */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_pwr_ctrl_p1_s cn; */
+};
+typedef union bdk_gserx_rx_pwr_ctrl_p1 bdk_gserx_rx_pwr_ctrl_p1_t;
+
+static inline uint64_t BDK_GSERX_RX_PWR_CTRL_P1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_PWR_CTRL_P1(unsigned long a)
+{
+    /*
+     * Physical address of GSER(a)_RX_PWR_CTRL_P1.  Base offset and per-GSER
+     * stride are model independent; valid index range and mask are not.
+     */
+    const uint64_t base = 0x87e0904600b0ll;
+    const uint64_t step = 0x1000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 3))
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 6))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 13))
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_RX_PWR_CTRL_P1", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata macros for GSERX_RX_PWR_CTRL_P1: C type, bus type, printable
+   name, device BAR, bus number, and the CSR argument tuple. */
+#define typedef_BDK_GSERX_RX_PWR_CTRL_P1(a) bdk_gserx_rx_pwr_ctrl_p1_t
+#define bustype_BDK_GSERX_RX_PWR_CTRL_P1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_PWR_CTRL_P1(a) "GSERX_RX_PWR_CTRL_P1"
+#define device_bar_BDK_GSERX_RX_PWR_CTRL_P1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_PWR_CTRL_P1(a) (a)
+#define arguments_BDK_GSERX_RX_PWR_CTRL_P1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_pwr_ctrl_p2
+ *
+ * GSER RX Power Controls in Power State P2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ *
+ * The #if/#else halves describe the same 64-bit word; only the bit-field
+ * declaration order differs between big- and little-endian builds.
+ */
+union bdk_gserx_rx_pwr_ctrl_p2
+{
+ uint64_t u;
+ struct bdk_gserx_rx_pwr_ctrl_p2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t p2_rx_resetn : 1; /**< [ 13: 13](R/W) Place the receiver in reset (active low). */
+ uint64_t p2_rx_allow_pll_pd : 1; /**< [ 12: 12](R/W) When asserted, it permits PLL powerdown (PLL is
+ powered down if all other factors permit). */
+ uint64_t p2_rx_pcs_reset : 1; /**< [ 11: 11](R/W) When asserted, the RX Power state machine puts the Raw PCS
+ RX logic in reset state to save power. */
+ uint64_t p2_rx_agc_en : 1; /**< [ 10: 10](R/W) AGC enable. */
+ uint64_t p2_rx_dfe_en : 1; /**< [ 9: 9](R/W) DFE enable. */
+ uint64_t p2_rx_cdr_en : 1; /**< [ 8: 8](R/W) CDR enable. */
+ uint64_t p2_rx_cdr_coast : 1; /**< [ 7: 7](R/W) CDR coast; freezes the frequency of the CDR. */
+ uint64_t p2_rx_cdr_clr : 1; /**< [ 6: 6](R/W) CDR clear; clears the frequency register in the CDR. */
+ uint64_t p2_rx_subblk_pd : 5; /**< [ 5: 1](R/W) RX sub-block powerdown to RX:
+ \<4\> = CTLE.
+ \<3\> = Reserved.
+ \<2\> = Lane DLL.
+ \<1\> = DFE/Samplers.
+ \<0\> = Termination.
+
+ Software needs to clear the termination bit in SATA mode
+ (i.e. when GSER()_CFG[SATA] is set). */
+ uint64_t p2_rx_chpd : 1; /**< [ 0: 0](R/W) RX lane power down. */
+#else /* Word 0 - Little Endian */
+ uint64_t p2_rx_chpd : 1; /**< [ 0: 0](R/W) RX lane power down. */
+ uint64_t p2_rx_subblk_pd : 5; /**< [ 5: 1](R/W) RX sub-block powerdown to RX:
+ \<4\> = CTLE.
+ \<3\> = Reserved.
+ \<2\> = Lane DLL.
+ \<1\> = DFE/Samplers.
+ \<0\> = Termination.
+
+ Software needs to clear the termination bit in SATA mode
+ (i.e. when GSER()_CFG[SATA] is set). */
+ uint64_t p2_rx_cdr_clr : 1; /**< [ 6: 6](R/W) CDR clear; clears the frequency register in the CDR. */
+ uint64_t p2_rx_cdr_coast : 1; /**< [ 7: 7](R/W) CDR coast; freezes the frequency of the CDR. */
+ uint64_t p2_rx_cdr_en : 1; /**< [ 8: 8](R/W) CDR enable. */
+ uint64_t p2_rx_dfe_en : 1; /**< [ 9: 9](R/W) DFE enable. */
+ uint64_t p2_rx_agc_en : 1; /**< [ 10: 10](R/W) AGC enable. */
+ uint64_t p2_rx_pcs_reset : 1; /**< [ 11: 11](R/W) When asserted, the RX Power state machine puts the Raw PCS
+ RX logic in reset state to save power. */
+ uint64_t p2_rx_allow_pll_pd : 1; /**< [ 12: 12](R/W) When asserted, it permits PLL powerdown (PLL is
+ powered down if all other factors permit). */
+ uint64_t p2_rx_resetn : 1; /**< [ 13: 13](R/W) Place the receiver in reset (active low). */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_pwr_ctrl_p2_s cn; */
+};
+typedef union bdk_gserx_rx_pwr_ctrl_p2 bdk_gserx_rx_pwr_ctrl_p2_t;
+
+static inline uint64_t BDK_GSERX_RX_PWR_CTRL_P2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_PWR_CTRL_P2(unsigned long a)
+{
+    /*
+     * Physical address of GSER(a)_RX_PWR_CTRL_P2.  Base offset and per-GSER
+     * stride are model independent; valid index range and mask are not.
+     */
+    const uint64_t base = 0x87e0904600b8ll;
+    const uint64_t step = 0x1000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 3))
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 6))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 13))
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_RX_PWR_CTRL_P2", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata macros for GSERX_RX_PWR_CTRL_P2: C type, bus type, printable
+   name, device BAR, bus number, and the CSR argument tuple. */
+#define typedef_BDK_GSERX_RX_PWR_CTRL_P2(a) bdk_gserx_rx_pwr_ctrl_p2_t
+#define bustype_BDK_GSERX_RX_PWR_CTRL_P2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_PWR_CTRL_P2(a) "GSERX_RX_PWR_CTRL_P2"
+#define device_bar_BDK_GSERX_RX_PWR_CTRL_P2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_PWR_CTRL_P2(a) (a)
+#define arguments_BDK_GSERX_RX_PWR_CTRL_P2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_txdir_ctrl_0
+ *
+ * GSER Far-end TX Direction Control 0 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ *
+ * The #if/#else halves describe the same 64-bit word; only the bit-field
+ * declaration order differs between big- and little-endian builds.
+ */
+union bdk_gserx_rx_txdir_ctrl_0
+{
+ uint64_t u;
+ struct bdk_gserx_rx_txdir_ctrl_0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t rx_boost_hi_thrs : 4; /**< [ 12: 9](R/W/H) The high threshold for RX boost.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir, is set to
+ increment if the local RX boost value from the VMA (after RX-EQ) is
+ higher than this value, and the local RX tap1 value is higher than its
+ high threshold GSER()_RX_TXDIR_CTRL_1[RX_TAP1_HI_THRS].
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is decrement. */
+ uint64_t rx_boost_lo_thrs : 4; /**< [ 8: 5](R/W/H) The low threshold for RX boost.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir, is set to
+ decrement if the local RX boost value from the VMA (after RX-EQ) is
+ lower than this value, and the local RX tap1 value is lower than its
+ low threshold GSER()_RX_TXDIR_CTRL_1[RX_TAP1_LO_THRS].
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is increment. */
+ uint64_t rx_boost_hi_val : 5; /**< [ 4: 0](R/W) The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to increment if the local RX boost value from the VMA (after RX-EQ)
+ equals RX_BOOST_HI_VAL.
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is decrement.
+ To disable the check against RX_BOOST_HI_VAL, assert RX_BOOST_HI_VAL[4]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_boost_hi_val : 5; /**< [ 4: 0](R/W) The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to increment if the local RX boost value from the VMA (after RX-EQ)
+ equals RX_BOOST_HI_VAL.
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is decrement.
+ To disable the check against RX_BOOST_HI_VAL, assert RX_BOOST_HI_VAL[4]. */
+ uint64_t rx_boost_lo_thrs : 4; /**< [ 8: 5](R/W/H) The low threshold for RX boost.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir, is set to
+ decrement if the local RX boost value from the VMA (after RX-EQ) is
+ lower than this value, and the local RX tap1 value is lower than its
+ low threshold GSER()_RX_TXDIR_CTRL_1[RX_TAP1_LO_THRS].
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is increment. */
+ uint64_t rx_boost_hi_thrs : 4; /**< [ 12: 9](R/W/H) The high threshold for RX boost.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir, is set to
+ increment if the local RX boost value from the VMA (after RX-EQ) is
+ higher than this value, and the local RX tap1 value is higher than its
+ high threshold GSER()_RX_TXDIR_CTRL_1[RX_TAP1_HI_THRS].
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is decrement. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_txdir_ctrl_0_s cn; */
+};
+typedef union bdk_gserx_rx_txdir_ctrl_0 bdk_gserx_rx_txdir_ctrl_0_t;
+
+static inline uint64_t BDK_GSERX_RX_TXDIR_CTRL_0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_TXDIR_CTRL_0(unsigned long a)
+{
+    /*
+     * Physical address of GSER(a)_RX_TXDIR_CTRL_0.  Base offset and per-GSER
+     * stride are model independent; valid index range and mask are not.
+     */
+    const uint64_t base = 0x87e0904600e8ll;
+    const uint64_t step = 0x1000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 3))
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 6))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 13))
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_RX_TXDIR_CTRL_0", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata macros for GSERX_RX_TXDIR_CTRL_0: C type, bus type, printable
+   name, device BAR, bus number, and the CSR argument tuple. */
+#define typedef_BDK_GSERX_RX_TXDIR_CTRL_0(a) bdk_gserx_rx_txdir_ctrl_0_t
+#define bustype_BDK_GSERX_RX_TXDIR_CTRL_0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_TXDIR_CTRL_0(a) "GSERX_RX_TXDIR_CTRL_0"
+#define device_bar_BDK_GSERX_RX_TXDIR_CTRL_0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_TXDIR_CTRL_0(a) (a)
+#define arguments_BDK_GSERX_RX_TXDIR_CTRL_0(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_txdir_ctrl_1
+ *
+ * GSER Far-end TX Direction Control 1 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ *
+ * The cn88xx variant differs from the generic layout only in comment wording;
+ * the bit layout is identical.  Each #if/#else pair describes the same 64-bit
+ * word for big/little endian.
+ */
+union bdk_gserx_rx_txdir_ctrl_1
+{
+ uint64_t u;
+ struct bdk_gserx_rx_txdir_ctrl_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t rx_precorr_chg_dir : 1; /**< [ 11: 11](R/W/H) When asserted, the default direction output for the far-end TX Pre is reversed. */
+ uint64_t rx_tap1_chg_dir : 1; /**< [ 10: 10](R/W/H) When asserted, the default direction output for the far-end TX Post is reversed. */
+ uint64_t rx_tap1_hi_thrs : 5; /**< [ 9: 5](R/W) The high threshold for the local RX Tap1 count.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to increment if the local RX tap1 value from the VMA (after RX-EQ)
+ is higher than this value, and the local RX boost value is higher than
+ its high threshold GSER()_RX_TXDIR_CTRL_0[RX_BOOST_HI_THRS].
+ Note that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is decrement. */
+ uint64_t rx_tap1_lo_thrs : 5; /**< [ 4: 0](R/W) The low threshold for the local RX Tap1 count.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to decrement if the local RX tap1 value from the VMA (after RX-EQ)
+ is lower than this value, and the local RX boost value is lower than
+ its low threshold GSER()_RX_TXDIR_CTRL_0[RX_BOOST_LO_THRS].
+ Note that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is increment. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_tap1_lo_thrs : 5; /**< [ 4: 0](R/W) The low threshold for the local RX Tap1 count.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to decrement if the local RX tap1 value from the VMA (after RX-EQ)
+ is lower than this value, and the local RX boost value is lower than
+ its low threshold GSER()_RX_TXDIR_CTRL_0[RX_BOOST_LO_THRS].
+ Note that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is increment. */
+ uint64_t rx_tap1_hi_thrs : 5; /**< [ 9: 5](R/W) The high threshold for the local RX Tap1 count.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to increment if the local RX tap1 value from the VMA (after RX-EQ)
+ is higher than this value, and the local RX boost value is higher than
+ its high threshold GSER()_RX_TXDIR_CTRL_0[RX_BOOST_HI_THRS].
+ Note that if GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is decrement. */
+ uint64_t rx_tap1_chg_dir : 1; /**< [ 10: 10](R/W/H) When asserted, the default direction output for the far-end TX Post is reversed. */
+ uint64_t rx_precorr_chg_dir : 1; /**< [ 11: 11](R/W/H) When asserted, the default direction output for the far-end TX Pre is reversed. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_txdir_ctrl_1_s cn81xx; */
+ struct bdk_gserx_rx_txdir_ctrl_1_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t rx_precorr_chg_dir : 1; /**< [ 11: 11](R/W/H) When asserted, the default direction output for the far-end TX Pre is reversed. */
+ uint64_t rx_tap1_chg_dir : 1; /**< [ 10: 10](R/W/H) When asserted, the default direction output for the far-end TX Post is reversed. */
+ uint64_t rx_tap1_hi_thrs : 5; /**< [ 9: 5](R/W) The high threshold for the local RX Tap1 count.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to increment if the local RX tap1 value from the VMA (after RX-EQ)
+ is higher than this value, and the local RX boost value is higher than
+ its high threshold GSER()_RX_TXDIR_CTRL_0[RX_BOOST_HI_THRS].
+ If GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is decrement. */
+ uint64_t rx_tap1_lo_thrs : 5; /**< [ 4: 0](R/W) The low threshold for the local RX Tap1 count.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to decrement if the local RX tap1 value from the VMA (after RX-EQ)
+ is lower than this value, and the local RX boost value is lower than
+ its low threshold GSER()_RX_TXDIR_CTRL_0[RX_BOOST_LO_THRS].
+ If GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is increment. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_tap1_lo_thrs : 5; /**< [ 4: 0](R/W) The low threshold for the local RX Tap1 count.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to decrement if the local RX tap1 value from the VMA (after RX-EQ)
+ is lower than this value, and the local RX boost value is lower than
+ its low threshold GSER()_RX_TXDIR_CTRL_0[RX_BOOST_LO_THRS].
+ If GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is increment. */
+ uint64_t rx_tap1_hi_thrs : 5; /**< [ 9: 5](R/W) The high threshold for the local RX Tap1 count.
+ The far-end TX POST direction output, pcs_mac_rx_txpost_dir,
+ is set to increment if the local RX tap1 value from the VMA (after RX-EQ)
+ is higher than this value, and the local RX boost value is higher than
+ its high threshold GSER()_RX_TXDIR_CTRL_0[RX_BOOST_HI_THRS].
+ If GSER()_RX_TXDIR_CTRL_1[RX_TAP1_CHG_DIR]=1 then
+ the direction is decrement. */
+ uint64_t rx_tap1_chg_dir : 1; /**< [ 10: 10](R/W/H) When asserted, the default direction output for the far-end TX Post is reversed. */
+ uint64_t rx_precorr_chg_dir : 1; /**< [ 11: 11](R/W/H) When asserted, the default direction output for the far-end TX Pre is reversed. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gserx_rx_txdir_ctrl_1_s cn83xx; */
+};
+typedef union bdk_gserx_rx_txdir_ctrl_1 bdk_gserx_rx_txdir_ctrl_1_t;
+
+static inline uint64_t BDK_GSERX_RX_TXDIR_CTRL_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_TXDIR_CTRL_1(unsigned long a)
+{
+    /*
+     * Physical address of GSER(a)_RX_TXDIR_CTRL_1.  Base offset and per-GSER
+     * stride are model independent; valid index range and mask are not.
+     */
+    const uint64_t base = 0x87e0904600f0ll;
+    const uint64_t step = 0x1000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 3))
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 6))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 13))
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_RX_TXDIR_CTRL_1", 1, a, 0, 0, 0);
+}
+
+/* CSR metadata macros for GSERX_RX_TXDIR_CTRL_1: C type, bus type, printable
+   name, device BAR, bus number, and the CSR argument tuple. */
+#define typedef_BDK_GSERX_RX_TXDIR_CTRL_1(a) bdk_gserx_rx_txdir_ctrl_1_t
+#define bustype_BDK_GSERX_RX_TXDIR_CTRL_1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_TXDIR_CTRL_1(a) "GSERX_RX_TXDIR_CTRL_1"
+#define device_bar_BDK_GSERX_RX_TXDIR_CTRL_1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_TXDIR_CTRL_1(a) (a)
+#define arguments_BDK_GSERX_RX_TXDIR_CTRL_1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_rx_txdir_ctrl_2
+ *
+ * GSER Far-end TX Direction Control 2 Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ *
+ * NOTE(review): the [RX_PRECORR_HI_THRS] description below says "lower than
+ * this value" for a HIGH threshold, which does not match the hi/lo wording
+ * pattern used in GSER()_RX_TXDIR_CTRL_0 -- confirm against the hardware
+ * reference manual before relying on it.
+ */
+union bdk_gserx_rx_txdir_ctrl_2
+{
+ uint64_t u;
+ struct bdk_gserx_rx_txdir_ctrl_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t rx_precorr_hi_thrs : 8; /**< [ 15: 8](R/W/H) High threshold for RX precursor correlation count.
+ The far-end TX PRE direction output, pcs_mac_rx_txpre_dir, is set to
+ decrement if the local RX precursor correlation count from the VMA (after RX-EQ)
+ is lower than this value.
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_PRECORR_CHG_DIR]=1 then
+ the direction is increment. */
+ uint64_t rx_precorr_lo_thrs : 8; /**< [ 7: 0](R/W/H) Low threshold for RX precursor correlation count.
+ The far-end TX PRE direction output, pcs_mac_rx_txpre_dir, is set to
+ increment if the local RX precursor correlation count from the VMA (after RX-EQ)
+ is lower than this value.
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_PRECORR_CHG_DIR]=1 then
+ the direction is decrement. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_precorr_lo_thrs : 8; /**< [ 7: 0](R/W/H) Low threshold for RX precursor correlation count.
+ The far-end TX PRE direction output, pcs_mac_rx_txpre_dir, is set to
+ increment if the local RX precursor correlation count from the VMA (after RX-EQ)
+ is lower than this value.
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_PRECORR_CHG_DIR]=1 then
+ the direction is decrement. */
+ uint64_t rx_precorr_hi_thrs : 8; /**< [ 15: 8](R/W/H) High threshold for RX precursor correlation count.
+ The far-end TX PRE direction output, pcs_mac_rx_txpre_dir, is set to
+ decrement if the local RX precursor correlation count from the VMA (after RX-EQ)
+ is lower than this value.
+ Note, that if GSER()_RX_TXDIR_CTRL_1[RX_PRECORR_CHG_DIR]=1 then
+ the direction is increment. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_rx_txdir_ctrl_2_s cn; */
+};
+typedef union bdk_gserx_rx_txdir_ctrl_2 bdk_gserx_rx_txdir_ctrl_2_t;
+
+static inline uint64_t BDK_GSERX_RX_TXDIR_CTRL_2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_RX_TXDIR_CTRL_2(unsigned long a)
+{
+    /* CSR address: fixed base plus one 16 MB region per GSER block.
+       The valid index range and wrap mask depend on the chip model. */
+    const uint64_t base = 0x87e0904600f8ll;
+    const uint64_t step = 0x1000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 3)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 6)
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 13)
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_RX_TXDIR_CTRL_2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_RX_TXDIR_CTRL_2(a) bdk_gserx_rx_txdir_ctrl_2_t
+#define bustype_BDK_GSERX_RX_TXDIR_CTRL_2(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_RX_TXDIR_CTRL_2(a) "GSERX_RX_TXDIR_CTRL_2"
+#define device_bar_BDK_GSERX_RX_TXDIR_CTRL_2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_RX_TXDIR_CTRL_2(a) (a)
+#define arguments_BDK_GSERX_RX_TXDIR_CTRL_2(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_sata_lane#_tx_amp#
+ *
+ * GSER SATA Lane Transmit Amplitude Gen Register
+ * SATA lane TX launch amplitude at Gen 1, 2 and 3 speeds.
+ * * AMP(0) is for Gen1.
+ * * AMP(1) is for Gen2.
+ * * AMP(2) is for Gen3.
+ *
+ * Per-model variants below restrict which GSER blocks/lanes actually use
+ * this register; the field layout itself is identical in all variants.
+ */
+union bdk_gserx_sata_lanex_tx_ampx
+{
+    uint64_t u;
+    struct bdk_gserx_sata_lanex_tx_ampx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t tx_amp                : 7;  /**< [  6:  0](R/W) This status value sets the TX driver launch amplitude in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_amp                : 7;  /**< [  6:  0](R/W) This status value sets the TX driver launch amplitude in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance. */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gserx_sata_lanex_tx_ampx_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t tx_amp                : 7;  /**< [  6:  0](R/W) This status value sets the TX driver launch amplitude in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_amp                : 7;  /**< [  6:  0](R/W) This status value sets the TX driver launch amplitude in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_gserx_sata_lanex_tx_ampx_s cn88xx; */
+    struct bdk_gserx_sata_lanex_tx_ampx_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t tx_amp                : 7;  /**< [  6:  0](R/W) This status value sets the TX driver launch amplitude in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance.
+                                                                 This register is used for SATA lanes only GSER(4..6).
+
+                                                                 Only SATA lanes 0 and 1 are used. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_amp                : 7;  /**< [  6:  0](R/W) This status value sets the TX driver launch amplitude in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance.
+                                                                 This register is used for SATA lanes only GSER(4..6).
+
+                                                                 Only SATA lanes 0 and 1 are used. */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_gserx_sata_lanex_tx_ampx bdk_gserx_sata_lanex_tx_ampx_t;
+
+static inline uint64_t BDK_GSERX_SATA_LANEX_TX_AMPX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SATA_LANEX_TX_AMPX(unsigned long a, unsigned long b, unsigned long c)
+{
+    /* Address = base + 16 MB per GSER block (a) + 0x20 per lane (b) + 8 per gen (c).
+       Valid index ranges depend on the chip model. */
+    const uint64_t base = 0x87e090000b00ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 3 && b <= 1 && c <= 2)
+        return base + 0x1000000ll * (a & 0x3) + 0x20ll * (b & 0x1) + 8ll * (c & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 6 && b <= 3 && c <= 2)
+        return base + 0x1000000ll * (a & 0x7) + 0x20ll * (b & 0x3) + 8ll * (c & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 13 && b <= 3 && c <= 2)
+        return base + 0x1000000ll * (a & 0xf) + 0x20ll * (b & 0x3) + 8ll * (c & 0x3);
+    __bdk_csr_fatal("GSERX_SATA_LANEX_TX_AMPX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_GSERX_SATA_LANEX_TX_AMPX(a,b,c) bdk_gserx_sata_lanex_tx_ampx_t
+#define bustype_BDK_GSERX_SATA_LANEX_TX_AMPX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SATA_LANEX_TX_AMPX(a,b,c) "GSERX_SATA_LANEX_TX_AMPX"
+#define device_bar_BDK_GSERX_SATA_LANEX_TX_AMPX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SATA_LANEX_TX_AMPX(a,b,c) (a)
+#define arguments_BDK_GSERX_SATA_LANEX_TX_AMPX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) gser#_sata_lane#_tx_preemph#
+ *
+ * GSER SATA Lane Transmit Preemphasis Gen Register
+ * SATA TX preemphasis at Gen 1, 2 and 3 speeds. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ * * PREEMPH(0) is for Gen1.
+ * * PREEMPH(1) is for Gen2.
+ * * PREEMPH(2) is for Gen3.
+ *
+ * Per-model variants below restrict which GSER blocks/lanes actually use
+ * this register; the field layout itself is identical in all variants.
+ */
+union bdk_gserx_sata_lanex_tx_preemphx
+{
+    uint64_t u;
+    struct bdk_gserx_sata_lanex_tx_preemphx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t tx_preemph            : 7;  /**< [  6:  0](R/W/H) This static value sets the TX driver deemphasis value in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_preemph            : 7;  /**< [  6:  0](R/W/H) This static value sets the TX driver deemphasis value in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance. */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gserx_sata_lanex_tx_preemphx_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t tx_preemph            : 7;  /**< [  6:  0](R/W/H) This static value sets the TX driver deemphasis value in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_preemph            : 7;  /**< [  6:  0](R/W/H) This static value sets the TX driver deemphasis value in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_gserx_sata_lanex_tx_preemphx_s cn88xx; */
+    struct bdk_gserx_sata_lanex_tx_preemphx_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t tx_preemph            : 7;  /**< [  6:  0](R/W/H) This static value sets the TX driver deemphasis value in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance.
+
+                                                                 This register is used for SATA lanes only GSER(4..6).
+                                                                 Only SATA lanes 0 and 1 are used. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_preemph            : 7;  /**< [  6:  0](R/W/H) This static value sets the TX driver deemphasis value in the
+                                                                 case where the PHY is running at the Gen1, Gen2, and Gen3
+                                                                 rates. Used for tuning at the board level for RX eye compliance.
+
+                                                                 This register is used for SATA lanes only GSER(4..6).
+                                                                 Only SATA lanes 0 and 1 are used. */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_gserx_sata_lanex_tx_preemphx bdk_gserx_sata_lanex_tx_preemphx_t;
+
+static inline uint64_t BDK_GSERX_SATA_LANEX_TX_PREEMPHX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SATA_LANEX_TX_PREEMPHX(unsigned long a, unsigned long b, unsigned long c)
+{
+    /* Address = base + 16 MB per GSER block (a) + 0x20 per lane (b) + 8 per gen (c).
+       Valid index ranges depend on the chip model. */
+    const uint64_t base = 0x87e090000a00ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 3 && b <= 1 && c <= 2)
+        return base + 0x1000000ll * (a & 0x3) + 0x20ll * (b & 0x1) + 8ll * (c & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 6 && b <= 3 && c <= 2)
+        return base + 0x1000000ll * (a & 0x7) + 0x20ll * (b & 0x3) + 8ll * (c & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 13 && b <= 3 && c <= 2)
+        return base + 0x1000000ll * (a & 0xf) + 0x20ll * (b & 0x3) + 8ll * (c & 0x3);
+    __bdk_csr_fatal("GSERX_SATA_LANEX_TX_PREEMPHX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_GSERX_SATA_LANEX_TX_PREEMPHX(a,b,c) bdk_gserx_sata_lanex_tx_preemphx_t
+#define bustype_BDK_GSERX_SATA_LANEX_TX_PREEMPHX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SATA_LANEX_TX_PREEMPHX(a,b,c) "GSERX_SATA_LANEX_TX_PREEMPHX"
+#define device_bar_BDK_GSERX_SATA_LANEX_TX_PREEMPHX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SATA_LANEX_TX_PREEMPHX(a,b,c) (a)
+#define arguments_BDK_GSERX_SATA_LANEX_TX_PREEMPHX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) gser#_sata_lane_rst
+ *
+ * GSER SATA Lane Reset Register
+ * Lane Reset Control.
+ *
+ * One independent reset bit per lane. On CN81XX/CN83XX only lanes 0-1
+ * are implemented (lane 2/3 bits read-only reserved); CN88XX uses the
+ * full four-lane layout.
+ */
+union bdk_gserx_sata_lane_rst
+{
+    uint64_t u;
+    struct bdk_gserx_sata_lane_rst_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t l3_rst                : 1;  /**< [  3:  3](R/W) Independent reset for Lane 3. */
+        uint64_t l2_rst                : 1;  /**< [  2:  2](R/W) Independent reset for Lane 2. */
+        uint64_t l1_rst                : 1;  /**< [  1:  1](R/W) Independent reset for Lane 1. */
+        uint64_t l0_rst                : 1;  /**< [  0:  0](R/W) Independent reset for Lane 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t l0_rst                : 1;  /**< [  0:  0](R/W) Independent reset for Lane 0. */
+        uint64_t l1_rst                : 1;  /**< [  1:  1](R/W) Independent reset for Lane 1. */
+        uint64_t l2_rst                : 1;  /**< [  2:  2](R/W) Independent reset for Lane 2. */
+        uint64_t l3_rst                : 1;  /**< [  3:  3](R/W) Independent reset for Lane 3. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gserx_sata_lane_rst_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t l3_rst                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t l2_rst                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t l1_rst                : 1;  /**< [  1:  1](R/W) Independent reset for lane 1.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t l0_rst                : 1;  /**< [  0:  0](R/W) Independent reset for lane 0.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+#else /* Word 0 - Little Endian */
+        uint64_t l0_rst                : 1;  /**< [  0:  0](R/W) Independent reset for lane 0.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t l1_rst                : 1;  /**< [  1:  1](R/W) Independent reset for lane 1.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t l2_rst                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t l3_rst                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_gserx_sata_lane_rst_s cn88xx; */
+    struct bdk_gserx_sata_lane_rst_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t l3_rst                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t l2_rst                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t l1_rst                : 1;  /**< [  1:  1](R/W) Independent reset for lane 1.
+                                                                 This register is used for SATA lanes only for GSER(4..6). */
+        uint64_t l0_rst                : 1;  /**< [  0:  0](R/W) Independent reset for lane 0.
+                                                                 This register is used for SATA lanes only for GSER(4..6). */
+#else /* Word 0 - Little Endian */
+        uint64_t l0_rst                : 1;  /**< [  0:  0](R/W) Independent reset for lane 0.
+                                                                 This register is used for SATA lanes only for GSER(4..6). */
+        uint64_t l1_rst                : 1;  /**< [  1:  1](R/W) Independent reset for lane 1.
+                                                                 This register is used for SATA lanes only for GSER(4..6). */
+        uint64_t l2_rst                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t l3_rst                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_gserx_sata_lane_rst bdk_gserx_sata_lane_rst_t;
+
+static inline uint64_t BDK_GSERX_SATA_LANE_RST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SATA_LANE_RST(unsigned long a)
+{
+    /* CSR address: fixed base plus one 16 MB region per GSER block.
+       The valid index range and wrap mask depend on the chip model. */
+    const uint64_t base = 0x87e090000908ll;
+    const uint64_t step = 0x1000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 3)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 6)
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 13)
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_SATA_LANE_RST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_SATA_LANE_RST(a) bdk_gserx_sata_lane_rst_t
+#define bustype_BDK_GSERX_SATA_LANE_RST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SATA_LANE_RST(a) "GSERX_SATA_LANE_RST"
+#define device_bar_BDK_GSERX_SATA_LANE_RST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SATA_LANE_RST(a) (a)
+#define arguments_BDK_GSERX_SATA_LANE_RST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_sata_status
+ *
+ * GSER SATA Status Register
+ * SATA PHY Ready Status.
+ *
+ * One read-only "PHY ready" bit per lane. On CN81XX/CN83XX only lanes
+ * 0-1 are implemented (lane 2/3 bits reserved); CN88XX uses the full
+ * four-lane layout.
+ */
+union bdk_gserx_sata_status
+{
+    uint64_t u;
+    struct bdk_gserx_sata_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t p3_rdy                : 1;  /**< [  3:  3](RO/H) PHY Lane 3 is ready to send and receive data. */
+        uint64_t p2_rdy                : 1;  /**< [  2:  2](RO/H) PHY Lane 2 is ready to send and receive data. */
+        uint64_t p1_rdy                : 1;  /**< [  1:  1](RO/H) PHY Lane 1 is ready to send and receive data. */
+        uint64_t p0_rdy                : 1;  /**< [  0:  0](RO/H) PHY Lane 0 is ready to send and receive data. */
+#else /* Word 0 - Little Endian */
+        uint64_t p0_rdy                : 1;  /**< [  0:  0](RO/H) PHY Lane 0 is ready to send and receive data. */
+        uint64_t p1_rdy                : 1;  /**< [  1:  1](RO/H) PHY Lane 1 is ready to send and receive data. */
+        uint64_t p2_rdy                : 1;  /**< [  2:  2](RO/H) PHY Lane 2 is ready to send and receive data. */
+        uint64_t p3_rdy                : 1;  /**< [  3:  3](RO/H) PHY Lane 3 is ready to send and receive data. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gserx_sata_status_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t p3_rdy                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t p2_rdy                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t p1_rdy                : 1;  /**< [  1:  1](RO/H) PHY lane 1 is ready to send and receive data.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t p0_rdy                : 1;  /**< [  0:  0](RO/H) PHY lane 0 is ready to send and receive data.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+#else /* Word 0 - Little Endian */
+        uint64_t p0_rdy                : 1;  /**< [  0:  0](RO/H) PHY lane 0 is ready to send and receive data.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t p1_rdy                : 1;  /**< [  1:  1](RO/H) PHY lane 1 is ready to send and receive data.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t p2_rdy                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t p3_rdy                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_gserx_sata_status_s cn88xx; */
+    struct bdk_gserx_sata_status_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t p3_rdy                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t p2_rdy                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t p1_rdy                : 1;  /**< [  1:  1](RO/H) PHY lane 1 is ready to send and receive data.
+                                                                 This register is used for SATA lanes only GSER(4..6). */
+        uint64_t p0_rdy                : 1;  /**< [  0:  0](RO/H) PHY lane 0 is ready to send and receive data.
+                                                                 This register is used for SATA lanes only GSER(4..6). */
+#else /* Word 0 - Little Endian */
+        uint64_t p0_rdy                : 1;  /**< [  0:  0](RO/H) PHY lane 0 is ready to send and receive data.
+                                                                 This register is used for SATA lanes only GSER(4..6). */
+        uint64_t p1_rdy                : 1;  /**< [  1:  1](RO/H) PHY lane 1 is ready to send and receive data.
+                                                                 This register is used for SATA lanes only GSER(4..6). */
+        uint64_t p2_rdy                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t p3_rdy                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_gserx_sata_status bdk_gserx_sata_status_t;
+
+static inline uint64_t BDK_GSERX_SATA_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SATA_STATUS(unsigned long a)
+{
+    /* CSR address: fixed base plus one 16 MB region per GSER block.
+       The valid index range and wrap mask depend on the chip model. */
+    const uint64_t base = 0x87e090100900ll;
+    const uint64_t step = 0x1000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 3)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 6)
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 13)
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_SATA_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_SATA_STATUS(a) bdk_gserx_sata_status_t
+#define bustype_BDK_GSERX_SATA_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SATA_STATUS(a) "GSERX_SATA_STATUS"
+#define device_bar_BDK_GSERX_SATA_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SATA_STATUS(a) (a)
+#define arguments_BDK_GSERX_SATA_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_sata_tx_invert
+ *
+ * GSER SATA TX Invert Register
+ * Per-lane TX polarity inversion control.  (The original header said
+ * "Lane Reset Control", which appears to be a copy-paste from
+ * GSER()_SATA_LANE_RST; the fields below control polarity inversion.)
+ *
+ * On CN81XX/CN83XX only lanes 0-1 are implemented (lane 2/3 bits
+ * read-only reserved); CN88XX uses the full four-lane layout.
+ */
+union bdk_gserx_sata_tx_invert
+{
+    uint64_t u;
+    struct bdk_gserx_sata_tx_invert_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t l3_inv                : 1;  /**< [  3:  3](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 3 transmitted data. */
+        uint64_t l2_inv                : 1;  /**< [  2:  2](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 2 transmitted data. */
+        uint64_t l1_inv                : 1;  /**< [  1:  1](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 1 transmitted data. */
+        uint64_t l0_inv                : 1;  /**< [  0:  0](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 0 transmitted data. */
+#else /* Word 0 - Little Endian */
+        uint64_t l0_inv                : 1;  /**< [  0:  0](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 0 transmitted data. */
+        uint64_t l1_inv                : 1;  /**< [  1:  1](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 1 transmitted data. */
+        uint64_t l2_inv                : 1;  /**< [  2:  2](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 2 transmitted data. */
+        uint64_t l3_inv                : 1;  /**< [  3:  3](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 3 transmitted data. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_gserx_sata_tx_invert_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t l3_inv                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t l2_inv                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t l1_inv                : 1;  /**< [  1:  1](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 1 transmitted data.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t l0_inv                : 1;  /**< [  0:  0](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 0 transmitted data.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+#else /* Word 0 - Little Endian */
+        uint64_t l0_inv                : 1;  /**< [  0:  0](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 0 transmitted data.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t l1_inv                : 1;  /**< [  1:  1](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 1 transmitted data.
+                                                                 This register is used for SATA lanes only for GSER(3). */
+        uint64_t l2_inv                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t l3_inv                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_gserx_sata_tx_invert_s cn88xx; */
+    struct bdk_gserx_sata_tx_invert_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t l3_inv                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t l2_inv                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t l1_inv                : 1;  /**< [  1:  1](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 1 transmitted data.
+                                                                 This register is used for SATA lanes only for GSER(4..6). */
+        uint64_t l0_inv                : 1;  /**< [  0:  0](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 0 transmitted data.
+                                                                 This register is used for SATA lanes only for GSER(4..6). */
+#else /* Word 0 - Little Endian */
+        uint64_t l0_inv                : 1;  /**< [  0:  0](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 0 transmitted data.
+                                                                 This register is used for SATA lanes only for GSER(4..6). */
+        uint64_t l1_inv                : 1;  /**< [  1:  1](R/W) Instructs the SATA PCS to perform a polarity inversion on the
+                                                                 lane 1 transmitted data.
+                                                                 This register is used for SATA lanes only for GSER(4..6). */
+        uint64_t l2_inv                : 1;  /**< [  2:  2](RO/H) Reserved. */
+        uint64_t l3_inv                : 1;  /**< [  3:  3](RO/H) Reserved. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_gserx_sata_tx_invert bdk_gserx_sata_tx_invert_t;
+
+static inline uint64_t BDK_GSERX_SATA_TX_INVERT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SATA_TX_INVERT(unsigned long a)
+{
+    /* CSR address: fixed base plus one 16 MB region per GSER block.
+       The valid index range and wrap mask depend on the chip model. */
+    const uint64_t base = 0x87e090000910ll;
+    const uint64_t step = 0x1000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 3)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 6)
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 13)
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_SATA_TX_INVERT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_SATA_TX_INVERT(a) bdk_gserx_sata_tx_invert_t
+#define bustype_BDK_GSERX_SATA_TX_INVERT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SATA_TX_INVERT(a) "GSERX_SATA_TX_INVERT"
+#define device_bar_BDK_GSERX_SATA_TX_INVERT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SATA_TX_INVERT(a) (a)
+#define arguments_BDK_GSERX_SATA_TX_INVERT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_scratch
+ *
+ * GSER General Purpose Scratch Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ *
+ * 16-bit software scratch value with no hardware side effects.
+ */
+union bdk_gserx_scratch
+{
+    uint64_t u;
+    struct bdk_gserx_scratch_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t scratch               : 16; /**< [ 15:  0](R/W) General purpose scratch register. */
+#else /* Word 0 - Little Endian */
+        uint64_t scratch               : 16; /**< [ 15:  0](R/W) General purpose scratch register. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_scratch_s cn; */
+};
+typedef union bdk_gserx_scratch bdk_gserx_scratch_t;
+
+static inline uint64_t BDK_GSERX_SCRATCH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SCRATCH(unsigned long a)
+{
+    /* CSR address: fixed base plus one 16 MB region per GSER block.
+       The valid index range and wrap mask depend on the chip model. */
+    const uint64_t base = 0x87e090000020ll;
+    const uint64_t step = 0x1000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 3)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 6)
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 13)
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("GSERX_SCRATCH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_SCRATCH(a) bdk_gserx_scratch_t
+#define bustype_BDK_GSERX_SCRATCH(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SCRATCH(a) "GSERX_SCRATCH"
+#define device_bar_BDK_GSERX_SCRATCH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SCRATCH(a) (a)
+#define arguments_BDK_GSERX_SCRATCH(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_cei_6g_sr_mode
+ *
+ * GSER Slice CEI_6G_SR MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ *
+ * Per-slice analog tuning for CEI_6G_SR operation: DLL charge-pump
+ * currents, delay-line and phase-interpolator bandwidth selects, and a
+ * per-lane div33 clock enable.
+ */
+union bdk_gserx_slicex_cei_6g_sr_mode
+{
+    uint64_t u;
+    struct bdk_gserx_slicex_cei_6g_sr_mode_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10:  9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8:  6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5:  3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2:  0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2:  0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5:  3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8:  6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10:  9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_slicex_cei_6g_sr_mode_s cn; */
+};
+typedef union bdk_gserx_slicex_cei_6g_sr_mode bdk_gserx_slicex_cei_6g_sr_mode_t;
+
+static inline uint64_t BDK_GSERX_SLICEX_CEI_6G_SR_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_CEI_6G_SR_MODE(unsigned long a, unsigned long b)
+{
+    /* Address = base + 16 MB per GSER block (a) + 0x200000 per slice (b).
+       The valid GSER index range depends on the chip model; all models
+       have two slices. */
+    const uint64_t base = 0x87e090460268ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 3 && b <= 1)
+        return base + 0x1000000ll * (a & 0x3) + 0x200000ll * (b & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 6 && b <= 1)
+        return base + 0x1000000ll * (a & 0x7) + 0x200000ll * (b & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 13 && b <= 1)
+        return base + 0x1000000ll * (a & 0xf) + 0x200000ll * (b & 0x1);
+    __bdk_csr_fatal("GSERX_SLICEX_CEI_6G_SR_MODE", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_SLICEX_CEI_6G_SR_MODE(a,b) bdk_gserx_slicex_cei_6g_sr_mode_t
+#define bustype_BDK_GSERX_SLICEX_CEI_6G_SR_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_CEI_6G_SR_MODE(a,b) "GSERX_SLICEX_CEI_6G_SR_MODE"
+#define device_bar_BDK_GSERX_SLICEX_CEI_6G_SR_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_CEI_6G_SR_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_CEI_6G_SR_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_kr_mode
+ *
+ * GSER Slice KR MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ *
+ * Per-slice analog tuning for KR operation; same field layout as
+ * GSER()_SLICE()_CEI_6G_SR_MODE.
+ */
+union bdk_gserx_slicex_kr_mode
+{
+    uint64_t u;
+    struct bdk_gserx_slicex_kr_mode_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10:  9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8:  6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5:  3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2:  0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2:  0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5:  3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8:  6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10:  9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_slicex_kr_mode_s cn; */
+};
+typedef union bdk_gserx_slicex_kr_mode bdk_gserx_slicex_kr_mode_t;
+
+static inline uint64_t BDK_GSERX_SLICEX_KR_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_KR_MODE(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090460250ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+ return 0x87e090460250ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+ return 0x87e090460250ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+ __bdk_csr_fatal("GSERX_SLICEX_KR_MODE", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERX_SLICEX_KR_MODE(a,b) bdk_gserx_slicex_kr_mode_t
+#define bustype_BDK_GSERX_SLICEX_KR_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_KR_MODE(a,b) "GSERX_SLICEX_KR_MODE"
+#define device_bar_BDK_GSERX_SLICEX_KR_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_KR_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_KR_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_kx4_mode
+ *
+ * GSER Slice KX4 MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_kx4_mode
+{
+    uint64_t u;
+    struct bdk_gserx_slicex_kx4_mode_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_slicex_kx4_mode_s cn; */
+};
+typedef union bdk_gserx_slicex_kx4_mode bdk_gserx_slicex_kx4_mode_t;
+
+/*
+ * Compute the physical CSR address of GSER(a)_SLICE(b)_KX4_MODE for the
+ * running chip model.  The valid (a,b) index ranges differ per model;
+ * an unsupported (model, a, b) combination is reported through
+ * __bdk_csr_fatal() (no value is returned on that path).
+ */
+static inline uint64_t BDK_GSERX_SLICEX_KX4_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_KX4_MODE(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090460248ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+        return 0x87e090460248ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+        return 0x87e090460248ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+    __bdk_csr_fatal("GSERX_SLICEX_KX4_MODE", 2, a, b, 0, 0);
+}
+
+/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERX_SLICEX_KX4_MODE(a,b) bdk_gserx_slicex_kx4_mode_t
+#define bustype_BDK_GSERX_SLICEX_KX4_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_KX4_MODE(a,b) "GSERX_SLICEX_KX4_MODE"
+#define device_bar_BDK_GSERX_SLICEX_KX4_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_KX4_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_KX4_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_kx_mode
+ *
+ * GSER Slice KX MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_kx_mode
+{
+    uint64_t u;
+    struct bdk_gserx_slicex_kx_mode_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_slicex_kx_mode_s cn; */
+};
+typedef union bdk_gserx_slicex_kx_mode bdk_gserx_slicex_kx_mode_t;
+
+/*
+ * Compute the physical CSR address of GSER(a)_SLICE(b)_KX_MODE for the
+ * running chip model.  The valid (a,b) index ranges differ per model;
+ * an unsupported (model, a, b) combination is reported through
+ * __bdk_csr_fatal() (no value is returned on that path).
+ */
+static inline uint64_t BDK_GSERX_SLICEX_KX_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_KX_MODE(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090460240ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+        return 0x87e090460240ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+        return 0x87e090460240ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+    __bdk_csr_fatal("GSERX_SLICEX_KX_MODE", 2, a, b, 0, 0);
+}
+
+/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERX_SLICEX_KX_MODE(a,b) bdk_gserx_slicex_kx_mode_t
+#define bustype_BDK_GSERX_SLICEX_KX_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_KX_MODE(a,b) "GSERX_SLICEX_KX_MODE"
+#define device_bar_BDK_GSERX_SLICEX_KX_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_KX_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_KX_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_pcie1_mode
+ *
+ * GSER Slice PCIE1 MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_pcie1_mode
+{
+    uint64_t u;
+    struct bdk_gserx_slicex_pcie1_mode_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved.
+
+                                                                 In SATA Mode program RX_SDLL_BWSEL = 0x1. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved.
+
+                                                                 In SATA Mode program RX_SDLL_BWSEL = 0x1. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_slicex_pcie1_mode_s cn81xx; */
+    /* Model-specific layout: the CN88XX/CN83XX variant omits the SATA
+       programming note on RX_SDLL_BWSEL; the field layout itself is
+       identical to the generic struct above. */
+    struct bdk_gserx_slicex_pcie1_mode_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_gserx_slicex_pcie1_mode_cn88xx cn83xx; */
+};
+typedef union bdk_gserx_slicex_pcie1_mode bdk_gserx_slicex_pcie1_mode_t;
+
+/*
+ * Compute the physical CSR address of GSER(a)_SLICE(b)_PCIE1_MODE for the
+ * running chip model.  The valid (a,b) index ranges differ per model;
+ * an unsupported (model, a, b) combination is reported through
+ * __bdk_csr_fatal() (no value is returned on that path).
+ */
+static inline uint64_t BDK_GSERX_SLICEX_PCIE1_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_PCIE1_MODE(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090460228ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+        return 0x87e090460228ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+        return 0x87e090460228ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+    __bdk_csr_fatal("GSERX_SLICEX_PCIE1_MODE", 2, a, b, 0, 0);
+}
+
+/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERX_SLICEX_PCIE1_MODE(a,b) bdk_gserx_slicex_pcie1_mode_t
+#define bustype_BDK_GSERX_SLICEX_PCIE1_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_PCIE1_MODE(a,b) "GSERX_SLICEX_PCIE1_MODE"
+#define device_bar_BDK_GSERX_SLICEX_PCIE1_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_PCIE1_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_PCIE1_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_pcie2_mode
+ *
+ * GSER Slice PCIE2 MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_pcie2_mode
+{
+    uint64_t u;
+    struct bdk_gserx_slicex_pcie2_mode_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved.
+
+                                                                 In SATA Mode program RX_SDLL_BWSEL = 0x1. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved.
+
+                                                                 In SATA Mode program RX_SDLL_BWSEL = 0x1. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gserx_slicex_pcie2_mode_s cn81xx; */
+    /* struct bdk_gserx_slicex_pcie2_mode_s cn88xx; */
+    /* Model-specific layout: the CN83XX variant omits the SATA
+       programming note on RX_SDLL_BWSEL; the field layout itself is
+       identical to the generic struct above. */
+    struct bdk_gserx_slicex_pcie2_mode_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63        : 49;
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+        uint64_t rx_sdll_bwsel         : 3;  /**< [  2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_ldll_bwsel         : 3;  /**< [  5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+                                                                 based on the PLL clock frequency as follows:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x3 = 4 GHz.
+                                                                 0x5 = 5.15625 GHz.
+                                                                 0x6 = 5.65 GHz.
+                                                                 0x7 = 6.25 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_pi_bwsel           : 3;  /**< [  8: 6](R/W/H) Controls PI different data rates:
+                                                                 0x0 = 2.5 GHz.
+                                                                 0x1 = 3.125 GHz, or SATA mode.
+                                                                 0x6 = 4 GHz.
+                                                                 0x7 = 5.15625 GHz.
+
+                                                                 All other values in this field are reserved. */
+        uint64_t rx_sdll_isel          : 2;  /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t rx_ldll_isel          : 2;  /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+                                                                 0x0 = 500 uA.
+                                                                 0x1 = 1000 uA.
+                                                                 0x2 = 250 uA.
+                                                                 0x3 = 330 uA. */
+        uint64_t slice_spare_1_0       : 2;  /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+                                                                 Bit 13 controls enable for lane 0.
+                                                                 Bit 14 controls enable for lane 1. */
+        uint64_t reserved_15_63        : 49;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_gserx_slicex_pcie2_mode bdk_gserx_slicex_pcie2_mode_t;
+
+/*
+ * Compute the physical CSR address of GSER(a)_SLICE(b)_PCIE2_MODE for the
+ * running chip model.  The valid (a,b) index ranges differ per model;
+ * an unsupported (model, a, b) combination is reported through
+ * __bdk_csr_fatal() (no value is returned on that path).
+ */
+static inline uint64_t BDK_GSERX_SLICEX_PCIE2_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_PCIE2_MODE(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+        return 0x87e090460230ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+        return 0x87e090460230ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+        return 0x87e090460230ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+    __bdk_csr_fatal("GSERX_SLICEX_PCIE2_MODE", 2, a, b, 0, 0);
+}
+
+/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERX_SLICEX_PCIE2_MODE(a,b) bdk_gserx_slicex_pcie2_mode_t
+#define bustype_BDK_GSERX_SLICEX_PCIE2_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_PCIE2_MODE(a,b) "GSERX_SLICEX_PCIE2_MODE"
+#define device_bar_BDK_GSERX_SLICEX_PCIE2_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_PCIE2_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_PCIE2_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_pcie3_mode
+ *
+ * GSER Slice PCIE3 MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_pcie3_mode
+{
+ uint64_t u;
+ struct bdk_gserx_slicex_pcie3_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t slice_spare_1_0 : 2; /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+ Bit 13 controls enable for lane 0.
+ Bit 14 controls enable for lane 1. */
+ uint64_t rx_ldll_isel : 2; /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_sdll_isel : 2; /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_pi_bwsel : 3; /**< [ 8: 6](R/W/H) Controls PI different data rates:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x6 = 4 GHz.
+ 0x7 = 5.15625 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_ldll_bwsel : 3; /**< [ 5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_sdll_bwsel : 3; /**< [ 2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_sdll_bwsel : 3; /**< [ 2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_ldll_bwsel : 3; /**< [ 5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_pi_bwsel : 3; /**< [ 8: 6](R/W/H) Controls PI different data rates:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x6 = 4 GHz.
+ 0x7 = 5.15625 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_sdll_isel : 2; /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_ldll_isel : 2; /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t slice_spare_1_0 : 2; /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+ Bit 13 controls enable for lane 0.
+ Bit 14 controls enable for lane 1. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_slicex_pcie3_mode_s cn81xx; */
+ struct bdk_gserx_slicex_pcie3_mode_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t slice_spare_1_0 : 2; /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+ Bit 13 controls enable for lane 0.
+ Bit 14 controls enable for lane 1. */
+ uint64_t rx_ldll_isel : 2; /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_sdll_isel : 2; /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_pi_bwsel : 3; /**< [ 8: 6](R/W/H) Controls PI different data rates:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x6 = 4 GHz.
+ 0x7 = 5.15625 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_ldll_bwsel : 3; /**< [ 5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_sdll_bwsel : 3; /**< [ 2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved.
+
+ In SATA Mode program RX_SDLL_BWSEL = 0x1. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_sdll_bwsel : 3; /**< [ 2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved.
+
+ In SATA Mode program RX_SDLL_BWSEL = 0x1. */
+ uint64_t rx_ldll_bwsel : 3; /**< [ 5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_pi_bwsel : 3; /**< [ 8: 6](R/W/H) Controls PI different data rates:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz, or SATA mode.
+ 0x6 = 4 GHz.
+ 0x7 = 5.15625 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_sdll_isel : 2; /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_ldll_isel : 2; /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t slice_spare_1_0 : 2; /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+ Bit 13 controls enable for lane 0.
+ Bit 14 controls enable for lane 1. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_gserx_slicex_pcie3_mode_s cn83xx; */
+};
+typedef union bdk_gserx_slicex_pcie3_mode bdk_gserx_slicex_pcie3_mode_t;
+
+/* Return the RSL address of GSER(a)_SLICE(b)_PCIE3_MODE. The accepted
+ (a,b) range and the index mask applied to 'a' differ per chip model;
+ an out-of-range index is reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_SLICEX_PCIE3_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_PCIE3_MODE(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090460238ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+ return 0x87e090460238ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+ return 0x87e090460238ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+ __bdk_csr_fatal("GSERX_SLICEX_PCIE3_MODE", 2, a, b, 0, 0);
+}
+
+/* Companion macros describing this CSR: value type, bus type, printable
+ name, BAR and argument list. */
+#define typedef_BDK_GSERX_SLICEX_PCIE3_MODE(a,b) bdk_gserx_slicex_pcie3_mode_t
+#define bustype_BDK_GSERX_SLICEX_PCIE3_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_PCIE3_MODE(a,b) "GSERX_SLICEX_PCIE3_MODE"
+#define device_bar_BDK_GSERX_SLICEX_PCIE3_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_PCIE3_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_PCIE3_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_qsgmii_mode
+ *
+ * GSER Slice QSGMII MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_qsgmii_mode
+{
+ /* [u] is the whole-register view; [s] names the bit fields, which are
+ declared in opposite order for big- and little-endian builds so the
+ same field names map to the same bits on either byte order. */
+ uint64_t u;
+ struct bdk_gserx_slicex_qsgmii_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t slice_spare_1_0 : 2; /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+ Bit 13 controls enable for lane 0.
+ Bit 14 controls enable for lane 1. */
+ uint64_t rx_ldll_isel : 2; /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_sdll_isel : 2; /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_pi_bwsel : 3; /**< [ 8: 6](R/W/H) Controls PI different data rates:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x6 = 4 GHz.
+ 0x7 = 5.15625 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_ldll_bwsel : 3; /**< [ 5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_sdll_bwsel : 3; /**< [ 2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_sdll_bwsel : 3; /**< [ 2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_ldll_bwsel : 3; /**< [ 5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_pi_bwsel : 3; /**< [ 8: 6](R/W/H) Controls PI different data rates:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x6 = 4 GHz.
+ 0x7 = 5.15625 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_sdll_isel : 2; /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_ldll_isel : 2; /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t slice_spare_1_0 : 2; /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+ Bit 13 controls enable for lane 0.
+ Bit 14 controls enable for lane 1. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_slicex_qsgmii_mode_s cn; */
+};
+typedef union bdk_gserx_slicex_qsgmii_mode bdk_gserx_slicex_qsgmii_mode_t;
+
+/* Return the RSL address of GSER(a)_SLICE(b)_QSGMII_MODE. Per-model
+ bounds check on (a,b); invalid indices trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_SLICEX_QSGMII_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_QSGMII_MODE(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090460260ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+ return 0x87e090460260ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+ return 0x87e090460260ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+ __bdk_csr_fatal("GSERX_SLICEX_QSGMII_MODE", 2, a, b, 0, 0);
+}
+
+/* Companion macros describing this CSR: value type, bus type, printable
+ name, BAR and argument list. */
+#define typedef_BDK_GSERX_SLICEX_QSGMII_MODE(a,b) bdk_gserx_slicex_qsgmii_mode_t
+#define bustype_BDK_GSERX_SLICEX_QSGMII_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_QSGMII_MODE(a,b) "GSERX_SLICEX_QSGMII_MODE"
+#define device_bar_BDK_GSERX_SLICEX_QSGMII_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_QSGMII_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_QSGMII_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_rx_ldll_ctrl
+ *
+ * GSER Slice RX LDLL Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_rx_ldll_ctrl
+{
+ /* [u] is the whole-register view; [s] names the bit fields, declared
+ in opposite order for big- and little-endian builds. */
+ uint64_t u;
+ struct bdk_gserx_slicex_rx_ldll_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t pcs_sds_rx_ldll_tune : 3; /**< [ 7: 5](R/W/H) Tuning bits for the regulator and loop filter.
+ Bit 7 controls the initial value of the regulator output,
+ 0 for 0.9V and 1 for 0.925V.
+ Bits 6:5 are connected to the loop filter, to reduce
+ its corner frequency (for testing purposes).
+
+ This parameter is for debugging purposes and should not
+ be written in normal operation. */
+ uint64_t pcs_sds_rx_ldll_swsel : 4; /**< [ 4: 1](R/W/H) DMON control, selects which signal is passed to the output
+ of DMON:
+ 0x8 = vdda_int
+ 0x4 = pi clock (output of the PI)
+ 0x2 = dllout[1] (second output clock phase, out of 4 phases,
+ of the Lane DLL)
+ 0x1 = dllout[0] (first output clock phase, out of 4 phases,
+ of the Lane DLL). Ensure that
+ GSER()_SLICE_RX_SDLL_CTRL[PCS_SDS_RX_SDLL_SWSEL]=0x0 during
+ this test.
+
+ This parameter is for debugging purposes and should not
+ be written in normal operation. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t pcs_sds_rx_ldll_swsel : 4; /**< [ 4: 1](R/W/H) DMON control, selects which signal is passed to the output
+ of DMON:
+ 0x8 = vdda_int
+ 0x4 = pi clock (output of the PI)
+ 0x2 = dllout[1] (second output clock phase, out of 4 phases,
+ of the Lane DLL)
+ 0x1 = dllout[0] (first output clock phase, out of 4 phases,
+ of the Lane DLL). Ensure that
+ GSER()_SLICE_RX_SDLL_CTRL[PCS_SDS_RX_SDLL_SWSEL]=0x0 during
+ this test.
+
+ This parameter is for debugging purposes and should not
+ be written in normal operation. */
+ uint64_t pcs_sds_rx_ldll_tune : 3; /**< [ 7: 5](R/W/H) Tuning bits for the regulator and loop filter.
+ Bit 7 controls the initial value of the regulator output,
+ 0 for 0.9V and 1 for 0.925V.
+ Bits 6:5 are connected to the loop filter, to reduce
+ its corner frequency (for testing purposes).
+
+ This parameter is for debugging purposes and should not
+ be written in normal operation. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_slicex_rx_ldll_ctrl_s cn; */
+};
+typedef union bdk_gserx_slicex_rx_ldll_ctrl bdk_gserx_slicex_rx_ldll_ctrl_t;
+
+/* Return the RSL address of GSER(a)_SLICE(b)_RX_LDLL_CTRL. Per-model
+ bounds check on (a,b); invalid indices trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_SLICEX_RX_LDLL_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_RX_LDLL_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090460218ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+ return 0x87e090460218ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+ return 0x87e090460218ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+ __bdk_csr_fatal("GSERX_SLICEX_RX_LDLL_CTRL", 2, a, b, 0, 0);
+}
+
+/* Companion macros describing this CSR: value type, bus type, printable
+ name, BAR and argument list. */
+#define typedef_BDK_GSERX_SLICEX_RX_LDLL_CTRL(a,b) bdk_gserx_slicex_rx_ldll_ctrl_t
+#define bustype_BDK_GSERX_SLICEX_RX_LDLL_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_RX_LDLL_CTRL(a,b) "GSERX_SLICEX_RX_LDLL_CTRL"
+#define device_bar_BDK_GSERX_SLICEX_RX_LDLL_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_RX_LDLL_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_RX_LDLL_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_rx_sdll_ctrl
+ *
+ * GSER Slice RX SDLL Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_rx_sdll_ctrl
+{
+ /* [u] is the whole-register view; [s] names the bit fields, declared
+ in opposite order for big- and little-endian builds. */
+ uint64_t u;
+ struct bdk_gserx_slicex_rx_sdll_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t pcs_sds_oob_clk_ctrl : 2; /**< [ 15: 14](R/W/H) OOB clock oscillator output frequency selection:
+ 0x0 = 506 MHz (min) 682 MHz (typ) 782 MHz (max).
+ 0x1 = 439 MHz (min) 554 MHz (typ) 595 MHz (max).
+ 0x2 = 379 MHz (min) 453 MHz (typ) 482 MHz (max).
+ 0x3 = 303 MHz (min) 378 MHz (typ) 414 MHz (max).
+
+ This parameter is for debugging purposes and should not
+ be written in normal operation. */
+ uint64_t reserved_7_13 : 7;
+ uint64_t pcs_sds_rx_sdll_tune : 3; /**< [ 6: 4](R/W) Tuning bits for the regulator and the loop filter. */
+ uint64_t pcs_sds_rx_sdll_swsel : 4; /**< [ 3: 0](R/W) DMON control; selects which signal is passed to the output
+ of DMON.
+ 0x1 = dllout[0] (first output clock phase, out of 8 phases,
+ of the Slice DLL).
+ 0x2 = dllout[1] (second output clock phase, out of 8 phases,
+ of the Slice DLL).
+ 0x4 = piclk (output clock of the PI).
+ 0x8 = vdda_int.
+
+ All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_rx_sdll_swsel : 4; /**< [ 3: 0](R/W) DMON control; selects which signal is passed to the output
+ of DMON.
+ 0x1 = dllout[0] (first output clock phase, out of 8 phases,
+ of the Slice DLL).
+ 0x2 = dllout[1] (second output clock phase, out of 8 phases,
+ of the Slice DLL).
+ 0x4 = piclk (output clock of the PI).
+ 0x8 = vdda_int.
+
+ All other values in this field are reserved. */
+ uint64_t pcs_sds_rx_sdll_tune : 3; /**< [ 6: 4](R/W) Tuning bits for the regulator and the loop filter. */
+ uint64_t reserved_7_13 : 7;
+ uint64_t pcs_sds_oob_clk_ctrl : 2; /**< [ 15: 14](R/W/H) OOB clock oscillator output frequency selection:
+ 0x0 = 506 MHz (min) 682 MHz (typ) 782 MHz (max).
+ 0x1 = 439 MHz (min) 554 MHz (typ) 595 MHz (max).
+ 0x2 = 379 MHz (min) 453 MHz (typ) 482 MHz (max).
+ 0x3 = 303 MHz (min) 378 MHz (typ) 414 MHz (max).
+
+ This parameter is for debugging purposes and should not
+ be written in normal operation. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_slicex_rx_sdll_ctrl_s cn; */
+};
+typedef union bdk_gserx_slicex_rx_sdll_ctrl bdk_gserx_slicex_rx_sdll_ctrl_t;
+
+/* Return the RSL address of GSER(a)_SLICE(b)_RX_SDLL_CTRL. Per-model
+ bounds check on (a,b); invalid indices trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_SLICEX_RX_SDLL_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_RX_SDLL_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090460220ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+ return 0x87e090460220ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+ return 0x87e090460220ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+ __bdk_csr_fatal("GSERX_SLICEX_RX_SDLL_CTRL", 2, a, b, 0, 0);
+}
+
+/* Companion macros describing this CSR: value type, bus type, printable
+ name, BAR and argument list. */
+#define typedef_BDK_GSERX_SLICEX_RX_SDLL_CTRL(a,b) bdk_gserx_slicex_rx_sdll_ctrl_t
+#define bustype_BDK_GSERX_SLICEX_RX_SDLL_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_RX_SDLL_CTRL(a,b) "GSERX_SLICEX_RX_SDLL_CTRL"
+#define device_bar_BDK_GSERX_SLICEX_RX_SDLL_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_RX_SDLL_CTRL(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_RX_SDLL_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice#_sgmii_mode
+ *
+ * GSER Slice SGMII MODE Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slicex_sgmii_mode
+{
+ /* [u] is the whole-register view; [s] names the bit fields, declared
+ in opposite order for big- and little-endian builds. */
+ uint64_t u;
+ struct bdk_gserx_slicex_sgmii_mode_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_15_63 : 49;
+ uint64_t slice_spare_1_0 : 2; /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+ Bit 13 controls enable for lane 0.
+ Bit 14 controls enable for lane 1. */
+ uint64_t rx_ldll_isel : 2; /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_sdll_isel : 2; /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_pi_bwsel : 3; /**< [ 8: 6](R/W/H) Controls PI different data rates:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x6 = 4 GHz.
+ 0x7 = 5.15625 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_ldll_bwsel : 3; /**< [ 5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_sdll_bwsel : 3; /**< [ 2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_sdll_bwsel : 3; /**< [ 2: 0](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_ldll_bwsel : 3; /**< [ 5: 3](R/W/H) Controls capacitors in delay line for different data rates; should be set
+ based on the PLL clock frequency as follows:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x3 = 4 GHz.
+ 0x5 = 5.15625 GHz.
+ 0x6 = 5.65 GHz.
+ 0x7 = 6.25 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_pi_bwsel : 3; /**< [ 8: 6](R/W/H) Controls PI different data rates:
+ 0x0 = 2.5 GHz.
+ 0x1 = 3.125 GHz.
+ 0x6 = 4 GHz.
+ 0x7 = 5.15625 GHz.
+
+ All other values in this field are reserved. */
+ uint64_t rx_sdll_isel : 2; /**< [ 10: 9](R/W/H) Controls charge pump current for slice DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t rx_ldll_isel : 2; /**< [ 12: 11](R/W/H) Controls charge pump current for lane DLL:
+ 0x0 = 500 uA.
+ 0x1 = 1000 uA.
+ 0x2 = 250 uA.
+ 0x3 = 330 uA. */
+ uint64_t slice_spare_1_0 : 2; /**< [ 14: 13](R/W/H) Controls enable of pcs_sds_rx_div33 for lane 0 and 1 in the slice:
+ Bit 13 controls enable for lane 0.
+ Bit 14 controls enable for lane 1. */
+ uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_slicex_sgmii_mode_s cn; */
+};
+typedef union bdk_gserx_slicex_sgmii_mode bdk_gserx_slicex_sgmii_mode_t;
+
+/* Return the RSL address of GSER(a)_SLICE(b)_SGMII_MODE. Per-model
+ bounds check on (a,b); invalid indices trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_SLICEX_SGMII_MODE(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICEX_SGMII_MODE(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=3) && (b<=1)))
+ return 0x87e090460258ll + 0x1000000ll * ((a) & 0x3) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=6) && (b<=1)))
+ return 0x87e090460258ll + 0x1000000ll * ((a) & 0x7) + 0x200000ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=13) && (b<=1)))
+ return 0x87e090460258ll + 0x1000000ll * ((a) & 0xf) + 0x200000ll * ((b) & 0x1);
+ __bdk_csr_fatal("GSERX_SLICEX_SGMII_MODE", 2, a, b, 0, 0);
+}
+
+/* Companion macros describing this CSR: value type, bus type, printable
+ name, BAR and argument list. */
+#define typedef_BDK_GSERX_SLICEX_SGMII_MODE(a,b) bdk_gserx_slicex_sgmii_mode_t
+#define bustype_BDK_GSERX_SLICEX_SGMII_MODE(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICEX_SGMII_MODE(a,b) "GSERX_SLICEX_SGMII_MODE"
+#define device_bar_BDK_GSERX_SLICEX_SGMII_MODE(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICEX_SGMII_MODE(a,b) (a)
+#define arguments_BDK_GSERX_SLICEX_SGMII_MODE(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gser#_slice_cfg
+ *
+ * GSER Slice Configuration Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_slice_cfg
+{
+ /* [u] is the whole-register view; [s] names the bit fields, declared
+ in opposite order for big- and little-endian builds. */
+ uint64_t u;
+ struct bdk_gserx_slice_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t tx_rx_detect_lvl_enc : 4; /**< [ 11: 8](R/W) Determines the RX detect level, pcs_sds_tx_rx_detect_lvl[9:0],
+ (which is a 1-hot signal), where the level is equal to
+ 2^TX_RX_DETECT_LVL_ENC. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t pcs_sds_rx_pcie_pterm : 2; /**< [ 5: 4](R/W) Reserved. */
+ uint64_t pcs_sds_rx_pcie_nterm : 2; /**< [ 3: 2](R/W) Reserved. */
+ uint64_t pcs_sds_tx_stress_eye : 2; /**< [ 1: 0](R/W) Controls TX stress eye. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcs_sds_tx_stress_eye : 2; /**< [ 1: 0](R/W) Controls TX stress eye. */
+ uint64_t pcs_sds_rx_pcie_nterm : 2; /**< [ 3: 2](R/W) Reserved. */
+ uint64_t pcs_sds_rx_pcie_pterm : 2; /**< [ 5: 4](R/W) Reserved. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t tx_rx_detect_lvl_enc : 4; /**< [ 11: 8](R/W) Determines the RX detect level, pcs_sds_tx_rx_detect_lvl[9:0],
+ (which is a 1-hot signal), where the level is equal to
+ 2^TX_RX_DETECT_LVL_ENC. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_slice_cfg_s cn; */
+};
+typedef union bdk_gserx_slice_cfg bdk_gserx_slice_cfg_t;
+
+/* Return the RSL address of GSER(a)_SLICE_CFG. Per-model bounds check
+ on 'a'; an invalid index traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_SLICE_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SLICE_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460060ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460060ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460060ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_SLICE_CFG", 1, a, 0, 0, 0);
+}
+
+/* Companion macros describing this CSR: value type, bus type, printable
+ name, BAR and argument list. */
+#define typedef_BDK_GSERX_SLICE_CFG(a) bdk_gserx_slice_cfg_t
+#define bustype_BDK_GSERX_SLICE_CFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SLICE_CFG(a) "GSERX_SLICE_CFG"
+#define device_bar_BDK_GSERX_SLICE_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SLICE_CFG(a) (a)
+#define arguments_BDK_GSERX_SLICE_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_spd
+ *
+ * GSER Speed Bits Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_spd
+{
+ /* [u] is the whole-register view; [s] names the bit fields. Chip-model
+ variants follow: [cn81xx] (shared by cn83xx) where SPD is unused, and
+ [cn88xxp2] whose SPD table adds a training-method column. */
+ uint64_t u;
+ struct bdk_gserx_spd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t spd : 4; /**< [ 3: 0](R/W/H) For CCPI links (i.e. GSER8..13), the hardware loads this CSR field from the OCI_SPD\<3:0\>
+ pins during chip cold reset. For non-CCPI links, this field is not used.
+ For SPD settings that configure a non-default reference clock, hardware updates the PLL
+ settings of the specific lane mode (LMODE) table entry to derive the correct link rate.
+
+ \<pre\>
+ REFCLK Link Rate
+ SPD (MHz) (Gb) LMODE
+ ---- ------ ------ -----------------------
+ 0x0: 100 1.25 R_125G_REFCLK15625_KX
+ 0x1: 100 2.5 R_25G_REFCLK100
+ 0x2: 100 5 R_5G_REFCLK100
+ 0x3: 100 8 R_8G_REFCLK100
+ 0x4: 125 1.25 R_125G_REFCLK15625_KX
+ 0x5: 125 2.5 R_25G_REFCLK125
+ 0x6: 125 3.125 R_3125G_REFCLK15625_XAUI
+ 0x7: 125 5 R_5G_REFCLK125
+ 0x8: 125 6.25 R_625G_REFCLK15625_RXAUI
+ 0x9: 125 8 R_8G_REFCLK125
+ 0xA: 156.25 2.5 R_25G_REFCLK100
+ 0xB: 156.25 3.125 R_3125G_REFCLK15625_XAUI
+ 0xC: 156.25 5 R_5G_REFCLK125
+ 0xD: 156.25 6.25 R_625G_REFCLK15625_RXAUI
+ 0xE: 156.25 10.3125 R_103125G_REFCLK15625_KR
+ 0xF: SW_MODE
+ \</pre\>
+
+ Note that a value of 0xF is called SW_MODE. The CCPI link does not come up configured in
+ SW_MODE.
+ (Software must do all the CCPI GSER configuration to use CCPI in the case of SW_MODE.)
+ When SPD!=SW_MODE after a chip cold reset, the hardware has initialized the following
+ registers (based on the OCI_SPD selection):
+
+ * GSER()_LANE_MODE[LMODE]=Z.
+ * GSER()_PLL_P()_MODE_0.
+ * GSER()_PLL_P()_MODE_1.
+ * GSER()_LANE_P()_MODE_0.
+ * GSER()_LANE_P()_MODE_1.
+ * GSER()_LANE()_RX_VALBBD_CTRL_0.
+ * GSER()_LANE()_RX_VALBBD_CTRL_1.
+ * GSER()_LANE()_RX_VALBBD_CTRL_2.
+
+ where in "GSER(x)", x is 8..13, and in "P(z)", z equals LMODE. */
+#else /* Word 0 - Little Endian */
+ uint64_t spd : 4; /**< [ 3: 0](R/W/H) For CCPI links (i.e. GSER8..13), the hardware loads this CSR field from the OCI_SPD\<3:0\>
+ pins during chip cold reset. For non-CCPI links, this field is not used.
+ For SPD settings that configure a non-default reference clock, hardware updates the PLL
+ settings of the specific lane mode (LMODE) table entry to derive the correct link rate.
+
+ \<pre\>
+ REFCLK Link Rate
+ SPD (MHz) (Gb) LMODE
+ ---- ------ ------ -----------------------
+ 0x0: 100 1.25 R_125G_REFCLK15625_KX
+ 0x1: 100 2.5 R_25G_REFCLK100
+ 0x2: 100 5 R_5G_REFCLK100
+ 0x3: 100 8 R_8G_REFCLK100
+ 0x4: 125 1.25 R_125G_REFCLK15625_KX
+ 0x5: 125 2.5 R_25G_REFCLK125
+ 0x6: 125 3.125 R_3125G_REFCLK15625_XAUI
+ 0x7: 125 5 R_5G_REFCLK125
+ 0x8: 125 6.25 R_625G_REFCLK15625_RXAUI
+ 0x9: 125 8 R_8G_REFCLK125
+ 0xA: 156.25 2.5 R_25G_REFCLK100
+ 0xB: 156.25 3.125 R_3125G_REFCLK15625_XAUI
+ 0xC: 156.25 5 R_5G_REFCLK125
+ 0xD: 156.25 6.25 R_625G_REFCLK15625_RXAUI
+ 0xE: 156.25 10.3125 R_103125G_REFCLK15625_KR
+ 0xF: SW_MODE
+ \</pre\>
+
+ Note that a value of 0xF is called SW_MODE. The CCPI link does not come up configured in
+ SW_MODE.
+ (Software must do all the CCPI GSER configuration to use CCPI in the case of SW_MODE.)
+ When SPD!=SW_MODE after a chip cold reset, the hardware has initialized the following
+ registers (based on the OCI_SPD selection):
+
+ * GSER()_LANE_MODE[LMODE]=Z.
+ * GSER()_PLL_P()_MODE_0.
+ * GSER()_PLL_P()_MODE_1.
+ * GSER()_LANE_P()_MODE_0.
+ * GSER()_LANE_P()_MODE_1.
+ * GSER()_LANE()_RX_VALBBD_CTRL_0.
+ * GSER()_LANE()_RX_VALBBD_CTRL_1.
+ * GSER()_LANE()_RX_VALBBD_CTRL_2.
+
+ where in "GSER(x)", x is 8..13, and in "P(z)", z equals LMODE. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_spd_s cn88xxp1; */
+ struct bdk_gserx_spd_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t spd : 4; /**< [ 3: 0](R/W/H) Not used. */
+#else /* Word 0 - Little Endian */
+ uint64_t spd : 4; /**< [ 3: 0](R/W/H) Not used. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_gserx_spd_cn81xx cn83xx; */
+ struct bdk_gserx_spd_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t spd : 4; /**< [ 3: 0](R/W/H) For CCPI links (i.e. GSER8..13), the hardware loads this CSR field from the OCI_SPD\<3:0\>
+ pins during chip cold reset. For non-CCPI links, this field is not used.
+ For SPD settings that configure a non-default reference clock, hardware updates the PLL
+ settings of the specific lane mode (LMODE) table entry to derive the correct link rate.
+
+ \<pre\>
+ REFCLK Link Rate
+ SPD (MHz) (Gb) Train LMODE
+ ---- ------ ------ ----- -----------------------
+ 0x0: 100 5 TS R_5G_REFCLK100
+ 0x1: 100 2.5 -- R_25G_REFCLK100
+ 0x2: 100 5 -- R_5G_REFCLK100
+ 0x3: 100 8 -- R_8G_REFCLK100
+ 0x4: 100 8 TS R_8G_REFCLK100
+ 0x5: 100 8 KR R_8G_REFCLK100
+ 0x6: 125 3.125 -- R_3125G_REFCLK15625_XAUI
+ 0x7: 125 5 -- R_5G_REFCLK125
+ 0x8: 125 6.25 -- R_625G_REFCLK15625_RXAUI
+ 0x9: 125 8 -- R_8G_REFCLK125
+ 0xA: 156.25 10.3125 TS R_103125G_REFCLK15625_KR
+ 0xB: 156.25 3.125 -- R_3125G_REFCLK15625_XAUI
+ 0xC: 156.25 5 TS R_5G_REFCLK125
+ 0xD: 156.25 6.25 TS R_625G_REFCLK15625_RXAUI
+ 0xE: 156.25 10.3125 KR R_103125G_REFCLK15625_KR
+ 0xF: SW_MODE
+ \</pre\>
+
+ Train column indicates training method. TS indicates short training, i.e., local RX
+ equalization only. KR indicates KR training, i.e., local RX equalization and link
+ partner TX equalizer adaptation. -- indicates not applicable.
+ Note that a value of 0xF is called SW_MODE. The CCPI link does not come up configured in
+ SW_MODE.
+ (Software must do all the CCPI GSER configuration to use CCPI in the case of SW_MODE.)
+ When SPD!=SW_MODE after a chip cold reset, the hardware has initialized the following
+ registers (based on the OCI_SPD selection):
+
+ * GSER()_LANE_MODE[LMODE]=Z.
+ * GSER()_PLL_P()_MODE_0.
+ * GSER()_PLL_P()_MODE_1.
+ * GSER()_LANE_P()_MODE_0.
+ * GSER()_LANE_P()_MODE_1.
+ * GSER()_LANE()_RX_VALBBD_CTRL_0.
+ * GSER()_LANE()_RX_VALBBD_CTRL_1.
+ * GSER()_LANE()_RX_VALBBD_CTRL_2.
+
+ where in "GSER(x)", x is 8..13, and in "P(z)", z equals LMODE. */
+#else /* Word 0 - Little Endian */
+ uint64_t spd : 4; /**< [ 3: 0](R/W/H) For CCPI links (i.e. GSER8..13), the hardware loads this CSR field from the OCI_SPD\<3:0\>
+ pins during chip cold reset. For non-CCPI links, this field is not used.
+ For SPD settings that configure a non-default reference clock, hardware updates the PLL
+ settings of the specific lane mode (LMODE) table entry to derive the correct link rate.
+
+ \<pre\>
+ REFCLK Link Rate
+ SPD (MHz) (Gb) Train LMODE
+ ---- ------ ------ ----- -----------------------
+ 0x0: 100 5 TS R_5G_REFCLK100
+ 0x1: 100 2.5 -- R_25G_REFCLK100
+ 0x2: 100 5 -- R_5G_REFCLK100
+ 0x3: 100 8 -- R_8G_REFCLK100
+ 0x4: 100 8 TS R_8G_REFCLK100
+ 0x5: 100 8 KR R_8G_REFCLK100
+ 0x6: 125 3.125 -- R_3125G_REFCLK15625_XAUI
+ 0x7: 125 5 -- R_5G_REFCLK125
+ 0x8: 125 6.25 -- R_625G_REFCLK15625_RXAUI
+ 0x9: 125 8 -- R_8G_REFCLK125
+ 0xA: 156.25 10.3125 TS R_103125G_REFCLK15625_KR
+ 0xB: 156.25 3.125 -- R_3125G_REFCLK15625_XAUI
+ 0xC: 156.25 5 TS R_5G_REFCLK125
+ 0xD: 156.25 6.25 TS R_625G_REFCLK15625_RXAUI
+ 0xE: 156.25 10.3125 KR R_103125G_REFCLK15625_KR
+ 0xF: SW_MODE
+ \</pre\>
+
+ Train column indicates training method. TS indicates short training, i.e., local RX
+ equalization only. KR indicates KR training, i.e., local RX equalization and link
+ partner TX equalizer adaptation. -- indicates not applicable.
+ Note that a value of 0xF is called SW_MODE. The CCPI link does not come up configured in
+ SW_MODE.
+ (Software must do all the CCPI GSER configuration to use CCPI in the case of SW_MODE.)
+ When SPD!=SW_MODE after a chip cold reset, the hardware has initialized the following
+ registers (based on the OCI_SPD selection):
+
+ * GSER()_LANE_MODE[LMODE]=Z.
+ * GSER()_PLL_P()_MODE_0.
+ * GSER()_PLL_P()_MODE_1.
+ * GSER()_LANE_P()_MODE_0.
+ * GSER()_LANE_P()_MODE_1.
+ * GSER()_LANE()_RX_VALBBD_CTRL_0.
+ * GSER()_LANE()_RX_VALBBD_CTRL_1.
+ * GSER()_LANE()_RX_VALBBD_CTRL_2.
+
+ where in "GSER(x)", x is 8..13, and in "P(z)", z equals LMODE. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_gserx_spd bdk_gserx_spd_t;
+
+/* Return the RSL address of GSER(a)_SPD. Per-model bounds check on 'a';
+ an invalid index traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERX_SPD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SPD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000088ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000088ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000088ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_SPD", 1, a, 0, 0, 0);
+}
+
+/* Companion macros describing this CSR: value type, bus type, printable
+ name, BAR and argument list. */
+#define typedef_BDK_GSERX_SPD(a) bdk_gserx_spd_t
+#define bustype_BDK_GSERX_SPD(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SPD(a) "GSERX_SPD"
+#define device_bar_BDK_GSERX_SPD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SPD(a) (a)
+#define arguments_BDK_GSERX_SPD(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_srst
+ *
+ * GSER Soft Reset Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_srst
+{
+ uint64_t u;
+ struct bdk_gserx_srst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t srst : 1; /**< [ 0: 0](R/W) When asserted, resets all per-lane state in the GSER with the exception of the PHY and the
+ GSER()_CFG. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t srst : 1; /**< [ 0: 0](R/W) When asserted, resets all per-lane state in the GSER with the exception of the PHY and the
+ GSER()_CFG. For diagnostic use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_srst_s cn; */
+};
+typedef union bdk_gserx_srst bdk_gserx_srst_t;
+
+static inline uint64_t BDK_GSERX_SRST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_SRST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000090ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000090ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000090ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_SRST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_SRST(a) bdk_gserx_srst_t
+#define bustype_BDK_GSERX_SRST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_SRST(a) "GSERX_SRST"
+#define device_bar_BDK_GSERX_SRST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_SRST(a) (a)
+#define arguments_BDK_GSERX_SRST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_term_cfg
+ *
+ * GSER Termination Calibration Configuration Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_term_cfg
+{
+ uint64_t u;
+ struct bdk_gserx_term_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t fast_term_cal : 1; /**< [ 8: 8](R/W/H) Set to enable fast termination calibration.
+ For simulation use only. */
+ uint64_t reserved_7 : 1;
+ uint64_t cal_start_ovrd_en : 1; /**< [ 6: 6](R/W/H) When set, calibration start is defined by
+ GSER()_TERM_CFG[CAL_START_OVRD_VAL]. */
+ uint64_t cal_start_ovrd_val : 1; /**< [ 5: 5](R/W/H) Override calibration start value. */
+ uint64_t cal_code_ovrd_en : 1; /**< [ 4: 4](R/W/H) When set, calibration code is defined by
+ GSER()_TERM_CFG[CAL_CODE_OVRD]. */
+ uint64_t cal_code_ovrd : 4; /**< [ 3: 0](R/W/H) Override calibration code value. */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_code_ovrd : 4; /**< [ 3: 0](R/W/H) Override calibration code value. */
+ uint64_t cal_code_ovrd_en : 1; /**< [ 4: 4](R/W/H) When set, calibration code is defined by
+ GSER()_TERM_CFG[CAL_CODE_OVRD]. */
+ uint64_t cal_start_ovrd_val : 1; /**< [ 5: 5](R/W/H) Override calibration start value. */
+ uint64_t cal_start_ovrd_en : 1; /**< [ 6: 6](R/W/H) When set, calibration start is defined by
+ GSER()_TERM_CFG[CAL_START_OVRD_VAL]. */
+ uint64_t reserved_7 : 1;
+ uint64_t fast_term_cal : 1; /**< [ 8: 8](R/W/H) Set to enable fast termination calibration.
+ For simulation use only. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_term_cfg_s cn; */
+};
+typedef union bdk_gserx_term_cfg bdk_gserx_term_cfg_t;
+
+static inline uint64_t BDK_GSERX_TERM_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_TERM_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460070ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460070ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460070ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_TERM_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_TERM_CFG(a) bdk_gserx_term_cfg_t
+#define bustype_BDK_GSERX_TERM_CFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_TERM_CFG(a) "GSERX_TERM_CFG"
+#define device_bar_BDK_GSERX_TERM_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_TERM_CFG(a) (a)
+#define arguments_BDK_GSERX_TERM_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_term_mon_1
+ *
+ * GSER Termination Cal Code Monitor Register
+ * These registers are for diagnostic use only.
+ * These registers are reset by hardware only during chip cold reset.
+ * The values of the CSR fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_term_mon_1
+{
+ uint64_t u;
+ struct bdk_gserx_term_mon_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t cal_code_mgmt : 4; /**< [ 3: 0](RO/H) RX and TX termination resistance calibration code. */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_code_mgmt : 4; /**< [ 3: 0](RO/H) RX and TX termination resistance calibration code. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_term_mon_1_s cn; */
+};
+typedef union bdk_gserx_term_mon_1 bdk_gserx_term_mon_1_t;
+
+static inline uint64_t BDK_GSERX_TERM_MON_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_TERM_MON_1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090460110ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090460110ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090460110ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_TERM_MON_1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_TERM_MON_1(a) bdk_gserx_term_mon_1_t
+#define bustype_BDK_GSERX_TERM_MON_1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_TERM_MON_1(a) "GSERX_TERM_MON_1"
+#define device_bar_BDK_GSERX_TERM_MON_1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_TERM_MON_1(a) (a)
+#define arguments_BDK_GSERX_TERM_MON_1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_tx_vboost
+ *
+ * GSER TX Voltage Boost Enable Register
+ * These registers are reset by hardware only during chip cold reset. The values of the CSR
+ * fields in these registers do not change during chip warm or soft resets.
+ */
+union bdk_gserx_tx_vboost
+{
+ uint64_t u;
+ struct bdk_gserx_tx_vboost_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t vboost : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, boosts the TX Vswing from
+ VDD to 1.0 VPPD.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t vboost : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, boosts the TX Vswing from
+ VDD to 1.0 VPPD.
+ \<3\>: Lane 3. Reserved.
+ \<2\>: Lane 2. Reserved.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_tx_vboost_s cn81xx; */
+ struct bdk_gserx_tx_vboost_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t vboost : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode (including all CCPI links), boosts the TX Vswing from
+ VDD to 1.0 VPPD.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t vboost : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode (including all CCPI links), boosts the TX Vswing from
+ VDD to 1.0 VPPD.
+ \<3\>: Lane 3.
+ \<2\>: Lane 2.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_gserx_tx_vboost_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t vboost : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, boosts the TX Vswing from
+ VDD to 1.0 VPPD.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t vboost : 4; /**< [ 3: 0](R/W) For links that are not in PCIE mode, boosts the TX Vswing from
+ VDD to 1.0 VPPD.
+ \<3\>: Lane 3. Not supported in GSER4, GSER5, or GSER6.
+ \<2\>: Lane 2. Not supported in GSER4, GSER5, or GSER6.
+ \<1\>: Lane 1.
+ \<0\>: Lane 0. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_gserx_tx_vboost bdk_gserx_tx_vboost_t;
+
+static inline uint64_t BDK_GSERX_TX_VBOOST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_TX_VBOOST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000130ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000130ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000130ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_TX_VBOOST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_TX_VBOOST(a) bdk_gserx_tx_vboost_t
+#define bustype_BDK_GSERX_TX_VBOOST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_TX_VBOOST(a) "GSERX_TX_VBOOST"
+#define device_bar_BDK_GSERX_TX_VBOOST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_TX_VBOOST(a) (a)
+#define arguments_BDK_GSERX_TX_VBOOST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_txclk_evt_cntr
+ *
+ * GSER QLM Transmit Clock Event Counter Register
+ */
+union bdk_gserx_txclk_evt_cntr
+{
+ uint64_t u;
+ struct bdk_gserx_txclk_evt_cntr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t count : 32; /**< [ 31: 0](RO) This register can only be reliably read when GSER()_TXCLK_EVT_CTRL[ENB]
+ is clear.
+
+ When GSER()_TXCLK_EVT_CTRL[CLR] is set, [COUNT] goes to zero.
+
+ When GSER()_TXCLK_EVT_CTRL[ENB] is set, [COUNT] is incremented
+ in positive edges of the QLM reference clock.
+
+ When GSER()_TXCLK_EVT_CTRL[ENB] is not set, [COUNT] value is held;
+ this must be used when [COUNT] is being read for reliable results. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 32; /**< [ 31: 0](RO) This register can only be reliably read when GSER()_TXCLK_EVT_CTRL[ENB]
+ is clear.
+
+ When GSER()_TXCLK_EVT_CTRL[CLR] is set, [COUNT] goes to zero.
+
+ When GSER()_TXCLK_EVT_CTRL[ENB] is set, [COUNT] is incremented
+ in positive edges of the QLM reference clock.
+
+ When GSER()_TXCLK_EVT_CTRL[ENB] is not set, [COUNT] value is held;
+ this must be used when [COUNT] is being read for reliable results. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_txclk_evt_cntr_s cn; */
+};
+typedef union bdk_gserx_txclk_evt_cntr bdk_gserx_txclk_evt_cntr_t;
+
+static inline uint64_t BDK_GSERX_TXCLK_EVT_CNTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_TXCLK_EVT_CNTR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000188ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000188ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000188ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_TXCLK_EVT_CNTR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_TXCLK_EVT_CNTR(a) bdk_gserx_txclk_evt_cntr_t
+#define bustype_BDK_GSERX_TXCLK_EVT_CNTR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_TXCLK_EVT_CNTR(a) "GSERX_TXCLK_EVT_CNTR"
+#define device_bar_BDK_GSERX_TXCLK_EVT_CNTR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_TXCLK_EVT_CNTR(a) (a)
+#define arguments_BDK_GSERX_TXCLK_EVT_CNTR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gser#_txclk_evt_ctrl
+ *
+ * GSER QLM Transmit Clock Event Counter Control Register
+ */
+union bdk_gserx_txclk_evt_ctrl
+{
+ uint64_t u;
+ struct bdk_gserx_txclk_evt_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t clr : 1; /**< [ 1: 1](R/W) When set, clears GSER()_TXCLK_EVT_CNTR[COUNT]. */
+ uint64_t enb : 1; /**< [ 0: 0](R/W) When set, enables the GSER()_TXCLK_EVT_CNTR[COUNT] to increment
+ on positive edges of the QLM reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t enb : 1; /**< [ 0: 0](R/W) When set, enables the GSER()_TXCLK_EVT_CNTR[COUNT] to increment
+ on positive edges of the QLM reference clock. */
+ uint64_t clr : 1; /**< [ 1: 1](R/W) When set, clears GSER()_TXCLK_EVT_CNTR[COUNT]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gserx_txclk_evt_ctrl_s cn; */
+};
+typedef union bdk_gserx_txclk_evt_ctrl bdk_gserx_txclk_evt_ctrl_t;
+
+static inline uint64_t BDK_GSERX_TXCLK_EVT_CTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERX_TXCLK_EVT_CTRL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=3))
+ return 0x87e090000180ll + 0x1000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=6))
+ return 0x87e090000180ll + 0x1000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=13))
+ return 0x87e090000180ll + 0x1000000ll * ((a) & 0xf);
+ __bdk_csr_fatal("GSERX_TXCLK_EVT_CTRL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERX_TXCLK_EVT_CTRL(a) bdk_gserx_txclk_evt_ctrl_t
+#define bustype_BDK_GSERX_TXCLK_EVT_CTRL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERX_TXCLK_EVT_CTRL(a) "GSERX_TXCLK_EVT_CTRL"
+#define device_bar_BDK_GSERX_TXCLK_EVT_CTRL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERX_TXCLK_EVT_CTRL(a) (a)
+#define arguments_BDK_GSERX_TXCLK_EVT_CTRL(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_GSER_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h
new file mode 100644
index 0000000000..d1b23fccff
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-gsern.h
@@ -0,0 +1,20807 @@
+#ifndef __BDK_CSRS_GSERN_H__
+#define __BDK_CSRS_GSERN_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium GSERN.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration gsern_bar_e
+ *
+ * GSER Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_GSERN_BAR_E_GSERNX_PF_BAR0(a) (0x87e090000000ll + 0x1000000ll * (a))
+#define BDK_GSERN_BAR_E_GSERNX_PF_BAR0_SIZE 0x100000ull
+
+/**
+ * Enumeration gsern_psb_acc_e
+ *
+ * GSERN Power Serial Bus Accumulator Enumeration
+ * Enumerates the GSERN accumulators for LMC slaves, which correspond to index {b} of
+ * PSBS_SYS()_ACCUM().
+ */
+#define BDK_GSERN_PSB_ACC_E_TBD0 (0)
+#define BDK_GSERN_PSB_ACC_E_TBD1 (1)
+#define BDK_GSERN_PSB_ACC_E_TBD2 (2)
+#define BDK_GSERN_PSB_ACC_E_TBD3 (3)
+
+/**
+ * Enumeration gsern_psb_event_e
+ *
+ * GSERN Power Serial Bus Event Enumeration
+ * Enumerates the event numbers for GSERN slaves, which correspond to index {b} of
+ * PSBS_SYS()_EVENT()_CFG.
+ */
+#define BDK_GSERN_PSB_EVENT_E_TBD0 (0)
+
+/**
+ * Register (RSL) gsern#_common_bias_bcfg
+ *
+ * GSER Common Bias Base Configuration Register
+ */
+union bdk_gsernx_common_bias_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_bias_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t dac1 : 4; /**< [ 35: 32](R/W) Ir25 reference current trim. Default setting (0x8) selects 0% trim. Minimum and
+ Maximum settings allow for up to + or - 12.5% trim. For debug use only. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t dac0 : 4; /**< [ 27: 24](R/W) Ic25 reference current trim. Default setting (0x8) selects 0% trim. Minimum and
+ Maximum settings allow for up to + or - 12.5% trim. For debug use only. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t bias : 2; /**< [ 17: 16](R/W) Opamp bias current setting. For debug use only.
+ 0x0 = 33 uA.
+ 0x1 = 25 uA.
+ 0x2 = 20 uA.
+ 0x3 = 17 uA. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t bypass : 1; /**< [ 8: 8](R/W) Assert to bypass the bandgap reference and use a resistive divider from VDDA
+ instead. For diagnostic use only. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t bias_pwdn : 1; /**< [ 0: 0](R/W) Bias current power down control. */
+#else /* Word 0 - Little Endian */
+ uint64_t bias_pwdn : 1; /**< [ 0: 0](R/W) Bias current power down control. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t bypass : 1; /**< [ 8: 8](R/W) Assert to bypass the bandgap reference and use a resistive divider from VDDA
+ instead. For diagnostic use only. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t bias : 2; /**< [ 17: 16](R/W) Opamp bias current setting. For debug use only.
+ 0x0 = 33 uA.
+ 0x1 = 25 uA.
+ 0x2 = 20 uA.
+ 0x3 = 17 uA. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t dac0 : 4; /**< [ 27: 24](R/W) Ic25 reference current trim. Default setting (0x8) selects 0% trim. Minimum and
+ Maximum settings allow for up to + or - 12.5% trim. For debug use only. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t dac1 : 4; /**< [ 35: 32](R/W) Ir25 reference current trim. Default setting (0x8) selects 0% trim. Minimum and
+ Maximum settings allow for up to + or - 12.5% trim. For debug use only. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_bias_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_bias_bcfg bdk_gsernx_common_bias_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_BIAS_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_BIAS_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0330ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_BIAS_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_BIAS_BCFG(a) bdk_gsernx_common_bias_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_BIAS_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_BIAS_BCFG(a) "GSERNX_COMMON_BIAS_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_BIAS_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_BIAS_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_BIAS_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_const
+ *
+ * GSER Common Constants Register
+ */
+union bdk_gsernx_common_const
+{
+ uint64_t u;
+ struct bdk_gsernx_common_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_const_s cn; */
+};
+typedef union bdk_gsernx_common_const bdk_gsernx_common_const_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0088ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_CONST(a) bdk_gsernx_common_const_t
+#define bustype_BDK_GSERNX_COMMON_CONST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_CONST(a) "GSERNX_COMMON_CONST"
+#define device_bar_BDK_GSERNX_COMMON_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_CONST(a) (a)
+#define arguments_BDK_GSERNX_COMMON_CONST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_const1
+ *
+ * GSER Common Constants Register 1
+ */
+union bdk_gsernx_common_const1
+{
+ uint64_t u;
+ struct bdk_gsernx_common_const1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t number_lanes : 4; /**< [ 3: 0](RO/H) The number of lanes in this module, e.g., 4 for a QLM or 2 for a DLM.
+ Internal:
+ FIXME reset value 4 (done). Add reset_matches_size (not done). Note: for dlm
+ tieoffs will set reset value to 2. */
+#else /* Word 0 - Little Endian */
+ uint64_t number_lanes : 4; /**< [ 3: 0](RO/H) The number of lanes in this module, e.g., 4 for a QLM or 2 for a DLM.
+ Internal:
+ FIXME reset value 4 (done). Add reset_matches_size (not done). Note: for dlm
+ tieoffs will set reset value to 2. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_const1_s cn; */
+};
+typedef union bdk_gsernx_common_const1 bdk_gsernx_common_const1_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_CONST1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_CONST1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0110ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_CONST1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_CONST1(a) bdk_gsernx_common_const1_t
+#define bustype_BDK_GSERNX_COMMON_CONST1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_CONST1(a) "GSERNX_COMMON_CONST1"
+#define device_bar_BDK_GSERNX_COMMON_CONST1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_CONST1(a) (a)
+#define arguments_BDK_GSERNX_COMMON_CONST1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_eco
+ *
+ * INTERNAL: GSER Common ECO Register
+ */
+union bdk_gsernx_common_eco
+{
+ uint64_t u;
+ struct bdk_gsernx_common_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t eco_rw : 62; /**< [ 63: 2](R/W) Internal:
+ Reserved for ECO use. */
+ uint64_t eco_rw_pll : 2; /**< [ 1: 0](R/W) Internal:
+ Pre-connected to the PLL. Reserved for ECO use. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw_pll : 2; /**< [ 1: 0](R/W) Internal:
+ Pre-connected to the PLL. Reserved for ECO use. */
+ uint64_t eco_rw : 62; /**< [ 63: 2](R/W) Internal:
+ Reserved for ECO use. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_eco_s cn; */
+};
+typedef union bdk_gsernx_common_eco bdk_gsernx_common_eco_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_ECO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_ECO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0770ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_ECO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_ECO(a) bdk_gsernx_common_eco_t
+#define bustype_BDK_GSERNX_COMMON_ECO(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_ECO(a) "GSERNX_COMMON_ECO"
+#define device_bar_BDK_GSERNX_COMMON_ECO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_ECO(a) (a)
+#define arguments_BDK_GSERNX_COMMON_ECO(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_init_bsts
+ *
+ * GSER Common Initialization Base-level Status Register
+ */
+union bdk_gsernx_common_init_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_common_init_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t pll_cp_cal : 4; /**< [ 19: 16](RO/H) PLL calibration state machine's resulting charge pump setting. Only
+ valid if [CAL_READY] is set. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pll_band_cal : 5; /**< [ 12: 8](RO/H) PLL calibration state machine's resulting VCO band setting. Only valid
+ if [CAL_READY] is set. */
+ uint64_t reserved_7 : 1;
+ uint64_t deep_idle : 1; /**< [ 6: 6](RO/H) PLL reset state machine state is deep idle. */
+ uint64_t rst_sm_complete : 1; /**< [ 5: 5](RO/H) PLL reset state machine has completed. If
+ [RST_SM_COMPLETE] is set and [RST_SM_READY] is not, there may still
+ be CSR register settings preventing the PLL from being ready
+ for use, e.g., power-down or reset overrides. */
+ uint64_t rst_sm_ready : 1; /**< [ 4: 4](RO/H) PLL reset state machine status indicating that the reset
+ sequence has completed and this PLL is ready for use. */
+ uint64_t lock : 1; /**< [ 3: 3](RO/H) PLL lock status; only valid if [LOCK_READY] is set. */
+ uint64_t lock_ready : 1; /**< [ 2: 2](RO/H) PLL lock status check is complete following most recent PLL
+ reset or assertion of GSERN()_COMMON_RST_BCFG[LOCK_CHECK]. */
+ uint64_t cal_fail : 1; /**< [ 1: 1](RO/H) PLL calibration failed; valid only if [CAL_READY] is set. */
+ uint64_t cal_ready : 1; /**< [ 0: 0](RO/H) PLL calibration completed. */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_ready : 1; /**< [ 0: 0](RO/H) PLL calibration completed. */
+ uint64_t cal_fail : 1; /**< [ 1: 1](RO/H) PLL calibration failed; valid only if [CAL_READY] is set. */
+ uint64_t lock_ready : 1; /**< [ 2: 2](RO/H) PLL lock status check is complete following most recent PLL
+ reset or assertion of GSERN()_COMMON_RST_BCFG[LOCK_CHECK]. */
+ uint64_t lock : 1; /**< [ 3: 3](RO/H) PLL lock status; only valid if [LOCK_READY] is set. */
+ uint64_t rst_sm_ready : 1; /**< [ 4: 4](RO/H) PLL reset state machine status indicating that the reset
+ sequence has completed and this PLL is ready for use. */
+ uint64_t rst_sm_complete : 1; /**< [ 5: 5](RO/H) PLL reset state machine has completed. If
+ [RST_SM_COMPLETE] is set and [RST_SM_READY] is not, there may still
+ be CSR register settings preventing the PLL from being ready
+ for use, e.g., power-down or reset overrides. */
+ uint64_t deep_idle : 1; /**< [ 6: 6](RO/H) PLL reset state machine state is deep idle. */
+ uint64_t reserved_7 : 1;
+ uint64_t pll_band_cal : 5; /**< [ 12: 8](RO/H) PLL calibration state machine's resulting VCO band setting. Only valid
+ if [CAL_READY] is set. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pll_cp_cal : 4; /**< [ 19: 16](RO/H) PLL calibration state machine's resulting charge pump setting. Only
+ valid if [CAL_READY] is set. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_init_bsts_s cn; */
+};
+typedef union bdk_gsernx_common_init_bsts bdk_gsernx_common_init_bsts_t;
+
+static inline uint64_t BDK_GSERNX_COMMON_INIT_BSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_INIT_BSTS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f05d8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_INIT_BSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_INIT_BSTS(a) bdk_gsernx_common_init_bsts_t
+#define bustype_BDK_GSERNX_COMMON_INIT_BSTS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_INIT_BSTS(a) "GSERNX_COMMON_INIT_BSTS"
+#define device_bar_BDK_GSERNX_COMMON_INIT_BSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_INIT_BSTS(a) (a)
+#define arguments_BDK_GSERNX_COMMON_INIT_BSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_pll_1_bcfg
+ *
+ * GSER Common PLL Base Configuration Register 1
+ */
+union bdk_gsernx_common_pll_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_pll_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t cal_cp_mult : 2; /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+ uint64_t cp : 4; /**< [ 59: 56](R/W) PLL charge pump configuration. */
+ uint64_t cp_overide : 1; /**< [ 55: 55](R/W) PLL charge pump override. */
+ uint64_t band_ppm : 2; /**< [ 54: 53](R/W) PLL band ppm setting. */
+ uint64_t band : 5; /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+ uint64_t band_limits : 3; /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+ uint64_t band_overide : 1; /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+ uint64_t bg_div16 : 1; /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+ uint64_t bg_clk_en : 1; /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+ uint64_t dither_en : 1; /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+ uint64_t cal_sel : 1; /**< [ 40: 40](R/W) PLL calibration method select. */
+ uint64_t vco_sel : 1; /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+ uint64_t sdm_en : 1; /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+ uint64_t reserved_36_37 : 2;
+ uint64_t post_div : 9; /**< [ 35: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+ PLL frequency given a reference clock frequency. The output frequency will
+ be the VCO frequency divided by [POST_DIV]. Divider range is
+ between 8 - 511. If a number less than 8 is selected it will be added to
+ the minimum value of 8. For example, if 2 is specified the value will be
+ interpreted to be 10. */
+ uint64_t div_n : 9; /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+ uint64_t div_f : 18; /**< [ 17: 0](R/W) PLL feedback divider fractional portion (divide by 2^18 to find fraction, e.g., 2621 is
+ ~10,000 ppm). */
+#else /* Word 0 - Little Endian */
+ uint64_t div_f : 18; /**< [ 17: 0](R/W) PLL feedback divider fractional portion (divide by 2^18 to find fraction, e.g., 2621 is
+ ~10,000 ppm). */
+ uint64_t div_n : 9; /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+ uint64_t post_div : 9; /**< [ 35: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+ PLL frequency given a reference clock frequency. The output frequency will
+ be the VCO frequency divided by [POST_DIV]. Divider range is
+ between 8 - 511. If a number less than 8 is selected it will be added to
+ the minimum value of 8. For example, if 2 is specified the value will be
+ interpreted to be 10. */
+ uint64_t reserved_36_37 : 2;
+ uint64_t sdm_en : 1; /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+ uint64_t vco_sel : 1; /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+ uint64_t cal_sel : 1; /**< [ 40: 40](R/W) PLL calibration method select. */
+ uint64_t dither_en : 1; /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+ uint64_t bg_clk_en : 1; /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+ uint64_t bg_div16 : 1; /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+ uint64_t band_overide : 1; /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+ uint64_t band_limits : 3; /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+ uint64_t band : 5; /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+ uint64_t band_ppm : 2; /**< [ 54: 53](R/W) PLL band ppm setting. */
+ uint64_t cp_overide : 1; /**< [ 55: 55](R/W) PLL charge pump override. */
+ uint64_t cp : 4; /**< [ 59: 56](R/W) PLL charge pump configuration. */
+ uint64_t cal_cp_mult : 2; /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_pll_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_pll_1_bcfg bdk_gsernx_common_pll_1_bcfg_t;
+
+/* CSR address of GSERN(a)_COMMON_PLL_1_BCFG. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_PLL_1_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_PLL_1_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0220ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_PLL_1_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_PLL_1_BCFG(a) bdk_gsernx_common_pll_1_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_PLL_1_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_PLL_1_BCFG(a) "GSERNX_COMMON_PLL_1_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_PLL_1_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_PLL_1_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_PLL_1_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_pll_2_bcfg
+ *
+ * GSER Common PLL Base Configuration Register 2
+ */
+union bdk_gsernx_common_pll_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_pll_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t mio_refclk_en : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Enable sending the common PLL reference clock to the counter in MIO. */
+ uint64_t lock_check_cnt_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [LOCK_CHECK_CNT_OVRD]. */
+ uint64_t lock_check_cnt_ovrd : 15; /**< [ 54: 40](R/W) Lock check counter override value. This counter is used to wait for PLL lock to
+ be valid. It counts every reference clock cycle and once its done asserts
+ GSERN()_COMMON_INIT_BSTS[LOCK_READY]. For common PLL, the reference clock is the
+ input from the pad. For lane PLLs, the reference clock is the output of the
+ common PLL. To use value assert GSERN()_LANE()_RST1_BCFG[LOCK_CHECK] or trigger
+ a PLL reset sequence. */
+ uint64_t reserved_34_39 : 6;
+ uint64_t vcm_sel : 1; /**< [ 33: 33](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t cp_boost : 1; /**< [ 32: 32](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t ssc_sata_mode : 2; /**< [ 31: 30](R/W) PLL SATA spread spectrum control.
+ 0x0 = Down spreading. PPM triangle wave total peak-to-peak spread subtracted from
+ nominal frequency.
+ 0x1 = Up spreading. PPM triangle wave total peak-to-peak spread added to nominal
+ frequency.
+ 0x2 = Center spreading. PPM triangle wave total peak-to-peak spread centered at nominal
+ frequency.
+ 0x3 = Square wave subtracted from nominal frequency. */
+ uint64_t ssc_ppm : 2; /**< [ 29: 28](R/W) Spread-spectrum clocking total peak-to-peak spread.
+ 0x0 = 5000 PPM.
+ 0x1 = 3000 PPM.
+ 0x2 = 2500 PPM.
+ 0x3 = 1000 PPM. */
+ uint64_t pnr_refclk_en : 1; /**< [ 27: 27](R/W) Enable PLL reference clock to internal logic. */
+ uint64_t ssc_en : 1; /**< [ 26: 26](R/W) Spread-spectrum clocking enable. */
+ uint64_t ref_clk_bypass : 1; /**< [ 25: 25](R/W) Bypass reference clock to the PLL output. */
+ uint64_t pfd_offset : 1; /**< [ 24: 24](R/W) PLL PFD offset enable. */
+ uint64_t opamp : 4; /**< [ 23: 20](R/W) PLL loop filter op-amp configuration. */
+ uint64_t res : 4; /**< [ 19: 16](R/W) PLL loop filter configuration. */
+ uint64_t reserved_15 : 1;
+ uint64_t vco_bias : 3; /**< [ 14: 12](R/W) VCO bias control. */
+ uint64_t cal_dac_low : 4; /**< [ 11: 8](R/W) PLL calibration DAC low control. */
+ uint64_t cal_dac_mid : 4; /**< [ 7: 4](R/W) PLL calibration DAC middle control. */
+ uint64_t cal_dac_high : 4; /**< [ 3: 0](R/W) PLL calibration DAC high control. */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_dac_high : 4; /**< [ 3: 0](R/W) PLL calibration DAC high control. */
+ uint64_t cal_dac_mid : 4; /**< [ 7: 4](R/W) PLL calibration DAC middle control. */
+ uint64_t cal_dac_low : 4; /**< [ 11: 8](R/W) PLL calibration DAC low control. */
+ uint64_t vco_bias : 3; /**< [ 14: 12](R/W) VCO bias control. */
+ uint64_t reserved_15 : 1;
+ uint64_t res : 4; /**< [ 19: 16](R/W) PLL loop filter configuration. */
+ uint64_t opamp : 4; /**< [ 23: 20](R/W) PLL loop filter op-amp configuration. */
+ uint64_t pfd_offset : 1; /**< [ 24: 24](R/W) PLL PFD offset enable. */
+ uint64_t ref_clk_bypass : 1; /**< [ 25: 25](R/W) Bypass reference clock to the PLL output. */
+ uint64_t ssc_en : 1; /**< [ 26: 26](R/W) Spread-spectrum clocking enable. */
+ uint64_t pnr_refclk_en : 1; /**< [ 27: 27](R/W) Enable PLL reference clock to internal logic. */
+ uint64_t ssc_ppm : 2; /**< [ 29: 28](R/W) Spread-spectrum clocking total peak-to-peak spread.
+ 0x0 = 5000 PPM.
+ 0x1 = 3000 PPM.
+ 0x2 = 2500 PPM.
+ 0x3 = 1000 PPM. */
+ uint64_t ssc_sata_mode : 2; /**< [ 31: 30](R/W) PLL SATA spread spectrum control.
+ 0x0 = Down spreading. PPM triangle wave total peak-to-peak spread subtracted from
+ nominal frequency.
+ 0x1 = Up spreading. PPM triangle wave total peak-to-peak spread added to nominal
+ frequency.
+ 0x2 = Center spreading. PPM triangle wave total peak-to-peak spread centered at nominal
+ frequency.
+ 0x3 = Square wave subtracted from nominal frequency. */
+ uint64_t cp_boost : 1; /**< [ 32: 32](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t vcm_sel : 1; /**< [ 33: 33](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t reserved_34_39 : 6;
+ uint64_t lock_check_cnt_ovrd : 15; /**< [ 54: 40](R/W) Lock check counter override value. This counter is used to wait for PLL lock to
+ be valid. It counts every reference clock cycle and once its done asserts
+ GSERN()_COMMON_INIT_BSTS[LOCK_READY]. For common PLL, the reference clock is the
+ input from the pad. For lane PLLs, the reference clock is the output of the
+ common PLL. To use value assert GSERN()_LANE()_RST1_BCFG[LOCK_CHECK] or trigger
+ a PLL reset sequence. */
+ uint64_t lock_check_cnt_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [LOCK_CHECK_CNT_OVRD]. */
+ uint64_t mio_refclk_en : 1; /**< [ 56: 56](R/W) Reserved.
+ Internal:
+ Enable sending the common PLL reference clock to the counter in MIO. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_pll_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_pll_2_bcfg bdk_gsernx_common_pll_2_bcfg_t;
+
+/* CSR address of GSERN(a)_COMMON_PLL_2_BCFG. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_PLL_2_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_PLL_2_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f02a8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_PLL_2_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_PLL_2_BCFG(a) bdk_gsernx_common_pll_2_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_PLL_2_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_PLL_2_BCFG(a) "GSERNX_COMMON_PLL_2_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_PLL_2_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_PLL_2_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_PLL_2_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_refclk_bcfg
+ *
+ * GSER Common Reference Clock Base Configuration Register
+ * (Original generated title said "GSER Common PLL Base Configuration Register 1",
+ * which appears to be a copy/paste artifact; this register configures the
+ * off-chip/on-chip reference clock receiver and selection, per the fields below.)
+ */
+union bdk_gsernx_common_refclk_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_refclk_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t hcsl : 1; /**< [ 4: 4](R/W) Enable [HCSL] and [OCT] to set HCSL on chip termination in the receiver of the
+ off-chip reference clock, e.g., for a PCIe reference clock. Leave [HCSL] low for
+ LVPECL on-chip termination. */
+ uint64_t oct : 1; /**< [ 3: 3](R/W) Enable on chip termination (OCT) in the receiver of the off-chip reference
+ clock. */
+ uint64_t pwdn : 1; /**< [ 2: 2](R/W) Power down.
+ 0 = Power on. Set to 0 if any lanes in this module will be used.
+ 1 = All paths through the common block reference clock receiver will be powered
+ off and no reference clock will reach the common PLL (or its bypass path). */
+ uint64_t cclksel : 2; /**< [ 1: 0](R/W) Selection controls for the reference clock
+ 0x0 = Choose on-chip common clock zero.
+ 0x1 = Choose on-chip common clock one.
+ 0x2 = Choose on-chip common clock two.
+ 0x3 = Choose the off-chip reference clock (requires that [PWDN] be low). */
+#else /* Word 0 - Little Endian */
+ uint64_t cclksel : 2; /**< [ 1: 0](R/W) Selection controls for the reference clock
+ 0x0 = Choose on-chip common clock zero.
+ 0x1 = Choose on-chip common clock one.
+ 0x2 = Choose on-chip common clock two.
+ 0x3 = Choose the off-chip reference clock (requires that [PWDN] be low). */
+ uint64_t pwdn : 1; /**< [ 2: 2](R/W) Power down.
+ 0 = Power on. Set to 0 if any lanes in this module will be used.
+ 1 = All paths through the common block reference clock receiver will be powered
+ off and no reference clock will reach the common PLL (or its bypass path). */
+ uint64_t oct : 1; /**< [ 3: 3](R/W) Enable on chip termination (OCT) in the receiver of the off-chip reference
+ clock. */
+ uint64_t hcsl : 1; /**< [ 4: 4](R/W) Enable [HCSL] and [OCT] to set HCSL on chip termination in the receiver of the
+ off-chip reference clock, e.g., for a PCIe reference clock. Leave [HCSL] low for
+ LVPECL on-chip termination. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_refclk_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_refclk_bcfg bdk_gsernx_common_refclk_bcfg_t;
+
+/* CSR address of GSERN(a)_COMMON_REFCLK_BCFG. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_REFCLK_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_REFCLK_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0198ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_REFCLK_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_REFCLK_BCFG(a) bdk_gsernx_common_refclk_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_REFCLK_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_REFCLK_BCFG(a) "GSERNX_COMMON_REFCLK_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_REFCLK_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_REFCLK_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_REFCLK_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_refclk_ctr
+ *
+ * GSER Common Reference Clock Cycle Counter Register
+ * A free-running counter of common PLL reference clock cycles to enable rough
+ * confirmation of reference clock frequency via software. Read the counter; wait some
+ * time, e.g., 100ms; read the counter; calculate frequency based on the difference in
+ * values during the known wait time.
+ */
+union bdk_gsernx_common_refclk_ctr
+{
+ uint64_t u;
+ struct bdk_gsernx_common_refclk_ctr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Running count of common PLL reference clock cycles. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Running count of common PLL reference clock cycles. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_refclk_ctr_s cn; */
+};
+typedef union bdk_gsernx_common_refclk_ctr bdk_gsernx_common_refclk_ctr_t;
+
+/* CSR address of GSERN(a)_COMMON_REFCLK_CTR. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_REFCLK_CTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_REFCLK_CTR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f06e8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_REFCLK_CTR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_REFCLK_CTR(a) bdk_gsernx_common_refclk_ctr_t
+#define bustype_BDK_GSERNX_COMMON_REFCLK_CTR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_REFCLK_CTR(a) "GSERNX_COMMON_REFCLK_CTR"
+#define device_bar_BDK_GSERNX_COMMON_REFCLK_CTR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_REFCLK_CTR(a) (a)
+#define arguments_BDK_GSERNX_COMMON_REFCLK_CTR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rev
+ *
+ * GSER Common Revision Register
+ * Revision number
+ */
+union bdk_gsernx_common_rev
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t rev : 8; /**< [ 7: 0](RO/H) Revision number for GSERN common subblock.
+ Internal:
+ Used primarily for E5. */
+#else /* Word 0 - Little Endian */
+ uint64_t rev : 8; /**< [ 7: 0](RO/H) Revision number for GSERN common subblock.
+ Internal:
+ Used primarily for E5. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rev_s cn; */
+};
+typedef union bdk_gsernx_common_rev bdk_gsernx_common_rev_t;
+
+/* CSR address of GSERN(a)_COMMON_REV. Valid only on CN9XXX with a <= 7;
+   any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_REV(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_REV(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0000ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_REV", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_REV(a) bdk_gsernx_common_rev_t
+#define bustype_BDK_GSERNX_COMMON_REV(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_REV(a) "GSERNX_COMMON_REV"
+#define device_bar_BDK_GSERNX_COMMON_REV(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_REV(a) (a)
+#define arguments_BDK_GSERNX_COMMON_REV(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_bcfg
+ *
+ * GSER Common Reset State Machine Controls and Overrides Register
+ */
+union bdk_gsernx_common_rst_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t domain_rst_en : 1; /**< [ 55: 55](R/W) Domain reset enable.
+ 0 = Prevent resetting lane logic with domain reset.
+ 1 = Enable resetting all lane logic with domain reset.
+
+ For PCIe configurations, typically 1 for a root complex and 0 for an endpoint. */
+ uint64_t reserved_49_54 : 6;
+ uint64_t rst_pll_rst_sm : 1; /**< [ 48: 48](R/W) Set to reset the full PLL reset state machine;
+ deassert to run the complete reset initialization sequence
+ starting with common PLL initialization. */
+ uint64_t reserved_13_47 : 35;
+ uint64_t pll_go2deep_idle : 1; /**< [ 12: 12](R/W) Set to cycle the common PLL into deep idle. */
+ uint64_t lock_ppm : 2; /**< [ 11: 10](R/W) PLL lock PPM setting; after GSERN()_COMMON_RST_BCFG[LOCK_WAIT], compare
+ reference clock and divided VCO clock for this many cycles:
+ 0x0 = Compare after 5000 reference clock cycles.
+ 0x1 = Compare after 10000 reference clock cycles.
+ 0x2 = Compare after 20000 reference clock cycles.
+ 0x3 = Compare after 2500 reference clock cycles. */
+ uint64_t lock_wait : 2; /**< [ 9: 8](R/W) Wait time for PLL lock check function to start:
+ 0x0 = Wait 2500 reference clock cycles.
+ 0x1 = Wait 5000 reference clock cycles.
+ 0x2 = Wait 10000 reference clock cycles.
+ 0x3 = Wait 1250 reference clock cycles. */
+ uint64_t lock_check : 1; /**< [ 7: 7](R/W) Trigger a PLL lock status check; result returned in
+ GSERN()_COMMON_INIT_BSTS[LOCK] when GSERN()_COMMON_INIT_BSTS[LOCK_READY]
+ asserts. Deassert and re-assert to repeat checking. */
+ uint64_t vco_cal_reset : 1; /**< [ 6: 6](R/W) PLL VCO calibration state machine reset. */
+ uint64_t fracn_reset : 1; /**< [ 5: 5](R/W) PLL fractional-N state machine reset. */
+ uint64_t ssc_reset : 1; /**< [ 4: 4](R/W) PLL SSC state machine reset. */
+ uint64_t post_div_reset : 1; /**< [ 3: 3](RO) Reserved.
+ Internal:
+ Was common PLL post divider reset. No longer used. */
+ uint64_t reset : 1; /**< [ 2: 2](R/W) PLL primary reset; must assert [POST_DIV_RESET] if [RESET] is asserted. */
+ uint64_t cal_en : 1; /**< [ 1: 1](R/W) Enable PLL calibration procedure. */
+ uint64_t pwdn : 1; /**< [ 0: 0](R/W) PLL power down control. */
+#else /* Word 0 - Little Endian */
+ uint64_t pwdn : 1; /**< [ 0: 0](R/W) PLL power down control. */
+ uint64_t cal_en : 1; /**< [ 1: 1](R/W) Enable PLL calibration procedure. */
+ uint64_t reset : 1; /**< [ 2: 2](R/W) PLL primary reset; must assert [POST_DIV_RESET] if [RESET] is asserted. */
+ uint64_t post_div_reset : 1; /**< [ 3: 3](RO) Reserved.
+ Internal:
+ Was common PLL post divider reset. No longer used. */
+ uint64_t ssc_reset : 1; /**< [ 4: 4](R/W) PLL SSC state machine reset. */
+ uint64_t fracn_reset : 1; /**< [ 5: 5](R/W) PLL fractional-N state machine reset. */
+ uint64_t vco_cal_reset : 1; /**< [ 6: 6](R/W) PLL VCO calibration state machine reset. */
+ uint64_t lock_check : 1; /**< [ 7: 7](R/W) Trigger a PLL lock status check; result returned in
+ GSERN()_COMMON_INIT_BSTS[LOCK] when GSERN()_COMMON_INIT_BSTS[LOCK_READY]
+ asserts. Deassert and re-assert to repeat checking. */
+ uint64_t lock_wait : 2; /**< [ 9: 8](R/W) Wait time for PLL lock check function to start:
+ 0x0 = Wait 2500 reference clock cycles.
+ 0x1 = Wait 5000 reference clock cycles.
+ 0x2 = Wait 10000 reference clock cycles.
+ 0x3 = Wait 1250 reference clock cycles. */
+ uint64_t lock_ppm : 2; /**< [ 11: 10](R/W) PLL lock PPM setting; after GSERN()_COMMON_RST_BCFG[LOCK_WAIT], compare
+ reference clock and divided VCO clock for this many cycles:
+ 0x0 = Compare after 5000 reference clock cycles.
+ 0x1 = Compare after 10000 reference clock cycles.
+ 0x2 = Compare after 20000 reference clock cycles.
+ 0x3 = Compare after 2500 reference clock cycles. */
+ uint64_t pll_go2deep_idle : 1; /**< [ 12: 12](R/W) Set to cycle the common PLL into deep idle. */
+ uint64_t reserved_13_47 : 35;
+ uint64_t rst_pll_rst_sm : 1; /**< [ 48: 48](R/W) Set to reset the full PLL reset state machine;
+ deassert to run the complete reset initialization sequence
+ starting with common PLL initialization. */
+ uint64_t reserved_49_54 : 6;
+ uint64_t domain_rst_en : 1; /**< [ 55: 55](R/W) Domain reset enable.
+ 0 = Prevent resetting lane logic with domain reset.
+ 1 = Enable resetting all lane logic with domain reset.
+
+ For PCIe configurations, typically 1 for a root complex and 0 for an endpoint. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_bcfg bdk_gsernx_common_rst_bcfg_t;
+
+/* CSR address of GSERN(a)_COMMON_RST_BCFG. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_RST_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f03b8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_BCFG(a) bdk_gsernx_common_rst_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_BCFG(a) "GSERNX_COMMON_RST_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_cnt0_bcfg
+ *
+ * GSER Common Reset State Machine Delay Count Register 0
+ * Wait counts for the common block reset state machines. All fields must be set
+ * before bringing the common block out of reset.
+ */
+union bdk_gsernx_common_rst_cnt0_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_cnt0_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t pre_bias_pwup_wait : 7; /**< [ 6: 0](R/W) Wait count in service clock cycles after initial trigger before
+ deasserting powerdown to the bias generator. The actual delay will be
+ three cycles more than set here, so set this field to the minimum
+ specified delay, 0x40, minus three, or greater. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_bias_pwup_wait : 7; /**< [ 6: 0](R/W) Wait count in service clock cycles after initial trigger before
+ deasserting powerdown to the bias generator. The actual delay will be
+ three cycles more than set here, so set this field to the minimum
+ specified delay, 0x40, minus three, or greater. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_cnt0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_cnt0_bcfg bdk_gsernx_common_rst_cnt0_bcfg_t;
+
+/* CSR address of GSERN(a)_COMMON_RST_CNT0_BCFG. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT0_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT0_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0440ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_CNT0_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) bdk_gsernx_common_rst_cnt0_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) "GSERNX_COMMON_RST_CNT0_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_CNT0_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_cnt1_bcfg
+ *
+ * GSER Common Reset State Machine Delay Count Register 1
+ * Wait counts for the common block reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_common_rst_cnt1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_cnt1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t cal_en_wait : 18; /**< [ 49: 32](R/W) Wait count in service clock cycles after calibration enable before deasserting
+ calibration enable to the PLL. Set this field to one less than the desired
+ number of cycles of delay. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t pre_cal_en_wait : 12; /**< [ 27: 16](R/W) Wait count in service clock cycles after deasserting resets to the PLL fracn,
+ ssc, and cal_en state machines before asserting calibration enable to the
+ PLL. Set this to one less than the desired number of cycles of delay. */
+ uint64_t reserved_11_15 : 5;
+ uint64_t pre_pwup_wait : 11; /**< [ 10: 0](R/W) Wait count in service clock cycles after powering up the bias
+ generator before deasserting pwdn to the PLL. The actual delay will
+ be one cycle more than set here, so set this field to the minimum
+ specified delay, 0x400, minus one, or greater. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_pwup_wait : 11; /**< [ 10: 0](R/W) Wait count in service clock cycles after powering up the bias
+ generator before deasserting pwdn to the PLL. The actual delay will
+ be one cycle more than set here, so set this field to the minimum
+ specified delay, 0x400, minus one, or greater. */
+ uint64_t reserved_11_15 : 5;
+ uint64_t pre_cal_en_wait : 12; /**< [ 27: 16](R/W) Wait count in service clock cycles after deasserting resets to the PLL fracn,
+ ssc, and cal_en state machines before asserting calibration enable to the
+ PLL. Set this to one less than the desired number of cycles of delay. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t cal_en_wait : 18; /**< [ 49: 32](R/W) Wait count in service clock cycles after calibration enable before deasserting
+ calibration enable to the PLL. Set this field to one less than the desired
+ number of cycles of delay. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_cnt1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_cnt1_bcfg bdk_gsernx_common_rst_cnt1_bcfg_t;
+
+/* CSR address of GSERN(a)_COMMON_RST_CNT1_BCFG. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT1_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT1_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f04c8ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_CNT1_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) bdk_gsernx_common_rst_cnt1_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) "GSERNX_COMMON_RST_CNT1_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_CNT1_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_cnt2_bcfg
+ *
+ * GSER Common Reset State Machine Delay Count Register 2
+ * Wait counts for the common block reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_common_rst_cnt2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_cnt2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t pre_run_wait : 14; /**< [ 61: 48](R/W) Wait count in service clock cycles after the PLL is running before deasserting
+ common lane reset to bring the lanes out of reset. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t pre_pll_sm_reset_wait : 9; /**< [ 40: 32](R/W) Wait count in service clock cycles after deasserting pwdn before
+ deasserting resets to the PLL fracn, ssc, and cal_en state
+ machines. Set this field to one less than the desired number of
+ cycles of delay. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t pre_pdiv_reset_wait : 13; /**< [ 28: 16](R/W) Reserved.
+ Internal:
+ The PLL no longer has a postdivider reset. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pre_pll_reset_wait : 12; /**< [ 11: 0](R/W) Wait count in service clock cycles after calibration enable deasserts
+ before deasserting reset to the PLL. Set this field to one less
+ than the desired number of cycles of delay. */
+#else /* Word 0 - Little Endian */
+ uint64_t pre_pll_reset_wait : 12; /**< [ 11: 0](R/W) Wait count in service clock cycles after calibration enable deasserts
+ before deasserting reset to the PLL. Set this field to one less
+ than the desired number of cycles of delay. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t pre_pdiv_reset_wait : 13; /**< [ 28: 16](R/W) Reserved.
+ Internal:
+ The PLL no longer has a postdivider reset. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t pre_pll_sm_reset_wait : 9; /**< [ 40: 32](R/W) Wait count in service clock cycles after deasserting pwdn before
+ deasserting resets to the PLL fracn, ssc, and cal_en state
+ machines. Set this field to one less than the desired number of
+ cycles of delay. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t pre_run_wait : 14; /**< [ 61: 48](R/W) Wait count in service clock cycles after the PLL is running before deasserting
+ common lane reset to bring the lanes out of reset. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_cnt2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_cnt2_bcfg bdk_gsernx_common_rst_cnt2_bcfg_t;
+
+/* CSR address of GSERN(a)_COMMON_RST_CNT2_BCFG. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT2_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_CNT2_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0550ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_CNT2_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) bdk_gsernx_common_rst_cnt2_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) "GSERNX_COMMON_RST_CNT2_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_CNT2_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_common_rst_rdy_bcfg
+ *
+ * GSER Common Reset Ready Control Register
+ */
+union bdk_gsernx_common_rst_rdy_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_common_rst_rdy_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ln_en : 4; /**< [ 3: 0](R/W) Enables for lane reset ready inclusion in aggregated QLM reset ready output to
+ the reset controller. Each bit enables contribution from the corresponding lane.
+ \<0\> = Include lane 0.
+ \<1\> = Include lane 1.
+ \<2\> = Include lane 2.
+ \<3\> = Include lane 3. */
+#else /* Word 0 - Little Endian */
+ uint64_t ln_en : 4; /**< [ 3: 0](R/W) Enables for lane reset ready inclusion in aggregated QLM reset ready output to
+ the reset controller. Each bit enables contribution from the corresponding lane.
+ \<0\> = Include lane 0.
+ \<1\> = Include lane 1.
+ \<2\> = Include lane 2.
+ \<3\> = Include lane 3. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_common_rst_rdy_bcfg_s cn; */
+};
+typedef union bdk_gsernx_common_rst_rdy_bcfg bdk_gsernx_common_rst_rdy_bcfg_t;
+
+/* CSR address of GSERN(a)_COMMON_RST_RDY_BCFG. Valid only on CN9XXX with
+   a <= 7; any other argument traps via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_COMMON_RST_RDY_BCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_COMMON_RST_RDY_BCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=7))
+ return 0x87e0900f0660ll + 0x1000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("GSERNX_COMMON_RST_RDY_BCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) bdk_gsernx_common_rst_rdy_bcfg_t
+#define bustype_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) "GSERNX_COMMON_RST_RDY_BCFG"
+#define device_bar_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) (a)
+#define arguments_BDK_GSERNX_COMMON_RST_RDY_BCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_btsclk_cfg
+ *
+ * GSER Lane BTS Synchronous Ethernet Clock Control Register
+ * Register controls settings for providing a clock output from the lane which is
+ * synchronous to the clock recovered from the received data stream.
+ */
+union bdk_gsernx_lanex_btsclk_cfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_btsclk_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t en : 1; /**< [ 8: 8](R/W) Enable driving the clock output from the lane. This bit should be set low before
+ changing [DRATIO]; it may be written to 1 in the same cycle that [DRATIO] is
+ written. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t dratio : 2; /**< [ 1: 0](R/W) Divider ratio for the clock output from the lane relative to the clock for the
+ parallel receive data.
+ 0x0 = Divide by 1, i.e., no division.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8. */
+#else /* Word 0 - Little Endian */
+ uint64_t dratio : 2; /**< [ 1: 0](R/W) Divider ratio for the clock output from the lane relative to the clock for the
+ parallel receive data.
+ 0x0 = Divide by 1, i.e., no division.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t en : 1; /**< [ 8: 8](R/W) Enable driving the clock output from the lane. This bit should be set low before
+ changing [DRATIO]; it may be written to 1 in the same cycle that [DRATIO] is
+ written. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_btsclk_cfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_btsclk_cfg bdk_gsernx_lanex_btsclk_cfg_t;
+
+/* CSR address of GSERN(a)_LANE(b)_BTSCLK_CFG. Valid only on CN9XXX with
+   a <= 7 and b <= 4; any other argument traps via __bdk_csr_fatal().
+   NOTE(review): the range check accepts b <= 4 while the address mask is
+   (b) & 0x7 — this matches the other generated lane accessors in this file;
+   confirm the per-QLM lane count against the CN9XXX hardware manual. */
+static inline uint64_t BDK_GSERNX_LANEX_BTSCLK_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_BTSCLK_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003870ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_BTSCLK_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) bdk_gsernx_lanex_btsclk_cfg_t
+#define bustype_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) "GSERNX_LANEX_BTSCLK_CFG"
+#define device_bar_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_BTSCLK_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_cdrfsm_bcfg
+ *
+ * GSER Lane Receiver CDR FSM Base Configuration Register
+ * Controls for the clock data recovery PLL control finite state
+ * machine. Set these controls prior to bringing the analog receiver out of
+ * reset.
+ */
+union bdk_gsernx_lanex_cdrfsm_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_cdrfsm_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t voter_sp_mask : 1; /**< [ 33: 33](R/W/H) Set to mask out "010" and "101" patterns in RX cdr voter. */
+ uint64_t rst_n : 1; /**< [ 32: 32](R/W/H) Clear to hold the receive CDR FSM in reset. */
+ uint64_t clk_sel : 2; /**< [ 31: 30](R/W/H) 0x0 = Run off div5clk from TX.
+ 0x1 = Run off div3clk from TX.
+ 0x2 = Run off div5clk from RX.
+ 0x3 = Run off div3clk from RX.
+
+ [CLK_SEL]\<0\> is also used in GSER TX to allow clocking the CDR FSM
+ with a divided copy of the transmit clock. This field should be set
+ as desired before sequencing the transmitter and/or receiver reset
+ state machine(s). */
+ uint64_t trunc : 2; /**< [ 29: 28](R/W/H) Reserved.
+ Internal:
+ state2[16:0] is CDR state machine 2nd order loop state variable.
+
+ 0x0 = state2[16:0] is truncated to 13 bits (plus sign bit).
+ 0x1 = state2[16:0] is truncated to 14 bits (plus sign bit).
+ 0x2 = state2[16:0] is truncated to 15 bits (plus sign bit).
+ 0x3 = state2[16:0] is truncated to 16 bits (plus sign bit, no truncation). */
+ uint64_t limit : 2; /**< [ 27: 26](R/W/H) 0x0 = Pass-through next state at boundaries.
+ 0x1 = Limit next state at boundaries.
+ 0x2-3 = Limit & pause next state at boundaries. */
+ uint64_t eoffs : 7; /**< [ 25: 19](R/W/H) E interp state offset. */
+ uint64_t qoffs : 7; /**< [ 18: 12](R/W/H) Q interp state offset. */
+ uint64_t inc2 : 6; /**< [ 11: 6](R/W/H) 2nd order loop inc. */
+ uint64_t inc1 : 6; /**< [ 5: 0](R/W/H) 1st order loop inc. */
+#else /* Word 0 - Little Endian */
+ uint64_t inc1 : 6; /**< [ 5: 0](R/W/H) 1st order loop inc. */
+ uint64_t inc2 : 6; /**< [ 11: 6](R/W/H) 2nd order loop inc. */
+ uint64_t qoffs : 7; /**< [ 18: 12](R/W/H) Q interp state offset. */
+ uint64_t eoffs : 7; /**< [ 25: 19](R/W/H) E interp state offset. */
+ uint64_t limit : 2; /**< [ 27: 26](R/W/H) 0x0 = Pass-through next state at boundaries.
+ 0x1 = Limit next state at boundaries.
+ 0x2-3 = Limit & pause next state at boundaries. */
+ uint64_t trunc : 2; /**< [ 29: 28](R/W/H) Reserved.
+ Internal:
+ state2[16:0] is CDR state machine 2nd order loop state variable.
+
+ 0x0 = state2[16:0] is truncated to 13 bits (plus sign bit).
+ 0x1 = state2[16:0] is truncated to 14 bits (plus sign bit).
+ 0x2 = state2[16:0] is truncated to 15 bits (plus sign bit).
+ 0x3 = state2[16:0] is truncated to 16 bits (plus sign bit, no truncation). */
+ uint64_t clk_sel : 2; /**< [ 31: 30](R/W/H) 0x0 = Run off div5clk from TX.
+ 0x1 = Run off div3clk from TX.
+ 0x2 = Run off div5clk from RX.
+ 0x3 = Run off div3clk from RX.
+
+ [CLK_SEL]\<0\> is also used in GSER TX to allow clocking the CDR FSM
+ with a divided copy of the transmit clock. This field should be set
+ as desired before sequencing the transmitter and/or receiver reset
+ state machine(s). */
+ uint64_t rst_n : 1; /**< [ 32: 32](R/W/H) Clear to hold the receive CDR FSM in reset. */
+ uint64_t voter_sp_mask : 1; /**< [ 33: 33](R/W/H) Set to mask out "010" and "101" patterns in RX cdr voter. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_cdrfsm_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_cdrfsm_bcfg bdk_gsernx_lanex_cdrfsm_bcfg_t;
+
+/* Return the RSL CSR address of GSERN(a)_LANE(b)_CDRFSM_BCFG on CN9XXX
+ (module a <= 7, lane b <= 4); otherwise __bdk_csr_fatal() is invoked. */
+static inline uint64_t BDK_GSERNX_LANEX_CDRFSM_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_CDRFSM_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001cf0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_CDRFSM_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) bdk_gsernx_lanex_cdrfsm_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) "GSERNX_LANEX_CDRFSM_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_CDRFSM_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_cgx_txeq_bcfg
+ *
+ * GSER Lane CGX Tx Equalizer Base Configuration Register
+ * Register controls settings for the transmitter equalizer taps
+ * when the GSER is configured for CGX mode and KR training is not enabled.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL] is set to 'CGX'.
+ */
+union bdk_gsernx_lanex_cgx_txeq_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_cgx_txeq_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t tx_coeff_update : 1; /**< [ 27: 27](R/W/H) Transmitter coefficient update.
+ An asserting edge will start the transmitter coefficient update
+ sequencer. This field self-clears when the sequence has completed.
+ To update the GSER transmitter equalizer coefficients program
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CSPD].
+
+ then write [TX_COEFF_UPDATE] to 1. */
+ uint64_t tx_enable : 1; /**< [ 26: 26](R/W) Transmitter enable.
+ 0 = Disable the serdes transmitter.
+ 1 = Enable the serdes transmitter.
+
+ Internal:
+ Drives the cgx_tx_enable input to the GSERN src_mux. */
+ uint64_t tx_stuff : 1; /**< [ 25: 25](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter bit stuffing.
+ Programs the transmitter PCS lite layer for bit stuffing.
+ Not used for Ethernet connections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_stuff input to the GSERN src_mux. */
+ uint64_t tx_oob : 1; /**< [ 24: 24](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter OOB signaling.
+ Not typically used for Ethernet connections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_oob input to the GSERN src_mux. */
+ uint64_t tx_idle : 1; /**< [ 23: 23](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter electrical idle.
+ Used to force the transmitter to electrical idle.
+ Not typically used for Ethernet connections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_idle input to the GSERN src_mux. */
+ uint64_t tx_cspd : 1; /**< [ 22: 22](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t tx_bs : 6; /**< [ 21: 16](R/W) TX bias/swing selection. This setting only takes effect if [TX_CSPD]
+ is deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t tx_cpost : 5; /**< [ 15: 11](R/W) Transmitter Post (C+1) equalizer tap coefficient value.
+ Programs the transmitter Post tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_cmain : 6; /**< [ 10: 5](R/W) Transmitter Main (C0) equalizer tap coefficient value.
+ Programs the serdes transmitter Main tap.
+ Valid range is 0x30 to 0x18.
+ When programming the transmitter Pre, Main, and Post
+ taps the following rules must be adhered to:
+ _ ([TX_CMAIN] + [TX_CPRE] + [TX_CPOST]) \<= 0x30.
+ _ ([TX_CMAIN] - [TX_CPRE] - [TX_CPOST]) \>= 0x6.
+ _ 0x30 \<= [TX_CMAIN] \<= 0x18.
+ _ 0x16 \>= [TX_CPRE] \>= 0x0.
+ _ 0x16 \>= [TX_CPOST] \>= 0x0.
+
+ [TX_CMAIN] should be adjusted when either [TX_CPRE] or [TX_CPOST] is adjusted to
+ provide constant power transmitter amplitude adjustments.
+
+ To update the GSER serdes transmitter Pre, Main, and Post
+ equalizer taps from the [TX_CPOST], [TX_CMAIN], and [TX_CPRE]
+ fields write GSERN()_LANE()_CGX_TXEQ_BCFG[TX_COEFF_UPDATE]
+ to 1 and subsequently clear [TX_COEFF_UPDATE] to 0. This step
+ transfers the [TX_CPOST], [TX_CMAIN], and [TX_CPRE] to the
+ serdes transmitter equalizer.
+
+ Related CSRs:
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_COEFF_UPDATE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CSPD]. */
+ uint64_t tx_cpre : 5; /**< [ 4: 0](R/W) Transmitter Pre (C-1) equalizer tap coefficient value.
+ Programs the transmitter Pre tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_cpre : 5; /**< [ 4: 0](R/W) Transmitter Pre (C-1) equalizer tap coefficient value.
+ Programs the transmitter Pre tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_cmain : 6; /**< [ 10: 5](R/W) Transmitter Main (C0) equalizer tap coefficient value.
+ Programs the serdes transmitter Main tap.
+ Valid range is 0x30 to 0x18.
+ When programming the transmitter Pre, Main, and Post
+ taps the following rules must be adhered to:
+ _ ([TX_CMAIN] + [TX_CPRE] + [TX_CPOST]) \<= 0x30.
+ _ ([TX_CMAIN] - [TX_CPRE] - [TX_CPOST]) \>= 0x6.
+ _ 0x30 \<= [TX_CMAIN] \<= 0x18.
+ _ 0x16 \>= [TX_CPRE] \>= 0x0.
+ _ 0x16 \>= [TX_CPOST] \>= 0x0.
+
+ [TX_CMAIN] should be adjusted when either [TX_CPRE] or [TX_CPOST] is adjusted to
+ provide constant power transmitter amplitude adjustments.
+
+ To update the GSER serdes transmitter Pre, Main, and Post
+ equalizer taps from the [TX_CPOST], [TX_CMAIN], and [TX_CPRE]
+ fields write GSERN()_LANE()_CGX_TXEQ_BCFG[TX_COEFF_UPDATE]
+ to 1 and subsequently clear [TX_COEFF_UPDATE] to 0. This step
+ transfers the [TX_CPOST], [TX_CMAIN], and [TX_CPRE] to the
+ serdes transmitter equalizer.
+
+ Related CSRs:
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_COEFF_UPDATE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CSPD]. */
+ uint64_t tx_cpost : 5; /**< [ 15: 11](R/W) Transmitter Post (C+1) equalizer tap coefficient value.
+ Programs the transmitter Post tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_bs : 6; /**< [ 21: 16](R/W) TX bias/swing selection. This setting only takes effect if [TX_CSPD]
+ is deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t tx_cspd : 1; /**< [ 22: 22](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t tx_idle : 1; /**< [ 23: 23](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter electrical idle.
+ Used to force the transmitter to electrical idle.
+ Not typically used for Ethernet connections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_idle input to the GSERN src_mux. */
+ uint64_t tx_oob : 1; /**< [ 24: 24](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter OOB signaling.
+ Not typically used for Ethernet connections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_oob input to the GSERN src_mux. */
+ uint64_t tx_stuff : 1; /**< [ 25: 25](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter bit stuffing.
+ Programs the transmitter PCS lite layer for bit stuffing.
+ Not used for Ethernet connections.
+ Leave programmed to 0x0.
+ Drives the cgx_tx_stuff input to the GSERN src_mux. */
+ uint64_t tx_enable : 1; /**< [ 26: 26](R/W) Transmitter enable.
+ 0 = Disable the serdes transmitter.
+ 1 = Enable the serdes transmitter.
+
+ Internal:
+ Drives the cgx_tx_enable input to the GSERN src_mux. */
+ uint64_t tx_coeff_update : 1; /**< [ 27: 27](R/W/H) Transmitter coefficient update.
+ An asserting edge will start the transmitter coefficient update
+ sequencer. This field self-clears when the sequence has completed.
+ To update the GSER transmitter equalizer coefficients program
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CMAIN].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_CGX_TXEQ_BCFG[TX_CSPD].
+
+ then write [TX_COEFF_UPDATE] to 1. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_cgx_txeq_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_cgx_txeq_bcfg bdk_gsernx_lanex_cgx_txeq_bcfg_t;
+
+/* Return the RSL CSR address of GSERN(a)_LANE(b)_CGX_TXEQ_BCFG on CN9XXX
+ (module a <= 7, lane b <= 4); otherwise __bdk_csr_fatal() is invoked. */
+static inline uint64_t BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003450ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_CGX_TXEQ_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) bdk_gsernx_lanex_cgx_txeq_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) "GSERNX_LANEX_CGX_TXEQ_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_CGX_TXEQ_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_const
+ *
+ * GSER Lane CONST Register
+ * Lane number within the multilane macro.
+ */
+union bdk_gsernx_lanex_const
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t lane_num : 8; /**< [ 7: 0](RO/H) Lane number of this lane within the multilane module. */
+#else /* Word 0 - Little Endian */
+ uint64_t lane_num : 8; /**< [ 7: 0](RO/H) Lane number of this lane within the multilane module. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_const_s cn; */
+};
+typedef union bdk_gsernx_lanex_const bdk_gsernx_lanex_const_t;
+
+/* Return the RSL CSR address of the read-only GSERN(a)_LANE(b)_CONST register
+ on CN9XXX (module a <= 7, lane b <= 4); otherwise __bdk_csr_fatal() is invoked. */
+static inline uint64_t BDK_GSERNX_LANEX_CONST(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_CONST(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000100ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_CONST", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_CONST(a,b) bdk_gsernx_lanex_const_t
+#define bustype_BDK_GSERNX_LANEX_CONST(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_CONST(a,b) "GSERNX_LANEX_CONST"
+#define device_bar_BDK_GSERNX_LANEX_CONST(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_CONST(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_CONST(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eco
+ *
+ * INTERNAL: GSER Lane ECO Register
+ */
+union bdk_gsernx_lanex_eco
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t eco_rw : 50; /**< [ 63: 14](R/W) Internal:
+ Reserved for ECO use. */
+ uint64_t eco_rw_pll : 2; /**< [ 13: 12](R/W) Internal:
+ Pre-connected to the PLL. Reserved for ECO use. */
+ uint64_t eco_rw_tx : 4; /**< [ 11: 8](R/W) Internal:
+ Pre-connected to Tx custom. Reserved for ECO use. */
+ uint64_t eco_rw_rx_top : 4; /**< [ 7: 4](R/W) Internal:
+ Pre-connected to the north side of Rx custom. Reserved for ECO use. */
+ uint64_t eco_rw_rx_bot : 4; /**< [ 3: 0](R/W) Internal:
+ Pre-connected to the south side of Rx custom. Reserved for ECO use. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw_rx_bot : 4; /**< [ 3: 0](R/W) Internal:
+ Pre-connected to the south side of Rx custom. Reserved for ECO use. */
+ uint64_t eco_rw_rx_top : 4; /**< [ 7: 4](R/W) Internal:
+ Pre-connected to the north side of Rx custom. Reserved for ECO use. */
+ uint64_t eco_rw_tx : 4; /**< [ 11: 8](R/W) Internal:
+ Pre-connected to Tx custom. Reserved for ECO use. */
+ uint64_t eco_rw_pll : 2; /**< [ 13: 12](R/W) Internal:
+ Pre-connected to the PLL. Reserved for ECO use. */
+ uint64_t eco_rw : 50; /**< [ 63: 14](R/W) Internal:
+ Reserved for ECO use. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_eco_s cn; */
+};
+typedef union bdk_gsernx_lanex_eco bdk_gsernx_lanex_eco_t;
+
+/* Return the RSL CSR address of the internal GSERN(a)_LANE(b)_ECO register
+ on CN9XXX (module a <= 7, lane b <= 4); otherwise __bdk_csr_fatal() is invoked. */
+static inline uint64_t BDK_GSERNX_LANEX_ECO(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_ECO(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003970ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_ECO", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_ECO(a,b) bdk_gsernx_lanex_eco_t
+#define bustype_BDK_GSERNX_LANEX_ECO(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_ECO(a,b) "GSERNX_LANEX_ECO"
+#define device_bar_BDK_GSERNX_LANEX_ECO(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_ECO(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_ECO(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eee_bcfg
+ *
+ * INTERNAL: GSER Lane EEE Base Configuration Register
+ *
+ * Reserved.
+ * Internal:
+ * Register controls settings for GSER behavior when Energy Efficient Ethernet (EEE) is
+ * in use on the link.
+ */
+union bdk_gsernx_lanex_eee_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_eee_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t rx_qa_sqlch_cnt : 12; /**< [ 57: 46](R/W) Reserved.
+ Internal:
+ Receiever QUIET to DATA squelch count.
+ Used to implement a delay or filter function for the receive data to the
+ CGX MAC when the receiver transitions from the EEE QUIET state to the
+ EEE ACTIVE state. [RX_QA_SQLCH_CNT] counter is in units of 10ns.
+ Used in conjuncton with GSERN()_LANE()_EEE_BCFG[RX_QA_SQLCH_EN]. */
+ uint64_t rx_qa_sqlch_en : 1; /**< [ 45: 45](R/W) Reserved.
+ Internal:
+ Receiever QUIET to DATA squelch enable.
+ When [RX_QA_SQLCH_EN] is enabled the receive data to the CGX MAC will be
+ suppressed following the transition from receiver EEE QUIET state to
+ receiver EEE ACTIVE state for the time defined by the
+ GSERN()_LANE()_EEE_BCFG[RX_QA_SQLCH_CNT] squelch count in units of 10ns.
+ This is a optional filtering function to prevent garbage data to the CGX MAC
+ as the receiver is transitioning from the EEE QUIET to EEE ACTIVE states. */
+ uint64_t tx_quiet_drv_en : 1; /**< [ 44: 44](R/W) Reserved.
+ Internal:
+ Transmitter QUIET drive enable.
+ When [TX_QUIET_DRV_EN] is set to one the transmitter Tx+/Tx- driver outputs
+ will drive to electrical idle when either the CGX MAC moves the
+ SerDes transmitter block from the EEE ACTIVE state to the EEE QUIET state or
+ the GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD] is set to one. This ensures that
+ the link partner receiver energy detector sees the local device transmitter
+ transition from the EEE ACTIVE state to the EEE QUIET state.
+ When [TX_QUIET_DRV_EN] is set to one the transmitter Tx+/Tx- driver outputs
+ will drive to electrical idle even if the GSERN()_LANE()_EEE_BCFG[TX_PWRDN_EN]
+ is cleared to zero to inhibit the transmitter from powering down during EEE
+ deep sleep TX QUIET state. When [TX_QUIET_DRV_EN] is cleared to zero the
+ Transmitter Tx+/Tx- outputs will only drive to electrical idle when the
+ transmitter is powered down by CGX or GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD]
+ is set to one and GSERN()_LANE()_EEE_BCFG[TX_PWRDN_EN] is also
+ set to one to enable transmitter power down. */
+ uint64_t eee_edet : 1; /**< [ 43: 43](RO/H) Reserved.
+ Internal:
+ EEE energy detected.
+ For diagnostic use only. Reflects the state of
+ the EEE energy detector. Used to test signals for the wake from
+ EEE deep sleep power down modes of the SerDes. */
+ uint64_t eee_ovrrd : 1; /**< [ 42: 42](R/W) Reserved.
+ Internal:
+ EEE override.
+ For diagnostic use only. When [EEE_OVRRD] is set to one the SerDes EEE rx and
+ tx modes are controlled by GSERN()_LANE()_EEE_BCFG[EEE_RX_OVRRD] and
+ GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD]. Used to test the EEE deep sleep
+ power down modes of the SerDes. */
+ uint64_t eee_tx_ovrrd : 2; /**< [ 41: 40](R/W) Reserved.
+ Internal:
+ EEE Tx override.
+ For diagnostic use only. When GSERN()_LANE()_EEE_BCFG[EEE_OVRRD] is set to one
+ the SerDes transmitter modes are controlled by [EEE_TX_OVRRD]. Used to
+ test the EEE deep sleep power down modes of the SerDes transmitter.
+ 0x0 = ACTIVE/DATA mode
+ 0x1 = QUIET
+ 0x2 = ALERT
+ 0x3 = Reserved. */
+ uint64_t eee_rx_ovrrd : 1; /**< [ 39: 39](R/W) Reserved.
+ Internal:
+ EEE Rx override.
+ For diagnostic use only. When GSERN()_LANE()_EEE_BCFG[EEE_OVRRD] is set to one
+ the SerDes receiver modes are controlled by [EEE_RX_OVRRD]. Used to
+ test the EEE deep sleep power down modes of the SerDes receiver.
+ 0x0 = ACTIVE/DATA mode
+ 0x1 = QUIET */
+ uint64_t bypass_edet : 1; /**< [ 38: 38](R/W) Reserved.
+ Internal:
+ EEE energy detect bypass.
+ 0 = The Energy Detect EDET signal to CGX will behave normally. EDET will be set
+ to one when energy is detected at the lane receiver and EDET will be cleared to zero
+ when there is no energy detected at the lane receiver.
+ 1 = The Energy Detect EDET signal to CGX will always be set to 1 bypassing
+ the energy detect function. */
+ uint64_t pwrdn_mode : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Programs the PHY power mode down during EEE.
+ Used to select the P1, P2, or Shutdown powe states when entering deep sleep mode.
+ 0x0 = Reserved.
+ 0x1 = The PHY will power down to the P1 power state and the power state cntrols
+ will be configured from the GSERN()_LANE()_EEE_RSTP1_BCFG register.
+ 0x2 = The PHY will power down to the P2 power state and the power state controls
+ will be configured from the GSERN()_LANE()_EEE_RSTP2_BCFG register.
+ 0x3 = The PHY will power down to the shutdown (SHDN) power state and the power
+ state controls will be configured from the GSERN()_LANE()_EEE_RSTSHDN_BCFG register. */
+ uint64_t eyemon_pwrdn_en : 1; /**< [ 35: 35](R/W) Reserved.
+ Internal:
+ Programs the behavior of the eye monitor power down during EEE.
+ 0 = The eye monitor will not power down during EEE quiet mode.
+ 1 = The eye monitor will power down during the EEE quiet mode. */
+ uint64_t lpll_pwrdn_en : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Programs the behavior of the lane PLL power down during EEE.
+ 0 = The lane PLL will not power down during EEE quiet mode.
+ 1 = The lane PLL will power down during the EEE quiet mode. */
+ uint64_t tx_pwrdn_en : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Programs the behavior of the transmitter power down during EEE.
+ 0 = The transmitter will not power down during EEE quiet mode.
+ 1 = The transmitter will power down during the EEE quiet mode. */
+ uint64_t rx_pwrdn_en : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Programs the behavior of the receiver power down during EEE.
+ 0 = The receiver will not power down during EEE quiet mode.
+ 1 = The receiver will power down during the EEE Quiet mode. */
+ uint64_t tx_dly_cnt : 16; /**< [ 31: 16](R/W) Reserved.
+ Internal:
+ Programs the delay of the TX PCS layer when the Tx side is transitione from the EEE QUIET
+ phase to the ALERT or ACTIVE phase. This programmable delay adds delau to ensure that
+ txdivclk is running and stable before Tx data resumes.
+ The delay units are in units of service-clock cycles. For diagnostic use only. */
+ uint64_t rx_dly_cnt : 16; /**< [ 15: 0](R/W) Reserved.
+ Internal:
+ Programs the delay of the RX PCS layer when the receiver is transitioned froom the EEE
+ QUIET to ACTIVE phase. The programmable delay adds delay to ensure that the rxdivclk
+ is running and stable before Rx data resumes.
+ The delay units are in units of service-clock cycles. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t rx_dly_cnt : 16; /**< [ 15: 0](R/W) Reserved.
+ Internal:
+ Programs the delay of the RX PCS layer when the receiver is transitioned froom the EEE
+ QUIET to ACTIVE phase. The programmable delay adds delay to ensure that the rxdivclk
+ is running and stable before Rx data resumes.
+ The delay units are in units of service-clock cycles. For diagnostic use only. */
+ uint64_t tx_dly_cnt : 16; /**< [ 31: 16](R/W) Reserved.
+ Internal:
+ Programs the delay of the TX PCS layer when the Tx side is transitione from the EEE QUIET
+ phase to the ALERT or ACTIVE phase. This programmable delay adds delau to ensure that
+ txdivclk is running and stable before Tx data resumes.
+ The delay units are in units of service-clock cycles. For diagnostic use only. */
+ uint64_t rx_pwrdn_en : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Programs the behavior of the receiver power down during EEE.
+ 0 = The receiver will not power down during EEE quiet mode.
+ 1 = The receiver will power down during the EEE Quiet mode. */
+ uint64_t tx_pwrdn_en : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Programs the behavior of the transmitter power down during EEE.
+ 0 = The transmitter will not power down during EEE quiet mode.
+ 1 = The transmitter will power down during the EEE quiet mode. */
+ uint64_t lpll_pwrdn_en : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Programs the behavior of the lane PLL power down during EEE.
+ 0 = The lane PLL will not power down during EEE quiet mode.
+ 1 = The lane PLL will power down during the EEE quiet mode. */
+ uint64_t eyemon_pwrdn_en : 1; /**< [ 35: 35](R/W) Reserved.
+ Internal:
+ Programs the behavior of the eye monitor power down during EEE.
+ 0 = The eye monitor will not power down during EEE quiet mode.
+ 1 = The eye monitor will power down during the EEE quiet mode. */
+ uint64_t pwrdn_mode : 2; /**< [ 37: 36](R/W) Reserved.
+ Internal:
+ Programs the PHY power mode down during EEE.
+ Used to select the P1, P2, or Shutdown powe states when entering deep sleep mode.
+ 0x0 = Reserved.
+ 0x1 = The PHY will power down to the P1 power state and the power state cntrols
+ will be configured from the GSERN()_LANE()_EEE_RSTP1_BCFG register.
+ 0x2 = The PHY will power down to the P2 power state and the power state controls
+ will be configured from the GSERN()_LANE()_EEE_RSTP2_BCFG register.
+ 0x3 = The PHY will power down to the shutdown (SHDN) power state and the power
+ state controls will be configured from the GSERN()_LANE()_EEE_RSTSHDN_BCFG register. */
+ uint64_t bypass_edet : 1; /**< [ 38: 38](R/W) Reserved.
+ Internal:
+ EEE energy detect bypass.
+ 0 = The Energy Detect EDET signal to CGX will behave normally. EDET will be set
+ to one when energy is detected at the lane receiver and EDET will be cleared to zero
+ when there is no energy detected at the lane receiver.
+ 1 = The Energy Detect EDET signal to CGX will always be set to 1 bypassing
+ the energy detect function. */
+ uint64_t eee_rx_ovrrd : 1; /**< [ 39: 39](R/W) Reserved.
+ Internal:
+ EEE Rx override.
+ For diagnostic use only. When GSERN()_LANE()_EEE_BCFG[EEE_OVRRD] is set to one
+ the SerDes receiver modes are controlled by [EEE_RX_OVRRD]. Used to
+ test the EEE deep sleep power down modes of the SerDes receiver.
+ 0x0 = ACTIVE/DATA mode
+ 0x1 = QUIET */
+ uint64_t eee_tx_ovrrd : 2; /**< [ 41: 40](R/W) Reserved.
+ Internal:
+ EEE Tx override.
+ For diagnostic use only. When GSERN()_LANE()_EEE_BCFG[EEE_OVRRD] is set to one
+ the SerDes transmitter modes are controlled by [EEE_TX_OVRRD]. Used to
+ test the EEE deep sleep power down modes of the SerDes transmitter.
+ 0x0 = ACTIVE/DATA mode
+ 0x1 = QUIET
+ 0x2 = ALERT
+ 0x3 = Reserved. */
+ uint64_t eee_ovrrd : 1; /**< [ 42: 42](R/W) Reserved.
+ Internal:
+ EEE override.
+ For diagnostic use only. When [EEE_OVRRD] is set to one the SerDes EEE rx and
+ tx modes are controlled by GSERN()_LANE()_EEE_BCFG[EEE_RX_OVRRD] and
+ GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD]. Used to test the EEE deep sleep
+ power down modes of the SerDes. */
+ uint64_t eee_edet : 1; /**< [ 43: 43](RO/H) Reserved.
+ Internal:
+ EEE energy detected.
+ For diagnostic use only. Reflects the state of
+ the EEE energy detector. Used to test signals for the wake from
+ EEE deep sleep power down modes of the SerDes. */
+ uint64_t tx_quiet_drv_en : 1; /**< [ 44: 44](R/W) Reserved.
+ Internal:
+ Transmitter QUIET drive enable.
+ When [TX_QUIET_DRV_EN] is set to one the transmitter Tx+/Tx- driver outputs
+ will drive to electrical idle when either the CGX MAC moves the
+ SerDes transmitter block from the EEE ACTIVE state to the EEE QUIET state or
+ the GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD] is set to one. This ensures that
+ the link partner receiver energy detector sees the local device transmitter
+ transition from the EEE ACTIVE state to the EEE QUIET state.
+ When [TX_QUIET_DRV_EN] is set to one the transmitter Tx+/Tx- driver outputs
+ will drive to electrical idle even if the GSERN()_LANE()_EEE_BCFG[TX_PWRDN_EN]
+ is cleared to zero to inhibit the transmitter from powering down during EEE
+ deep sleep TX QUIET state. When [TX_QUIET_DRV_EN] is cleared to zero the
+ Transmitter Tx+/Tx- outputs will only drive to electrical idle when the
+ transmitter is powered down by CGX or GSERN()_LANE()_EEE_BCFG[EEE_TX_OVRRD]
+ is set to one and GSERN()_LANE()_EEE_BCFG[TX_PWRDN_EN] is also
+ set to one to enable transmitter power down. */
+ uint64_t rx_qa_sqlch_en : 1; /**< [ 45: 45](R/W) Reserved.
+ Internal:
+ Receiever QUIET to DATA squelch enable.
+ When [RX_QA_SQLCH_EN] is enabled the receive data to the CGX MAC will be
+ suppressed following the transition from receiver EEE QUIET state to
+ receiver EEE ACTIVE state for the time defined by the
+ GSERN()_LANE()_EEE_BCFG[RX_QA_SQLCH_CNT] squelch count in units of 10ns.
+ This is a optional filtering function to prevent garbage data to the CGX MAC
+ as the receiver is transitioning from the EEE QUIET to EEE ACTIVE states. */
+ uint64_t rx_qa_sqlch_cnt : 12; /**< [ 57: 46](R/W) Reserved.
+ Internal:
+ Receiever QUIET to DATA squelch count.
+ Used to implement a delay or filter function for the receive data to the
+ CGX MAC when the receiver transitions from the EEE QUIET state to the
+ EEE ACTIVE state. [RX_QA_SQLCH_CNT] counter is in units of 10ns.
+ Used in conjuncton with GSERN()_LANE()_EEE_BCFG[RX_QA_SQLCH_EN]. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_eee_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_eee_bcfg bdk_gsernx_lanex_eee_bcfg_t;
+
+/* Return the physical RSL address of GSERN(a)_LANE(b)_EEE_BCFG.
+ * On CN9XXX the valid ranges are a (GSERN instance) 0..7 and b (lane) 0..4;
+ * any other combination reports a fatal CSR error via __bdk_csr_fatal().
+ * The "& 0x7" masks only size the address-stride fields; range validation is
+ * done by the explicit a<=7 / b<=4 checks. */
+static inline uint64_t BDK_GSERNX_LANEX_EEE_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EEE_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003650ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EEE_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata macros (C type, bus type, printable name, BAR,
+ * bus number, argument list) consumed by the generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_EEE_BCFG(a,b) bdk_gsernx_lanex_eee_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_EEE_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EEE_BCFG(a,b) "GSERNX_LANEX_EEE_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_EEE_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EEE_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EEE_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eee_rstp1_bcfg
+ *
+ * INTERNAL: GSER Lane EEE PowerDown P1 Reset States Control Register
+ *
+ * Reserved.
+ * Internal:
+ * Controls the power down and reset states of the serdes lane PLL, transmitter, receiver,
+ * receiver adaptation, and eye monitor blocks during the EEE deep sleep power down P1 state.
+ */
+union bdk_gsernx_lanex_eee_rstp1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_eee_rstp1_bcfg_s
+    {
+    /* NOTE(review): the BE and LE field lists are mirror images of the same
+       64-bit layout (widths sum to 64 in each); keep them in sync if edited. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+    uint64_t reserved_33_63 : 31;
+    uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep P1 PowerDown state. */
+    uint64_t reserved_29_31 : 3;
+    uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Eye monitor reset state during EEE deep sleep P1 PowerDown state. */
+    uint64_t reserved_21_23 : 3;
+    uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ RX reset state during EEE deep sleep P1 PowerDown state. */
+    uint64_t reserved_12_15 : 4;
+    uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ TX reset state during EEE deep sleep P1 PowerDown state. */
+    uint64_t reserved_4_7 : 4;
+    uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ LANE PLL reset state during EEE deep sleep P1 PowerDown state.
+ Note: this value is never likely to be changed from the normal run state (0x8). */
+#else /* Word 0 - Little Endian */
+    uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ LANE PLL reset state during EEE deep sleep P1 PowerDown state.
+ Note: this value is never likely to be changed from the normal run state (0x8). */
+    uint64_t reserved_4_7 : 4;
+    uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ TX reset state during EEE deep sleep P1 PowerDown state. */
+    uint64_t reserved_12_15 : 4;
+    uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ RX reset state during EEE deep sleep P1 PowerDown state. */
+    uint64_t reserved_21_23 : 3;
+    uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Eye monitor reset state during EEE deep sleep P1 PowerDown state. */
+    uint64_t reserved_29_31 : 3;
+    uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep P1 PowerDown state. */
+    uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eee_rstp1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_eee_rstp1_bcfg bdk_gsernx_lanex_eee_rstp1_bcfg_t;
+
+/* Address of GSERN(a)_LANE(b)_EEE_RSTP1_BCFG on CN9XXX (a 0..7, b 0..4);
+ * out-of-range arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003750ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EEE_RSTP1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata macros consumed by the generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) bdk_gsernx_lanex_eee_rstp1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) "GSERNX_LANEX_EEE_RSTP1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EEE_RSTP1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eee_rstp2_bcfg
+ *
+ * INTERNAL: GSER Lane EEE PowerDown P2 Reset States Control Register
+ *
+ * Reserved.
+ * Internal:
+ * Controls the power down and reset states of the serdes lane PLL, transmitter, receiver,
+ * receiver adaptation, and eye monitor blocks during the EEE deep sleep power down P2 state.
+ */
+union bdk_gsernx_lanex_eee_rstp2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_eee_rstp2_bcfg_s
+    {
+    /* NOTE(review): the BE and LE field lists are mirror images of the same
+       64-bit layout (widths sum to 64 in each); keep them in sync if edited. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+    uint64_t reserved_33_63 : 31;
+    uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_29_31 : 3;
+    uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Eye monitor reset state during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_21_23 : 3;
+    uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ RX reset state during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_12_15 : 4;
+    uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ TX reset state during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_4_7 : 4;
+    uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ LANE PLL reset state during EEE deep sleep P2 PowerDown state. */
+#else /* Word 0 - Little Endian */
+    uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ LANE PLL reset state during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_4_7 : 4;
+    uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ TX reset state during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_12_15 : 4;
+    uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ RX reset state during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_21_23 : 3;
+    uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Eye monitor reset state during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_29_31 : 3;
+    uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep P2 PowerDown state. */
+    uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eee_rstp2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_eee_rstp2_bcfg bdk_gsernx_lanex_eee_rstp2_bcfg_t;
+
+/* Address of GSERN(a)_LANE(b)_EEE_RSTP2_BCFG on CN9XXX (a 0..7, b 0..4);
+ * out-of-range arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003760ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EEE_RSTP2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata macros consumed by the generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) bdk_gsernx_lanex_eee_rstp2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) "GSERNX_LANEX_EEE_RSTP2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EEE_RSTP2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eee_rstshdn_bcfg
+ *
+ * INTERNAL: GSER Lane EEE PowerDown Shutdown Reset States Control Register
+ *
+ * Reserved.
+ * Internal:
+ * Controls the power down and reset states of the serdes lane PLL, transmitter, receiver,
+ * receiver adaptation, and eye monitor blocks during the EEE deep sleep power shut down state.
+ */
+union bdk_gsernx_lanex_eee_rstshdn_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_eee_rstshdn_bcfg_s
+    {
+    /* NOTE(review): the BE and LE field lists are mirror images of the same
+       64-bit layout (widths sum to 64 in each); keep them in sync if edited. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+    uint64_t reserved_33_63 : 31;
+    uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_29_31 : 3;
+    uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Eye monitor reset state during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_21_23 : 3;
+    uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ RX reset state during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_12_15 : 4;
+    uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ TX reset state during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_4_7 : 4;
+    uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ LANE PLL reset state during EEE deep sleep shutdown PowerDown state. */
+#else /* Word 0 - Little Endian */
+    uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ LANE PLL reset state during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_4_7 : 4;
+    uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ TX reset state during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_12_15 : 4;
+    uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ RX reset state during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_21_23 : 3;
+    uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ Eye monitor reset state during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_29_31 : 3;
+    uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ Rx Adapt state Pause (0) or Hard Reset (1) during EEE deep sleep shutdown PowerDown state. */
+    uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eee_rstshdn_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_eee_rstshdn_bcfg bdk_gsernx_lanex_eee_rstshdn_bcfg_t;
+
+/* Address of GSERN(a)_LANE(b)_EEE_RSTSHDN_BCFG on CN9XXX (a 0..7, b 0..4);
+ * out-of-range arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003770ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EEE_RSTSHDN_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata macros consumed by the generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) bdk_gsernx_lanex_eee_rstshdn_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) "GSERNX_LANEX_EEE_RSTSHDN_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EEE_RSTSHDN_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eye_ctl
+ *
+ * GSER Lane PCS Lite Eye Data Gathering Control Register
+ */
+union bdk_gsernx_lanex_eye_ctl
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_eye_ctl_s
+    {
+    /* NOTE(review): the BE and LE field lists are mirror images of the same
+       64-bit layout (widths sum to 64 in each); keep them in sync if edited. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+    uint64_t reserved_57_63 : 7;
+    uint64_t rst_n : 1; /**< [ 56: 56](R/W) Clear and then set to reset the cycle count timer, the
+ done indicator, and the eye error counts. */
+    uint64_t reserved_49_55 : 7;
+    uint64_t eye_en : 1; /**< [ 48: 48](R/W) Enable eye error counting (with or without cycle count limits,
+ depending on GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN]). If the cycle count
+ limit feature is not used, counting will stop when
+ GSERN()_LANE()_EYE_CTL[EYE_EN] deasserts. Set this bit prior to
+ deasserting GSERN()_LANE()_EYE_CTL[RST_N] to use the eye data gathering
+ feature. */
+    uint64_t reserved_41_47 : 7;
+    uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W) Enable use of GSERN()_LANE()_EYE_CTL[CYCLE_CNT] to limit number of cycles
+ of PCS RX clock over which the errors are accumulated. Set this bit
+ prior to deasserting GSERN()_LANE()_EYE_CTL[RST_N] to use cycle count
+ limiting in the eye data gathering feature. */
+    uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W) When enabled, this contains the count of PCS receive-clock cycles
+ over which error counts are accumulated. Set
+ GSERN()_LANE()_EYE_CTL[CYCLE_CNT] prior to deasserting
+ GSERN()_LANE()_EYE_CTL[RST_N] to use cycle count limiting in the eye data
+ gathering feature. */
+#else /* Word 0 - Little Endian */
+    uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W) When enabled, this contains the count of PCS receive-clock cycles
+ over which error counts are accumulated. Set
+ GSERN()_LANE()_EYE_CTL[CYCLE_CNT] prior to deasserting
+ GSERN()_LANE()_EYE_CTL[RST_N] to use cycle count limiting in the eye data
+ gathering feature. */
+    uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W) Enable use of GSERN()_LANE()_EYE_CTL[CYCLE_CNT] to limit number of cycles
+ of PCS RX clock over which the errors are accumulated. Set this bit
+ prior to deasserting GSERN()_LANE()_EYE_CTL[RST_N] to use cycle count
+ limiting in the eye data gathering feature. */
+    uint64_t reserved_41_47 : 7;
+    uint64_t eye_en : 1; /**< [ 48: 48](R/W) Enable eye error counting (with or without cycle count limits,
+ depending on GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN]). If the cycle count
+ limit feature is not used, counting will stop when
+ GSERN()_LANE()_EYE_CTL[EYE_EN] deasserts. Set this bit prior to
+ deasserting GSERN()_LANE()_EYE_CTL[RST_N] to use the eye data gathering
+ feature. */
+    uint64_t reserved_49_55 : 7;
+    uint64_t rst_n : 1; /**< [ 56: 56](R/W) Clear and then set to reset the cycle count timer, the
+ done indicator, and the eye error counts. */
+    uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eye_ctl_s cn; */
+};
+typedef union bdk_gsernx_lanex_eye_ctl bdk_gsernx_lanex_eye_ctl_t;
+
+/* Address of GSERN(a)_LANE(b)_EYE_CTL on CN9XXX (a 0..7, b 0..4);
+ * out-of-range arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_EYE_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EYE_CTL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900007b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EYE_CTL", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata macros consumed by the generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_EYE_CTL(a,b) bdk_gsernx_lanex_eye_ctl_t
+#define bustype_BDK_GSERNX_LANEX_EYE_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EYE_CTL(a,b) "GSERNX_LANEX_EYE_CTL"
+#define device_bar_BDK_GSERNX_LANEX_EYE_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EYE_CTL(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EYE_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eye_ctl_2
+ *
+ * GSER Lane PCS Lite Eye Data Gathering Control Register 2
+ * The low 4 bits in this register allow for shifting either the doutq or
+ * doute_cal data by 1 or 2 UI to allow for an offset in the framing of the
+ * deserialized data between these two data paths in the receiver. Software
+ * will need to iterate eye or scope measurement with identical settings
+ * for the quadrature and eye datapaths, adjusting the shift bits in this
+ * register until no differences are accumulated. (Note that shifting both
+ * doutq and doute_cal would typically not be useful, since the resulting
+ * alignment would be the same as if neither were shifted.)
+ *
+ * The remaining bits control various aspects of the eye monitor error
+ * counting logic.
+ */
+union bdk_gsernx_lanex_eye_ctl_2
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_eye_ctl_2_s
+    {
+    /* NOTE(review): the BE and LE field lists are mirror images of the same
+       64-bit layout (widths sum to 64 in each); keep them in sync if edited. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+    uint64_t reserved_41_63 : 23;
+    uint64_t capture_ones_en : 1; /**< [ 40: 40](R/W) Set to enable capture ones, so that a full eye
+ diagram can be generated. Deassert to capture half an eye. The
+ default is to enable the full eye. */
+    uint64_t capture_ones : 1; /**< [ 39: 39](R/W) Set to choose to capture eye data for ones bits in the serial
+ order in the received data stream. Clear to choose to capture
+ eye data for zero bits in serial order in the received data stream.
+ Program as desired before enabling eye data capture. Unlike
+ [CAPTURE_EDGEMODE], this signal sets the mode within the eye monitor
+ only.
+ For 00 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=0.
+ For 01 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=1.
+ For 10 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=1.
+ For 11 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=0. */
+    uint64_t reserved_33_38 : 6;
+    uint64_t eye_adapt_en : 1; /**< [ 32: 32](R/W) Set to enable eye path in the RX calibration DFE (rxcaldfe).
+ It can be asserted/deasserted with GSERN()_LANE()_EYE_CTL[EYE_EN]. It must be
+ enabled for [CAPTURE_EDGEMODE] and GSERN()_LANE()_RX_OS_5_BCFG[C1_E_ADJUST] to
+ be applied to the eye/E path. */
+    uint64_t reserved_25_31 : 7;
+    uint64_t capture_edgemode : 1; /**< [ 24: 24](R/W) Set to choose capture of eye data for bits that transitioned in
+ serial order in the received data stream. Clear to choose capture
+ of eye data for bits that did not transition in serial order in
+ the received data stream. Program as desired before enabling eye data
+ capture. Unlike [CAPTURE_TRANS] and GSERN()_LANE()_RX_8_BCFG[DFE_EDGEMODE_OVRD], this signal
+ controls the calculation of the c1 bits for the eye/E path. */
+    uint64_t reserved_17_23 : 7;
+    uint64_t capture_trans : 1; /**< [ 16: 16](R/W) Set to choose capture of eye data for bits that transitioned in
+ serial order in the received data stream. Clear to choose capture
+ of eye data for bits that did not transition in serial order in
+ the received data stream. Program as desired before enabling eye data
+ capture. Unlike [CAPTURE_EDGEMODE], this signal sets the mode within
+ the eye monitor only.
+ For 00 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=0.
+ For 01 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=1.
+ For 10 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=1.
+ For 11 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=0. */
+    uint64_t reserved_10_15 : 6;
+    uint64_t dbl_shift_doute : 1; /**< [ 9: 9](R/W) Set to shift the doute_cal (receiver eye calibration path) data
+ by 2 UI earlier to align with doutq for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. When asserted, the double shift control has priority
+ over the (single) shift control. Program as desired before enabling eye
+ data capture. */
+    uint64_t shift_doute : 1; /**< [ 8: 8](R/W) Set to shift the doute_cal (receiver eye path) data by 1 UI
+ earlier to align with doutq for eye and scope comparison logic. Only
+ data captured in the eye or scope logic is impacted by this
+ setting. Program as desired before enabling eye data capture. */
+    uint64_t reserved_2_7 : 6;
+    uint64_t dbl_shift_doutq : 1; /**< [ 1: 1](R/W) Set to shift the doutq (receiver normal quadrature path) data by
+ 2 UI earlier to align with doute_cal for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. When asserted, the double shift control has priority
+ over the (single) shift control. Program as desired before enabling eye
+ data capture. */
+    uint64_t shift_doutq : 1; /**< [ 0: 0](R/W) Set to shift the doutq (receiver normal quadrature path) data by
+ 1 UI earlier to align with doute_cal for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. Program as desired before enabling eye data capture. */
+#else /* Word 0 - Little Endian */
+    uint64_t shift_doutq : 1; /**< [ 0: 0](R/W) Set to shift the doutq (receiver normal quadrature path) data by
+ 1 UI earlier to align with doute_cal for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. Program as desired before enabling eye data capture. */
+    uint64_t dbl_shift_doutq : 1; /**< [ 1: 1](R/W) Set to shift the doutq (receiver normal quadrature path) data by
+ 2 UI earlier to align with doute_cal for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. When asserted, the double shift control has priority
+ over the (single) shift control. Program as desired before enabling eye
+ data capture. */
+    uint64_t reserved_2_7 : 6;
+    uint64_t shift_doute : 1; /**< [ 8: 8](R/W) Set to shift the doute_cal (receiver eye path) data by 1 UI
+ earlier to align with doutq for eye and scope comparison logic. Only
+ data captured in the eye or scope logic is impacted by this
+ setting. Program as desired before enabling eye data capture. */
+    uint64_t dbl_shift_doute : 1; /**< [ 9: 9](R/W) Set to shift the doute_cal (receiver eye calibration path) data
+ by 2 UI earlier to align with doutq for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. When asserted, the double shift control has priority
+ over the (single) shift control. Program as desired before enabling eye
+ data capture. */
+    uint64_t reserved_10_15 : 6;
+    uint64_t capture_trans : 1; /**< [ 16: 16](R/W) Set to choose capture of eye data for bits that transitioned in
+ serial order in the received data stream. Clear to choose capture
+ of eye data for bits that did not transition in serial order in
+ the received data stream. Program as desired before enabling eye data
+ capture. Unlike [CAPTURE_EDGEMODE], this signal sets the mode within
+ the eye monitor only.
+ For 00 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=0.
+ For 01 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=1.
+ For 10 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=1.
+ For 11 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=0. */
+    uint64_t reserved_17_23 : 7;
+    uint64_t capture_edgemode : 1; /**< [ 24: 24](R/W) Set to choose capture of eye data for bits that transitioned in
+ serial order in the received data stream. Clear to choose capture
+ of eye data for bits that did not transition in serial order in
+ the received data stream. Program as desired before enabling eye data
+ capture. Unlike [CAPTURE_TRANS] and GSERN()_LANE()_RX_8_BCFG[DFE_EDGEMODE_OVRD], this signal
+ controls the calculation of the c1 bits for the eye/E path. */
+    uint64_t reserved_25_31 : 7;
+    uint64_t eye_adapt_en : 1; /**< [ 32: 32](R/W) Set to enable eye path in the RX calibration DFE (rxcaldfe).
+ It can be asserted/deasserted with GSERN()_LANE()_EYE_CTL[EYE_EN]. It must be
+ enabled for [CAPTURE_EDGEMODE] and GSERN()_LANE()_RX_OS_5_BCFG[C1_E_ADJUST] to
+ be applied to the eye/E path. */
+    uint64_t reserved_33_38 : 6;
+    uint64_t capture_ones : 1; /**< [ 39: 39](R/W) Set to choose to capture eye data for ones bits in the serial
+ order in the received data stream. Clear to choose to capture
+ eye data for zero bits in serial order in the received data stream.
+ Program as desired before enabling eye data capture. Unlike
+ [CAPTURE_EDGEMODE], this signal sets the mode within the eye monitor
+ only.
+ For 00 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=0.
+ For 01 bit sequence errors, use [CAPTURE_ONES]=0 and [CAPTURE_TRANS]=1.
+ For 10 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=1.
+ For 11 bit sequence errors, use [CAPTURE_ONES]=1 and [CAPTURE_TRANS]=0. */
+    uint64_t capture_ones_en : 1; /**< [ 40: 40](R/W) Set to enable capture ones, so that a full eye
+ diagram can be generated. Deassert to capture half an eye. The
+ default is to enable the full eye. */
+    uint64_t reserved_41_63 : 23;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eye_ctl_2_s cn; */
+};
+typedef union bdk_gsernx_lanex_eye_ctl_2 bdk_gsernx_lanex_eye_ctl_2_t;
+
+/* Address of GSERN(a)_LANE(b)_EYE_CTL_2 on CN9XXX (a 0..7, b 0..4);
+ * out-of-range arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_EYE_CTL_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EYE_CTL_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900007c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EYE_CTL_2", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata macros consumed by the generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) bdk_gsernx_lanex_eye_ctl_2_t
+#define bustype_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) "GSERNX_LANEX_EYE_CTL_2"
+#define device_bar_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EYE_CTL_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_eye_dat
+ *
+ * GSER Lane PCS Lite Eye Data Gathering Result Register
+ */
+union bdk_gsernx_lanex_eye_dat
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_eye_dat_s
+    {
+    /* NOTE(review): the BE and LE field lists are mirror images of the same
+       64-bit layout (widths sum to 64 in each); keep them in sync if edited. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+    uint64_t reserved_50_63 : 14;
+    uint64_t cycle_cnt_done : 1; /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_EYE_CTL[CYCLE_CNT] has expired if
+ GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] is asserted. If
+ GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] is deasserted, this bit will always
+ read as asserted. */
+    uint64_t reserved_48 : 1;
+    uint64_t err_cnt_ovf : 1; /**< [ 47: 47](RO/H) When set indicates GSERN()_LANE()_EYE_DAT[ERR_CNT] overflowed and is
+ not accurate. */
+    uint64_t reserved_45_46 : 2;
+    uint64_t err_cnt : 45; /**< [ 44: 0](RO/H) Count of bit errors seen in doute_cal relative to doutq. If
+ GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] and GSERN()_LANE()_EYE_DAT[CYCLE_CNT_DONE]
+ are not both asserted, GSERN()_LANE()_EYE_DAT[ERR_CNT] may not be reliable
+ unless GSERN()_LANE()_EYE_CTL[EYE_EN] is first cleared (to stop the
+ error counter). */
+#else /* Word 0 - Little Endian */
+    uint64_t err_cnt : 45; /**< [ 44: 0](RO/H) Count of bit errors seen in doute_cal relative to doutq. If
+ GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] and GSERN()_LANE()_EYE_DAT[CYCLE_CNT_DONE]
+ are not both asserted, GSERN()_LANE()_EYE_DAT[ERR_CNT] may not be reliable
+ unless GSERN()_LANE()_EYE_CTL[EYE_EN] is first cleared (to stop the
+ error counter). */
+    uint64_t reserved_45_46 : 2;
+    uint64_t err_cnt_ovf : 1; /**< [ 47: 47](RO/H) When set indicates GSERN()_LANE()_EYE_DAT[ERR_CNT] overflowed and is
+ not accurate. */
+    uint64_t reserved_48 : 1;
+    uint64_t cycle_cnt_done : 1; /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_EYE_CTL[CYCLE_CNT] has expired if
+ GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] is asserted. If
+ GSERN()_LANE()_EYE_CTL[CYCLE_CNT_EN] is deasserted, this bit will always
+ read as asserted. */
+    uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_eye_dat_s cn; */
+};
+typedef union bdk_gsernx_lanex_eye_dat bdk_gsernx_lanex_eye_dat_t;
+
+/* Address of GSERN(a)_LANE(b)_EYE_DAT on CN9XXX (a 0..7, b 0..4);
+ * out-of-range arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_EYE_DAT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_EYE_DAT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900007d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_EYE_DAT", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata macros consumed by the generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_EYE_DAT(a,b) bdk_gsernx_lanex_eye_dat_t
+#define bustype_BDK_GSERNX_LANEX_EYE_DAT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_EYE_DAT(a,b) "GSERNX_LANEX_EYE_DAT"
+#define device_bar_BDK_GSERNX_LANEX_EYE_DAT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_EYE_DAT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_EYE_DAT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_idledet_hys
+ *
+ * GSER Lane Receiver Idle Detector Hysteresis Control Register
+ * Parameters controlling hysteresis in the custom receiver's idle detector. When
+ * enabled, the hysteresis function adjusts the idle detector offset to bias the
+ * detector in favor of the current idle state after the current state has been stable
+ * for some time. The [HYS_CNT], [HYS_POS], and [HYS_NEG] control fields should be set
+ * before or concurrently with writing [HYS_EN] to 1 when the hysteresis function is to
+ * be used.
+ */
+union bdk_gsernx_lanex_idledet_hys
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_idledet_hys_s
+    {
+    /* NOTE(review): the BE and LE field lists are mirror images of the same
+       64-bit layout (widths sum to 64 in each); keep them in sync if edited. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+    uint64_t reserved_17_63 : 47;
+    uint64_t hys_en : 1; /**< [ 16: 16](R/W) Enable the hysteresis function. */
+    uint64_t reserved_14_15 : 2;
+    uint64_t hys_cnt : 6; /**< [ 13: 8](R/W) Count of 10 ns cycles after a change in idle offset hysteresis direction before a new
+ hysteresis direction will be applied. */
+    uint64_t hys_pos : 4; /**< [ 7: 4](R/W) Offset shift to bias the idle detector in favor of not idle after the
+ detector has reported not idle for [HYS_CNT] cycles. The offset shift is
+ incremented approximately 5 mV per step. */
+    uint64_t hys_neg : 4; /**< [ 3: 0](R/W) Offset shift to bias the idle detector in favor of idle after the detector has
+ reported idle for [HYS_CNT] cycles. The offset shift is incremented
+ approximately 5 mV per step. */
+#else /* Word 0 - Little Endian */
+    uint64_t hys_neg : 4; /**< [ 3: 0](R/W) Offset shift to bias the idle detector in favor of idle after the detector has
+ reported idle for [HYS_CNT] cycles. The offset shift is incremented
+ approximately 5 mV per step. */
+    uint64_t hys_pos : 4; /**< [ 7: 4](R/W) Offset shift to bias the idle detector in favor of not idle after the
+ detector has reported not idle for [HYS_CNT] cycles. The offset shift is
+ incremented approximately 5 mV per step. */
+    uint64_t hys_cnt : 6; /**< [ 13: 8](R/W) Count of 10 ns cycles after a change in idle offset hysteresis direction before a new
+ hysteresis direction will be applied. */
+    uint64_t reserved_14_15 : 2;
+    uint64_t hys_en : 1; /**< [ 16: 16](R/W) Enable the hysteresis function. */
+    uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_idledet_hys_s cn; */
+};
+typedef union bdk_gsernx_lanex_idledet_hys bdk_gsernx_lanex_idledet_hys_t;
+
+/* Address of GSERN(a)_LANE(b)_IDLEDET_HYS on CN9XXX (a 0..7, b 0..4);
+ * out-of-range arguments report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_IDLEDET_HYS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_IDLEDET_HYS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900010f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_IDLEDET_HYS", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata macros consumed by the generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) bdk_gsernx_lanex_idledet_hys_t
+#define bustype_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) "GSERNX_LANEX_IDLEDET_HYS"
+#define device_bar_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_IDLEDET_HYS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_imapsel_bcfg
+ *
+ * GSER Lane Interpolator Map Selection Register
+ * Selection control for the interpolator map. Set prior to bringing the analog
+ * receiver out of reset.
+ */
+union bdk_gsernx_lanex_imapsel_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value; overlays the bitfield view below. */
+ struct bdk_gsernx_lanex_imapsel_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_5_63 : 59;
+ uint64_t map_case : 5; /**< [ 4: 0](R/W) Interpolator map case selector.
+ 0x0 = data_500_erc_2_c_0_20_mean.
+ 0x1 = data_407_erc_2_c_0_20_mean.
+ 0x2 = data_333_erc_3_c_0_20_mean.
+ 0x3 = data_167_erc_5_c_0_20_mean.
+ 0x4 = data_80_erc_8_c_0_20_mean.
+ 0x5 = data_63_erc_10_c_0_20_mean.
+ 0x6 = data_50_erc_11_c_0_20_mean.
+ 0x7 = data_40_erc_13_c_0_20_mean.
+ 0x8 = data_39_erc_14_c_0_20_mean.
+ 0x9 = data_36_erc_15_c_0_20_mean.
+ 0xa = data_31_erc_15_c_0_20_mean.
+ 0xf = {GSERN()_LANE()_MAP1, GSERN()_LANE()_MAP0}.
+ all others = 0. */
+#else /* Word 0 - Little Endian */
+ uint64_t map_case : 5; /**< [ 4: 0](R/W) Interpolator map case selector.
+ 0x0 = data_500_erc_2_c_0_20_mean.
+ 0x1 = data_407_erc_2_c_0_20_mean.
+ 0x2 = data_333_erc_3_c_0_20_mean.
+ 0x3 = data_167_erc_5_c_0_20_mean.
+ 0x4 = data_80_erc_8_c_0_20_mean.
+ 0x5 = data_63_erc_10_c_0_20_mean.
+ 0x6 = data_50_erc_11_c_0_20_mean.
+ 0x7 = data_40_erc_13_c_0_20_mean.
+ 0x8 = data_39_erc_14_c_0_20_mean.
+ 0x9 = data_36_erc_15_c_0_20_mean.
+ 0xa = data_31_erc_15_c_0_20_mean.
+ 0xf = {GSERN()_LANE()_MAP1, GSERN()_LANE()_MAP0}.
+ all others = 0. */
+ uint64_t reserved_5_63 : 59;
+#endif /* Word 0 - End */
+ } s; /* Bitfield view of the register. */
+ /* struct bdk_gsernx_lanex_imapsel_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_imapsel_bcfg bdk_gsernx_lanex_imapsel_bcfg_t;
+
+/* Address accessor: returns the physical RSL address of
+   GSERN(a)_LANE(b)_IMAPSEL_BCFG. Only CN9XXX parts with a <= 7 and b <= 4 are
+   accepted; any other argument faults through __bdk_csr_fatal() and does not
+   return. */
+static inline uint64_t BDK_GSERNX_LANEX_IMAPSEL_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_IMAPSEL_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001df0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_IMAPSEL_BCFG", 2, a, b, 0, 0);
+}
+
+/* Token-pasting glue consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) bdk_gsernx_lanex_imapsel_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) "GSERNX_LANEX_IMAPSEL_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_IMAPSEL_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_init_bsts
+ *
+ * GSER Lane Initialization Base-level Status Register
+ */
+union bdk_gsernx_lanex_init_bsts
+{
+ uint64_t u; /* Raw 64-bit register value; overlays the bitfield view below. */
+ struct bdk_gsernx_lanex_init_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t eye_deep_idle : 1; /**< [ 42: 42](RO/H) Receiver eye path state is deep idle. */
+ uint64_t eye_rst_sm_complete : 1; /**< [ 41: 41](RO/H) Indicates that the lane eye receive reset state machine has
+ completed. If [EYE_RST_SM_COMPLETE] is set and [EYE_READY] is not,
+ there may be CSR register setting which prevent the receiver eye data
+ path from being ready for use, e.g., power-down or reset overrides. */
+ uint64_t eye_ready : 1; /**< [ 40: 40](RO/H) Lane analog receiver eye data path reset state machine completion
+ status indicating that the lane receiver eye path ready for use. */
+ uint64_t tx_pcie_p2 : 1; /**< [ 39: 39](RO/H) Transmitter state is PCIe power state P2. */
+ uint64_t tx_pcie_p1s2 : 1; /**< [ 38: 38](RO/H) Transmitter state is PCIe power state P1 substate 2. */
+ uint64_t tx_pcie_p1s1 : 1; /**< [ 37: 37](RO/H) Transmitter state is PCIe power state P1 substate 1. */
+ uint64_t tx_pcie_p1cpm : 1; /**< [ 36: 36](RO/H) Transmitter state is PCIe power state P1.CPM (entry to P1 substates
+ or clock disabled state for normal P1 with clock PM support). */
+ uint64_t tx_pcie_p1 : 1; /**< [ 35: 35](RO/H) Transmitter state is PCIe power state P1. */
+ uint64_t tx_deep_idle : 1; /**< [ 34: 34](RO/H) Transmitter state is deep idle. */
+ uint64_t tx_rst_sm_complete : 1; /**< [ 33: 33](RO/H) Indicates that the lane transmitter reset state machine has
+ completed. If [TX_RST_SM_COMPLETE] is set and [TX_READY] is not,
+ there may be CSR register setting which prevent the transmitter from
+ being ready for use, e.g., power-down or reset overrides. */
+ uint64_t tx_ready : 1; /**< [ 32: 32](RO/H) Lane analog transmitter reset state machine completion status
+ indicating that the lane transmitter is in "idle" configuration and
+ ready to start transmitting data after changing the transmitter drive
+ settings to transmit data. */
+ uint64_t rx_pcie_p2 : 1; /**< [ 31: 31](RO/H) Receiver state is PCIe power state P2. */
+ uint64_t rx_pcie_p1s2 : 1; /**< [ 30: 30](RO/H) Receiver state is PCIe power state P1 substate 2. */
+ uint64_t rx_pcie_p1s1 : 1; /**< [ 29: 29](RO/H) Receiver state is PCIe power state P1 substate 1. */
+ uint64_t rx_pcie_p1cpm : 1; /**< [ 28: 28](RO/H) Receiver state is PCIe power state P1.CPM (entry to P1 substates or
+ clock disabled state for normal P1 with clock PM support). */
+ uint64_t rx_pcie_p1 : 1; /**< [ 27: 27](RO/H) Receiver state is PCIe power state P1. */
+ uint64_t rx_deep_idle : 1; /**< [ 26: 26](RO/H) Receiver state is deep idle. */
+ uint64_t rx_rst_sm_complete : 1; /**< [ 25: 25](RO/H) Indicates that the lane receiver reset state machine has
+ completed. If [RX_RST_SM_COMPLETE] is set and [RX_READY] is not,
+ there may be CSR register setting which prevent the receiver from
+ being ready for use, e.g., power-down or reset overrides. */
+ uint64_t rx_ready : 1; /**< [ 24: 24](RO/H) Lane analog receiver reset state machine completion status that the
+ reset sequence has completed and the lane receiver is ready for afe
+ and dfe adaptation. */
+ uint64_t pll_cp_cal : 4; /**< [ 23: 20](RO/H) PLL calibration state machine's resulting charge pump setting. Only
+ valid if [CAL_READY] is set. */
+ uint64_t reserved_17_19 : 3;
+ uint64_t pll_band_cal : 5; /**< [ 16: 12](RO/H) PLL calibration state machine's resulting VCO band setting. Only valid
+ if [CAL_READY] is set. */
+ uint64_t pll_pcie_p2 : 1; /**< [ 11: 11](RO/H) Lane PLL state is PCIe power state P2. */
+ uint64_t pll_pcie_p1s2 : 1; /**< [ 10: 10](RO/H) Lane PLL state is PCIe power state P1 substate 2. */
+ uint64_t pll_pcie_p1s1 : 1; /**< [ 9: 9](RO/H) Lane PLL state is PCIe power state P1 substate 1. */
+ uint64_t pll_pcie_p1cpm : 1; /**< [ 8: 8](RO/H) Lane PLL state is PCIe power state P1.CPM (entry to P1 substates or
+ clock disabled state for normal P1 with clock PM support). */
+ uint64_t pll_pcie_p1 : 1; /**< [ 7: 7](RO/H) Lane PLL state is PCIe power state P1. */
+ uint64_t pll_deep_idle : 1; /**< [ 6: 6](RO/H) Lane PLL state is deep idle. */
+ uint64_t rst_sm_complete : 1; /**< [ 5: 5](RO/H) PLL reset state machine has completed. If
+ [RST_SM_COMPLETE] is set and [RST_SM_READY] is not, there may still
+ be CSR register settings preventing the PLL from being ready
+ for use, e.g., power-down or reset overrides. */
+ uint64_t rst_sm_ready : 1; /**< [ 4: 4](RO/H) PLL reset state machine status indicating that the reset
+ sequence has completed and this PLL is ready for use. */
+ uint64_t lock : 1; /**< [ 3: 3](RO/H) PLL lock status; only valid if [LOCK_READY] is set. */
+ uint64_t lock_ready : 1; /**< [ 2: 2](RO/H) PLL lock status check is complete following most recent PLL
+ reset or assertion of GSERN()_LANE()_RST1_BCFG[LOCK_CHECK]. */
+ uint64_t cal_fail : 1; /**< [ 1: 1](RO/H) PLL calibration failed; valid only if [CAL_READY] is set. */
+ uint64_t cal_ready : 1; /**< [ 0: 0](RO/H) PLL calibration completed */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_ready : 1; /**< [ 0: 0](RO/H) PLL calibration completed */
+ uint64_t cal_fail : 1; /**< [ 1: 1](RO/H) PLL calibration failed; valid only if [CAL_READY] is set. */
+ uint64_t lock_ready : 1; /**< [ 2: 2](RO/H) PLL lock status check is complete following most recent PLL
+ reset or assertion of GSERN()_LANE()_RST1_BCFG[LOCK_CHECK]. */
+ uint64_t lock : 1; /**< [ 3: 3](RO/H) PLL lock status; only valid if [LOCK_READY] is set. */
+ uint64_t rst_sm_ready : 1; /**< [ 4: 4](RO/H) PLL reset state machine status indicating that the reset
+ sequence has completed and this PLL is ready for use. */
+ uint64_t rst_sm_complete : 1; /**< [ 5: 5](RO/H) PLL reset state machine has completed. If
+ [RST_SM_COMPLETE] is set and [RST_SM_READY] is not, there may still
+ be CSR register settings preventing the PLL from being ready
+ for use, e.g., power-down or reset overrides. */
+ uint64_t pll_deep_idle : 1; /**< [ 6: 6](RO/H) Lane PLL state is deep idle. */
+ uint64_t pll_pcie_p1 : 1; /**< [ 7: 7](RO/H) Lane PLL state is PCIe power state P1. */
+ uint64_t pll_pcie_p1cpm : 1; /**< [ 8: 8](RO/H) Lane PLL state is PCIe power state P1.CPM (entry to P1 substates or
+ clock disabled state for normal P1 with clock PM support). */
+ uint64_t pll_pcie_p1s1 : 1; /**< [ 9: 9](RO/H) Lane PLL state is PCIe power state P1 substate 1. */
+ uint64_t pll_pcie_p1s2 : 1; /**< [ 10: 10](RO/H) Lane PLL state is PCIe power state P1 substate 2. */
+ uint64_t pll_pcie_p2 : 1; /**< [ 11: 11](RO/H) Lane PLL state is PCIe power state P2. */
+ uint64_t pll_band_cal : 5; /**< [ 16: 12](RO/H) PLL calibration state machine's resulting VCO band setting. Only valid
+ if [CAL_READY] is set. */
+ uint64_t reserved_17_19 : 3;
+ uint64_t pll_cp_cal : 4; /**< [ 23: 20](RO/H) PLL calibration state machine's resulting charge pump setting. Only
+ valid if [CAL_READY] is set. */
+ uint64_t rx_ready : 1; /**< [ 24: 24](RO/H) Lane analog receiver reset state machine completion status that the
+ reset sequence has completed and the lane receiver is ready for afe
+ and dfe adaptation. */
+ uint64_t rx_rst_sm_complete : 1; /**< [ 25: 25](RO/H) Indicates that the lane receiver reset state machine has
+ completed. If [RX_RST_SM_COMPLETE] is set and [RX_READY] is not,
+ there may be CSR register setting which prevent the receiver from
+ being ready for use, e.g., power-down or reset overrides. */
+ uint64_t rx_deep_idle : 1; /**< [ 26: 26](RO/H) Receiver state is deep idle. */
+ uint64_t rx_pcie_p1 : 1; /**< [ 27: 27](RO/H) Receiver state is PCIe power state P1. */
+ uint64_t rx_pcie_p1cpm : 1; /**< [ 28: 28](RO/H) Receiver state is PCIe power state P1.CPM (entry to P1 substates or
+ clock disabled state for normal P1 with clock PM support). */
+ uint64_t rx_pcie_p1s1 : 1; /**< [ 29: 29](RO/H) Receiver state is PCIe power state P1 substate 1. */
+ uint64_t rx_pcie_p1s2 : 1; /**< [ 30: 30](RO/H) Receiver state is PCIe power state P1 substate 2. */
+ uint64_t rx_pcie_p2 : 1; /**< [ 31: 31](RO/H) Receiver state is PCIe power state P2. */
+ uint64_t tx_ready : 1; /**< [ 32: 32](RO/H) Lane analog transmitter reset state machine completion status
+ indicating that the lane transmitter is in "idle" configuration and
+ ready to start transmitting data after changing the transmitter drive
+ settings to transmit data. */
+ uint64_t tx_rst_sm_complete : 1; /**< [ 33: 33](RO/H) Indicates that the lane transmitter reset state machine has
+ completed. If [TX_RST_SM_COMPLETE] is set and [TX_READY] is not,
+ there may be CSR register setting which prevent the transmitter from
+ being ready for use, e.g., power-down or reset overrides. */
+ uint64_t tx_deep_idle : 1; /**< [ 34: 34](RO/H) Transmitter state is deep idle. */
+ uint64_t tx_pcie_p1 : 1; /**< [ 35: 35](RO/H) Transmitter state is PCIe power state P1. */
+ uint64_t tx_pcie_p1cpm : 1; /**< [ 36: 36](RO/H) Transmitter state is PCIe power state P1.CPM (entry to P1 substates
+ or clock disabled state for normal P1 with clock PM support). */
+ uint64_t tx_pcie_p1s1 : 1; /**< [ 37: 37](RO/H) Transmitter state is PCIe power state P1 substate 1. */
+ uint64_t tx_pcie_p1s2 : 1; /**< [ 38: 38](RO/H) Transmitter state is PCIe power state P1 substate 2. */
+ uint64_t tx_pcie_p2 : 1; /**< [ 39: 39](RO/H) Transmitter state is PCIe power state P2. */
+ uint64_t eye_ready : 1; /**< [ 40: 40](RO/H) Lane analog receiver eye data path reset state machine completion
+ status indicating that the lane receiver eye path ready for use. */
+ uint64_t eye_rst_sm_complete : 1; /**< [ 41: 41](RO/H) Indicates that the lane eye receive reset state machine has
+ completed. If [EYE_RST_SM_COMPLETE] is set and [EYE_READY] is not,
+ there may be CSR register setting which prevent the receiver eye data
+ path from being ready for use, e.g., power-down or reset overrides. */
+ uint64_t eye_deep_idle : 1; /**< [ 42: 42](RO/H) Receiver eye path state is deep idle. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s; /* Bitfield view of the register. */
+ /* struct bdk_gsernx_lanex_init_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_init_bsts bdk_gsernx_lanex_init_bsts_t;
+
+/* Address accessor: returns the physical RSL address of
+   GSERN(a)_LANE(b)_INIT_BSTS. Only CN9XXX parts with a <= 7 and b <= 4 are
+   accepted; any other argument faults through __bdk_csr_fatal() and does not
+   return. */
+static inline uint64_t BDK_GSERNX_LANEX_INIT_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_INIT_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000480ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_INIT_BSTS", 2, a, b, 0, 0);
+}
+
+/* Token-pasting glue consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_GSERNX_LANEX_INIT_BSTS(a,b) bdk_gsernx_lanex_init_bsts_t
+#define bustype_BDK_GSERNX_LANEX_INIT_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_INIT_BSTS(a,b) "GSERNX_LANEX_INIT_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_INIT_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_INIT_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_INIT_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_bcfg
+ *
+ * GSER Lane PCS Lite Configuration (Transmit, Receive, and Loopback) Register
+ */
+union bdk_gsernx_lanex_lt_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value; overlays the bitfield view below. */
+ struct bdk_gsernx_lanex_lt_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t inj_err_cnt_rst_n : 1; /**< [ 63: 63](R/W/H) Set to zero to hold the error injection counter in reset. */
+ uint64_t inj_err_cnt_en : 1; /**< [ 62: 62](R/W) PCS will inject a single bit error every other cycle in the transmit
+ data stream at some time following an assertion of
+ [INJ_ERR_CNT_EN]. The number of error cycles to insert is set by
+ [INJ_ERR_CNT_LEN] and it increments the error bit index each
+ cycle. Once all the errors have been transmitted GSER sets
+ GSERN()_LANE()_LT_BSTS[INJ_ERR_CNT_DONE]. Injection of a second set of
+ errors will require clearing the counter by holding [INJ_ERR_CNT_RST_N],
+ asserting [INJ_ERR_CNT_EN], then releasing [INJ_ERR_CNT_RST_N]. This mode
+ should be used separately from [INJ_ERR_BURST_EN] and only one of them
+ can be asserted at any time. */
+ uint64_t inj_err_cnt_len : 6; /**< [ 61: 56](R/W) Tells the PCS lite error injection logic the total number of bit errors
+ to insert in a walking pattern. Every other cycle 1 bit error will be
+ inserted in a walking index up to the count value specified. The max
+ value is set by the valid data width transmitted. For example, if 8
+ bits of valid data are transmitted each cycle only from 1-8 count
+ values can be set. The same for 10, 16, 20, 32, and 40 bits. */
+ uint64_t reserved_55 : 1;
+ uint64_t inj_err_burst_en : 1; /**< [ 54: 54](R/W) PCS will inject a contiguous set of error bits in the transmit data
+ stream at some time following an assertion of [INJ_ERR_BURST_EN]. The
+ length of contiguous errors is set by [INJ_ERR_BURST_LEN]. Injection
+ of a second set of errors will require deasserting and then
+ asserting [INJ_ERR_BURST_EN] again. This mode should be used separately
+ from [INJ_ERR_CNT_EN] and only one of them can be asserted at any time. */
+ uint64_t inj_err_burst_len : 6; /**< [ 53: 48](R/W) Tells the PCS lite error injection logic what length the burst error
+ mask should be. The max value is set by the valid data width
+ transmitted. For example, if 8 bits of valid data are transmitted
+ each cycle, only from 1-8 bits of contiguous errors can be set. The
+ same for 10, 16, 20, 32, and 40 bits. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t pat_dp_width : 3; /**< [ 43: 41](R/W/H) Tells the pattern memory generator/checker logic what width to use
+ in the generator and checker data paths.
+ 0x0 = 8 (requires bit-stuffing/unstuffing or for debug).
+ 0x1 = 10 (requires bit-stuffing/unstuffing or for debug).
+ 0x2 = 16.
+ 0x3 = 20.
+ 0x4 = 32.
+ 0x5 = 40.
+
+ Checking of received data
+ works correctly only for clock divider ratios of 10, 20, and 40. The
+ transmit data sequence is correct for all clock ratios. */
+ uint64_t prbs_dp_width : 3; /**< [ 40: 38](R/W/H) Tells the PCS lite layer PRBS logic what width to use in the
+ generator and checker data paths.
+ 0x0 = 8 (requires bit-stuffing/unstuffing or for debug).
+ 0x1 = 10 (requires bit-stuffing/unstuffing or for debug).
+ 0x2 = 16.
+ 0x3 = 20.
+ 0x4 = 32.
+ 0x5 = 40. */
+ uint64_t rx_dp_width : 3; /**< [ 37: 35](R/W/H) Tells the PCS lite layer logic what width to use in the receive data
+ path between the analog macro and downstream logic, hence what
+ data bits of the doutq[39:0] bus are in use.
+ 0x0 = 8 (reserved; debug only).
+ 0x1 = 10 (reserved; debug only).
+ 0x2 = 16.
+ 0x3 = 20.
+ 0x4 = 32.
+ 0x5 = 40.
+
+ This value must only be changed while lite layer is in reset. */
+ uint64_t tx_dp_width : 3; /**< [ 34: 32](R/W/H) Tells the PCS lite layer logic what width to use in the transmit
+ data path between the lite layer FIFO and the analog macro, hence
+ what data bits of the tx_data[39:0] bus are in use. Values:
+ 0x0 = 8 (reserved; debug only).
+ 0x1 = 10 (reserved; debug only).
+ 0x2 = 16.
+ 0x3 = 20.
+ 0x4 = 32.
+ 0x5 = 40.
+
+ This value must only be changed while lite layer is in reset. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t core_loopback_mode : 1; /**< [ 25: 25](R/W/H) Enable the core-side loopback mode; controller transmit data are
+ looped back to the controller as receive data in the PCS lite layer.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t sloop_mode : 1; /**< [ 24: 24](R/W/H) Enable shallow loopback mode (SerDes receive data looped back to
+ SerDes transmit in the PCS lite layer).
+ This value must only be changed while lite layer is in reset. */
+ uint64_t reserved_23 : 1;
+ uint64_t bitstuff_rx_drop_even : 1; /**< [ 22: 22](R/W/H) Tells the PCS lite receive datapath to drop even bits
+ in the vector of received data from the PMA when [BITSTUFF_RX_EN] is
+ set:
+ 0 = Drop bits 1, 3, 5, 7, ...
+ 1 = Drop bits 0, 2, 4, 6, ...
+
+ This bit is also used in the eye monitor to mask out the dropped
+ bits when counting mismatches.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t bitstuff_rx_en : 1; /**< [ 21: 21](R/W/H) Set to expect duplicates on the PMA RX data and drop bits after
+ alignment & ordering for PCS layer to consume. The drop ordering is
+ determined by [BITSTUFF_RX_DROP_EVEN]. This value must only be changed
+ while lite layer is in reset. */
+ uint64_t inv_rx_polarity : 1; /**< [ 20: 20](R/W/H) Set to invert the polarity of the received data bits. Note that
+ the PCS-lite PRBS checker will require [INV_RX_POLARITY] to be asserted
+ when it is in use to check standard PRBS data from an external
+ source. This value must only be changed while lite layer is in
+ reset. */
+ uint64_t reverse_rx_bit_order : 1; /**< [ 19: 19](R/W/H) While asserted, the normal receive order (lowest valid bit index
+ received first, highest valid index last) is reversed so the highest
+ valid bit index is received first and lowest valid index is received
+ last. This control needs to be asserted for PRBS testing using the
+ PRBS checker in the GSER macro and for PCIe Gen-1 and Gen-2. */
+ uint64_t reserved_18 : 1;
+ uint64_t use_bph_wrreq_psh : 1; /**< [ 17: 17](R/W) Reserved.
+ Internal:
+ Delay the transmit FIFO push request synchronization to the pop side by one
+ txdivclk phase. This is a diagnostic / debug tool to help with transmit lane
+ alignment issues. */
+ uint64_t fifo_algn_qlm_mask : 4; /**< [ 16: 13](R/W) Selection control for which QLMs in this QLM's link group to align in timing the
+ deassertion of reset to this lane's transmitter's clock alignment FIFO.
+ \<0\> = Wait for QLM 0.
+ \<1\> = Wait for QLM 1.
+ \<2\> = Wait for QLM 2.
+ \<3\> = Wait for QLM 3.
+
+ If a link is made up of lanes in multiple QLMs, the mask in each lane must
+ include all active QLMs (including the QLM containing the current lane). */
+ uint64_t fifo_algn_lane_mask : 4; /**< [ 12: 9](R/W) Selection control for which lanes in the current QLM to align in timing the
+ deassertion of reset to this lane's transmitter's clock alignment FIFO.
+ \<0\> = Wait for Lane 0.
+ \<1\> = Wait for Lane 1.
+ \<2\> = Wait for Lane 2.
+ \<3\> = Wait for Lane 3.
+
+ The bit corresponding to the current Lane is ignored. */
+ uint64_t fifo_bypass_en : 1; /**< [ 8: 8](R/W) For diagnostic use only.
+ Internal:
+ This control is currently inactive and is left as a placeholder for
+ possible re-inclusion in 7nm.
+
+ Set to bypass the PCS lite layer transmit asynchronous FIFO
+ with a single flop. This saves 1-2 cycles of latency in the transmit
+ path, but imposes additional constraints on static timing
+ closure. Note that shallow loopback data cannot bypass the FIFO. */
+ uint64_t tx_fifo_pop_start_addr : 3; /**< [ 7: 5](R/W) Reserved.
+ Internal:
+ Starting address for lite transmit FIFO pops
+ (reads). Changing this allows shifting the latency through the FIFO in steps of
+ 1 txdivclk cycle (8, 10, 16, 20, 32, or 40 UI, depending on data path width
+ setting). The function is similar to FIFO_UNLOAD_DLY, but provides a wider range
+ of adjustment. For diagnostic use only. */
+ uint64_t fifo_unload_dly : 1; /**< [ 4: 4](R/W/H) Set to add one cycle delay to the PCS lite layer transmit
+ asynchronous FIFO pop data. This value must only be changed before
+ releasing [FIFO_RST_N]. */
+ uint64_t fifo_rst_n : 1; /**< [ 3: 3](R/W/H) Clear to hold the PCS lite layer transmit asynchronous FIFO in
+ reset. */
+ uint64_t bitstuff_tx_en : 1; /**< [ 2: 2](R/W/H) Set to duplicate the first 20 bits of TX data before
+ alignment & ordering for lower data rates. This could be PCS TX
+ data, PRBS data, or shallow-loopback RX data depending on mode.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t inv_tx_polarity : 1; /**< [ 1: 1](R/W/H) Set to invert the polarity of the transmit data bits. Note
+ that the PCS-lite PRBS generator will require [INV_TX_POLARITY] to be
+ asserted when PRBS data are being transmitted to match the expected
+ polarity of the standard PRBS patterns.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t reverse_tx_bit_order : 1; /**< [ 0: 0](R/W/H) Assertion causes the normal transmit order (lowest valid bit index
+ transmitted first, highest valid index last) to be reversed so the
+ highest valid bit index is transmitted first and lowest valid index
+ is transmitted last. Note that the PCS-lite PRBS generator will
+ require [REVERSE_TX_BIT_ORDER] to be asserted.
+ This value must only be changed while lite layer is in reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t reverse_tx_bit_order : 1; /**< [ 0: 0](R/W/H) Assertion causes the normal transmit order (lowest valid bit index
+ transmitted first, highest valid index last) to be reversed so the
+ highest valid bit index is transmitted first and lowest valid index
+ is transmitted last. Note that the PCS-lite PRBS generator will
+ require [REVERSE_TX_BIT_ORDER] to be asserted.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t inv_tx_polarity : 1; /**< [ 1: 1](R/W/H) Set to invert the polarity of the transmit data bits. Note
+ that the PCS-lite PRBS generator will require [INV_TX_POLARITY] to be
+ asserted when PRBS data are being transmitted to match the expected
+ polarity of the standard PRBS patterns.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t bitstuff_tx_en : 1; /**< [ 2: 2](R/W/H) Set to duplicate the first 20 bits of TX data before
+ alignment & ordering for lower data rates. This could be PCS TX
+ data, PRBS data, or shallow-loopback RX data depending on mode.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t fifo_rst_n : 1; /**< [ 3: 3](R/W/H) Clear to hold the PCS lite layer transmit asynchronous FIFO in
+ reset. */
+ uint64_t fifo_unload_dly : 1; /**< [ 4: 4](R/W/H) Set to add one cycle delay to the PCS lite layer transmit
+ asynchronous FIFO pop data. This value must only be changed before
+ releasing [FIFO_RST_N]. */
+ uint64_t tx_fifo_pop_start_addr : 3; /**< [ 7: 5](R/W) Reserved.
+ Internal:
+ Starting address for lite transmit FIFO pops
+ (reads). Changing this allows shifting the latency through the FIFO in steps of
+ 1 txdivclk cycle (8, 10, 16, 20, 32, or 40 UI, depending on data path width
+ setting). The function is similar to FIFO_UNLOAD_DLY, but provides a wider range
+ of adjustment. For diagnostic use only. */
+ uint64_t fifo_bypass_en : 1; /**< [ 8: 8](R/W) For diagnostic use only.
+ Internal:
+ This control is currently inactive and is left as a placeholder for
+ possible re-inclusion in 7nm.
+
+ Set to bypass the PCS lite layer transmit asynchronous FIFO
+ with a single flop. This saves 1-2 cycles of latency in the transmit
+ path, but imposes additional constraints on static timing
+ closure. Note that shallow loopback data cannot bypass the FIFO. */
+ uint64_t fifo_algn_lane_mask : 4; /**< [ 12: 9](R/W) Selection control for which lanes in the current QLM to align in timing the
+ deassertion of reset to this lane's transmitter's clock alignment FIFO.
+ \<0\> = Wait for Lane 0.
+ \<1\> = Wait for Lane 1.
+ \<2\> = Wait for Lane 2.
+ \<3\> = Wait for Lane 3.
+
+ The bit corresponding to the current Lane is ignored. */
+ uint64_t fifo_algn_qlm_mask : 4; /**< [ 16: 13](R/W) Selection control for which QLMs in this QLM's link group to align in timing the
+ deassertion of reset to this lane's transmitter's clock alignment FIFO.
+ \<0\> = Wait for QLM 0.
+ \<1\> = Wait for QLM 1.
+ \<2\> = Wait for QLM 2.
+ \<3\> = Wait for QLM 3.
+
+ If a link is made up of lanes in multiple QLMs, the mask in each lane must
+ include all active QLMs (including the QLM containing the current lane). */
+ uint64_t use_bph_wrreq_psh : 1; /**< [ 17: 17](R/W) Reserved.
+ Internal:
+ Delay the transmit FIFO push request synchronization to the pop side by one
+ txdivclk phase. This is a diagnostic / debug tool to help with transmit lane
+ alignment issues. */
+ uint64_t reserved_18 : 1;
+ uint64_t reverse_rx_bit_order : 1; /**< [ 19: 19](R/W/H) While asserted, the normal receive order (lowest valid bit index
+ received first, highest valid index last) is reversed so the highest
+ valid bit index is received first and lowest valid index is received
+ last. This control needs to be asserted for PRBS testing using the
+ PRBS checker in the GSER macro and for PCIe Gen-1 and Gen-2. */
+ uint64_t inv_rx_polarity : 1; /**< [ 20: 20](R/W/H) Set to invert the polarity of the received data bits. Note that
+ the PCS-lite PRBS checker will require [INV_RX_POLARITY] to be asserted
+ when it is in use to check standard PRBS data from an external
+ source. This value must only be changed while lite layer is in
+ reset. */
+ uint64_t bitstuff_rx_en : 1; /**< [ 21: 21](R/W/H) Set to expect duplicates on the PMA RX data and drop bits after
+ alignment & ordering for PCS layer to consume. The drop ordering is
+ determined by [BITSTUFF_RX_DROP_EVEN]. This value must only be changed
+ while lite layer is in reset. */
+ uint64_t bitstuff_rx_drop_even : 1; /**< [ 22: 22](R/W/H) Tells the PCS lite receive datapath to drop even bits
+ in the vector of received data from the PMA when [BITSTUFF_RX_EN] is
+ set:
+ 0 = Drop bits 1, 3, 5, 7, ...
+ 1 = Drop bits 0, 2, 4, 6, ...
+
+ This bit is also used in the eye monitor to mask out the dropped
+ bits when counting mismatches.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t reserved_23 : 1;
+ uint64_t sloop_mode : 1; /**< [ 24: 24](R/W/H) Enable shallow loopback mode (SerDes receive data looped back to
+ SerDes transmit in the PCS lite layer).
+ This value must only be changed while lite layer is in reset. */
+ uint64_t core_loopback_mode : 1; /**< [ 25: 25](R/W/H) Enable the core-side loopback mode; controller transmit data are
+ looped back to the controller as receive data in the PCS lite layer.
+ This value must only be changed while lite layer is in reset. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t tx_dp_width : 3; /**< [ 34: 32](R/W/H) Tells the PCS lite layer logic what width to use in the transmit
+ data path between the lite layer FIFO and the analog macro, hence
+ what data bits of the tx_data[39:0] bus are in use. Values:
+ 0x0 = 8 (reserved; debug only).
+ 0x1 = 10 (reserved; debug only).
+ 0x2 = 16.
+ 0x3 = 20.
+ 0x4 = 32.
+ 0x5 = 40.
+
+ This value must only be changed while lite layer is in reset. */
+ uint64_t rx_dp_width : 3; /**< [ 37: 35](R/W/H) Tells the PCS lite layer logic what width to use in the receive data
+ path between the analog macro and downstream logic, hence what
+ data bits of the doutq[39:0] bus are in use.
+ 0x0 = 8 (reserved; debug only).
+ 0x1 = 10 (reserved; debug only).
+ 0x2 = 16.
+ 0x3 = 20.
+ 0x4 = 32.
+ 0x5 = 40.
+
+ This value must only be changed while lite layer is in reset. */
+ uint64_t prbs_dp_width : 3; /**< [ 40: 38](R/W/H) Tells the PCS lite layer PRBS logic what width to use in the
+ generator and checker data paths.
+ 0x0 = 8 (requires bit-stuffing/unstuffing or for debug).
+ 0x1 = 10 (requires bit-stuffing/unstuffing or for debug).
+ 0x2 = 16.
+ 0x3 = 20.
+ 0x4 = 32.
+ 0x5 = 40. */
+ uint64_t pat_dp_width : 3; /**< [ 43: 41](R/W/H) Tells the pattern memory generator/checker logic what width to use
+ in the generator and checker data paths.
+ 0x0 = 8 (requires bit-stuffing/unstuffing or for debug).
+ 0x1 = 10 (requires bit-stuffing/unstuffing or for debug).
+ 0x2 = 16.
+ 0x3 = 20.
+ 0x4 = 32.
+ 0x5 = 40.
+
+ Checking of received data
+ works correctly only for clock divider ratios of 10, 20, and 40. The
+ transmit data sequence is correct for all clock ratios. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t inj_err_burst_len : 6; /**< [ 53: 48](R/W) Tells the PCS lite error injection logic what length the burst error
+ mask should be. The max value is set by the valid data width
+ transmitted. For example, if 8 bits of valid data are transmitted
+ each cycle, only from 1-8 bits of contiguous errors can be set. The
+ same for 10, 16, 20, 32, and 40 bits. */
+ uint64_t inj_err_burst_en : 1; /**< [ 54: 54](R/W) PCS will inject a contiguous set of error bits in the transmit data
+ stream at some time following an assertion of [INJ_ERR_BURST_EN]. The
+ length of contiguous errors is set by [INJ_ERR_BURST_LEN]. Injection
+ of a second set of errors will require deasserting and then
+ asserting [INJ_ERR_BURST_EN] again. This mode should be used separately
+ from [INJ_ERR_CNT_EN] and only one of them can be asserted at any time. */
+ uint64_t reserved_55 : 1;
+ uint64_t inj_err_cnt_len : 6; /**< [ 61: 56](R/W) Tells the PCS lite error injection logic the total number of bit errors
+ to insert in a walking pattern. Every other cycle 1 bit error will be
+ inserted in a walking index up to the count value specified. The max
+ value is set by the valid data width transmitted. For example, if 8
+ bits of valid data are transmitted each cycle only from 1-8 count
+ values can be set. The same for 10, 16, 20, 32, and 40 bits. */
+ uint64_t inj_err_cnt_en : 1; /**< [ 62: 62](R/W) PCS will inject a single bit error every other cycle in the transmit
+ data stream at some time following an assertion of
+ [INJ_ERR_CNT_EN]. The number of error cycles to insert is set by
+ [INJ_ERR_CNT_LEN] and it increments the error bit index each
+ cycle. Once all the errors have been transmitted GSER sets
+ GSERN()_LANE()_LT_BSTS[INJ_ERR_CNT_DONE]. Injection of a second set of
+ errors will require clearing the counter by holding [INJ_ERR_CNT_RST_N],
+ asserting [INJ_ERR_CNT_EN], then releasing [INJ_ERR_CNT_RST_N]. This mode
+ should be used separately from [INJ_ERR_BURST_EN] and only one of them
+ can be asserted at any time. */
+ uint64_t inj_err_cnt_rst_n : 1; /**< [ 63: 63](R/W/H) Set to zero to hold the error injection counter in reset. */
+#endif /* Word 0 - End */
+ } s; /* Bitfield view of the register. */
+ /* struct bdk_gsernx_lanex_lt_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_lt_bcfg bdk_gsernx_lanex_lt_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000580ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_BCFG(a,b) bdk_gsernx_lanex_lt_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_LT_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_BCFG(a,b) "GSERNX_LANEX_LT_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_LT_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_bsts
+ *
+ * GSER Lane PCS Lite Status Register
+ */
+union bdk_gsernx_lanex_lt_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_lt_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t inj_err_cnt_done : 1; /**< [ 2: 2](RO/H) Indicates the PCS error injection counter is done. */
+ uint64_t bitstuff_rx_algn_is_odd : 1;/**< [ 1: 1](RO/H) Indicates the PCS receive data path has detected bit-stuffed
+ receive data that is aligned with duplicate bits in pairs as (1,2),
+ (3,4), (5,6), ... The indication is valid only if the receive data
+ are bit-stuffed and error-free. */
+ uint64_t bitstuff_rx_algn_is_even : 1;/**< [ 0: 0](RO/H) Indicates the PCS receive data path has detected bit-stuffed
+ receive data that is aligned with duplicate bits in pairs as (0,1),
+ (2,3), (4,5), ... The indication is valid only if the receive data
+ are bit-stuffed and error-free. */
+#else /* Word 0 - Little Endian */
+ uint64_t bitstuff_rx_algn_is_even : 1;/**< [ 0: 0](RO/H) Indicates the PCS receive data path has detected bit-stuffed
+ receive data that is aligned with duplicate bits in pairs as (0,1),
+ (2,3), (4,5), ... The indication is valid only if the receive data
+ are bit-stuffed and error-free. */
+ uint64_t bitstuff_rx_algn_is_odd : 1;/**< [ 1: 1](RO/H) Indicates the PCS receive data path has detected bit-stuffed
+ receive data that is aligned with duplicate bits in pairs as (1,2),
+ (3,4), (5,6), ... The indication is valid only if the receive data
+ are bit-stuffed and error-free. */
+ uint64_t inj_err_cnt_done : 1; /**< [ 2: 2](RO/H) Indicates the PCS error injection counter is done. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_lt_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_lt_bsts bdk_gsernx_lanex_lt_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000590ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_BSTS(a,b) bdk_gsernx_lanex_lt_bsts_t
+#define bustype_BDK_GSERNX_LANEX_LT_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_BSTS(a,b) "GSERNX_LANEX_LT_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_LT_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_prbs1_bcfg
+ *
+ * GSER Lane PCS Lite PRBS Checker Control Register 1
+ */
+union bdk_gsernx_lanex_lt_prbs1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_lt_prbs1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t prbs_rx_rst_n : 1; /**< [ 59: 59](R/W/H) Clear to hold the receive PRBS pattern checker in reset. */
+ uint64_t prbs_rx_mode : 1; /**< [ 58: 58](R/W/H) Enables PRBS checking in the PCS lite layer receive data path. If
+ using PRBS checking, assert GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_MODE]
+ prior to deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_RST_N]. Software
+ can deassert this bit to stop accumulating error counts without
+ resetting the counter. */
+ uint64_t prbs_tx_rst_n : 1; /**< [ 57: 57](R/W/H) Clear to hold the transmit PRBS pattern generator in reset. */
+ uint64_t prbs_tx_mode : 1; /**< [ 56: 56](R/W/H) Enables PRBS generation and sending PRBS transmit data to the SERDES
+ macro. If using PRBS transmitting, set
+ GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_MODE] prior to deasserting
+ GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. Note that the PCS-lite PRBS
+ generator will require GSERN()_LANE()_LT_BCFG[REVERSE_TX_BIT_ORDER] to be
+ asserted. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t prbs_mode : 4; /**< [ 51: 48](R/W/H) Selects the PRBS pattern mode for both transmit generation and
+ receive checking:
+ 0 = Prbs07 (taps at 6 & 7; reset default).
+ 1 = Prbs7a (taps at 3 & 7).
+ 2 = Prbs09 (taps at 5 & 9).
+ 3 = Prbs11 (taps at 9 & 11).
+ 4 = Prbs15 (taps at 14 & 15).
+ 5 = Prbs20 (taps at 3 & 20).
+ 6 = Prbs23 (taps at 18 & 23).
+ 7 = Prbs29 (taps at 27 & 29).
+ 8 = Prbs31 (taps at 28 & 31).
+ others reserved. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W/H) Enable use of GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT] to limit the number of
+ cycles of PCS RX clock over which PRBS errors are accumulated. */
+ uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W/H) When enabled, this contains the count of PCS receive-clock cycles
+ over which PRBS error counts are accumulated. */
+#else /* Word 0 - Little Endian */
+ uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W/H) When enabled, this contains the count of PCS receive-clock cycles
+ over which PRBS error counts are accumulated. */
+ uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W/H) Enable use of GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT] to limit the number of
+ cycles of PCS RX clock over which PRBS errors are accumulated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t prbs_mode : 4; /**< [ 51: 48](R/W/H) Selects the PRBS pattern mode for both transmit generation and
+ receive checking:
+ 0 = Prbs07 (taps at 6 & 7; reset default).
+ 1 = Prbs7a (taps at 3 & 7).
+ 2 = Prbs09 (taps at 5 & 9).
+ 3 = Prbs11 (taps at 9 & 11).
+ 4 = Prbs15 (taps at 14 & 15).
+ 5 = Prbs20 (taps at 3 & 20).
+ 6 = Prbs23 (taps at 18 & 23).
+ 7 = Prbs29 (taps at 27 & 29).
+ 8 = Prbs31 (taps at 28 & 31).
+ others reserved. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t prbs_tx_mode : 1; /**< [ 56: 56](R/W/H) Enables PRBS generation and sending PRBS transmit data to the SERDES
+ macro. If using PRBS transmitting, set
+ GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_MODE] prior to deasserting
+ GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. Note that the PCS-lite PRBS
+ generator will require GSERN()_LANE()_LT_BCFG[REVERSE_TX_BIT_ORDER] to be
+ asserted. */
+ uint64_t prbs_tx_rst_n : 1; /**< [ 57: 57](R/W/H) Clear to hold the transmit PRBS pattern generator in reset. */
+ uint64_t prbs_rx_mode : 1; /**< [ 58: 58](R/W/H) Enables PRBS checking in the PCS lite layer receive data path. If
+ using PRBS checking, assert GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_MODE]
+ prior to deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_RST_N]. Software
+ can deassert this bit to stop accumulating error counts without
+ resetting the counter. */
+ uint64_t prbs_rx_rst_n : 1; /**< [ 59: 59](R/W/H) Clear to hold the receive PRBS pattern checker in reset. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_lt_prbs1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_lt_prbs1_bcfg bdk_gsernx_lanex_lt_prbs1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000690ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_PRBS1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) bdk_gsernx_lanex_lt_prbs1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) "GSERNX_LANEX_LT_PRBS1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_PRBS1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_prbs2_bcfg
+ *
+ * GSER Lane PCS Lite PRBS Checker Control Register 2
+ */
+union bdk_gsernx_lanex_lt_prbs2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_lt_prbs2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t lock_cnt : 8; /**< [ 55: 48](R/W/H) One less than the number of cycles of matching receive data the PRBS
+ checker needs to see before starting to count errors. The default is 31,
+ for 32 cycles of matching data before starting the PRBS error
+ counter; the maximum setting is 255. Set
+ GSERN()_LANE()_LT_PRBS2_BCFG[LOCK_CNT] as desired before deasserting
+ GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_RST_N]. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t tx_lfsr_use_preload : 1; /**< [ 40: 40](R/W/H) Enables use of the GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE]
+ instead of all zeros in the transmitter LFSR PRBS generator. Set
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD] and
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE] as desired before
+ deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. */
+ uint64_t tx_lfsr_preload_value : 40; /**< [ 39: 0](R/W/H) Initial state of the transmitter LFSR PRBS generator (if enabled by
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD]). When enabled, this
+ value will be loaded when GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]
+ asserts (low). Do not set to all ones, or the LFSR will lock up. Set
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD] and
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE] as desired before
+ deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_lfsr_preload_value : 40; /**< [ 39: 0](R/W/H) Initial state of the transmitter LFSR PRBS generator (if enabled by
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD]). When enabled, this
+ value will be loaded when GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]
+ asserts (low). Do not set to all ones, or the LFSR will lock up. Set
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD] and
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE] as desired before
+ deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. */
+ uint64_t tx_lfsr_use_preload : 1; /**< [ 40: 40](R/W/H) Enables use of the GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE]
+ instead of all zeros in the transmitter LFSR PRBS generator. Set
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_USE_PRELOAD] and
+ GSERN()_LANE()_LT_PRBS2_BCFG[TX_LFSR_PRELOAD_VALUE] as desired before
+ deasserting GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_TX_RST_N]. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t lock_cnt : 8; /**< [ 55: 48](R/W/H) One less than the number of cycles of matching receive data the PRBS
+ checker needs to see before starting to count errors. The default is 31,
+ for 32 cycles of matching data before starting the PRBS error
+ counter; the maximum setting is 255. Set
+ GSERN()_LANE()_LT_PRBS2_BCFG[LOCK_CNT] as desired before deasserting
+ GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_RST_N]. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_lt_prbs2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_lt_prbs2_bcfg bdk_gsernx_lanex_lt_prbs2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900006a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_PRBS2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) bdk_gsernx_lanex_lt_prbs2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) "GSERNX_LANEX_LT_PRBS2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_PRBS2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_lt_prbs_sts
+ *
+ * GSER Lane PCS Lite PRBS Checker Status Register
+ */
+union bdk_gsernx_lanex_lt_prbs_sts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_lt_prbs_sts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t cycle_cnt_done : 1; /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT] has expired
+ if GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] is set. If
+ GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] is clear, this bit will
+ always read as clear. */
+ uint64_t lock : 1; /**< [ 48: 48](RO/H) Indicates the PRBS checker logic has achieved lock prior to
+ starting error counting. */
+ uint64_t err_cnt_ovf : 1; /**< [ 47: 47](RO/H) When asserted, indicates GSERN()_LANE()_LT_PRBS_STS[ERR_CNT] overflowed and
+ is not accurate. */
+ uint64_t reserved_45_46 : 2;
+ uint64_t err_cnt : 45; /**< [ 44: 0](RO/H) Count of PRBS bit errors seen. If GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] and
+ GSERN()_LANE()_LT_PRBS_STS[CYCLE_CNT_DONE] are not both asserted,
+ GSERN()_LANE()_LT_PRBS_STS[ERR_CNT] may not be reliable unless
+ GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_MODE] is first deasserted (to stop
+ the error counter). */
+#else /* Word 0 - Little Endian */
+ uint64_t err_cnt : 45; /**< [ 44: 0](RO/H) Count of PRBS bit errors seen. If GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] and
+ GSERN()_LANE()_LT_PRBS_STS[CYCLE_CNT_DONE] are not both asserted,
+ GSERN()_LANE()_LT_PRBS_STS[ERR_CNT] may not be reliable unless
+ GSERN()_LANE()_LT_PRBS1_BCFG[PRBS_RX_MODE] is first deasserted (to stop
+ the error counter). */
+ uint64_t reserved_45_46 : 2;
+ uint64_t err_cnt_ovf : 1; /**< [ 47: 47](RO/H) When asserted, indicates GSERN()_LANE()_LT_PRBS_STS[ERR_CNT] overflowed and
+ is not accurate. */
+ uint64_t lock : 1; /**< [ 48: 48](RO/H) Indicates the PRBS checker logic has achieved lock prior to
+ starting error counting. */
+ uint64_t cycle_cnt_done : 1; /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT] has expired
+ if GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] is set. If
+ GSERN()_LANE()_LT_PRBS1_BCFG[CYCLE_CNT_EN] is clear, this bit will
+ always read as clear. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_lt_prbs_sts_s cn; */
+};
+typedef union bdk_gsernx_lanex_lt_prbs_sts bdk_gsernx_lanex_lt_prbs_sts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS_STS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_LT_PRBS_STS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900006b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_LT_PRBS_STS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) bdk_gsernx_lanex_lt_prbs_sts_t
+#define bustype_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) "GSERNX_LANEX_LT_PRBS_STS"
+#define device_bar_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_LT_PRBS_STS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_map0
+ *
+ * GSER Lane Programmable Map Register 0
+ * Manually settable option for the interpolator map. If using
+ * GSERN()_LANE()_IMAPSEL_BCFG[MAP_CASE]=0xf, set these bits prior to bringing analog
+ * receiver out of reset.
+ */
+union bdk_gsernx_lanex_map0
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_map0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Map register 0, 64 least significant bits of map 128-bit vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Map register 0, 64 least significant bits of map 128-bit vector. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_map0_s cn; */
+};
+typedef union bdk_gsernx_lanex_map0 bdk_gsernx_lanex_map0_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_MAP0(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_MAP0(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001e00ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_MAP0", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_MAP0(a,b) bdk_gsernx_lanex_map0_t
+#define bustype_BDK_GSERNX_LANEX_MAP0(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_MAP0(a,b) "GSERNX_LANEX_MAP0"
+#define device_bar_BDK_GSERNX_LANEX_MAP0(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_MAP0(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_MAP0(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_map1
+ *
+ * GSER Lane Programmable Map Register 1
+ * Manually settable option for the interpolator map. If using
+ * GSERN()_LANE()_IMAPSEL_BCFG[MAP_CASE]=0xf, set these bits prior to bringing
+ * analog receiver out of reset.
+ */
+union bdk_gsernx_lanex_map1
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_map1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Map register 1, 64 most significant bits of map 128-bit vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 64; /**< [ 63: 0](R/W) Map register 1, 64 most significant bits of map 128-bit vector. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_map1_s cn; */
+};
+typedef union bdk_gsernx_lanex_map1 bdk_gsernx_lanex_map1_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_MAP1(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_MAP1(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001e10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_MAP1", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_MAP1(a,b) bdk_gsernx_lanex_map1_t
+#define bustype_BDK_GSERNX_LANEX_MAP1(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_MAP1(a,b) "GSERNX_LANEX_MAP1"
+#define device_bar_BDK_GSERNX_LANEX_MAP1(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_MAP1(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_MAP1(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_max_oob_add_count
+ *
+ * GSER Lane RX OOB Maximum ADDER Durations Counted Register
+ * Observes the maximum number of times we had to delay the idle offset
+ * recalibration because of a collision with an OOB event.
+ */
+union bdk_gsernx_lanex_max_oob_add_count
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_max_oob_add_count_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t accumulated_oob_adders : 8; /**< [ 7: 0](RO/H) Observed maximum number of OOB ADDERS applied to the idle offset
+ recalibration FSM that delay the calibration. This is in terms of
+ how many GSERN()_LANE()_RX_IDLE_CAL_CFG[OOB_DELAY_ADDER_COUNT] ticks added to
+ the duration between recalibrations. */
+#else /* Word 0 - Little Endian */
+ uint64_t accumulated_oob_adders : 8; /**< [ 7: 0](RO/H) Observed maximum number of OOB ADDERS applied to the idle offset
+ recalibration FSM that delay the calibration. This is in terms of
+ how many GSERN()_LANE()_RX_IDLE_CAL_CFG[OOB_DELAY_ADDER_COUNT] ticks added to
+ the duration between recalibrations. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_max_oob_add_count_s cn; */
+};
+typedef union bdk_gsernx_lanex_max_oob_add_count bdk_gsernx_lanex_max_oob_add_count_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001550ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_MAX_OOB_ADD_COUNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) bdk_gsernx_lanex_max_oob_add_count_t
+#define bustype_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) "GSERNX_LANEX_MAX_OOB_ADD_COUNT"
+#define device_bar_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_MAX_OOB_ADD_COUNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_ocx_txeq_bcfg
+ *
+ * GSER Lane OCX Tx Equalizer Base Configuration Register
+ * Register controls settings for the transmitter equalizer taps
+ * when the GSER is configured for OCX mode and KR training is not enabled.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL] is set to 'OCX'.
+ */
+union bdk_gsernx_lanex_ocx_txeq_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_ocx_txeq_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t tx_coeff_update : 1; /**< [ 27: 27](R/W/H) Transmitter coefficient update.
+ An asserting edge will start the transmitter coefficient update
+ sequencer. This field self-clears when the sequence has completed.
+ To update the GSER transmitter equalizer coefficients program
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CSPD].
+
+ then write [TX_COEFF_UPDATE] to 1. */
+ uint64_t tx_enable : 1; /**< [ 26: 26](R/W) Transmitter enable.
+ 0 = Disable the serdes transmitter.
+ 1 = Enable the serdes transmitter.
+
+ Internal:
+ Drives the ocx_tx_enable input to the GSERN src_mux. */
+ uint64_t tx_stuff : 1; /**< [ 25: 25](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter bit stuffing.
+ Programs the transmitter PCS lite layer for bit stuffing.
+ Not used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_stuff input to the GSERN src_mux. */
+ uint64_t tx_oob : 1; /**< [ 24: 24](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter OOB signaling.
+ Not typically used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_oob input to the GSERN src_mux. */
+ uint64_t tx_idle : 1; /**< [ 23: 23](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter electrical idle.
+ Used to force the transmitter to electrical idle.
+ Not typically used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_idle input to the GSERN src_mux. */
+ uint64_t tx_cspd : 1; /**< [ 22: 22](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t tx_bs : 6; /**< [ 21: 16](R/W) TX bias/swing selection. This setting only takes effect if [TX_CSPD] is
+ deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t tx_cpost : 5; /**< [ 15: 11](R/W) Transmitter Post (C+1) equalizer tap coefficient value.
+ Programs the transmitter Post tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_cmain : 6; /**< [ 10: 5](R/W) Transmitter Main (C0) equalizer tap coefficient value.
+ Programs the serdes transmitter Main tap.
+ Valid range is 0x30 to 0x18.
+ When programming the transmitter Pre, Main, and Post
+ taps the following rules must be adhered to:
+ _ ([TX_CMAIN] + [TX_CPRE] + [TX_CPOST]) \<= 0x30.
+ _ ([TX_CMAIN] - [TX_CPRE] - [TX_CPOST]) \>= 0x6.
+ _ 0x30 \>= [TX_CMAIN] \>= 0x18.
+ _ 0x16 \>= [TX_CPRE] \>= 0x0.
+ _ 0x16 \>= [TX_CPOST] \>= 0x0.
+
+ [TX_CMAIN] should be adjusted when either [TX_CPRE] or
+ [TX_CPOST] is adjusted to provide constant power transmitter
+ amplitude adjustments.
+
+ To update the GSER serdes transmitter Pre, Main, and Post
+ equalizer taps from the [TX_CPOST], [TX_CMAIN], and [TX_CPRE]
+ fields write GSERN()_LANE()_OCX_TXEQ_BCFG[TX_COEFF_UPDATE]
+ to 1 and subsequently clear [TX_COEFF_UPDATE] to 0. This step
+ transfers the [TX_CPOST], [TX_CMAIN], and [TX_CPRE] to the
+ serdes transmitter equalizer.
+
+ Related CSRs:
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_COEFF_UPDATE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CSPD]. */
+ uint64_t tx_cpre : 5; /**< [ 4: 0](R/W) Transmitter Pre (C-1) equalizer tap coefficient value.
+ Programs the transmitter Pre tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_cpre : 5; /**< [ 4: 0](R/W) Transmitter Pre (C-1) equalizer tap coefficient value.
+ Programs the transmitter Pre tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_cmain : 6; /**< [ 10: 5](R/W) Transmitter Main (C0) equalizer tap coefficient value.
+ Programs the serdes transmitter Main tap.
+ Valid range is 0x30 to 0x18.
+ When programming the transmitter Pre, Main, and Post
+ taps the following rules must be adhered to:
+ _ ([TX_CMAIN] + [TX_CPRE] + [TX_CPOST]) \<= 0x30.
+ _ ([TX_CMAIN] - [TX_CPRE] - [TX_CPOST]) \>= 0x6.
+ _ 0x30 \>= [TX_CMAIN] \>= 0x18.
+ _ 0x16 \>= [TX_CPRE] \>= 0x0.
+ _ 0x16 \>= [TX_CPOST] \>= 0x0.
+
+ [TX_CMAIN] should be adjusted when either [TX_CPRE] or
+ [TX_CPOST] is adjusted to provide constant power transmitter
+ amplitude adjustments.
+
+ To update the GSER serdes transmitter Pre, Main, and Post
+ equalizer taps from the [TX_CPOST], [TX_CMAIN], and [TX_CPRE]
+ fields write GSERN()_LANE()_OCX_TXEQ_BCFG[TX_COEFF_UPDATE]
+ to 1 and subsequently clear [TX_COEFF_UPDATE] to 0. This step
+ transfers the [TX_CPOST], [TX_CMAIN], and [TX_CPRE] to the
+ serdes transmitter equalizer.
+
+ Related CSRs:
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_COEFF_UPDATE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CSPD]. */
+ uint64_t tx_cpost : 5; /**< [ 15: 11](R/W) Transmitter Post (C+1) equalizer tap coefficient value.
+ Programs the transmitter Post tap.
+ Valid range is 0 to 0x10.
+ See GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN]. */
+ uint64_t tx_bs : 6; /**< [ 21: 16](R/W) TX bias/swing selection. This setting only takes effect if [TX_CSPD] is
+ deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t tx_cspd : 1; /**< [ 22: 22](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t tx_idle : 1; /**< [ 23: 23](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter electrical idle.
+ Used to force the transmitter to electrical idle.
+ Not typically used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_idle input to the GSERN src_mux. */
+ uint64_t tx_oob : 1; /**< [ 24: 24](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter OOB signaling.
+ Not typically used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_oob input to the GSERN src_mux. */
+ uint64_t tx_stuff : 1; /**< [ 25: 25](R/W) Reserved. For Diagnostic Use Only.
+ Internal:
+ Transmitter bit stuffing.
+ Programs the transmitter PCS lite layer for bit stuffing.
+ Not used for OCX connections.
+ Leave programmed to 0x0.
+ Drives the ocx_tx_stuff input to the GSERN src_mux. */
+ uint64_t tx_enable : 1; /**< [ 26: 26](R/W) Transmitter enable.
+ 0 = Disable the serdes transmitter.
+ 1 = Enable the serdes transmitter.
+
+ Internal:
+ Drives the ocx_tx_enable input to the GSERN src_mux. */
+ uint64_t tx_coeff_update : 1; /**< [ 27: 27](R/W/H) Transmitter coefficient update.
+ An asserting edge will start the transmitter coefficient update
+ sequencer. This field self-clears when the sequence has completed.
+ To update the GSER transmitter equalizer coefficients program
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPOST].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CMAIN].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CPRE].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_BS].
+ * GSERN()_LANE()_OCX_TXEQ_BCFG[TX_CSPD].
+
+ then write [TX_COEFF_UPDATE] to 1. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_ocx_txeq_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_ocx_txeq_bcfg bdk_gsernx_lanex_ocx_txeq_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003550ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_OCX_TXEQ_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) bdk_gsernx_lanex_ocx_txeq_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) "GSERNX_LANEX_OCX_TXEQ_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_OCX_TXEQ_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pat#
+ *
+ * GSER Lane Pattern Memory Register
+ */
+union bdk_gsernx_lanex_patx
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_patx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t dat : 40; /**< [ 39: 0](R/W) Pattern Memory Registers. All 40b of both registers are used under
+ all clock ratios except 32:1. In 32b (32:1) mode bits [31:0] of each
+ register are used. The total pattern length is 64b in 32b mode and
+ 80b in all other clock modes.
+
+ The bit pattern in bits [N-1:0] of PAT[0], where N is the clock
+ ratio, must be unique within the overall pattern to allow the
+ pattern checker to correctly lock before checking for errors.
+
+ Internal:
+ If the pattern data in this register is written while pattern transmission
+ testing is in progress, the transmitted data may be briefly unpredictable. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 40; /**< [ 39: 0](R/W) Pattern Memory Registers. All 40b of both registers are used under
+ all clock ratios except 32:1. In 32b (32:1) mode bits [31:0] of each
+ register are used. The total pattern length is 64b in 32b mode and
+ 80b in all other clock modes.
+
+ The bit pattern in bits [N-1:0] of PAT[0], where N is the clock
+ ratio, must be unique within the overall pattern to allow the
+ pattern checker to correctly lock before checking for errors.
+
+ Internal:
+ If the pattern data in this register is written while pattern transmission
+ testing is in progress, the transmitted data may be briefly unpredictable. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_patx_s cn; */
+};
+typedef union bdk_gsernx_lanex_patx bdk_gsernx_lanex_patx_t;
+
+/* Compute the CSR address of GSERN(a)_LANE(b)_PAT(c). Valid only on
+ * CN9XXX-model parts with a<=7, b<=4 and c<=1; any other combination is
+ * reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_PATX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PATX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4) && (c<=1)))
+ return 0x87e090007ff0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7) + 8ll * ((c) & 0x1);
+ __bdk_csr_fatal("GSERNX_LANEX_PATX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PATX(a,b,c) bdk_gsernx_lanex_patx_t
+#define bustype_BDK_GSERNX_LANEX_PATX(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PATX(a,b,c) "GSERNX_LANEX_PATX"
+#define device_bar_BDK_GSERNX_LANEX_PATX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PATX(a,b,c) (a)
+#define arguments_BDK_GSERNX_LANEX_PATX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) gsern#_lane#_pat_ctrl
+ *
+ * GSER Lane PCS Lite Pattern Memory Stress Control Register
+ */
+union bdk_gsernx_lanex_pat_ctrl
+{
+ uint64_t u; /* Whole-register (raw 64-bit) view. */
+ struct bdk_gsernx_lanex_pat_ctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t tx_rst_n : 1; /**< [ 50: 50](R/W) Clear and then set to reset the pattern memory stress transmit
+ data path, specifically the pattern memory index counter. */
+ uint64_t rx_rst_n : 1; /**< [ 49: 49](R/W) Clear and then set to reset the pattern memory stress
+ receive checking data path, including the lock indication and the
+ error counts. */
+ uint64_t en : 1; /**< [ 48: 48](R/W) Enable (i.e., start, or stop if deasserted) pattern memory stress
+ generation and checking. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W) Enable use of GSERN()_LANE()_PAT_CTRL[CYCLE_CNT] to limit number of cycles
+ of PCS RX clock over which the pattern memory loopback errors are
+ accumulated. */
+ uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W) When enabled by GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN], this contains the
+ count of PCS receive-clock cycles over which pattern memory loopback
+ error counts are accumulated. */
+#else /* Word 0 - Little Endian */
+ uint64_t cycle_cnt : 40; /**< [ 39: 0](R/W) When enabled by GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN], this contains the
+ count of PCS receive-clock cycles over which pattern memory loopback
+ error counts are accumulated. */
+ uint64_t cycle_cnt_en : 1; /**< [ 40: 40](R/W) Enable use of GSERN()_LANE()_PAT_CTRL[CYCLE_CNT] to limit number of cycles
+ of PCS RX clock over which the pattern memory loopback errors are
+ accumulated. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t en : 1; /**< [ 48: 48](R/W) Enable (i.e., start, or stop if deasserted) pattern memory stress
+ generation and checking. */
+ uint64_t rx_rst_n : 1; /**< [ 49: 49](R/W) Clear and then set to reset the pattern memory stress
+ receive checking data path, including the lock indication and the
+ error counts. */
+ uint64_t tx_rst_n : 1; /**< [ 50: 50](R/W) Clear and then set to reset the pattern memory stress transmit
+ data path, specifically the pattern memory index counter. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pat_ctrl_s cn; */
+};
+typedef union bdk_gsernx_lanex_pat_ctrl bdk_gsernx_lanex_pat_ctrl_t;
+
+/* Compute the CSR address of GSERN(a)_LANE(b)_PAT_CTRL. Valid only on
+ * CN9XXX-model parts with a<=7 and b<=4; any other combination is reported
+ * through __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_PAT_CTRL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PAT_CTRL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090007fd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PAT_CTRL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PAT_CTRL(a,b) bdk_gsernx_lanex_pat_ctrl_t
+#define bustype_BDK_GSERNX_LANEX_PAT_CTRL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PAT_CTRL(a,b) "GSERNX_LANEX_PAT_CTRL"
+#define device_bar_BDK_GSERNX_LANEX_PAT_CTRL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PAT_CTRL(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PAT_CTRL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pat_dat
+ *
+ * GSER Lane PCS Lite Pattern Memory Stress Data Result Register
+ */
+union bdk_gsernx_lanex_pat_dat
+{
+ uint64_t u; /* Whole-register (raw 64-bit) view. */
+ struct bdk_gsernx_lanex_pat_dat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t framing_match : 1; /**< [ 63: 63](RO/H) Indicates that the pattern memory checker found a framing match. This field is
+ valid only after enabling pattern memory generation and checking by setting
+ GSERN()_LANE()_PAT_CTRL[EN]. */
+ uint64_t reserved_62 : 1;
+ uint64_t framing_offset : 6; /**< [ 61: 56](RO/H) The offset the pattern memory checker found of the low bits of the pattern data
+ in the receive data frame. This field is valid only when [FRAMING_MATCH]
+ reads as asserted after enabling pattern memory generation and checking by
+ setting GSERN()_LANE()_PAT_CTRL[EN]. */
+ uint64_t reserved_50_55 : 6;
+ uint64_t cycle_cnt_done : 1; /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_PAT_CTRL[CYCLE_CNT] has expired if
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] is asserted. If
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] is deasserted,
+ GSERN()_LANE()_PAT_DAT[CYCLE_CNT_DONE] will always read as asserted. */
+ uint64_t lock : 1; /**< [ 48: 48](RO/H) Indicates the pattern memory checker has achieved lock. */
+ uint64_t err_cnt_ovf : 1; /**< [ 47: 47](RO/H) When asserted indicates GSERN()_LANE()_PAT_DAT[ERR_CNT] overflowed and is
+ not accurate. */
+ uint64_t reserved_45_46 : 2;
+ uint64_t err_cnt : 45; /**< [ 44: 0](RO/H) Count of bit errors seen in pattern memory loopback testing. If
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] and GSERN()_LANE()_PAT_DAT[CYCLE_CNT_DONE]
+ are not both asserted, GSERN()_LANE()_PAT_DAT[ERR_CNT] may not be reliable
+ unless GSERN()_LANE()_PAT_CTRL[EN] is first deasserted (to stop the error
+ counter). */
+#else /* Word 0 - Little Endian */
+ uint64_t err_cnt : 45; /**< [ 44: 0](RO/H) Count of bit errors seen in pattern memory loopback testing. If
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] and GSERN()_LANE()_PAT_DAT[CYCLE_CNT_DONE]
+ are not both asserted, GSERN()_LANE()_PAT_DAT[ERR_CNT] may not be reliable
+ unless GSERN()_LANE()_PAT_CTRL[EN] is first deasserted (to stop the error
+ counter). */
+ uint64_t reserved_45_46 : 2;
+ uint64_t err_cnt_ovf : 1; /**< [ 47: 47](RO/H) When asserted indicates GSERN()_LANE()_PAT_DAT[ERR_CNT] overflowed and is
+ not accurate. */
+ uint64_t lock : 1; /**< [ 48: 48](RO/H) Indicates the pattern memory checker has achieved lock. */
+ uint64_t cycle_cnt_done : 1; /**< [ 49: 49](RO/H) Indicates the GSERN()_LANE()_PAT_CTRL[CYCLE_CNT] has expired if
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] is asserted. If
+ GSERN()_LANE()_PAT_CTRL[CYCLE_CNT_EN] is deasserted,
+ GSERN()_LANE()_PAT_DAT[CYCLE_CNT_DONE] will always read as asserted. */
+ uint64_t reserved_50_55 : 6;
+ uint64_t framing_offset : 6; /**< [ 61: 56](RO/H) The offset the pattern memory checker found of the low bits of the pattern data
+ in the receive data frame. This field is valid only when [FRAMING_MATCH]
+ reads as asserted after enabling pattern memory generation and checking by
+ setting GSERN()_LANE()_PAT_CTRL[EN]. */
+ uint64_t reserved_62 : 1;
+ uint64_t framing_match : 1; /**< [ 63: 63](RO/H) Indicates that the pattern memory checker found a framing match. This field is
+ valid only after enabling pattern memory generation and checking by setting
+ GSERN()_LANE()_PAT_CTRL[EN]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pat_dat_s cn; */
+};
+typedef union bdk_gsernx_lanex_pat_dat bdk_gsernx_lanex_pat_dat_t;
+
+/* Compute the CSR address of GSERN(a)_LANE(b)_PAT_DAT. Valid only on
+ * CN9XXX-model parts with a<=7 and b<=4; any other combination is reported
+ * through __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_PAT_DAT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PAT_DAT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090007fe0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PAT_DAT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PAT_DAT(a,b) bdk_gsernx_lanex_pat_dat_t
+#define bustype_BDK_GSERNX_LANEX_PAT_DAT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PAT_DAT(a,b) "GSERNX_LANEX_PAT_DAT"
+#define device_bar_BDK_GSERNX_LANEX_PAT_DAT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PAT_DAT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PAT_DAT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_pcs2_bcfg
+ *
+ * GSER Lane PCIe PCS Control 2 Register
+ * Control settings for PCIe PCS functionality.
+ */
+union bdk_gsernx_lanex_pcie_pcs2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_pcs2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pause_adpt_rxstandby : 4; /**< [ 63: 60](R/W) Set to one to allow the PIPE RxStandby to pause all adaptation functions and
+ hold the CDRFSM when the PCIe lane is operating at the corresponding rate.
+ The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t frc_unalgn_rxstandby : 4; /**< [ 59: 56](R/W) Enables use of RxStandby to force the RX PCS into unalign state with
+ an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t frc_unalgn_rxelecidle : 4; /**< [ 55: 52](R/W) Enables use of detected RxElecIdle to force the RX PCS into unalign state
+ with an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t frc_unalgn_blkalgnctl : 2; /**< [ 51: 50](R/W) Enables use of BlockAlignControl assertion to force the RX PCS into unalign state
+ with an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen3.
+ \<1\> = PCIe gen4. */
+ uint64_t pipe_tx_sel : 2; /**< [ 49: 48](R/W) Selects the source for the transmit PIPE controls:
+ \<0\> = PCIe pipe 0 transmit.
+ \<1\> = PCIe pipe 1 transmit.
+ \<2\> = PCIe pipe 2 transmit.
+ \<3\> = Reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t gen34_pll_div_f : 18; /**< [ 45: 28](R/W) PLL feedback divider fractional portion. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t gen12_pll_div_f : 18; /**< [ 25: 8](R/W) PLL feedback divider fractional portion. */
+ uint64_t pause_adpt_on_idle : 4; /**< [ 7: 4](R/W) Set to one to allow the Rx Electrical Idle to pause all adaptation functions and
+ hold the CDRFSM when the PCIe lane is operating at the corresponding rate.
+ The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_prevga_gn_adpt : 4; /**< [ 3: 0](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+#else /* Word 0 - Little Endian */
+ uint64_t do_prevga_gn_adpt : 4; /**< [ 3: 0](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t pause_adpt_on_idle : 4; /**< [ 7: 4](R/W) Set to one to allow the Rx Electrical Idle to pause all adaptation functions and
+ hold the CDRFSM when the PCIe lane is operating at the corresponding rate.
+ The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t gen12_pll_div_f : 18; /**< [ 25: 8](R/W) PLL feedback divider fractional portion. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t gen34_pll_div_f : 18; /**< [ 45: 28](R/W) PLL feedback divider fractional portion. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t pipe_tx_sel : 2; /**< [ 49: 48](R/W) Selects the source for the transmit PIPE controls:
+ \<0\> = PCIe pipe 0 transmit.
+ \<1\> = PCIe pipe 1 transmit.
+ \<2\> = PCIe pipe 2 transmit.
+ \<3\> = Reserved. */
+ uint64_t frc_unalgn_blkalgnctl : 2; /**< [ 51: 50](R/W) Enables use of BlockAlignControl assertion to force the RX PCS into unalign state
+ with an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen3.
+ \<1\> = PCIe gen4. */
+ uint64_t frc_unalgn_rxelecidle : 4; /**< [ 55: 52](R/W) Enables use of detected RxElecIdle to force the RX PCS into unalign state
+ with an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t frc_unalgn_rxstandby : 4; /**< [ 59: 56](R/W) Enables use of RxStandby to force the RX PCS into unalign state with
+ an individual control bit per PCIe rate mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t pause_adpt_rxstandby : 4; /**< [ 63: 60](R/W) Set to one to allow the PIPE RxStandby to pause all adaptation functions and
+ hold the CDRFSM when the PCIe lane is operating at the corresponding rate.
+ The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_pcs2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_pcs2_bcfg bdk_gsernx_lanex_pcie_pcs2_bcfg_t;
+
+/* Compute the CSR address of GSERN(a)_LANE(b)_PCIE_PCS2_BCFG. Valid only on
+ * CN9XXX-model parts with a<=7 and b<=4; any other combination is reported
+ * through __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001f20ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_PCS2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) bdk_gsernx_lanex_pcie_pcs2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) "GSERNX_LANEX_PCIE_PCS2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_PCS2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_pcs3_bcfg
+ *
+ * GSER Lane PCIe PCS Control 3 Register
+ * Control settings for PCIe PCS functionality.
+ */
+union bdk_gsernx_lanex_pcie_pcs3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_pcs3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_36_63 : 28;
+ uint64_t tx_enfast : 4; /**< [ 35: 32](R/W) Enables fast slew on the TX preamp output with an individual control bit
+ per PCIe rate mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_afeos_final : 4; /**< [ 31: 28](R/W) Set to one to allow AFEOS adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_AFEOS_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_ctlelte_final : 4; /**< [ 27: 24](R/W) Set to one to allow CTLELTE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLELTE_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_ctlez_final : 4; /**< [ 23: 20](R/W) Set to one to allow CTLEZ adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLEZ_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_ctle_final : 4; /**< [ 19: 16](R/W) Set to one to allow CTLE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLE_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_dfe_final : 4; /**< [ 15: 12](R/W) Set to one to allow DFE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_DFE_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_vga_final : 4; /**< [ 11: 8](R/W) Set to one to allow VGA adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_VGA_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_blwc_final : 4; /**< [ 7: 4](R/W) Set to one to allow BLWC adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_BLWC_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_prevga_gn_final : 4; /**< [ 3: 0](R/W) Set to one to allow PREVGA_GN adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS2_BCFG[DO_PREVGA_GN_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+#else /* Word 0 - Little Endian */
+ uint64_t do_prevga_gn_final : 4; /**< [ 3: 0](R/W) Set to one to allow PREVGA_GN adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS2_BCFG[DO_PREVGA_GN_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_blwc_final : 4; /**< [ 7: 4](R/W) Set to one to allow BLWC adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_BLWC_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_vga_final : 4; /**< [ 11: 8](R/W) Set to one to allow VGA adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_VGA_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_dfe_final : 4; /**< [ 15: 12](R/W) Set to one to allow DFE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_DFE_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_ctle_final : 4; /**< [ 19: 16](R/W) Set to one to allow CTLE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLE_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_ctlez_final : 4; /**< [ 23: 20](R/W) Set to one to allow CTLEZ adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLEZ_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_ctlelte_final : 4; /**< [ 27: 24](R/W) Set to one to allow CTLELTE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_CTLELTE_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t do_afeos_final : 4; /**< [ 31: 28](R/W) Set to one to allow AFEOS adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_PCIE_PCS_BCFG[DO_AFEOS_ADPT] is set and the PCIe lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t tx_enfast : 4; /**< [ 35: 32](R/W) Enables fast slew on the TX preamp output with an individual control bit
+ per PCIe rate mapped as follows:
+ \<0\> = PCIe Gen1.
+ \<1\> = PCIe Gen2.
+ \<2\> = PCIe Gen3.
+ \<3\> = PCIe Gen4. */
+ uint64_t reserved_36_63 : 28;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_pcs3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_pcs3_bcfg bdk_gsernx_lanex_pcie_pcs3_bcfg_t;
+
+/* Compute the CSR address of GSERN(a)_LANE(b)_PCIE_PCS3_BCFG. Valid only on
+ * CN9XXX-model parts with a<=7 and b<=4; any other combination is reported
+ * through __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001f30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_PCS3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) bdk_gsernx_lanex_pcie_pcs3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) "GSERNX_LANEX_PCIE_PCS3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_PCS3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_pcs_bcfg
+ *
+ * GSER Lane PCIe PCS Control Register
+ * Control settings for PCIe PCS functionality.
+ */
+union bdk_gsernx_lanex_pcie_pcs_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_pcs_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t do_afeos_adpt : 4; /**< [ 63: 60](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_ctlelte_adpt : 4; /**< [ 59: 56](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_ctlez_adpt : 4; /**< [ 55: 52](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_ctle_adpt : 4; /**< [ 51: 48](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_dfe_adpt : 4; /**< [ 47: 44](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_vga_adpt : 4; /**< [ 43: 40](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_blwc_adpt : 4; /**< [ 39: 36](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t gen34_pll_div_n : 9; /**< [ 35: 27](R/W) PLL feedback divider integer portion. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t gen12_pll_div_n : 9; /**< [ 24: 16](R/W) PLL feedback divider integer portion. */
+ uint64_t skp_add_thr : 4; /**< [ 15: 12](R/W) SKP addition threshold.
+ The receive elastic store will add a SKP symbol (Gen1/2) or add four
+ SKP symbols (Gen3/4) when the store fill level is less than or equal
+ to this value. */
+ uint64_t skp_del_thr : 4; /**< [ 11: 8](R/W) SKP deletion threshold.
+ The receive elastic store will delete a SKP symbol (Gen1/2) or delete
+ four SKP symbols (Gen3/4) when the store fill level is greater than or
+ equal to this value plus 8. */
+ uint64_t comma_thr : 4; /**< [ 7: 4](R/W) COMMA detection threshold. The receive aligner must see this many
+ COMMA characters at the same rotation before declaring symbol
+ alignment (only used for Gen1/2). */
+ uint64_t error_thr : 4; /**< [ 3: 0](R/W) Error threshold. The receive aligner must see this many COMMA
+ characters at a different rotation than currently in use before
+ declaring loss of symbol alignment (Gen1/2). For Gen3/4 this is
+ the number of invalid Sync Headers needed to cause the aligner
+ to enter the Unaligned Phase and declare an alignment error. */
+#else /* Word 0 - Little Endian */
+ uint64_t error_thr : 4; /**< [ 3: 0](R/W) Error threshold. The receive aligner must see this many COMMA
+ characters at a different rotation than currently in use before
+ declaring loss of symbol alignment (Gen1/2). For Gen3/4 this is
+ the number of invalid Sync Headers needed to cause the aligner
+ to enter the Unaligned Phase and declare an alignment error. */
+ uint64_t comma_thr : 4; /**< [ 7: 4](R/W) COMMA detection threshold. The receive aligner must see this many
+ COMMA characters at the same rotation before declaring symbol
+ alignment (only used for Gen1/2). */
+ uint64_t skp_del_thr : 4; /**< [ 11: 8](R/W) SKP deletion threshold.
+ The receive elastic store will delete a SKP symbol (Gen1/2) or delete
+ four SKP symbols (Gen3/4) when the store fill level is greater than or
+ equal to this value plus 8. */
+ uint64_t skp_add_thr : 4; /**< [ 15: 12](R/W) SKP addition threshold.
+ The receive elastic store will add a SKP symbol (Gen1/2) or add four
+ SKP symbols (Gen3/4) when the store fill level is less than or equal
+ to this value. */
+ uint64_t gen12_pll_div_n : 9; /**< [ 24: 16](R/W) PLL feedback divider integer portion. */
+ uint64_t reserved_25_26 : 2;
+ uint64_t gen34_pll_div_n : 9; /**< [ 35: 27](R/W) PLL feedback divider integer portion. */
+ uint64_t do_blwc_adpt : 4; /**< [ 39: 36](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_vga_adpt : 4; /**< [ 43: 40](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_dfe_adpt : 4; /**< [ 47: 44](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_ctle_adpt : 4; /**< [ 51: 48](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_ctlez_adpt : 4; /**< [ 55: 52](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_ctlelte_adpt : 4; /**< [ 59: 56](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+ uint64_t do_afeos_adpt : 4; /**< [ 63: 60](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the PCIe lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = PCIe gen1.
+ \<1\> = PCIe gen2.
+ \<2\> = PCIe gen3.
+ \<3\> = PCIe gen4. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_pcs_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_pcs_bcfg bdk_gsernx_lanex_pcie_pcs_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS_BCFG(unsigned long a, unsigned long b)
+{
+    /* Compute the CSR address of GSERN(a)_LANE(b)_PCIE_PCS_BCFG.
+       Only valid on CN9XXX with a <= 7 and b <= 4 (indices per the
+       GSERNX_LANEX register naming). */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001f10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    /* Out-of-range index or wrong model: report a fatal CSR access error. */
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_PCS_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) bdk_gsernx_lanex_pcie_pcs_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) "GSERNX_LANEX_PCIE_PCS_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_PCS_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_pcs_bsts
+ *
+ * GSER Lane PCIe PCS Status Register
+ * Error Status for PCIe PCS functionality.
+ */
+union bdk_gsernx_lanex_pcie_pcs_bsts
+{
+    uint64_t u;
+    /* Bit-field view of the 64-bit register; the same fields are declared
+       twice, once in big-endian and once in little-endian order. */
+    struct bdk_gsernx_lanex_pcie_pcs_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_28_63 : 36;
+        uint64_t pcs_rx_eq_raw_fom : 12; /**< [ 27: 16](RO/H) Raw 12-bit figure of merit for last receiver equalization evaluation. */
+        uint64_t reserved_5_15 : 11;
+        uint64_t pcs_8b10b_disp_error : 1; /**< [ 4: 4](R/W1C/H) 8B10B disparity error (PCIe Gen1/2 only).
+ A valid 8B10B code word was received with invalid disparity. */
+        uint64_t pcs_decode_error : 1; /**< [ 3: 3](R/W1C/H) 8B10B decode error (PCIe Gen1/2).
+ An invalid 8B10B code word was detected. The invalid code word was
+ replaced by an EDB symbol (0xFE).
+
+ 128B130B decode error (PCIe Gen3/4).
+ An error was detected in the first 4N+1 symbols of a SKP ordered set. */
+        uint64_t es_underflow : 1; /**< [ 2: 2](R/W1C/H) Elastic store underflow.
+ A read was attempted from the receive Elastic Store while it was empty.
+ This would indicate a receive data rate slower than supported or a
+ lack of SKP ordered sets to allow SKP symbol additions. */
+        uint64_t es_overflow : 1; /**< [ 1: 1](R/W1C/H) Elastic store overflow.
+ A write was attempted to the receive Elastic Store while it was full.
+ This would indicate a receive data rate faster than supported or a
+ lack of SKP ordered sets to allow SKP symbol deletions. */
+        uint64_t align_error : 1; /**< [ 0: 0](R/W1C/H) Alignment error.
+ The receive aligner has detected an error. For PCIe Gen1/2, an error is
+ declared if GSERN()_LANE()_PCIE_PCS_BCFG[ERROR_THR]
+ COMMA characters are detected at a 10 bit rotation that does not match
+ the active rotation. The COMMAs do not have to all be at the same rotation.
+ For PCIe Gen3/4, an error is declared if GSERN()_LANE()_PCIE_PCS_BCFG[ERROR_THR]
+ invalid sync headers are detected at the current block alignment. */
+#else /* Word 0 - Little Endian */
+        uint64_t align_error : 1; /**< [ 0: 0](R/W1C/H) Alignment error.
+ The receive aligner has detected an error. For PCIe Gen1/2, an error is
+ declared if GSERN()_LANE()_PCIE_PCS_BCFG[ERROR_THR]
+ COMMA characters are detected at a 10 bit rotation that does not match
+ the active rotation. The COMMAs do not have to all be at the same rotation.
+ For PCIe Gen3/4, an error is declared if GSERN()_LANE()_PCIE_PCS_BCFG[ERROR_THR]
+ invalid sync headers are detected at the current block alignment. */
+        uint64_t es_overflow : 1; /**< [ 1: 1](R/W1C/H) Elastic store overflow.
+ A write was attempted to the receive Elastic Store while it was full.
+ This would indicate a receive data rate faster than supported or a
+ lack of SKP ordered sets to allow SKP symbol deletions. */
+        uint64_t es_underflow : 1; /**< [ 2: 2](R/W1C/H) Elastic store underflow.
+ A read was attempted from the receive Elastic Store while it was empty.
+ This would indicate a receive data rate slower than supported or a
+ lack of SKP ordered sets to allow SKP symbol additions. */
+        uint64_t pcs_decode_error : 1; /**< [ 3: 3](R/W1C/H) 8B10B decode error (PCIe Gen1/2).
+ An invalid 8B10B code word was detected. The invalid code word was
+ replaced by an EDB symbol (0xFE).
+
+ 128B130B decode error (PCIe Gen3/4).
+ An error was detected in the first 4N+1 symbols of a SKP ordered set. */
+        uint64_t pcs_8b10b_disp_error : 1; /**< [ 4: 4](R/W1C/H) 8B10B disparity error (PCIe Gen1/2 only).
+ A valid 8B10B code word was received with invalid disparity. */
+        uint64_t reserved_5_15 : 11;
+        uint64_t pcs_rx_eq_raw_fom : 12; /**< [ 27: 16](RO/H) Raw 12-bit figure of merit for last receiver equalization evaluation. */
+        uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_pcs_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_pcs_bsts bdk_gsernx_lanex_pcie_pcs_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_PCS_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR address of GSERN(a)_LANE(b)_PCIE_PCS_BSTS; CN9XXX only, a <= 7, b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002a30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    /* Out-of-range index or wrong model: report a fatal CSR access error. */
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_PCS_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) bdk_gsernx_lanex_pcie_pcs_bsts_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) "GSERNX_LANEX_PCIE_PCS_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_PCS_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp1_bcfg
+ *
+ * GSER Lane PCIe PowerDown P1 Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P1.
+ */
+union bdk_gsernx_lanex_pcie_rstp1_bcfg
+{
+    uint64_t u;
+    /* Bit-field view of the 64-bit register; the same fields are declared
+       twice, once in big-endian and once in little-endian order. */
+    struct bdk_gsernx_lanex_pcie_rstp1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_35_63 : 29;
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1 PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1 PowerDown state. */
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1 PowerDown state, but is only used when P1 is entered for
+ lanes that were active in a link and that link has now returned to LTSSM.DETECT
+ state and there are other lanes rejoining the link after having been turned off. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1 PowerDown state, but is only used when P1 is entered
+ for lanes that were active in a link and that link has now returned to LTSSM.DETECT
+ state and there are other lanes rejoining the link after having been turned off.
+ Note: this value is never likely to be changed from the normal run state (0x8). */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1 PowerDown state, but is only used when P1 is entered
+ for lanes that were active in a link and that link has now returned to LTSSM.DETECT
+ state and there are other lanes rejoining the link after having been turned off.
+ Note: this value is never likely to be changed from the normal run state (0x8). */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1 PowerDown state, but is only used when P1 is entered for
+ lanes that were active in a link and that link has now returned to LTSSM.DETECT
+ state and there are other lanes rejoining the link after having been turned off. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1 PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1 PowerDown state. */
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1 PowerDown state. */
+        uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rstp1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp1_bcfg bdk_gsernx_lanex_pcie_rstp1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address of GSERN(a)_LANE(b)_PCIE_RSTP1_BCFG; CN9XXX only, a <= 7, b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002030ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    /* Out-of-range index or wrong model: report a fatal CSR access error. */
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp1s0_bcfg
+ *
+ * GSER Lane PCIe PowerDown P1 CPM Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P1 CPM (P1 substates entry).
+ */
+union bdk_gsernx_lanex_pcie_rstp1s0_bcfg
+{
+    uint64_t u;
+    /* Bit-field view of the 64-bit register; the same fields are declared
+       twice, once in big-endian and once in little-endian order. */
+    struct bdk_gsernx_lanex_pcie_rstp1s0_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_35_63 : 29;
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1 CPM PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1 CPM PowerDown state. */
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1 CPM PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1 CPM PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1 CPM PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1 CPM PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1 CPM PowerDown state. */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1 CPM PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1 CPM PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1 CPM PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1 CPM PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1 CPM PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1 CPM PowerDown state. */
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1 CPM PowerDown state. */
+        uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rstp1s0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp1s0_bcfg bdk_gsernx_lanex_pcie_rstp1s0_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address of GSERN(a)_LANE(b)_PCIE_RSTP1S0_BCFG; CN9XXX only, a <= 7, b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002040ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    /* Out-of-range index or wrong model: report a fatal CSR access error. */
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP1S0_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp1s0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP1S0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP1S0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp1s1_bcfg
+ *
+ * GSER Lane PCIe PowerDown P1.1 Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P1.1 (P1 substate).
+ */
+union bdk_gsernx_lanex_pcie_rstp1s1_bcfg
+{
+    uint64_t u;
+    /* Bit-field view of the 64-bit register; the same fields are declared
+       twice, once in big-endian and once in little-endian order. */
+    struct bdk_gsernx_lanex_pcie_rstp1s1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_35_63 : 29;
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1.1 PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1.1 PowerDown state. */
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1.1 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1.1 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1.1 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1.1 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1.1 PowerDown state. */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1.1 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1.1 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1.1 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1.1 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1.1 PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1.1 PowerDown state. */
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1.1 PowerDown state. */
+        uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rstp1s1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp1s1_bcfg bdk_gsernx_lanex_pcie_rstp1s1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address of GSERN(a)_LANE(b)_PCIE_RSTP1S1_BCFG; CN9XXX only, a <= 7, b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002050ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    /* Out-of-range index or wrong model: report a fatal CSR access error. */
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP1S1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp1s1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP1S1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP1S1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp1s2_bcfg
+ *
+ * GSER Lane PCIe PowerDown P1.2 Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P1.2 (P1 substate).
+ */
+union bdk_gsernx_lanex_pcie_rstp1s2_bcfg
+{
+    uint64_t u;
+    /* Bit-field view of the 64-bit register; the same fields are declared
+       twice, once in big-endian and once in little-endian order. */
+    struct bdk_gsernx_lanex_pcie_rstp1s2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_35_63 : 29;
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1.2 PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1.2 PowerDown state. */
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1.2 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1.2 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1.2 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1.2 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1.2 PowerDown state. */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P1.2 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P1.2 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P1.2 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P1.2 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P1.2 PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P1.2 PowerDown state. */
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P1.2 PowerDown state. */
+        uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rstp1s2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp1s2_bcfg bdk_gsernx_lanex_pcie_rstp1s2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address of GSERN(a)_LANE(b)_PCIE_RSTP1S2_BCFG; CN9XXX only, a <= 7, b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002060ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    /* Out-of-range index or wrong model: report a fatal CSR access error. */
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP1S2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp1s2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP1S2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP1S2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstp2_bcfg
+ *
+ * GSER Lane PCIe PowerDown P2 Reset States Control Register
+ * Controls the Reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor) corresponding to
+ * PCIe PowerDown state P2.
+ */
+union bdk_gsernx_lanex_pcie_rstp2_bcfg
+{
+    uint64_t u;
+    /* Bit-field view of the 64-bit register; the same fields are declared
+       twice, once in big-endian and once in little-endian order. */
+    struct bdk_gsernx_lanex_pcie_rstp2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_35_63 : 29;
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P2 PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P2 PowerDown state. */
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P2 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P2 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P2 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P2 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P2 PowerDown state. */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during P2 PowerDown state. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during P2 PowerDown state. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during P2 PowerDown state. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during P2 PowerDown state. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during P2 PowerDown state. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during P2 PowerDown state. */
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during P2 PowerDown state. */
+        uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rstp2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstp2_bcfg bdk_gsernx_lanex_pcie_rstp2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address of GSERN(a)_LANE(b)_PCIE_RSTP2_BCFG; CN9XXX only, a <= 7, b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002070ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    /* Out-of-range index or wrong model: report a fatal CSR access error. */
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTP2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) bdk_gsernx_lanex_pcie_rstp2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTP2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTP2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstrate_bcfg
+ *
+ * GSER Lane PCIe Lane Rate Change Reset States Control Register
+ * This register controls the reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor)
+ * required for PCIe lane rate change.
+ */
+union bdk_gsernx_lanex_pcie_rstrate_bcfg
+{
+    uint64_t u;
+    /* Bit-field view of the 64-bit register; the same fields are declared
+       twice, once in big-endian and once in little-endian order. */
+    struct bdk_gsernx_lanex_pcie_rstrate_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_35_63 : 29;
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during lane rate change. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during lane rate change. */
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during lane rate change. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during lane rate change. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during lane rate change. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during lane rate change. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during lane rate change. */
+#else /* Word 0 - Little Endian */
+        uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during lane rate change. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during lane rate change. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during lane rate change. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during lane rate change. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during lane rate change. */
+        uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx Electric Idle detection during lane rate change. */
+        uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable Tx Common Mode voltage during lane rate change. */
+        uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_rstrate_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstrate_bcfg bdk_gsernx_lanex_pcie_rstrate_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR address of GSERN(a)_LANE(b)_PCIE_RSTRATE_BCFG; CN9XXX only, a <= 7, b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002090ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    /* Out-of-range index or wrong model: report a fatal CSR access error. */
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTRATE_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) bdk_gsernx_lanex_pcie_rstrate_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTRATE_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTRATE_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rstshdn_bcfg
+ *
+ * GSER Lane PCIe Lane Shutdown Reset States Control Register
+ * This register controls the reset states (Lane PLL, Tx, Rx, Adapt and Eye Monitor)
+ * corresponding to PCIe Lane Shutdown state enabled by the assertion of TxCompliance &
+ * TxElecIdle.
+ */
+union bdk_gsernx_lanex_pcie_rstshdn_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rstshdn_bcfg_s
+ {
+ /* NOTE(review): both preprocessor arms describe the same 64-bit layout; only the
+ declaration order is mirrored to match host bit-field allocation by endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable TX common mode voltage during lane shutdown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx electric idle detection during lane shutdown state. */
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during lane shutdown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during lane shutdown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during lane shutdown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during lane shutdown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during lane shutdown state. */
+#else /* Word 0 - Little Endian */
+ uint64_t lnpll_rst : 4; /**< [ 3: 0](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ LANE PLL reset state during lane shutdown state. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t tx_rst : 4; /**< [ 11: 8](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ TX reset state during lane shutdown state. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t rx_rst : 5; /**< [ 20: 16](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ RX reset state during lane shutdown state. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t eye_rst : 5; /**< [ 28: 24](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Eye monitor reset state during lane shutdown state. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t adapt_rst : 1; /**< [ 32: 32](R/W) Reserved.
+ Internal:
+ FIXME - add more details
+ Rx Adapt state Pause (0) or Hard Reset (1) during lane shutdown state. */
+ uint64_t rxidledet_disable : 1; /**< [ 33: 33](R/W) Reserved.
+ Internal:
+ Set to disable Rx electric idle detection during lane shutdown state. */
+ uint64_t txcmnmode_disable : 1; /**< [ 34: 34](R/W) Reserved.
+ Internal:
+ Set to disable TX common mode voltage during lane shutdown state. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rstshdn_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rstshdn_bcfg bdk_gsernx_lanex_pcie_rstshdn_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(unsigned long a, unsigned long b)
+{
+    /* Physical address of GSERN(a)_LANE(b)_PCIE_RSTSHDN_BCFG; valid only on CN9XXX with a <= 7 and b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t gsern_off = 0x1000000ll * (a & 0x7); /* stride between GSERN blocks */
+        uint64_t lane_off  = 0x10000ll   * (b & 0x7); /* stride between lanes */
+        return 0x87e090002080ll + gsern_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RSTSHDN_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) bdk_gsernx_lanex_pcie_rstshdn_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) "GSERNX_LANEX_PCIE_RSTSHDN_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RSTSHDN_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq1_1_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during PCIe Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq1_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq1_1_bcfg_s
+ {
+ /* NOTE(review): both preprocessor arms describe the same 64-bit layout; only the
+ declaration order is mirrored to match host bit-field allocation by endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t pcie_g1_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t pcie_g1_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g1_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g1_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g1_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g1_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g1_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g1_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g1_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g1_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+ uint64_t pcie_g1_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g1_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g1_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g1_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g1_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g1_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g1_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g1_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq1_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq1_1_bcfg bdk_gsernx_lanex_pcie_rxeq1_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(unsigned long a, unsigned long b)
+{
+    /* Physical address of GSERN(a)_LANE(b)_PCIE_RXEQ1_1_BCFG; valid only on CN9XXX with a <= 7 and b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t gsern_off = 0x1000000ll * (a & 0x7); /* stride between GSERN blocks */
+        uint64_t lane_off  = 0x10000ll   * (b & 0x7); /* stride between lanes */
+        return 0x87e090002300ll + gsern_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ1_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq1_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ1_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ1_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq1_2_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during PCIe Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq1_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq1_2_bcfg_s
+ {
+ /* NOTE(review): both preprocessor arms describe the same 64-bit layout (four 16-bit
+ subrate counters); only the declaration order is mirrored by host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g1_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g1_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq1_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq1_2_bcfg bdk_gsernx_lanex_pcie_rxeq1_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(unsigned long a, unsigned long b)
+{
+    /* Physical address of GSERN(a)_LANE(b)_PCIE_RXEQ1_2_BCFG; valid only on CN9XXX with a <= 7 and b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t gsern_off = 0x1000000ll * (a & 0x7); /* stride between GSERN blocks */
+        uint64_t lane_off  = 0x10000ll   * (b & 0x7); /* stride between lanes */
+        return 0x87e090002310ll + gsern_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ1_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq1_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ1_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ1_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq1_3_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during PCIe Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq1_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq1_3_bcfg_s
+ {
+ /* NOTE(review): both preprocessor arms describe the same 64-bit layout (C1..C5
+ postcursor limit pairs); only the declaration order is mirrored by host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t pcie_g1_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t pcie_g1_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g1_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g1_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g1_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g1_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t pcie_g1_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g1_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g1_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g1_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g1_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+ uint64_t pcie_g1_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g1_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g1_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g1_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g1_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t pcie_g1_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g1_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g1_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g1_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq1_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq1_3_bcfg bdk_gsernx_lanex_pcie_rxeq1_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(unsigned long a, unsigned long b)
+{
+    /* Physical address of GSERN(a)_LANE(b)_PCIE_RXEQ1_3_BCFG; valid only on CN9XXX with a <= 7 and b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t gsern_off = 0x1000000ll * (a & 0x7); /* stride between GSERN blocks */
+        uint64_t lane_off  = 0x10000ll   * (b & 0x7); /* stride between lanes */
+        return 0x87e090002320ll + gsern_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ1_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq1_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ1_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ1_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq1_4_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Equalizer Control Register 4
+ * Parameters controlling the custom receiver equalization during PCIe Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq1_4_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq1_4_bcfg_s
+ {
+ /* NOTE(review): both preprocessor arms describe the same 64-bit layout (BLWC and
+ PREVGA_GN subrate counters); only the declaration order is mirrored by endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g1_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_blwc_subrate_init : 16;/**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g1_blwc_subrate_init : 16;/**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g1_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq1_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq1_4_bcfg bdk_gsernx_lanex_pcie_rxeq1_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(unsigned long a, unsigned long b)
+{
+    /* Physical address of GSERN(a)_LANE(b)_PCIE_RXEQ1_4_BCFG; valid only on CN9XXX with a <= 7 and b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t gsern_off = 0x1000000ll * (a & 0x7); /* stride between GSERN blocks */
+        uint64_t lane_off  = 0x10000ll   * (b & 0x7); /* stride between lanes */
+        return 0x87e090002330ll + gsern_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ1_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq1_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ1_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ1_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq2_1_bcfg
+ *
+ * GSER Lane PCIe Gen2 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during PCIe Gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq2_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq2_1_bcfg_s
+ {
+ /* NOTE(review): Gen2 counterpart of the rxeq1_1 register; both preprocessor arms
+ describe the same 64-bit layout, mirrored for host bit-field endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t pcie_g2_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t pcie_g2_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g2_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g2_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g2_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g2_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g2_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g2_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g2_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g2_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+ uint64_t pcie_g2_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g2_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g2_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g2_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g2_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g2_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g2_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g2_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq2_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq2_1_bcfg bdk_gsernx_lanex_pcie_rxeq2_1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(unsigned long a, unsigned long b)
+{
+    /* Physical address of GSERN(a)_LANE(b)_PCIE_RXEQ2_1_BCFG; valid only on CN9XXX with a <= 7 and b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t gsern_off = 0x1000000ll * (a & 0x7); /* stride between GSERN blocks */
+        uint64_t lane_off  = 0x10000ll   * (b & 0x7); /* stride between lanes */
+        return 0x87e090002340ll + gsern_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ2_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq2_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ2_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ2_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq2_2_bcfg
+ *
+ * GSER Lane PCIe Gen2 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during PCIe Gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq2_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq2_2_bcfg_s
+ {
+ /* NOTE(review): Gen2 counterpart of the rxeq1_2 register; both preprocessor arms
+ describe the same 64-bit layout, mirrored for host bit-field endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g2_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g2_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq2_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq2_2_bcfg bdk_gsernx_lanex_pcie_rxeq2_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(unsigned long a, unsigned long b)
+{
+    /* Physical address of GSERN(a)_LANE(b)_PCIE_RXEQ2_2_BCFG; valid only on CN9XXX with a <= 7 and b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t gsern_off = 0x1000000ll * (a & 0x7); /* stride between GSERN blocks */
+        uint64_t lane_off  = 0x10000ll   * (b & 0x7); /* stride between lanes */
+        return 0x87e090002350ll + gsern_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ2_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq2_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ2_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ2_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq2_3_bcfg
+ *
+ * GSER Lane PCIe Gen2 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during PCIe Gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+union bdk_gsernx_lanex_pcie_rxeq2_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq2_3_bcfg_s
+ {
+ /* NOTE(review): Gen2 counterpart of the rxeq1_3 register; both preprocessor arms
+ describe the same 64-bit layout, mirrored for host bit-field endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t pcie_g2_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t pcie_g2_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g2_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g2_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g2_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g2_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t pcie_g2_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g2_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g2_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g2_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g2_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+ uint64_t pcie_g2_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g2_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g2_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g2_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g2_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t pcie_g2_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g2_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g2_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g2_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq2_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq2_3_bcfg bdk_gsernx_lanex_pcie_rxeq2_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(unsigned long a, unsigned long b)
+{
+    /* Physical address of GSERN(a)_LANE(b)_PCIE_RXEQ2_3_BCFG; valid only on CN9XXX with a <= 7 and b <= 4. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t gsern_off = 0x1000000ll * (a & 0x7); /* stride between GSERN blocks */
+        uint64_t lane_off  = 0x10000ll   * (b & 0x7); /* stride between lanes */
+        return 0x87e090002360ll + gsern_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ2_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq2_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ2_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ2_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq2_4_bcfg
+ *
+ * GSER Lane PCIe Gen2 RX Equalizer Control Register 4
+ * Parameters controlling the custom receiver equalization during PCIe Gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq2_4_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq2_4_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g2_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_blwc_subrate_init : 16;/**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g2_blwc_subrate_init : 16;/**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g2_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq2_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq2_4_bcfg bdk_gsernx_lanex_pcie_rxeq2_4_bcfg_t;
+
+/* Address helper: fatal unless CN9XXX with a <= 7 and b <= 4. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002370ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ2_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq2_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ2_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ2_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq3_1_bcfg
+ *
+ * GSER Lane PCIe Gen3 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during PCIe Gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq3_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq3_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t pcie_g3_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t pcie_g3_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g3_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g3_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g3_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g3_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g3_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g3_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g3_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g3_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+ uint64_t pcie_g3_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g3_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g3_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g3_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g3_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g3_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g3_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g3_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq3_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq3_1_bcfg bdk_gsernx_lanex_pcie_rxeq3_1_bcfg_t;
+
+/* Address helper: fatal unless CN9XXX with a <= 7 and b <= 4. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002380ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ3_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq3_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ3_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ3_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq3_2_bcfg
+ *
+ * GSER Lane PCIe Gen3 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during PCIe Gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq3_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq3_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g3_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g3_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq3_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq3_2_bcfg bdk_gsernx_lanex_pcie_rxeq3_2_bcfg_t;
+
+/* Address helper: fatal unless CN9XXX with a <= 7 and b <= 4. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002390ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ3_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq3_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ3_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ3_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq3_3_bcfg
+ *
+ * GSER Lane PCIe Gen3 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during PCIe Gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq3_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq3_3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t pcie_g3_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t pcie_g3_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g3_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g3_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g3_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g3_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t pcie_g3_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g3_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g3_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g3_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g3_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+ uint64_t pcie_g3_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g3_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g3_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g3_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g3_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t pcie_g3_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g3_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g3_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g3_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq3_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq3_3_bcfg bdk_gsernx_lanex_pcie_rxeq3_3_bcfg_t;
+
+/* Address helper: fatal unless CN9XXX with a <= 7 and b <= 4. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900023a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ3_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq3_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ3_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ3_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq3_4_bcfg
+ *
+ * GSER Lane PCIe Gen3 RX Equalizer Control Register 4
+ * Parameters controlling the custom receiver equalization during PCIe Gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq3_4_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq3_4_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g3_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_blwc_subrate_init : 16;/**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g3_blwc_subrate_init : 16;/**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g3_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq3_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq3_4_bcfg bdk_gsernx_lanex_pcie_rxeq3_4_bcfg_t;
+
+/* Address helper: fatal unless CN9XXX with a <= 7 and b <= 4. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900023b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ3_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq3_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ3_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ3_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq4_1_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during PCIe Gen4 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq4_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq4_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_43_63 : 21;
+ uint64_t pcie_g4_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t pcie_g4_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g4_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g4_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g4_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g4_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g4_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g4_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g4_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g4_c1_q_adjust : 5; /**< [ 4: 0](R/W) Adjust value magnitude for the error slice in the Q path. */
+ uint64_t pcie_g4_voter_sp_mask : 1; /**< [ 5: 5](R/W) Set to mask out "010" and "101" patterns in RX cdr voter.
+ GSERN()_LANE()_CDRFSM_BCFG[VOTER_SP_MASK] will be updated
+ by the hardware even when this bit drives the control. */
+ uint64_t pcie_g4_settle_wait : 4; /**< [ 9: 6](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t pcie_g4_ctle_lte_zero_ovrd : 4;/**< [ 13: 10](R/W) CTLE LTE zero frequency override value. */
+ uint64_t pcie_g4_ctle_lte_zero_ovrd_en : 1;/**< [ 14: 14](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t pcie_g4_c6_c15_limit_lo : 6;/**< [ 20: 15](R/W) C6 to C15 postcursor limit low. */
+ uint64_t pcie_g4_c6_c15_limit_hi : 6;/**< [ 26: 21](R/W) C6 to C15 postcursor limit high. */
+ uint64_t pcie_g4_erc : 4; /**< [ 30: 27](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. See GSERN()_LANE()_RX_ST_BCFG.ERC
+ for detailed information. */
+ uint64_t pcie_g4_blwc_deadband : 12; /**< [ 42: 31](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq4_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq4_1_bcfg bdk_gsernx_lanex_pcie_rxeq4_1_bcfg_t;
+
+/* Address helper: fatal unless CN9XXX with a <= 7 and b <= 4. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900023c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ4_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq4_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ4_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ4_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq4_2_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during PCIe Gen4 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq4_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq4_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g4_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g4_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq4_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq4_2_bcfg bdk_gsernx_lanex_pcie_rxeq4_2_bcfg_t;
+
+/* Address helper: fatal unless CN9XXX with a <= 7 and b <= 4. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900023d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ4_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq4_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ4_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ4_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq4_3_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during PCIe Gen4 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq4_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq4_3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t pcie_g4_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t pcie_g4_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g4_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g4_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g4_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g4_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t pcie_g4_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g4_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g4_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g4_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g4_c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+ uint64_t pcie_g4_c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t pcie_g4_c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t pcie_g4_c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t pcie_g4_c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t pcie_g4_c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t pcie_g4_c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t pcie_g4_c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t pcie_g4_c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t pcie_g4_c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq4_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq4_3_bcfg bdk_gsernx_lanex_pcie_rxeq4_3_bcfg_t;
+
+/* Address helper: fatal unless CN9XXX with a <= 7 and b <= 4. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900023e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ4_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq4_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ4_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ4_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxeq4_4_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Equalizer Control Register 4
+ * Parameters controlling the custom receiver equalization during PCIe Gen4 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'PCIe'.
+ */
+/* NOTE(review): vendor BDK register definition -- presumably auto-generated;
+   the big- and little-endian bit-field lists must remain exact mirrors. */
+union bdk_gsernx_lanex_pcie_rxeq4_4_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxeq4_4_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pcie_g4_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_blwc_subrate_init : 16;/**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_g4_blwc_subrate_init : 16;/**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_blwc_subrate_final : 16;/**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t pcie_g4_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxeq4_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxeq4_4_bcfg bdk_gsernx_lanex_pcie_rxeq4_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900023f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXEQ4_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxeq4_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXEQ4_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXEQ4_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidl1a_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 1. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_pcie_rxidl1a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxidl1a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxidl1a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidl1a_bcfg bdk_gsernx_lanex_pcie_rxidl1a_bcfg_t;
+
+/* Address helper: RSL CSR address on CN9XXX for a<=7, b<=4; out-of-range
+ * indices go to __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDL1A_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidl1a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDL1A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDL1A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidl2a_bcfg
+ *
+ * GSER Lane PCIe Gen2 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 2. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_pcie_rxidl2a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxidl2a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxidl2a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidl2a_bcfg bdk_gsernx_lanex_pcie_rxidl2a_bcfg_t;
+
+/* Address helper: RSL CSR address on CN9XXX for a<=7, b<=4; out-of-range
+ * indices go to __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDL2A_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidl2a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDL2A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDL2A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidl3a_bcfg
+ *
+ * GSER Lane PCIe Gen3 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 3. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_pcie_rxidl3a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxidl3a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxidl3a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidl3a_bcfg bdk_gsernx_lanex_pcie_rxidl3a_bcfg_t;
+
+/* Address helper: RSL CSR address on CN9XXX for a<=7, b<=4; out-of-range
+ * indices go to __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDL3A_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidl3a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDL3A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDL3A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidl4a_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 4. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_pcie_rxidl4a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxidl4a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxidl4a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidl4a_bcfg bdk_gsernx_lanex_pcie_rxidl4a_bcfg_t;
+
+/* Address helper: RSL CSR address on CN9XXX for a<=7, b<=4; out-of-range
+ * indices go to __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002200ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDL4A_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidl4a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDL4A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDL4A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidle1_bcfg
+ *
+ * GSER Lane PCIe Gen1 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 1. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_pcie_rxidle1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxidle1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxidle1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidle1_bcfg bdk_gsernx_lanex_pcie_rxidle1_bcfg_t;
+
+/* Address helper: RSL CSR address on CN9XXX for a<=7, b<=4; out-of-range
+ * indices go to __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002190ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDLE1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidle1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDLE1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDLE1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidle2_bcfg
+ *
+ * GSER Lane PCIe Gen2 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 2. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_pcie_rxidle2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxidle2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxidle2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidle2_bcfg bdk_gsernx_lanex_pcie_rxidle2_bcfg_t;
+
+/* Address helper: RSL CSR address on CN9XXX for a<=7, b<=4; out-of-range
+ * indices go to __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDLE2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidle2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDLE2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDLE2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidle3_bcfg
+ *
+ * GSER Lane PCIe Gen3 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 3. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_pcie_rxidle3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxidle3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxidle3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidle3_bcfg bdk_gsernx_lanex_pcie_rxidle3_bcfg_t;
+
+/* Address helper: RSL CSR address on CN9XXX for a<=7, b<=4; out-of-range
+ * indices go to __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDLE3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidle3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDLE3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDLE3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_rxidle4_bcfg
+ *
+ * GSER Lane PCIe Gen4 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for PCIe Gen 4. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_pcie_rxidle4_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_rxidle4_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_rxidle4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_rxidle4_bcfg bdk_gsernx_lanex_pcie_rxidle4_bcfg_t;
+
+/* Address helper: RSL CSR address on CN9XXX for a<=7, b<=4; out-of-range
+ * indices go to __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900021f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_RXIDLE4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) bdk_gsernx_lanex_pcie_rxidle4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) "GSERNX_LANEX_PCIE_RXIDLE4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_RXIDLE4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txbias_bcfg
+ *
+ * GSER Lane PCIe TX Margin BIAS Control Register
+ * TX BIAS values corresponding to Full Scale, Half Scale and Margin levels for both.
+ */
+union bdk_gsernx_lanex_pcie_txbias_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txbias_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t tx_margin_h4 : 6; /**< [ 59: 54](R/W) TX BIAS setting for half scale, Margin 4 output drive. */
+ uint64_t tx_margin_h3 : 6; /**< [ 53: 48](R/W) TX BIAS setting for half scale, Margin 3 output drive. */
+ uint64_t tx_margin_h2 : 6; /**< [ 47: 42](R/W) TX BIAS setting for half scale, Margin 2 output drive. */
+ uint64_t tx_margin_h1 : 6; /**< [ 41: 36](R/W) TX BIAS setting for half scale, Margin 1 output drive. */
+ uint64_t tx_bias_half : 6; /**< [ 35: 30](R/W) TX BIAS setting for half scale output drive. */
+ uint64_t tx_margin_f4 : 6; /**< [ 29: 24](R/W) TX BIAS setting for full scale, Margin 4 output drive. */
+ uint64_t tx_margin_f3 : 6; /**< [ 23: 18](R/W) TX BIAS setting for full scale, Margin 3 output drive. */
+ uint64_t tx_margin_f2 : 6; /**< [ 17: 12](R/W) TX BIAS setting for full scale, Margin 2 output drive. */
+ uint64_t tx_margin_f1 : 6; /**< [ 11: 6](R/W) TX BIAS setting for full scale, Margin 1 output drive. */
+ uint64_t tx_bias_full : 6; /**< [ 5: 0](R/W) TX BIAS setting for full scale output drive. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_bias_full : 6; /**< [ 5: 0](R/W) TX BIAS setting for full scale output drive. */
+ uint64_t tx_margin_f1 : 6; /**< [ 11: 6](R/W) TX BIAS setting for full scale, Margin 1 output drive. */
+ uint64_t tx_margin_f2 : 6; /**< [ 17: 12](R/W) TX BIAS setting for full scale, Margin 2 output drive. */
+ uint64_t tx_margin_f3 : 6; /**< [ 23: 18](R/W) TX BIAS setting for full scale, Margin 3 output drive. */
+ uint64_t tx_margin_f4 : 6; /**< [ 29: 24](R/W) TX BIAS setting for full scale, Margin 4 output drive. */
+ uint64_t tx_bias_half : 6; /**< [ 35: 30](R/W) TX BIAS setting for half scale output drive. */
+ uint64_t tx_margin_h1 : 6; /**< [ 41: 36](R/W) TX BIAS setting for half scale, Margin 1 output drive. */
+ uint64_t tx_margin_h2 : 6; /**< [ 47: 42](R/W) TX BIAS setting for half scale, Margin 2 output drive. */
+ uint64_t tx_margin_h3 : 6; /**< [ 53: 48](R/W) TX BIAS setting for half scale, Margin 3 output drive. */
+ uint64_t tx_margin_h4 : 6; /**< [ 59: 54](R/W) TX BIAS setting for half scale, Margin 4 output drive. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txbias_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txbias_bcfg bdk_gsernx_lanex_pcie_txbias_bcfg_t;
+
+/* Compute the CSR address of GSERN(a)_LANE(b)_PCIE_TXBIAS_BCFG.
+   Valid only on CN9XXX parts with a<=7 and b<=4; otherwise fatal. */
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002930ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXBIAS_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) bdk_gsernx_lanex_pcie_txbias_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) "GSERNX_LANEX_PCIE_TXBIAS_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXBIAS_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txdrv_bcfg
+ *
+ * GSER Lane PCIe TX Drive Reserved Presets, FS & LF Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for the Reserved Presets
+ * for Gen3 and Gen4 (the default coefficient values correspond to preset P4).
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the full
+ * 6 bits defined in the PCIe specification are not needed.
+ * This register also contains the control registers for the Local FS and LF.
+ */
+union bdk_gsernx_lanex_pcie_txdrv_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txdrv_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t g4_rsv_cpost : 5; /**< [ 60: 56](R/W) Gen4 Cpost value for all reserved presets. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t g4_rsv_cmain : 6; /**< [ 53: 48](R/W) Gen4 Cmain value for all reserved presets. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t g4_rsv_cpre : 4; /**< [ 43: 40](R/W) Gen4 Cpre value for all reserved presets. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t local_lf : 6; /**< [ 37: 32](R/W) Local LF value advertised to the MAC. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t local_fs : 6; /**< [ 29: 24](R/W) Local FS value advertised to the MAC. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t g3_rsv_cpost : 5; /**< [ 20: 16](R/W) Gen3 Cpost value for all reserved presets. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g3_rsv_cmain : 6; /**< [ 13: 8](R/W) Gen3 Cmain value for all reserved presets. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g3_rsv_cpre : 4; /**< [ 3: 0](R/W) Gen3 Cpre value for all reserved presets. */
+#else /* Word 0 - Little Endian */
+ uint64_t g3_rsv_cpre : 4; /**< [ 3: 0](R/W) Gen3 Cpre value for all reserved presets. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g3_rsv_cmain : 6; /**< [ 13: 8](R/W) Gen3 Cmain value for all reserved presets. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g3_rsv_cpost : 5; /**< [ 20: 16](R/W) Gen3 Cpost value for all reserved presets. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t local_fs : 6; /**< [ 29: 24](R/W) Local FS value advertised to the MAC. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t local_lf : 6; /**< [ 37: 32](R/W) Local LF value advertised to the MAC. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t g4_rsv_cpre : 4; /**< [ 43: 40](R/W) Gen4 Cpre value for all reserved presets. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t g4_rsv_cmain : 6; /**< [ 53: 48](R/W) Gen4 Cmain value for all reserved presets. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t g4_rsv_cpost : 5; /**< [ 60: 56](R/W) Gen4 Cpost value for all reserved presets. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txdrv_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txdrv_bcfg bdk_gsernx_lanex_pcie_txdrv_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002830ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXDRV_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) bdk_gsernx_lanex_pcie_txdrv_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) "GSERNX_LANEX_PCIE_TXDRV_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXDRV_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst0_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P0.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst0_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst0_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g3_p0_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen3 preset P0. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g3_p0_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen3 preset P0. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g3_p0_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen3 preset P0. */
+#else /* Word 0 - Little Endian */
+ uint64_t g3_p0_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen3 preset P0. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g3_p0_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen3 preset P0. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g3_p0_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen3 preset P0. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst0_bcfg bdk_gsernx_lanex_pcie_txpst0_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900024f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST0_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst10_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P10.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst10_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst10_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g3_p10_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen3 preset P10. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g3_p10_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen3 preset P10. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g3_p10_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen3 preset P10. */
+#else /* Word 0 - Little Endian */
+ uint64_t g3_p10_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen3 preset P10. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g3_p10_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen3 preset P10. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g3_p10_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen3 preset P10. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst10_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst10_bcfg bdk_gsernx_lanex_pcie_txpst10_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002590ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST10_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst10_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST10_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST10_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst11_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P0.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst11_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst11_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g4_p0_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P0. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p0_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P0. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p0_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P0. */
+#else /* Word 0 - Little Endian */
+ uint64_t g4_p0_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P0. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p0_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P0. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p0_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P0. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst11_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst11_bcfg bdk_gsernx_lanex_pcie_txpst11_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002690ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST11_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst11_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST11_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST11_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst12_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P1.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst12_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst12_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g4_p1_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P1. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p1_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P1. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p1_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P1. */
+#else /* Word 0 - Little Endian */
+ uint64_t g4_p1_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P1. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p1_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P1. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p1_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P1. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst12_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst12_bcfg bdk_gsernx_lanex_pcie_txpst12_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900026a0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST12_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst12_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST12_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST12_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst13_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P2.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst13_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst13_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g4_p2_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P2. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p2_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P2. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p2_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P2. */
+#else /* Word 0 - Little Endian */
+ uint64_t g4_p2_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P2. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p2_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P2. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p2_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P2. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst13_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst13_bcfg bdk_gsernx_lanex_pcie_txpst13_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900026b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST13_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst13_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST13_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST13_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst14_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P3.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst14_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst14_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g4_p3_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P3. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p3_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P3. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p3_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P3. */
+#else /* Word 0 - Little Endian */
+ uint64_t g4_p3_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P3. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p3_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P3. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p3_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P3. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst14_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst14_bcfg bdk_gsernx_lanex_pcie_txpst14_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900026c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST14_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst14_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST14_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST14_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst15_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P4.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst15_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst15_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g4_p4_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P4. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p4_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P4. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p4_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P4. */
+#else /* Word 0 - Little Endian */
+ uint64_t g4_p4_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P4. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p4_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P4. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p4_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P4. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst15_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst15_bcfg bdk_gsernx_lanex_pcie_txpst15_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900026d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST15_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst15_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST15_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST15_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst16_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P5.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst16_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst16_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g4_p5_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P5. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p5_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P5. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p5_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P5. */
+#else /* Word 0 - Little Endian */
+ uint64_t g4_p5_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P5. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p5_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P5. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p5_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P5. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst16_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst16_bcfg bdk_gsernx_lanex_pcie_txpst16_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900026e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST16_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst16_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST16_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST16_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst17_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P6.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst17_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst17_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g4_p6_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P6. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p6_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P6. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p6_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P6. */
+#else /* Word 0 - Little Endian */
+ uint64_t g4_p6_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P6. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p6_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P6. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p6_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P6. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst17_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst17_bcfg bdk_gsernx_lanex_pcie_txpst17_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900026f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST17_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst17_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST17_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST17_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst18_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P7.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst18_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcie_txpst18_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t g4_p7_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P7. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p7_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P7. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p7_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P7. */
+#else /* Word 0 - Little Endian */
+ uint64_t g4_p7_cpre : 4; /**< [ 3: 0](R/W) Cpre value for Gen4 preset P7. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t g4_p7_cmain : 6; /**< [ 13: 8](R/W) Cmain value for Gen4 preset P7. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t g4_p7_cpost : 5; /**< [ 20: 16](R/W) Cpost value for Gen4 preset P7. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcie_txpst18_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst18_bcfg bdk_gsernx_lanex_pcie_txpst18_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002700ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST18_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst18_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST18_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST18_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst19_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P8.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst19_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst19_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p8_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P8. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p8_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P8. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p8_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P8. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p8_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P8. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p8_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P8. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p8_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P8. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst19_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst19_bcfg bdk_gsernx_lanex_pcie_txpst19_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002710ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST19_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst19_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST19_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST19_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst1_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P1.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p1_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P1. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p1_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P1. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p1_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P1. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p1_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P1. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p1_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P1. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p1_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P1. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst1_bcfg bdk_gsernx_lanex_pcie_txpst1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002500ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst20_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P9.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst20_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst20_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p9_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P9. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p9_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P9. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p9_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P9. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p9_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P9. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p9_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P9. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p9_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P9. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst20_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst20_bcfg bdk_gsernx_lanex_pcie_txpst20_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002720ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST20_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst20_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST20_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST20_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst21_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen4 preset P10.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst21_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst21_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g4_p10_cpost          : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P10. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p10_cmain          : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P10. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p10_cpre           : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P10. */
+#else /* Word 0 - Little Endian */
+        uint64_t g4_p10_cpre           : 4;  /**< [  3:  0](R/W) Cpre value for Gen4 preset P10. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g4_p10_cmain          : 6;  /**< [ 13:  8](R/W) Cmain value for Gen4 preset P10. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g4_p10_cpost          : 5;  /**< [ 20: 16](R/W) Cpost value for Gen4 preset P10. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst21_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst21_bcfg bdk_gsernx_lanex_pcie_txpst21_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002730ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST21_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst21_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST21_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST21_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst2_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P2.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p2_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P2. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p2_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P2. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p2_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P2. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p2_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P2. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p2_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P2. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p2_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P2. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst2_bcfg bdk_gsernx_lanex_pcie_txpst2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002510ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst3_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P3.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p3_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P3. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p3_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P3. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p3_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P3. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p3_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P3. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p3_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P3. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p3_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P3. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst3_bcfg bdk_gsernx_lanex_pcie_txpst3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002520ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst4_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P4.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst4_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst4_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p4_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P4. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p4_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P4. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p4_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P4. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p4_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P4. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p4_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P4. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p4_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P4. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst4_bcfg bdk_gsernx_lanex_pcie_txpst4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002530ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst5_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P5.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst5_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst5_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p5_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P5. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p5_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P5. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p5_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P5. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p5_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P5. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p5_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P5. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p5_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P5. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst5_bcfg bdk_gsernx_lanex_pcie_txpst5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002540ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST5_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst6_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P6.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst6_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst6_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p6_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P6. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p6_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P6. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p6_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P6. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p6_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P6. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p6_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P6. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p6_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P6. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst6_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst6_bcfg bdk_gsernx_lanex_pcie_txpst6_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002550ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST6_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst6_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST6_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST6_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst7_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P7.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst7_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst7_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p7_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P7. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p7_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P7. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p7_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P7. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p7_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P7. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p7_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P7. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p7_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P7. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst7_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst7_bcfg bdk_gsernx_lanex_pcie_txpst7_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002560ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST7_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst7_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST7_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST7_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst8_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P8.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst8_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst8_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p8_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P8. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p8_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P8. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p8_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P8. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p8_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P8. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p8_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P8. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p8_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P8. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst8_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst8_bcfg bdk_gsernx_lanex_pcie_txpst8_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002570ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST8_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst8_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST8_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST8_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcie_txpst9_bcfg
+ *
+ * GSER Lane PCIe TX Drive Preset Coefficients Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values for Gen3 preset P9.
+ * Cpre and Cpost are only 4 and 5 bits in length, respectively, as the
+ * full 6 bits defined in the PCIe specification are not needed.
+ */
+union bdk_gsernx_lanex_pcie_txpst9_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_pcie_txpst9_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_21_63        : 43;
+        uint64_t g3_p9_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P9. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p9_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P9. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p9_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P9. */
+#else /* Word 0 - Little Endian */
+        uint64_t g3_p9_cpre            : 4;  /**< [  3:  0](R/W) Cpre value for Gen3 preset P9. */
+        uint64_t reserved_4_7          : 4;
+        uint64_t g3_p9_cmain           : 6;  /**< [ 13:  8](R/W) Cmain value for Gen3 preset P9. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t g3_p9_cpost           : 5;  /**< [ 20: 16](R/W) Cpost value for Gen3 preset P9. */
+        uint64_t reserved_21_63        : 43;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_pcie_txpst9_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcie_txpst9_bcfg bdk_gsernx_lanex_pcie_txpst9_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002580ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_PCIE_TXPST9_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) bdk_gsernx_lanex_pcie_txpst9_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) "GSERNX_LANEX_PCIE_TXPST9_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCIE_TXPST9_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pcs_802p3_bcfg
+ *
+ * GSER Lane 802.3 PCS Base Configuration Register 0
+ * This register controls settings for Ethernet IEEE 802.3 PCS layer.
+ */
+union bdk_gsernx_lanex_pcs_802p3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pcs_802p3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t rx_wpk_order : 1; /**< [ 3: 3](R/W) Receiver word packing order. Used when the Ethernet MAC is configured for SGMII
+ 1.25 GBaud. When GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_20B40B] is set two
+ consecutive 20-bit RX data words from the PCS Lite Layer are packed into a
+ 40-bit word for the Ethernet SGMII MAC.
+
+ 0 = The first 20-bit word from the PCS Lite Layer is transferred to the lower
+ 20-bit word position, bits[19:0] of the 40-bit word and the next consecutive
+ 20-bit word from the PCS Lite layer is transferred to the upper 20-bit word
+ position, bits[39:20] of the 40-bit word. The assembled 40-bit word is then
+ forwarded the SGMII Ethernet MAC.
+
+ 1 = The first 20-bit word from the PCS Lite Layer is transferred to the upper
+ 20-bit word position, bits[39:20] of the 40-bit word and the next consecutive
+ 20-bit word from the PCS Lite layer is transferred to the lower 20-bit word
+ position, bits[19:0] of the 40-bit word. The assembled 40-bit word is then
+ forwarded the SGMII Ethernet MAC.
+
+ For diagnostic use only. */
+ uint64_t tx_wup_order : 1; /**< [ 2: 2](R/W) Transmitter word unpacking order. Used when the Ethernet MAC is configured for
+ SGMII 1.25 GBaud. When GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_40B20B] is set the
+ 20-bit consecutive RX data word from the PCS Lite Layer are packed into 40-bit
+ words for the Ethernet SGMII MAC.
+
+ 0 = The lower 20-bit word, bits[19:0] of the 40-bit
+ word are transferred to the PCS Lite layer followed by the upper 20-bit word,
+ bits[39:20] of the 40-bit word..
+
+ 1 = The upper 20-bit word, bits[39:20], are transferred to the PCS Lite layer
+ followed by the lower 20-bit word, bits[19:0], of the 40-bit word.
+
+ For diagnostic use only. */
+ uint64_t rx_wpk_20b40b : 1; /**< [ 1: 1](R/W) RX Word Packing 20 bits to 40 bits. Used when the Ethernet MAC is configured for
+ SGMII 1.25 GBaud.
+ When set, consecutive 20-bit RX data
+ words from the PCS Lite Layer are packed into 40-bit words for the Ethernet SGMII MAC.
+ Used in conjunction with GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER]. Refer to
+ the description for GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER].
+ For diagnostic use only. */
+ uint64_t tx_wup_40b20b : 1; /**< [ 0: 0](R/W) TX Word UnPacking 40 bits to 20 bits. Used when the Ethernet MAC is configured for
+ SGMII 1.25 GBaud.
+ When set, the 40-bit TX data words from
+ the Ethernet SGMII MAC are transferred to the PCS Lite Layer using two consecutive
+ 20-bit word transfers.
+ Used in conjunction with GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_ORDER]. Refer to
+ the description for GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER].
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_wup_40b20b : 1; /**< [ 0: 0](R/W) TX Word UnPacking 40 bits to 20 bits. Used when the Ethernet MAC is configured for
+ SGMII 1.25 GBaud.
+ When set, the 40-bit TX data words from
+ the Ethernet SGMII MAC are transferred to the PCS Lite Layer using two consecutive
+ 20-bit word transfers.
+ Used in conjunction with GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_ORDER]. Refer to
+ the description for GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER].
+ For diagnostic use only. */
+ uint64_t rx_wpk_20b40b : 1; /**< [ 1: 1](R/W) RX Word Packing 20 bits to 40 bits. Used when the Ethernet MAC is configured for
+ SGMII 1.25 GBaud.
+ When set, consecutive 20-bit RX data
+ words from the PCS Lite Layer are packed into 40-bit words for the Ethernet SGMII MAC.
+ Used in conjunction with GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER]. Refer to
+ the description for GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_ORDER].
+ For diagnostic use only. */
+ uint64_t tx_wup_order : 1; /**< [ 2: 2](R/W) Transmitter word unpacking order. Used when the Ethernet MAC is configured for
+ SGMII 1.25 GBaud. When GSERN()_LANE()_PCS_802P3_BCFG[TX_WUP_40B20B] is set the
+ 20-bit consecutive RX data word from the PCS Lite Layer are packed into 40-bit
+ words for the Ethernet SGMII MAC.
+
+ 0 = The lower 20-bit word, bits[19:0] of the 40-bit
+ word are transferred to the PCS Lite layer followed by the upper 20-bit word,
+ bits[39:20] of the 40-bit word..
+
+ 1 = The upper 20-bit word, bits[39:20], are transferred to the PCS Lite layer
+ followed by the lower 20-bit word, bits[19:0], of the 40-bit word.
+
+ For diagnostic use only. */
+ uint64_t rx_wpk_order : 1; /**< [ 3: 3](R/W) Receiver word packing order. Used when the Ethernet MAC is configured for SGMII
+ 1.25 GBaud. When GSERN()_LANE()_PCS_802P3_BCFG[RX_WPK_20B40B] is set two
+ consecutive 20-bit RX data words from the PCS Lite Layer are packed into a
+ 40-bit word for the Ethernet SGMII MAC.
+
+ 0 = The first 20-bit word from the PCS Lite Layer is transferred to the lower
+ 20-bit word position, bits[19:0] of the 40-bit word and the next consecutive
+ 20-bit word from the PCS Lite layer is transferred to the upper 20-bit word
+ position, bits[39:20] of the 40-bit word. The assembled 40-bit word is then
+ forwarded the SGMII Ethernet MAC.
+
+ 1 = The first 20-bit word from the PCS Lite Layer is transferred to the upper
+ 20-bit word position, bits[39:20] of the 40-bit word and the next consecutive
+ 20-bit word from the PCS Lite layer is transferred to the lower 20-bit word
+ position, bits[19:0] of the 40-bit word. The assembled 40-bit word is then
+ forwarded the SGMII Ethernet MAC.
+
+ For diagnostic use only. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pcs_802p3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pcs_802p3_bcfg bdk_gsernx_lanex_pcs_802p3_bcfg_t;
+
+/*
+ * Compute the physical CSR address of GSERN(a)_LANE(b)_PCS_802P3_BCFG.
+ * Only valid on CN9XXX for a <= 7 (GSERN block index) and b <= 4 (lane
+ * index); any other combination reports a fatal CSR error via
+ * __bdk_csr_fatal() and does not return a usable address.
+ */
+static inline uint64_t BDK_GSERNX_LANEX_PCS_802P3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PCS_802P3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003350ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PCS_802P3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Helper macros consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) bdk_gsernx_lanex_pcs_802p3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) "GSERNX_LANEX_PCS_802P3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PCS_802P3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pll_1_bcfg
+ *
+ * GSER Lane PLL Base Configuration Register 1
+ */
+union bdk_gsernx_lanex_pll_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pll_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t cal_cp_mult : 2; /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+ uint64_t cp : 4; /**< [ 59: 56](R/W) PLL charge pump configuration. */
+ uint64_t cp_overide : 1; /**< [ 55: 55](R/W) PLL charge pump override. */
+ uint64_t band_ppm : 2; /**< [ 54: 53](R/W) PLL band ppm setting. */
+ uint64_t band : 5; /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+ uint64_t band_limits : 3; /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+ uint64_t band_overide : 1; /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+ uint64_t bg_div16 : 1; /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+ uint64_t bg_clk_en : 1; /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+ uint64_t dither_en : 1; /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+ uint64_t cal_sel : 1; /**< [ 40: 40](R/W) PLL calibration method select. */
+ uint64_t vco_sel : 1; /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+ uint64_t sdm_en : 1; /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+ uint64_t reserved_29_37 : 9;
+ uint64_t post_div : 2; /**< [ 28: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+ PLL frequency given a reference clock frequency. The output frequency will
+ be the VCO frequency divided by [POST_DIV].
+ 0x0 = Divide PLL frequency by 1.
+ 0x1 = Divide PLL frequency by 2.
+ 0x2 = Divide PLL frequency by 4.
+ 0x3 = Divide PLL frequency by 8. */
+ uint64_t div_n : 9; /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+ uint64_t div_f : 18; /**< [ 17: 0](R/W) PLL feedback divider fractional portion. */
+#else /* Word 0 - Little Endian */
+ uint64_t div_f : 18; /**< [ 17: 0](R/W) PLL feedback divider fractional portion. */
+ uint64_t div_n : 9; /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+ uint64_t post_div : 2; /**< [ 28: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+ PLL frequency given a reference clock frequency. The output frequency will
+ be the VCO frequency divided by [POST_DIV].
+ 0x0 = Divide PLL frequency by 1.
+ 0x1 = Divide PLL frequency by 2.
+ 0x2 = Divide PLL frequency by 4.
+ 0x3 = Divide PLL frequency by 8. */
+ uint64_t reserved_29_37 : 9;
+ uint64_t sdm_en : 1; /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+ uint64_t vco_sel : 1; /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+ uint64_t cal_sel : 1; /**< [ 40: 40](R/W) PLL calibration method select. */
+ uint64_t dither_en : 1; /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+ uint64_t bg_clk_en : 1; /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+ uint64_t bg_div16 : 1; /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+ uint64_t band_overide : 1; /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+ uint64_t band_limits : 3; /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+ uint64_t band : 5; /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+ uint64_t band_ppm : 2; /**< [ 54: 53](R/W) PLL band ppm setting. */
+ uint64_t cp_overide : 1; /**< [ 55: 55](R/W) PLL charge pump override. */
+ uint64_t cp : 4; /**< [ 59: 56](R/W) PLL charge pump configuration. */
+ uint64_t cal_cp_mult : 2; /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* CN9XXX-specific layout: field positions match the generic layout above,
+ except that reserved_29_37 is split into reserved_29_35 and
+ reserved_36_37. */
+ struct bdk_gsernx_lanex_pll_1_bcfg_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t cal_cp_mult : 2; /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+ uint64_t cp : 4; /**< [ 59: 56](R/W) PLL charge pump configuration. */
+ uint64_t cp_overide : 1; /**< [ 55: 55](R/W) PLL charge pump override. */
+ uint64_t band_ppm : 2; /**< [ 54: 53](R/W) PLL band ppm setting. */
+ uint64_t band : 5; /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+ uint64_t band_limits : 3; /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+ uint64_t band_overide : 1; /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+ uint64_t bg_div16 : 1; /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+ uint64_t bg_clk_en : 1; /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+ uint64_t dither_en : 1; /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+ uint64_t cal_sel : 1; /**< [ 40: 40](R/W) PLL calibration method select. */
+ uint64_t vco_sel : 1; /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+ uint64_t sdm_en : 1; /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+ uint64_t reserved_36_37 : 2;
+ uint64_t reserved_29_35 : 7;
+ uint64_t post_div : 2; /**< [ 28: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+ PLL frequency given a reference clock frequency. The output frequency will
+ be the VCO frequency divided by [POST_DIV].
+ 0x0 = Divide PLL frequency by 1.
+ 0x1 = Divide PLL frequency by 2.
+ 0x2 = Divide PLL frequency by 4.
+ 0x3 = Divide PLL frequency by 8. */
+ uint64_t div_n : 9; /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+ uint64_t div_f : 18; /**< [ 17: 0](R/W) PLL feedback divider fractional portion. */
+#else /* Word 0 - Little Endian */
+ uint64_t div_f : 18; /**< [ 17: 0](R/W) PLL feedback divider fractional portion. */
+ uint64_t div_n : 9; /**< [ 26: 18](R/W) PLL feedback divider integer portion. */
+ uint64_t post_div : 2; /**< [ 28: 27](R/W) Forward PLL divider. Used in conjunction with [DIV_N] to set the
+ PLL frequency given a reference clock frequency. The output frequency will
+ be the VCO frequency divided by [POST_DIV].
+ 0x0 = Divide PLL frequency by 1.
+ 0x1 = Divide PLL frequency by 2.
+ 0x2 = Divide PLL frequency by 4.
+ 0x3 = Divide PLL frequency by 8. */
+ uint64_t reserved_29_35 : 7;
+ uint64_t reserved_36_37 : 2;
+ uint64_t sdm_en : 1; /**< [ 38: 38](R/W) Enable PLL fractional-N operation. */
+ uint64_t vco_sel : 1; /**< [ 39: 39](R/W) PLL select one of the two VCOs in the PLL. */
+ uint64_t cal_sel : 1; /**< [ 40: 40](R/W) PLL calibration method select. */
+ uint64_t dither_en : 1; /**< [ 41: 41](R/W) Enable the dithering bit of sigma delta modulator. */
+ uint64_t bg_clk_en : 1; /**< [ 42: 42](R/W) Enable chopping in the band gap circuit. */
+ uint64_t bg_div16 : 1; /**< [ 43: 43](R/W) Enable divide by 16 of reference clock to the band gap. */
+ uint64_t band_overide : 1; /**< [ 44: 44](R/W/H) Bypass PLL calibration and set PLL band with band field inputs. */
+ uint64_t band_limits : 3; /**< [ 47: 45](R/W) Band limits for the PLL calibration procedure. */
+ uint64_t band : 5; /**< [ 52: 48](R/W/H) PLL manual PLL band inputs; only effective if [BAND_OVERIDE] set. */
+ uint64_t band_ppm : 2; /**< [ 54: 53](R/W) PLL band ppm setting. */
+ uint64_t cp_overide : 1; /**< [ 55: 55](R/W) PLL charge pump override. */
+ uint64_t cp : 4; /**< [ 59: 56](R/W) PLL charge pump configuration. */
+ uint64_t cal_cp_mult : 2; /**< [ 61: 60](R/W) PLL cal charge pump mult control. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_gsernx_lanex_pll_1_bcfg bdk_gsernx_lanex_pll_1_bcfg_t;
+
+/*
+ * Compute the physical CSR address of GSERN(a)_LANE(b)_PLL_1_BCFG.
+ * Only valid on CN9XXX for a <= 7 (GSERN block index) and b <= 4 (lane
+ * index); any other combination reports a fatal CSR error via
+ * __bdk_csr_fatal() and does not return a usable address.
+ */
+static inline uint64_t BDK_GSERNX_LANEX_PLL_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PLL_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000200ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PLL_1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Helper macros consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) bdk_gsernx_lanex_pll_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) "GSERNX_LANEX_PLL_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PLL_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_pll_2_bcfg
+ *
+ * GSER Lane PLL Base Configuration Register 2
+ */
+union bdk_gsernx_lanex_pll_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_pll_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t lock_check_cnt_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [LOCK_CHECK_CNT_OVRD]. */
+ uint64_t lock_check_cnt_ovrd : 15; /**< [ 54: 40](R/W) Lock check counter value override. This counter is used to wait for PLL lock to
+ be valid. It counts every REFCLK cycle and once it is done asserts
+ GSERN()_LANE()_INIT_BSTS[LOCK_READY]. For Common PLL, REFCLK is the input from the
+ pad. For Lane PLL, REFCLK is the output of the common PLL. To use value assert
+ GSERN()_LANE()_RST1_BCFG[LOCK_CHECK] or trigger a PLL reset sequence. */
+ uint64_t reserved_34_39 : 6;
+ uint64_t vcm_sel : 1; /**< [ 33: 33](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t cp_boost : 1; /**< [ 32: 32](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t ssc_sata_mode : 2; /**< [ 31: 30](R/W) PLL SATA spread spectrum control.
+ 0x0 = Down spreading. PPM triangle wave total peak-to-peak spread subtracted from
+ nominal frequency.
+ 0x1 = Up spreading. PPM triangle wave total peak-to-peak spread added to nominal
+ frequency.
+ 0x2 = Center spreading. PPM triangle wave total peak-to-peak spread centered at nominal
+ frequency.
+ 0x3 = Square wave subtracted from nominal frequency. */
+ uint64_t ssc_ppm : 2; /**< [ 29: 28](R/W) Spread-spectrum clocking total peak-to-peak spread.
+ 0x0 = 5000 PPM.
+ 0x1 = 3000 PPM.
+ 0x2 = 2500 PPM.
+ 0x3 = 1000 PPM. */
+ uint64_t pnr_refclk_en : 1; /**< [ 27: 27](R/W) Enable PLL reference clock to internal logic. */
+ uint64_t ssc_en : 1; /**< [ 26: 26](R/W) Spread-spectrum clocking enable. */
+ uint64_t shlb_en : 1; /**< [ 25: 25](R/W) Used when in shallow loopback mode to mux the CDR receive clock onto
+ the transmit data path clock to ensure that the clock frequencies
+ are matched (to prevent data overrun). */
+ uint64_t pfd_offset : 1; /**< [ 24: 24](R/W) PLL PFD offset enable. */
+ uint64_t opamp : 4; /**< [ 23: 20](R/W) PLL loop filter op-amp configuration. */
+ uint64_t res : 4; /**< [ 19: 16](R/W) PLL loop filter configuration. */
+ uint64_t reserved_15 : 1;
+ uint64_t vco_bias : 3; /**< [ 14: 12](R/W) VCO bias control. */
+ uint64_t cal_dac_low : 4; /**< [ 11: 8](R/W) PLL calibration DAC low control. */
+ uint64_t cal_dac_mid : 4; /**< [ 7: 4](R/W) PLL calibration DAC middle control. */
+ uint64_t cal_dac_high : 4; /**< [ 3: 0](R/W) PLL calibration DAC high control. */
+#else /* Word 0 - Little Endian */
+ uint64_t cal_dac_high : 4; /**< [ 3: 0](R/W) PLL calibration DAC high control. */
+ uint64_t cal_dac_mid : 4; /**< [ 7: 4](R/W) PLL calibration DAC middle control. */
+ uint64_t cal_dac_low : 4; /**< [ 11: 8](R/W) PLL calibration DAC low control. */
+ uint64_t vco_bias : 3; /**< [ 14: 12](R/W) VCO bias control. */
+ uint64_t reserved_15 : 1;
+ uint64_t res : 4; /**< [ 19: 16](R/W) PLL loop filter configuration. */
+ uint64_t opamp : 4; /**< [ 23: 20](R/W) PLL loop filter op-amp configuration. */
+ uint64_t pfd_offset : 1; /**< [ 24: 24](R/W) PLL PFD offset enable. */
+ uint64_t shlb_en : 1; /**< [ 25: 25](R/W) Used when in shallow loopback mode to mux the CDR receive clock onto
+ the transmit data path clock to ensure that the clock frequencies
+ are matched (to prevent data overrun). */
+ uint64_t ssc_en : 1; /**< [ 26: 26](R/W) Spread-spectrum clocking enable. */
+ uint64_t pnr_refclk_en : 1; /**< [ 27: 27](R/W) Enable PLL reference clock to internal logic. */
+ uint64_t ssc_ppm : 2; /**< [ 29: 28](R/W) Spread-spectrum clocking total peak-to-peak spread.
+ 0x0 = 5000 PPM.
+ 0x1 = 3000 PPM.
+ 0x2 = 2500 PPM.
+ 0x3 = 1000 PPM. */
+ uint64_t ssc_sata_mode : 2; /**< [ 31: 30](R/W) PLL SATA spread spectrum control.
+ 0x0 = Down spreading. PPM triangle wave total peak-to-peak spread subtracted from
+ nominal frequency.
+ 0x1 = Up spreading. PPM triangle wave total peak-to-peak spread added to nominal
+ frequency.
+ 0x2 = Center spreading. PPM triangle wave total peak-to-peak spread centered at nominal
+ frequency.
+ 0x3 = Square wave subtracted from nominal frequency. */
+ uint64_t cp_boost : 1; /**< [ 32: 32](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t vcm_sel : 1; /**< [ 33: 33](R/W) For diagnostic use only.
+ Internal:
+ See PLL designer for how to set these. */
+ uint64_t reserved_34_39 : 6;
+ uint64_t lock_check_cnt_ovrd : 15; /**< [ 54: 40](R/W) Lock check counter value override. This counter is used to wait for PLL lock to
+ be valid. It counts every REFCLK cycle and once it is done asserts
+ GSERN()_LANE()_INIT_BSTS[LOCK_READY]. For Common PLL, REFCLK is the input from the
+ pad. For Lane PLL, REFCLK is the output of the common PLL. To use value assert
+ GSERN()_LANE()_RST1_BCFG[LOCK_CHECK] or trigger a PLL reset sequence. */
+ uint64_t lock_check_cnt_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [LOCK_CHECK_CNT_OVRD]. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_pll_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_pll_2_bcfg bdk_gsernx_lanex_pll_2_bcfg_t;
+
+/*
+ * Compute the physical CSR address of GSERN(a)_LANE(b)_PLL_2_BCFG.
+ * Only valid on CN9XXX for a <= 7 (GSERN block index) and b <= 4 (lane
+ * index); any other combination reports a fatal CSR error via
+ * __bdk_csr_fatal() and does not return a usable address.
+ */
+static inline uint64_t BDK_GSERNX_LANEX_PLL_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_PLL_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000210ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_PLL_2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Helper macros consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) bdk_gsernx_lanex_pll_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) "GSERNX_LANEX_PLL_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_PLL_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rev
+ *
+ * GSER Lane Revision Register
+ * Revision number
+ */
+union bdk_gsernx_lanex_rev
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t rev : 8; /**< [ 7: 0](RO/H) Revision number for GSERN lane subblock.
+ Internal:
+ Used primarily for E5. */
+#else /* Word 0 - Little Endian */
+ uint64_t rev : 8; /**< [ 7: 0](RO/H) Revision number for GSERN lane subblock.
+ Internal:
+ Used primarily for E5. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rev_s cn; */
+};
+typedef union bdk_gsernx_lanex_rev bdk_gsernx_lanex_rev_t;
+
+/*
+ * Compute the physical CSR address of GSERN(a)_LANE(b)_REV.
+ * Only valid on CN9XXX for a <= 7 (GSERN block index) and b <= 4 (lane
+ * index); any other combination reports a fatal CSR error via
+ * __bdk_csr_fatal() and does not return a usable address.
+ */
+static inline uint64_t BDK_GSERNX_LANEX_REV(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_REV(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000000ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_REV", 2, a, b, 0, 0);
+}
+
+/* Helper macros consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_GSERNX_LANEX_REV(a,b) bdk_gsernx_lanex_rev_t
+#define bustype_BDK_GSERNX_LANEX_REV(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_REV(a,b) "GSERNX_LANEX_REV"
+#define device_bar_BDK_GSERNX_LANEX_REV(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_REV(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_REV(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst1_bcfg
+ *
+ * GSER Lane Reset State Machine Controls and Overrides Register 1
+ */
+union bdk_gsernx_lanex_rst1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rst1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t domain_rst_en : 1; /**< [ 55: 55](R/W) Domain reset enable.
+ 0 = Prevent reseting lane logic with domain reset.
+ 1 = Enable reseting all lane logic with domain reset.
+
+ For PCIe configurations, typically 1 for a root complex and 0 for an endpoint. */
+ uint64_t reserved_48_54 : 7;
+ uint64_t rx_go2deep_idle : 1; /**< [ 47: 47](R/W) Set to sequence the receiver into deep idle. */
+ uint64_t rx_pd_qac_q : 1; /**< [ 46: 46](R/W) Power control for the custom analog quadrature accuracy corrector
+ (QAC). This QAC corrects for phase error between the I clock and the Q
+ (quadrature, doutq) clock.
+ 0 = Power up the I/Q QAC.
+ 1 = Power down the I/Q QAC. When in this state,
+ GSERN()_LANE()_RX_QAC_BCFG[CDR_QAC_SELQ] should also be set to zero to
+ disconnect the QAC from the clock data recovery (CDR) loop. */
+ uint64_t rx_pd_qac_e : 1; /**< [ 45: 45](R/W) Power control for the custom analog quadrature accuracy corrector
+ (QAC). This QAC corrects for phase error between the I clock and the E
+ (eye, doute) clock.
+ 0 = Power up the I/E QAC.
+ 1 = Power down the I/E QAC. When in this state,
+ GSERN()_LANE()_RX_QAC_BCFG[CDR_QAC_SELQ] should also be set to zero to
+ disconnect the QAC from the clock data recovery (CDR) loop. */
+ uint64_t rx_pd_idle : 1; /**< [ 44: 44](R/W) Set to power down the idle detector in the custom analog
+ receiver. */
+ uint64_t rx_rst_deser : 1; /**< [ 43: 43](R/W) Set to reset the deserializers to the offset DAC, current
+ bias DAC, and interpolator re-mapping. */
+ uint64_t rx_rst_dcc_q : 1; /**< [ 42: 42](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the Q (quadrature, data, doutq) path. */
+ uint64_t rx_rst_dcc_i : 1; /**< [ 41: 41](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the I (in-phase, edge, douti) path. */
+ uint64_t rx_rst_dcc_e : 1; /**< [ 40: 40](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the E (eye, doute) path */
+ uint64_t idle : 1; /**< [ 39: 39](R/W) Set to idle the custom receiver and baseline wander
+ compensation (bwlc). */
+ uint64_t rx_rst_qac_q : 1; /**< [ 38: 38](R/W) Set reset to the doutq datapath quadrature corrector
+ filter and associated logic. */
+ uint64_t rx_rst_qac_e : 1; /**< [ 37: 37](R/W) Set reset to the doute quadrature corrector filter and
+ associated logic. */
+ uint64_t rx_rst_blwc : 1; /**< [ 36: 36](R/W) Set to reset the analog baseline wander compensation
+ block. */
+ uint64_t rx_rst_cdrfsm : 1; /**< [ 35: 35](R/W) Set to reset the CDR FSM. */
+ uint64_t rx_rst_voter : 1; /**< [ 34: 34](R/W) Set to reset the analog voter block. */
+ uint64_t rx_rst_div_e : 1; /**< [ 33: 33](R/W) Set to reset the analog CDR clock dividers in the eye data path for
+ div{5, 8, 10, 16, 20}. */
+ uint64_t rx_rst_div : 1; /**< [ 32: 32](R/W) Set to reset the analog CDR clock dividers in the quadrature data path
+ for div{5, 8, 10, 16, 20}. */
+ uint64_t rx_rst_interp_q : 1; /**< [ 31: 31](R/W) Set to reset the Q (quadrature, doutq) pipe analog
+ interpolator logic (only, not the full datapaths). */
+ uint64_t rx_rst_interp_i : 1; /**< [ 30: 30](R/W) Set to reset the I (in-phase, douti) pipe analog
+ interpolator logic (only, not the full datapath). */
+ uint64_t rx_rst_interp_e : 1; /**< [ 29: 29](R/W) Set to reset the E (eye, doute) analog interpolator logic
+ (only, not the full datapath). */
+ uint64_t rx_pd_interp_q : 1; /**< [ 28: 28](R/W) Set to power down the I (in-phase, douti) analog
+ interpolator logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_interp_i : 1; /**< [ 27: 27](R/W) Set to power down the I (in-phase, douti) analog
+ interpolator logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_interp_e : 1; /**< [ 26: 26](R/W) Set to power down the E (eye, doute) analog interpolator
+ logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_dfe_x : 1; /**< [ 25: 25](R/W) Set to power down the DFE X path. The X path is passed to
+ the DFE I (edge, douti) pipe depending on edgesel_{even,odd}. */
+ uint64_t rx_pd_dfe_q : 1; /**< [ 24: 24](R/W) Set to power down the DFE Q (data, doutq) path (only, not
+ the full datapath) */
+ uint64_t rx_pd_dfe_i : 1; /**< [ 23: 23](R/W) Set to power down the DFE I (edge, douti) path (only, not
+ the full datapath). */
+ uint64_t rx_pd_dfe_e : 1; /**< [ 22: 22](R/W) Set to power down the DFE E (eye, doute) path (only, not
+ the full datapath). */
+ uint64_t rx_pd_dcc_q : 1; /**< [ 21: 21](R/W) Set to power down the duty-cycle corrector (DCC) of the Q
+ (quadrature, doutq) clock after the interpolator and before the
+ divider (only, not the full clock path). */
+ uint64_t rx_pd_dcc_i : 1; /**< [ 20: 20](R/W) Set to power down the duty-cycle corrector (DCC) of the I
+ (in-phase, douti) clock after the interpolator and before the divider
+ (not the full clock path). */
+ uint64_t rx_pd_dcc_e : 1; /**< [ 19: 19](R/W) Set to power down the duty-cycle corrector (DCC) of the E
+ (eye, doute) clock after the interpolator and before the divider (not
+ the full clock path). */
+ uint64_t rx_pd_biasdac : 1; /**< [ 18: 18](R/W) Set to power down the current bias DAC, which would power
+ down any amplifier in the RX (CTLE, VGA, DFE summer, DCC, QAC, etc.). */
+ uint64_t rx_pd_afe : 1; /**< [ 17: 17](R/W) Set to power down the analog front-end (AFE). */
+ uint64_t rx_en_cdrfsm : 1; /**< [ 16: 16](R/W) Set to enable (power-up) the CDR FSM. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t pll_go2deep_idle : 1; /**< [ 12: 12](R/W) Set to cycle the PLL into deep idle. */
+ uint64_t lock_ppm : 2; /**< [ 11: 10](R/W) PLL lock PPM setting; after GSERN()_LANE()_RST1_BCFG[LOCK_WAIT], compare
+ reference clock and divided VCO clock for this many cycles:
+ 0x0 = Compare after 5000 reference clock cycles.
+ 0x1 = Compare after 10000 reference clock cycles.
+ 0x2 = Compare after 20000 reference clock cycles.
+ 0x3 = Compare after 2500 reference clock cycles. */
+ uint64_t lock_wait : 2; /**< [ 9: 8](R/W) Wait time for PLL lock check function to start:
+ 0x0 = Wait 2500 reference clock cycles.
+ 0x1 = Wait 5000 reference clock cycles.
+ 0x2 = Wait 10000 reference clock cycles.
+ 0x3 = Wait 1250 reference clock cycles. */
+ uint64_t lock_check : 1; /**< [ 7: 7](R/W) Trigger a PLL lock status check; result returned in
+ GSERN()_LANE()_INIT_BSTS[LOCK] when GSERN()_LANE()_INIT_BSTS[LOCK_READY]
+ asserts. deassert and re-assert to repeat checking. */
+ uint64_t vco_cal_reset : 1; /**< [ 6: 6](R/W) PLL VCO calibration state machine reset. */
+ uint64_t fracn_reset : 1; /**< [ 5: 5](R/W) PLL fractional-N state machine reset. */
+ uint64_t ssc_reset : 1; /**< [ 4: 4](R/W) PLL SSC state machine reset. */
+ uint64_t post_div_reset : 1; /**< [ 3: 3](RO) Reserved.
+ Internal:
+ Was common PLL post divider reset. No longer used. */
+ uint64_t reset : 1; /**< [ 2: 2](R/W) PLL primary reset; must assert [POST_DIV_RESET] if [RESET] is asserted. */
+ uint64_t cal_en : 1; /**< [ 1: 1](R/W) Enable PLL calibration procedure. */
+ uint64_t pwdn : 1; /**< [ 0: 0](R/W) PLL power down control. */
+#else /* Word 0 - Little Endian */
+ uint64_t pwdn : 1; /**< [ 0: 0](R/W) PLL power down control. */
+ uint64_t cal_en : 1; /**< [ 1: 1](R/W) Enable PLL calibration procedure. */
+ uint64_t reset : 1; /**< [ 2: 2](R/W) PLL primary reset; must assert [POST_DIV_RESET] if [RESET] is asserted. */
+ uint64_t post_div_reset : 1; /**< [ 3: 3](RO) Reserved.
+ Internal:
+ Was common PLL post divider reset. No longer used. */
+ uint64_t ssc_reset : 1; /**< [ 4: 4](R/W) PLL SSC state machine reset. */
+ uint64_t fracn_reset : 1; /**< [ 5: 5](R/W) PLL fractional-N state machine reset. */
+ uint64_t vco_cal_reset : 1; /**< [ 6: 6](R/W) PLL VCO calibration state machine reset. */
+ uint64_t lock_check : 1; /**< [ 7: 7](R/W) Trigger a PLL lock status check; result returned in
+ GSERN()_LANE()_INIT_BSTS[LOCK] when GSERN()_LANE()_INIT_BSTS[LOCK_READY]
+ asserts. deassert and re-assert to repeat checking. */
+ uint64_t lock_wait : 2; /**< [ 9: 8](R/W) Wait time for PLL lock check function to start:
+ 0x0 = Wait 2500 reference clock cycles.
+ 0x1 = Wait 5000 reference clock cycles.
+ 0x2 = Wait 10000 reference clock cycles.
+ 0x3 = Wait 1250 reference clock cycles. */
+ uint64_t lock_ppm : 2; /**< [ 11: 10](R/W) PLL lock PPM setting; after GSERN()_LANE()_RST1_BCFG[LOCK_WAIT], compare
+ reference clock and divided VCO clock for this many cycles:
+ 0x0 = Compare after 5000 reference clock cycles.
+ 0x1 = Compare after 10000 reference clock cycles.
+ 0x2 = Compare after 20000 reference clock cycles.
+ 0x3 = Compare after 2500 reference clock cycles. */
+ uint64_t pll_go2deep_idle : 1; /**< [ 12: 12](R/W) Set to cycle the PLL into deep idle. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t rx_en_cdrfsm : 1; /**< [ 16: 16](R/W) Set to enable (power-up) the CDR FSM. */
+ uint64_t rx_pd_afe : 1; /**< [ 17: 17](R/W) Set to power down the analog front-end (AFE). */
+ uint64_t rx_pd_biasdac : 1; /**< [ 18: 18](R/W) Set to power down the current bias DAC, which would power
+ down any amplifier in the RX (CTLE, VGA, DFE summer, DCC, QAC, etc.). */
+ uint64_t rx_pd_dcc_e : 1; /**< [ 19: 19](R/W) Set to power down the duty-cycle corrector (DCC) of the E
+ (eye, doute) clock after the interpolator and before the divider (not
+ the full clock path). */
+ uint64_t rx_pd_dcc_i : 1; /**< [ 20: 20](R/W) Set to power down the duty-cycle corrector (DCC) of the I
+ (in-phase, douti) clock after the interpolator and before the divider
+ (not the full clock path). */
+ uint64_t rx_pd_dcc_q : 1; /**< [ 21: 21](R/W) Set to power down the duty-cycle corrector (DCC) of the Q
+ (quadrature, doutq) clock after the interpolator and before the
+ divider (only, not the full clock path). */
+ uint64_t rx_pd_dfe_e : 1; /**< [ 22: 22](R/W) Set to power down the DFE E (eye, doute) path (only, not
+ the full datapath). */
+ uint64_t rx_pd_dfe_i : 1; /**< [ 23: 23](R/W) Set to power down the DFE I (edge, douti) path (only, not
+ the full datapath). */
+ uint64_t rx_pd_dfe_q : 1; /**< [ 24: 24](R/W) Set to power down the DFE Q (data, doutq) path (only, not
+ the full datapath) */
+ uint64_t rx_pd_dfe_x : 1; /**< [ 25: 25](R/W) Set to power down the DFE X path. The X path is passed to
+ the DFE I (edge, douti) pipe depending on edgesel_{even,odd}. */
+ uint64_t rx_pd_interp_e : 1; /**< [ 26: 26](R/W) Set to power down the E (eye, doute) analog interpolator
+ logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_interp_i : 1; /**< [ 27: 27](R/W) Set to power down the I (in-phase, douti) analog
+ interpolator logic and output clocks (only, not the full clock path). */
+ uint64_t rx_pd_interp_q : 1; /**< [ 28: 28](R/W) Set to power down the I (in-phase, douti) analog
+ interpolator logic and output clocks (only, not the full clock path). */
+ uint64_t rx_rst_interp_e : 1; /**< [ 29: 29](R/W) Set to reset the E (eye, doute) analog interpolator logic
+ (only, not the full datapath). */
+ uint64_t rx_rst_interp_i : 1; /**< [ 30: 30](R/W) Set to reset the I (in-phase, douti) pipe analog
+ interpolator logic (only, not the full datapath). */
+ uint64_t rx_rst_interp_q : 1; /**< [ 31: 31](R/W) Set to reset the Q (quadrature, doutq) pipe analog
+ interpolator logic (only, not the full datapaths). */
+ uint64_t rx_rst_div : 1; /**< [ 32: 32](R/W) Set to reset the analog CDR clock dividers in the quadrature data path
+ for div{5, 8, 10, 16, 20}. */
+ uint64_t rx_rst_div_e : 1; /**< [ 33: 33](R/W) Set to reset the analog CDR clock dividers in the eye data path for
+ div{5, 8, 10, 16, 20}. */
+ uint64_t rx_rst_voter : 1; /**< [ 34: 34](R/W) Set to reset the analog voter block. */
+ uint64_t rx_rst_cdrfsm : 1; /**< [ 35: 35](R/W) Set to reset the CDR FSM. */
+ uint64_t rx_rst_blwc : 1; /**< [ 36: 36](R/W) Set to reset the analog baseline wander compensation
+ block. */
+ uint64_t rx_rst_qac_e : 1; /**< [ 37: 37](R/W) Set reset to the doute quadrature corrector filter and
+ associated logic. */
+ uint64_t rx_rst_qac_q : 1; /**< [ 38: 38](R/W) Set reset to the doutq datapath quadrature corrector
+ filter and associated logic. */
+ uint64_t idle : 1; /**< [ 39: 39](R/W) Set to idle the custom receiver and baseline wander
+ compensation (bwlc). */
+ uint64_t rx_rst_dcc_e : 1; /**< [ 40: 40](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the E (eye, doute) path */
+ uint64_t rx_rst_dcc_i : 1; /**< [ 41: 41](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the I (in-phase, edge, douti) path. */
+ uint64_t rx_rst_dcc_q : 1; /**< [ 42: 42](R/W) Set to reset the integrator in the duty-cycle corrector
+ (DCC) on the Q (quadrature, data, doutq) path. */
+ uint64_t rx_rst_deser : 1; /**< [ 43: 43](R/W) Set to reset the deserializers to the offset DAC, current
+ bias DAC, and interpolator re-mapping. */
+ uint64_t rx_pd_idle : 1; /**< [ 44: 44](R/W) Set to power down the idle detector in the custom analog
+ receiver. */
+ uint64_t rx_pd_qac_e : 1; /**< [ 45: 45](R/W) Power control for the custom analog quadrature accuracy corrector
+ (QAC). This QAC corrects for phase error between the I clock and the E
+ (eye, doute) clock.
+ 0 = Power up the I/E QAC.
+ 1 = Power down the I/E QAC. When in this state,
+ GSERN()_LANE()_RX_QAC_BCFG[CDR_QAC_SELQ] should also be set to zero to
+ disconnect the QAC from the clock data recovery (CDR) loop. */
+ uint64_t rx_pd_qac_q : 1; /**< [ 46: 46](R/W) Power control for the custom analog quadrature accuracy corrector
+ (QAC). This QAC corrects for phase error between the I clock and the Q
+ (quadrature, doutq) clock.
+ 0 = Power up the I/Q QAC.
+ 1 = Power down the I/Q QAC. When in this state,
+ GSERN()_LANE()_RX_QAC_BCFG[CDR_QAC_SELQ] should also be set to zero to
+ disconnect the QAC from the clock data recovery (CDR) loop. */
+ uint64_t rx_go2deep_idle : 1; /**< [ 47: 47](R/W) Set to sequence the receiver into deep idle. */
+ uint64_t reserved_48_54 : 7;
+ uint64_t domain_rst_en : 1; /**< [ 55: 55](R/W) Domain reset enable.
+                                                                 0 = Prevent resetting lane logic with domain reset.
+                                                                 1 = Enable resetting all lane logic with domain reset.
+
+ For PCIe configurations, typically 1 for a root complex and 0 for an endpoint. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rst1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst1_bcfg bdk_gsernx_lanex_rst1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST1_BCFG(unsigned long a, unsigned long b)
+{
+    /* Valid on CN9XXX only: GSERN module a in 0..7, lane b in 0..4;
+       otherwise report a fatal CSR lookup error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        const uint64_t module_off = 0x1000000ll * (a & 0x7);
+        const uint64_t lane_off = 0x10000ll * (b & 0x7);
+        return 0x87e090000310ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RST1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST1_BCFG(a,b) bdk_gsernx_lanex_rst1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST1_BCFG(a,b) "GSERNX_LANEX_RST1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst2_bcfg
+ *
+ * GSER Lane Reset State Machine Controls and Overrides Register 2
+ */
+union bdk_gsernx_lanex_rst2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rst2_bcfg_s
+    {
+        /* NOTE(review): auto-generated hardware layout; the big-endian and
+           little-endian field lists below must describe the same bits and
+           must be kept in sync if either is ever edited. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_58_63        : 6;
+        uint64_t adpt_trigger_wait     : 4;  /**< [ 57: 54](R/W) Wait time for after triggering adaptation before checking adaptation status. Set
+                                                                 to a minimum of 3. Set to the desired value before or at the same time as
+                                                                 setting [RST_ADPT_RST_SM] to zero. */
+        uint64_t reserved_50_53        : 4;
+        uint64_t adpt_wait             : 18; /**< [ 49: 32](R/W) Wait time for adaptation to complete. Set at least as long as the maximum of:
+                                                                 * GSERN()_LANE()_RX_5_BCFG[VGA_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_5_BCFG[DFE_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_6_BCFG[CTLELTE_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_6_BCFG[CTLEZ_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_6_BCFG[CTLE_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_12_BCFG[AFEOS_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_19_BCFG[BLWC_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_23_BCFG[PREVGA_GN_TIMER_MAX].
+
+                                                                 The adaptation state machine will move on when all enabled adaptation operations
+                                                                 complete within the [ADPT_WAIT] count. If they do not complete within the wait
+                                                                 time, the state machine will move on when the counter expires. Set to the
+                                                                 desired value before or at the same time as setting [RST_ADPT_RST_SM] to zero. */
+        uint64_t reserved_30_31        : 2;
+        uint64_t do_prevga_gn_adpt     : 1;  /**< [ 29: 29](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_blwc_adpt          : 1;  /**< [ 28: 28](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_afeos_adpt         : 1;  /**< [ 27: 27](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_ctlelte_adpt       : 1;  /**< [ 26: 26](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_ctlez_adpt         : 1;  /**< [ 25: 25](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_ctle_adpt          : 1;  /**< [ 24: 24](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_dfe_adpt           : 1;  /**< [ 23: 23](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_vga_adpt           : 1;  /**< [ 22: 22](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t rst_adpt_rst_sm       : 1;  /**< [ 21: 21](R/W) Set to one to reset the adaptation reset state machine; set to zero to allow the
+                                                                 adaptation reset state machine to run. Leave set to one to run adaptation
+                                                                 entirely under SW control through the GSERN()_LANE()_RX_7_BCFG[*_RST]
+                                                                 controls. Write to zero at the same time or after the desired [DO_*_ADPT]
+                                                                 controls are enabled to allow the reset state machine to initiate
+                                                                 adaptation. Note - for pausing and restarting adaptation associated with PCIe
+                                                                 rate changes and all power state transitions, the reset state machine should
+                                                                 control adaptation. */
+        uint64_t rst_eye_rst_sm        : 1;  /**< [ 20: 20](R/W) Set to reset the eye data path reset and power-up/power-down
+                                                                 state machine; set low to allow the eye data path reset and soft
+                                                                 power-up/power-down state machine to run (if [LN_RESET_USE_EYE] is
+                                                                 asserted). */
+        uint64_t ln_reset_use_eye      : 1;  /**< [ 19: 19](R/W) Set to enable the eye (doute) data path reset and
+                                                                 power-up/power-down state machine to run at cold reset when
+                                                                 [RST_EYE_RST_SM] deasserts. After cold reset, assert or deassert
+                                                                 [LN_RESET_USE_EYE] to run the eye data path soft power-up or
+                                                                 power-down sequence. */
+        uint64_t rst_rx_rst_sm         : 1;  /**< [ 18: 18](R/W) Set to reset the receiver reset state machine; set low to run
+                                                                 the receiver reset initialization state machine. */
+        uint64_t rst_tx_rst_sm         : 1;  /**< [ 17: 17](R/W) Set to reset the transmitter reset state machine; set low to
+                                                                 run the transmitter reset initialization state machine. */
+        uint64_t rst_pll_rst_sm        : 1;  /**< [ 16: 16](R/W) Set to reset the full lane reset state machine (PLL, TX,
+                                                                 and RX); set low to run the complete reset initialization sequence
+                                                                 starting with lane PLL initialization. */
+        uint64_t reserved_13_15        : 3;
+        uint64_t tx_dcc_iboost         : 1;  /**< [ 12: 12](R/W) Set to assert the iboost control bit of the
+                                                                 transmit duty cycle correcter. Should be programmed as desired before
+                                                                 sequencing the transmitter reset state machine. Differs
+                                                                 from [TX_DCC_LOWF] in the data rate range that it is set at. */
+        uint64_t tx_go2deep_idle       : 1;  /**< [ 11: 11](R/W) Set to sequence the transmitter into deep idle. */
+        uint64_t tx_dcc_lowf           : 1;  /**< [ 10: 10](R/W) Set to assert the low-frequency control bit of the transmit duty cycle
+                                                                 correcter. Should be programmed as desired before sequencing the transmitter
+                                                                 reset state machine. Set to 1 for data rates below 4 Gbaud. */
+        uint64_t tx_idle               : 1;  /**< [  9:  9](R/W) Set to put the transmitter into idle (weak terminate). */
+        uint64_t tx_div_rst            : 1;  /**< [  8:  8](R/W) Set to reset the counter in the analog transmitter clock
+                                                                 divider. */
+        uint64_t tx_dcc_rst            : 1;  /**< [  7:  7](R/W) Set to reset the analog duty cycle corrector in the
+                                                                 transmitter. */
+        uint64_t reserved_6            : 1;
+        uint64_t tx_enctl              : 1;  /**< [  5:  5](R/W) Set to enable the analog TX controls (c*, en*). */
+        uint64_t tx_cdrdiv3            : 1;  /**< [  4:  4](R/W) Set to enable the analog divide by 3 post scalar divider in the
+                                                                 TX divider. If GSERN()_LANE()_CDRFSM_BCFG[CLK_SEL] is set to use the div3clk from
+                                                                 the transmitter this bit needs to be enabled. */
+        uint64_t tx_endiv5             : 1;  /**< [  3:  3](R/W) Set to enable the analog divide by 4 or 5 post scalar dividers
+                                                                 in the TX divider. */
+        uint64_t reserved_2            : 1;
+        uint64_t tx_pdb                : 1;  /**< [  1:  1](R/W) Set to zero to power down the entire analog TX driver, disabling
+                                                                 current mirrors, current DACs, and op-amps. */
+        uint64_t tx_dcc_pdb            : 1;  /**< [  0:  0](R/W) Set to zero to power-down the low-swing input, CML to CMOS shifter,
+                                                                 and duty cycle corrector. */
+#else /* Word 0 - Little Endian */
+        uint64_t tx_dcc_pdb            : 1;  /**< [  0:  0](R/W) Set to zero to power-down the low-swing input, CML to CMOS shifter,
+                                                                 and duty cycle corrector. */
+        uint64_t tx_pdb                : 1;  /**< [  1:  1](R/W) Set to zero to power down the entire analog TX driver, disabling
+                                                                 current mirrors, current DACs, and op-amps. */
+        uint64_t reserved_2            : 1;
+        uint64_t tx_endiv5             : 1;  /**< [  3:  3](R/W) Set to enable the analog divide by 4 or 5 post scalar dividers
+                                                                 in the TX divider. */
+        uint64_t tx_cdrdiv3            : 1;  /**< [  4:  4](R/W) Set to enable the analog divide by 3 post scalar divider in the
+                                                                 TX divider. If GSERN()_LANE()_CDRFSM_BCFG[CLK_SEL] is set to use the div3clk from
+                                                                 the transmitter this bit needs to be enabled. */
+        uint64_t tx_enctl              : 1;  /**< [  5:  5](R/W) Set to enable the analog TX controls (c*, en*). */
+        uint64_t reserved_6            : 1;
+        uint64_t tx_dcc_rst            : 1;  /**< [  7:  7](R/W) Set to reset the analog duty cycle corrector in the
+                                                                 transmitter. */
+        uint64_t tx_div_rst            : 1;  /**< [  8:  8](R/W) Set to reset the counter in the analog transmitter clock
+                                                                 divider. */
+        uint64_t tx_idle               : 1;  /**< [  9:  9](R/W) Set to put the transmitter into idle (weak terminate). */
+        uint64_t tx_dcc_lowf           : 1;  /**< [ 10: 10](R/W) Set to assert the low-frequency control bit of the transmit duty cycle
+                                                                 correcter. Should be programmed as desired before sequencing the transmitter
+                                                                 reset state machine. Set to 1 for data rates below 4 Gbaud. */
+        uint64_t tx_go2deep_idle       : 1;  /**< [ 11: 11](R/W) Set to sequence the transmitter into deep idle. */
+        uint64_t tx_dcc_iboost         : 1;  /**< [ 12: 12](R/W) Set to assert the iboost control bit of the
+                                                                 transmit duty cycle correcter. Should be programmed as desired before
+                                                                 sequencing the transmitter reset state machine. Differs
+                                                                 from [TX_DCC_LOWF] in the data rate range that it is set at. */
+        uint64_t reserved_13_15        : 3;
+        uint64_t rst_pll_rst_sm        : 1;  /**< [ 16: 16](R/W) Set to reset the full lane reset state machine (PLL, TX,
+                                                                 and RX); set low to run the complete reset initialization sequence
+                                                                 starting with lane PLL initialization. */
+        uint64_t rst_tx_rst_sm         : 1;  /**< [ 17: 17](R/W) Set to reset the transmitter reset state machine; set low to
+                                                                 run the transmitter reset initialization state machine. */
+        uint64_t rst_rx_rst_sm         : 1;  /**< [ 18: 18](R/W) Set to reset the receiver reset state machine; set low to run
+                                                                 the receiver reset initialization state machine. */
+        uint64_t ln_reset_use_eye      : 1;  /**< [ 19: 19](R/W) Set to enable the eye (doute) data path reset and
+                                                                 power-up/power-down state machine to run at cold reset when
+                                                                 [RST_EYE_RST_SM] deasserts. After cold reset, assert or deassert
+                                                                 [LN_RESET_USE_EYE] to run the eye data path soft power-up or
+                                                                 power-down sequence. */
+        uint64_t rst_eye_rst_sm        : 1;  /**< [ 20: 20](R/W) Set to reset the eye data path reset and power-up/power-down
+                                                                 state machine; set low to allow the eye data path reset and soft
+                                                                 power-up/power-down state machine to run (if [LN_RESET_USE_EYE] is
+                                                                 asserted). */
+        uint64_t rst_adpt_rst_sm       : 1;  /**< [ 21: 21](R/W) Set to one to reset the adaptation reset state machine; set to zero to allow the
+                                                                 adaptation reset state machine to run. Leave set to one to run adaptation
+                                                                 entirely under SW control through the GSERN()_LANE()_RX_7_BCFG[*_RST]
+                                                                 controls. Write to zero at the same time or after the desired [DO_*_ADPT]
+                                                                 controls are enabled to allow the reset state machine to initiate
+                                                                 adaptation. Note - for pausing and restarting adaptation associated with PCIe
+                                                                 rate changes and all power state transitions, the reset state machine should
+                                                                 control adaptation. */
+        uint64_t do_vga_adpt           : 1;  /**< [ 22: 22](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_dfe_adpt           : 1;  /**< [ 23: 23](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_ctle_adpt          : 1;  /**< [ 24: 24](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_ctlez_adpt         : 1;  /**< [ 25: 25](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_ctlelte_adpt       : 1;  /**< [ 26: 26](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_afeos_adpt         : 1;  /**< [ 27: 27](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_blwc_adpt          : 1;  /**< [ 28: 28](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t do_prevga_gn_adpt     : 1;  /**< [ 29: 29](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+                                                                 when [RST_ADPT_RST_SM] is deasserted. */
+        uint64_t reserved_30_31        : 2;
+        uint64_t adpt_wait             : 18; /**< [ 49: 32](R/W) Wait time for adaptation to complete. Set at least as long as the maximum of:
+                                                                 * GSERN()_LANE()_RX_5_BCFG[VGA_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_5_BCFG[DFE_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_6_BCFG[CTLELTE_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_6_BCFG[CTLEZ_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_6_BCFG[CTLE_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_12_BCFG[AFEOS_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_19_BCFG[BLWC_TIMER_MAX].
+                                                                 * GSERN()_LANE()_RX_23_BCFG[PREVGA_GN_TIMER_MAX].
+
+                                                                 The adaptation state machine will move on when all enabled adaptation operations
+                                                                 complete within the [ADPT_WAIT] count. If they do not complete within the wait
+                                                                 time, the state machine will move on when the counter expires. Set to the
+                                                                 desired value before or at the same time as setting [RST_ADPT_RST_SM] to zero. */
+        uint64_t reserved_50_53        : 4;
+        uint64_t adpt_trigger_wait     : 4;  /**< [ 57: 54](R/W) Wait time for after triggering adaptation before checking adaptation status. Set
+                                                                 to a minimum of 3. Set to the desired value before or at the same time as
+                                                                 setting [RST_ADPT_RST_SM] to zero. */
+        uint64_t reserved_58_63        : 6;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rst2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst2_bcfg bdk_gsernx_lanex_rst2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST2_BCFG(unsigned long a, unsigned long b)
+{
+    /* Valid on CN9XXX only: GSERN module a in 0..7, lane b in 0..4;
+       otherwise report a fatal CSR lookup error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        const uint64_t module_off = 0x1000000ll * (a & 0x7);
+        const uint64_t lane_off = 0x10000ll * (b & 0x7);
+        return 0x87e090000320ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RST2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST2_BCFG(a,b) bdk_gsernx_lanex_rst2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST2_BCFG(a,b) "GSERNX_LANEX_RST2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt1_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 1
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_lanex_rst_cnt1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rst_cnt1_bcfg_s
+    {
+        /* NOTE(review): auto-generated hardware layout; the big-endian and
+           little-endian field lists below must describe the same bits. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_63           : 1;
+        uint64_t cal_en_wait           : 15; /**< [ 62: 48](R/W) Wait count in service clock cycles after calibration enable before deasserting
+                                                                 calibration enable to the PLL. Set this field to one less than the desired
+                                                                 number of cycles of delay. The service clock for the GSER PHY is connected to
+                                                                 the reference clock used by the primary chip clock PLLs. Typically service clock
+                                                                 is 100 MHz. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t pre_cal_en_wait       : 12; /**< [ 43: 32](R/W) Wait count in service clock cycles after deasserting pwdn before asserting
+                                                                 calibration enable to the PLL. Set this field to one less than the desired
+                                                                 number of cycles of delay. */
+        uint64_t reserved_25_31        : 7;
+        uint64_t pre_pll_sm_reset_wait : 9;  /**< [ 24: 16](R/W) Wait count in service clock cycles after deasserting pwdn before
+                                                                 asserting calibration enable to the PLL. Set this field to one less than the
+                                                                 desired number of cycles of delay. */
+        /* NOTE(review): [PRE_PLL_SM_RESET_WAIT]'s description above duplicates
+           [PRE_CAL_EN_WAIT]'s text; likely a copy/paste in the generated docs —
+           confirm the intended semantics against the hardware reference manual. */
+        uint64_t reserved_13_15        : 3;
+        uint64_t pre_pwup_wait         : 13; /**< [ 12:  0](R/W) Wait count in service clock cycles after initial trigger before deasserting
+                                                                 power down to the PLL. The actual delay will be three cycles more than set
+                                                                 here. The common block PLL state machine will typically wait 2^12 cycles before
+                                                                 triggering the lane PLL to start. This field allows for staggering startup of
+                                                                 different lanes by up to about 80us. */
+#else /* Word 0 - Little Endian */
+        uint64_t pre_pwup_wait         : 13; /**< [ 12:  0](R/W) Wait count in service clock cycles after initial trigger before deasserting
+                                                                 power down to the PLL. The actual delay will be three cycles more than set
+                                                                 here. The common block PLL state machine will typically wait 2^12 cycles before
+                                                                 triggering the lane PLL to start. This field allows for staggering startup of
+                                                                 different lanes by up to about 80us. */
+        uint64_t reserved_13_15        : 3;
+        uint64_t pre_pll_sm_reset_wait : 9;  /**< [ 24: 16](R/W) Wait count in service clock cycles after deasserting pwdn before
+                                                                 asserting calibration enable to the PLL. Set this field to one less than the
+                                                                 desired number of cycles of delay. */
+        uint64_t reserved_25_31        : 7;
+        uint64_t pre_cal_en_wait       : 12; /**< [ 43: 32](R/W) Wait count in service clock cycles after deasserting pwdn before asserting
+                                                                 calibration enable to the PLL. Set this field to one less than the desired
+                                                                 number of cycles of delay. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t cal_en_wait           : 15; /**< [ 62: 48](R/W) Wait count in service clock cycles after calibration enable before deasserting
+                                                                 calibration enable to the PLL. Set this field to one less than the desired
+                                                                 number of cycles of delay. The service clock for the GSER PHY is connected to
+                                                                 the reference clock used by the primary chip clock PLLs. Typically service clock
+                                                                 is 100 MHz. */
+        uint64_t reserved_63           : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rst_cnt1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst_cnt1_bcfg bdk_gsernx_lanex_rst_cnt1_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT1_BCFG(unsigned long a, unsigned long b)
+{
+    /* Valid on CN9XXX only: GSERN module a in 0..7, lane b in 0..4;
+       otherwise report a fatal CSR lookup error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        const uint64_t module_off = 0x1000000ll * (a & 0x7);
+        const uint64_t lane_off = 0x10000ll * (b & 0x7);
+        return 0x87e090000330ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RST_CNT1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) bdk_gsernx_lanex_rst_cnt1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) "GSERNX_LANEX_RST_CNT1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt2_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 2
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_lanex_rst_cnt2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rst_cnt2_bcfg_s
+    {
+        /* NOTE(review): auto-generated hardware layout; the big-endian and
+           little-endian field lists below must describe the same bits. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_57_63        : 7;
+        uint64_t rx_pre_qac_sel_wait   : 9;  /**< [ 56: 48](R/W) Wait count in service clock cycles after the deasserting reset to
+                                                                 the QAC filter logic before asserting select to the q and e pipe qac
+                                                                 filters. Set this field to one less than the desired number of
+                                                                 cycles of delay. */
+        uint64_t reserved_46_47        : 2;
+        uint64_t txrx_pre_pwup_wait    : 14; /**< [ 45: 32](R/W) Wait count in service clock cycles after the lane PLL exits reset before
+                                                                 deasserting power down signals to the transmitter and receiver. Set this field
+                                                                 to three less than the desired number of cycles of delay. */
+        uint64_t reserved_29_31        : 3;
+        uint64_t pre_pdiv_reset_wait   : 13; /**< [ 28: 16](R/W) Reserved.
+                                                                 Internal:
+                                                                 The lane PLL no longer has a postdivider
+                                                                 reset. (This was the wait count in service clock cycles after
+                                                                 deasserting reset before deasserting reset to the PLL
+                                                                 postdivider. Set this field to one less than the desired number of
+                                                                 cycles of delay.) */
+        uint64_t reserved_12_15        : 4;
+        uint64_t pre_pll_reset_wait    : 12; /**< [ 11:  0](R/W) Wait count in service clock cycles after calibration enable deasserts
+                                                                 before deasserting reset to the PLL. Set this field to one less
+                                                                 than the desired number of cycles of delay. */
+#else /* Word 0 - Little Endian */
+        uint64_t pre_pll_reset_wait    : 12; /**< [ 11:  0](R/W) Wait count in service clock cycles after calibration enable deasserts
+                                                                 before deasserting reset to the PLL. Set this field to one less
+                                                                 than the desired number of cycles of delay. */
+        uint64_t reserved_12_15        : 4;
+        uint64_t pre_pdiv_reset_wait   : 13; /**< [ 28: 16](R/W) Reserved.
+                                                                 Internal:
+                                                                 The lane PLL no longer has a postdivider
+                                                                 reset. (This was the wait count in service clock cycles after
+                                                                 deasserting reset before deasserting reset to the PLL
+                                                                 postdivider. Set this field to one less than the desired number of
+                                                                 cycles of delay.) */
+        uint64_t reserved_29_31        : 3;
+        uint64_t txrx_pre_pwup_wait    : 14; /**< [ 45: 32](R/W) Wait count in service clock cycles after the lane PLL exits reset before
+                                                                 deasserting power down signals to the transmitter and receiver. Set this field
+                                                                 to three less than the desired number of cycles of delay. */
+        uint64_t reserved_46_47        : 2;
+        uint64_t rx_pre_qac_sel_wait   : 9;  /**< [ 56: 48](R/W) Wait count in service clock cycles after the deasserting reset to
+                                                                 the QAC filter logic before asserting select to the q and e pipe qac
+                                                                 filters. Set this field to one less than the desired number of
+                                                                 cycles of delay. */
+        uint64_t reserved_57_63        : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rst_cnt2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst_cnt2_bcfg bdk_gsernx_lanex_rst_cnt2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT2_BCFG(unsigned long a, unsigned long b)
+{
+    /* Valid on CN9XXX only: GSERN module a in 0..7, lane b in 0..4;
+       otherwise report a fatal CSR lookup error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        const uint64_t module_off = 0x1000000ll * (a & 0x7);
+        const uint64_t lane_off = 0x10000ll * (b & 0x7);
+        return 0x87e090000340ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RST_CNT2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) bdk_gsernx_lanex_rst_cnt2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) "GSERNX_LANEX_RST_CNT2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt3_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 3
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_lanex_rst_cnt3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rst_cnt3_bcfg_s
+    {
+        /* NOTE(review): auto-generated hardware layout; the big-endian and
+           little-endian field lists below must describe the same bits. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_59_63        : 5;
+        uint64_t rx_pre_run_wait       : 11; /**< [ 58: 48](R/W) Wait count in service clock cycles after deasserting reset to the
+                                                                 baseline wander correction logic before indicating that the receiver
+                                                                 is ready. Set this field to one less than the desired number of
+                                                                 cycles of delay. */
+        uint64_t reserved_41_47        : 7;
+        uint64_t pre_rst_iq_wait       : 9;  /**< [ 40: 32](R/W) Wait count in service clock cycles after deasserting reset to the
+                                                                 receiver clock divider before deasserting reset to the i, q, and e
+                                                                 pipe interpolators. Set this field to one less than the desired
+                                                                 number of cycles of delay. */
+        uint64_t reserved_25_31        : 7;
+        uint64_t pre_tx_div_rst_wait   : 9;  /**< [ 24: 16](R/W) Wait count in service clock cycles after deasserting reset to the duty cycle
+                                                                 correctors in the transmitter before deasserting reset to the transmitter clock
+                                                                 divider. Set this field to one less than the desired number of cycles of
+                                                                 delay. */
+        uint64_t reserved_9_15         : 7;
+        uint64_t pre_en_cdrfsm_wait    : 9;  /**< [  8:  0](R/W) Wait count in service clock cycles after asserting power up to the
+                                                                 custom receiver before enabling the CDR finite state machine. Set
+                                                                 this field to one less than the desired number of cycles of delay. */
+#else /* Word 0 - Little Endian */
+        uint64_t pre_en_cdrfsm_wait    : 9;  /**< [  8:  0](R/W) Wait count in service clock cycles after asserting power up to the
+                                                                 custom receiver before enabling the CDR finite state machine. Set
+                                                                 this field to one less than the desired number of cycles of delay. */
+        uint64_t reserved_9_15         : 7;
+        uint64_t pre_tx_div_rst_wait   : 9;  /**< [ 24: 16](R/W) Wait count in service clock cycles after deasserting reset to the duty cycle
+                                                                 correctors in the transmitter before deasserting reset to the transmitter clock
+                                                                 divider. Set this field to one less than the desired number of cycles of
+                                                                 delay. */
+        uint64_t reserved_25_31        : 7;
+        uint64_t pre_rst_iq_wait       : 9;  /**< [ 40: 32](R/W) Wait count in service clock cycles after deasserting reset to the
+                                                                 receiver clock divider before deasserting reset to the i, q, and e
+                                                                 pipe interpolators. Set this field to one less than the desired
+                                                                 number of cycles of delay. */
+        uint64_t reserved_41_47        : 7;
+        uint64_t rx_pre_run_wait       : 11; /**< [ 58: 48](R/W) Wait count in service clock cycles after deasserting reset to the
+                                                                 baseline wander correction logic before indicating that the receiver
+                                                                 is ready. Set this field to one less than the desired number of
+                                                                 cycles of delay. */
+        uint64_t reserved_59_63        : 5;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rst_cnt3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst_cnt3_bcfg bdk_gsernx_lanex_rst_cnt3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT3_BCFG(unsigned long a, unsigned long b)
+{
+    /* Valid on CN9XXX only: GSERN module a in 0..7, lane b in 0..4;
+       otherwise report a fatal CSR lookup error. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4)) {
+        const uint64_t module_off = 0x1000000ll * (a & 0x7);
+        const uint64_t lane_off = 0x10000ll * (b & 0x7);
+        return 0x87e090000350ll + module_off + lane_off;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RST_CNT3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) bdk_gsernx_lanex_rst_cnt3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) "GSERNX_LANEX_RST_CNT3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt4_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 4
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_lanex_rst_cnt4_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rst_cnt4_bcfg_s
+    {
+        /* NOTE(review): auto-generated hardware layout; the big-endian and
+           little-endian field lists below must describe the same bits. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_57_63        : 7;
+        uint64_t svc_clk_freq          : 1;  /**< [ 56: 56](R/W) For diagnostic use only.
+                                                                 Internal:
+                                                                 This bit reserved for future enhancements. The RTL to use it is not coded. Freq selection
+                                                                 for service clock as used in the reset state machine. 0 = 100 MHz. 1 = 156.25 MHz. This
+                                                                 scales only the wait counts not set via CSR registers. */
+        uint64_t reserved_50_55        : 6;
+        uint64_t blwc_reset_wait       : 18; /**< [ 49: 32](R/W) Wait count in service clock cycles after deasserting reset to the
+                                                                 CDR FSM before deasserting reset to the baseline wander correction
+                                                                 circuit (BLWC). The power-up document specifies this as 16 service
+                                                                 clock cycles, but verbal communication says that's only correct for
+                                                                 cases of small frequency offset between the lane PLL and the
+                                                                 received data stream clock, i.e., it doesn't apply for SSC (except
+                                                                 PCIe). Since the actual requirement is not specified, this field
+                                                                 allows for the full range of the counter in the receiver reset state
+                                                                 machine. */
+        uint64_t reserved_20_31        : 12;
+        uint64_t dfe_afe_oscal_wait    : 20; /**< [ 19:  0](R/W) Maximum wait count in service clock cycles after triggering the dfe
+                                                                 and afe offset calibration sequences before deasserting
+                                                                 reset_voter. Normally the receiver reset state machine will move on
+                                                                 when DFE and AFE offset calibration is complete. This is a time-out
+                                                                 parameter in case the offset calibration state machines do not
+                                                                 complete. Set this field to one less than the desired number of
+                                                                 cycles of delay. */
+#else /* Word 0 - Little Endian */
+        uint64_t dfe_afe_oscal_wait    : 20; /**< [ 19:  0](R/W) Maximum wait count in service clock cycles after triggering the dfe
+                                                                 and afe offset calibration sequences before deasserting
+                                                                 reset_voter. Normally the receiver reset state machine will move on
+                                                                 when DFE and AFE offset calibration is complete. This is a time-out
+                                                                 parameter in case the offset calibration state machines do not
+                                                                 complete. Set this field to one less than the desired number of
+                                                                 cycles of delay. */
+        uint64_t reserved_20_31        : 12;
+        uint64_t blwc_reset_wait       : 18; /**< [ 49: 32](R/W) Wait count in service clock cycles after deasserting reset to the
+                                                                 CDR FSM before deasserting reset to the baseline wander correction
+                                                                 circuit (BLWC). The power-up document specifies this as 16 service
+                                                                 clock cycles, but verbal communication says that's only correct for
+                                                                 cases of small frequency offset between the lane PLL and the
+                                                                 received data stream clock, i.e., it doesn't apply for SSC (except
+                                                                 PCIe). Since the actual requirement is not specified, this field
+                                                                 allows for the full range of the counter in the receiver reset state
+                                                                 machine. */
+        uint64_t reserved_50_55        : 6;
+        uint64_t svc_clk_freq          : 1;  /**< [ 56: 56](R/W) For diagnostic use only.
+                                                                 Internal:
+                                                                 This bit reserved for future enhancements. The RTL to use it is not coded. Freq selection
+                                                                 for service clock as used in the reset state machine. 0 = 100 MHz. 1 = 156.25 MHz. This
+                                                                 scales only the wait counts not set via CSR registers. */
+        uint64_t reserved_57_63        : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rst_cnt4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst_cnt4_bcfg bdk_gsernx_lanex_rst_cnt4_bcfg_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RST_CNT4_BCFG. Valid on
+   CN9XXX for a<=7 (QLM) and b<=4 (lane); any other combination reports a
+   fatal CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT4_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000360ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RST_CNT4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) bdk_gsernx_lanex_rst_cnt4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) "GSERNX_LANEX_RST_CNT4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rst_cnt5_bcfg
+ *
+ * GSER Lane Reset State Machine Delay Count Register 5
+ * Wait counts for the lane reset state machines. All fields must be set
+ * before bringing the lane out of reset.
+ */
+union bdk_gsernx_lanex_rst_cnt5_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rst_cnt5_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63        : 31;
+        uint64_t idle_exit_wait_en     : 1;  /**< [ 32: 32](R/W) Enable use of [IDLE_EXIT_WAIT] as a limit on the wait time for the receiver
+                                                                 electrical idle indicator to deassert after resetting the voter. When
+                                                                 [IDLE_EXIT_WAIT_EN] is low, the state machine will wait forever for the
+                                                                 electrical idle signal to deassert. Note that the reset state machine will not
+                                                                 see idle deassert until after the first idle offset calibration has completed
+                                                                 after exiting reset. */
+        uint64_t reserved_28_31        : 4;
+        uint64_t idle_exit_wait        : 28; /**< [ 27:  0](R/W) Maximum wait count in service clock cycles for the receiver electrical idle
+                                                                 indicator to deassert after resetting the voter. If the receiver electrical idle
+                                                                 indication remains asserted, the reset state machine will move on after this
+                                                                 count expires. */
+#else /* Word 0 - Little Endian */
+        uint64_t idle_exit_wait        : 28; /**< [ 27:  0](R/W) Maximum wait count in service clock cycles for the receiver electrical idle
+                                                                 indicator to deassert after resetting the voter. If the receiver electrical idle
+                                                                 indication remains asserted, the reset state machine will move on after this
+                                                                 count expires. */
+        uint64_t reserved_28_31        : 4;
+        uint64_t idle_exit_wait_en     : 1;  /**< [ 32: 32](R/W) Enable use of [IDLE_EXIT_WAIT] as a limit on the wait time for the receiver
+                                                                 electrical idle indicator to deassert after resetting the voter. When
+                                                                 [IDLE_EXIT_WAIT_EN] is low, the state machine will wait forever for the
+                                                                 electrical idle signal to deassert. Note that the reset state machine will not
+                                                                 see idle deassert until after the first idle offset calibration has completed
+                                                                 after exiting reset. */
+        uint64_t reserved_33_63        : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rst_cnt5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rst_cnt5_bcfg bdk_gsernx_lanex_rst_cnt5_bcfg_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RST_CNT5_BCFG. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RST_CNT5_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000370ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RST_CNT5_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) bdk_gsernx_lanex_rst_cnt5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) "GSERNX_LANEX_RST_CNT5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RST_CNT5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rstclkmsk_bcfg
+ *
+ * GSER Lane Reset State Machine Transmit Clock Alignment Register
+ * Controls for transmit alignment of lanes within a link requiring aligned transmit
+ * data.
+ */
+union bdk_gsernx_lanex_rstclkmsk_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rstclkmsk_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_44_63        : 20;
+        uint64_t txdivrst_algn_qlm_mask : 4; /**< [ 43: 40](R/W) Selection control for which QLMs in this QLM's link group to align in timing the
+                                                                 deassertion of reset to this lane's transmitter's clock divider.
+                                                                 \<0\> = Wait for QLM 0.
+                                                                 \<1\> = Wait for QLM 1.
+                                                                 \<2\> = Wait for QLM 2.
+                                                                 \<3\> = Wait for QLM 3.
+
+                                                                 The bit corresponding to the current QLM is ignored. */
+        uint64_t reserved_36_39        : 4;
+        uint64_t txdivrst_algn_lane_mask : 4;/**< [ 35: 32](R/W) Selection control for which lanes in the current QLM to align in timing the
+                                                                 deassertion of reset to this lane's transmitter's clock divider.
+                                                                 \<0\> = Wait for lane 0.
+                                                                 \<1\> = Wait for lane 1.
+                                                                 \<2\> = Wait for lane 2.
+                                                                 \<3\> = Wait for lane 3.
+
+                                                                 The bit corresponding to the current Lane is ignored. */
+        uint64_t reserved_21_31        : 11;
+        uint64_t txdivrst_algn_wait_en : 1;  /**< [ 20: 20](R/W) Enable use of [TXDIVRST_ALGN_WAIT] as a time out waiting for other lanes to be
+                                                                 ready to start their divided transmit clocks. With this bit cleared the lane
+                                                                 will wait indefinitely. */
+        uint64_t txdivrst_algn_wait    : 20; /**< [ 19:  0](R/W) Maximum wait count in service clock cycles, after this lane is ready to start
+                                                                 its divided transmit clock, for other lanes in the link to be ready to start
+                                                                 their divided transmit clocks. This is the maximum wait time, after which the
+                                                                 state machine will move on, whether the other lanes have indicated ready or not. */
+#else /* Word 0 - Little Endian */
+        uint64_t txdivrst_algn_wait    : 20; /**< [ 19:  0](R/W) Maximum wait count in service clock cycles, after this lane is ready to start
+                                                                 its divided transmit clock, for other lanes in the link to be ready to start
+                                                                 their divided transmit clocks. This is the maximum wait time, after which the
+                                                                 state machine will move on, whether the other lanes have indicated ready or not. */
+        uint64_t txdivrst_algn_wait_en : 1;  /**< [ 20: 20](R/W) Enable use of [TXDIVRST_ALGN_WAIT] as a time out waiting for other lanes to be
+                                                                 ready to start their divided transmit clocks. With this bit cleared the lane
+                                                                 will wait indefinitely. */
+        uint64_t reserved_21_31        : 11;
+        uint64_t txdivrst_algn_lane_mask : 4;/**< [ 35: 32](R/W) Selection control for which lanes in the current QLM to align in timing the
+                                                                 deassertion of reset to this lane's transmitter's clock divider.
+                                                                 \<0\> = Wait for lane 0.
+                                                                 \<1\> = Wait for lane 1.
+                                                                 \<2\> = Wait for lane 2.
+                                                                 \<3\> = Wait for lane 3.
+
+                                                                 The bit corresponding to the current Lane is ignored. */
+        uint64_t reserved_36_39        : 4;
+        uint64_t txdivrst_algn_qlm_mask : 4; /**< [ 43: 40](R/W) Selection control for which QLMs in this QLM's link group to align in timing the
+                                                                 deassertion of reset to this lane's transmitter's clock divider.
+                                                                 \<0\> = Wait for QLM 0.
+                                                                 \<1\> = Wait for QLM 1.
+                                                                 \<2\> = Wait for QLM 2.
+                                                                 \<3\> = Wait for QLM 3.
+
+                                                                 The bit corresponding to the current QLM is ignored. */
+        uint64_t reserved_44_63        : 20;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rstclkmsk_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rstclkmsk_bcfg bdk_gsernx_lanex_rstclkmsk_bcfg_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RSTCLKMSK_BCFG. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000470ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RSTCLKMSK_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) bdk_gsernx_lanex_rstclkmsk_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) "GSERNX_LANEX_RSTCLKMSK_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RSTCLKMSK_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_0_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 0
+ * Register controls for postcursor overrides from c2 through c9. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
+union bdk_gsernx_lanex_rx_0_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_0_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_63           : 1;
+        uint64_t c9_ovrd_en            : 1;  /**< [ 62: 62](R/W) Enable use of [C9_OVRD]. */
+        uint64_t c9_ovrd               : 6;  /**< [ 61: 56](R/W) 9th postcursor override value. */
+        uint64_t reserved_55           : 1;
+        uint64_t c8_ovrd_en            : 1;  /**< [ 54: 54](R/W) Enable use of [C8_OVRD]. */
+        uint64_t c8_ovrd               : 6;  /**< [ 53: 48](R/W) 8th postcursor override value. */
+        uint64_t reserved_47           : 1;
+        uint64_t c7_ovrd_en            : 1;  /**< [ 46: 46](R/W) Enable use of [C7_OVRD]. */
+        uint64_t c7_ovrd               : 6;  /**< [ 45: 40](R/W) 7th postcursor override value. */
+        uint64_t reserved_39           : 1;
+        uint64_t c6_ovrd_en            : 1;  /**< [ 38: 38](R/W) Enable use of [C6_OVRD]. */
+        uint64_t c6_ovrd               : 6;  /**< [ 37: 32](R/W) 6th postcursor override value. */
+        uint64_t reserved_31           : 1;
+        uint64_t c5_ovrd_en            : 1;  /**< [ 30: 30](R/W) Enable use of [C5_OVRD]. */
+        uint64_t c5_ovrd               : 6;  /**< [ 29: 24](R/W) 5th postcursor override value. */
+        uint64_t reserved_23           : 1;
+        uint64_t c4_ovrd_en            : 1;  /**< [ 22: 22](R/W) Enable use of [C4_OVRD]. */
+        uint64_t c4_ovrd               : 6;  /**< [ 21: 16](R/W) 4th postcursor value override. */
+        uint64_t reserved_15           : 1;
+        uint64_t c3_ovrd_en            : 1;  /**< [ 14: 14](R/W) Enable use of [C3_OVRD]. */
+        uint64_t c3_ovrd               : 6;  /**< [ 13:  8](R/W) 3rd postcursor override value. */
+        uint64_t reserved_7            : 1;
+        uint64_t c2_ovrd_en            : 1;  /**< [  6:  6](R/W) Enable use of [C2_OVRD]. */
+        uint64_t c2_ovrd               : 6;  /**< [  5:  0](R/W) Second postcursor override value. */
+#else /* Word 0 - Little Endian */
+        uint64_t c2_ovrd               : 6;  /**< [  5:  0](R/W) Second postcursor override value. */
+        uint64_t c2_ovrd_en            : 1;  /**< [  6:  6](R/W) Enable use of [C2_OVRD]. */
+        uint64_t reserved_7            : 1;
+        uint64_t c3_ovrd               : 6;  /**< [ 13:  8](R/W) 3rd postcursor override value. */
+        uint64_t c3_ovrd_en            : 1;  /**< [ 14: 14](R/W) Enable use of [C3_OVRD]. */
+        uint64_t reserved_15           : 1;
+        uint64_t c4_ovrd               : 6;  /**< [ 21: 16](R/W) 4th postcursor value override. */
+        uint64_t c4_ovrd_en            : 1;  /**< [ 22: 22](R/W) Enable use of [C4_OVRD]. */
+        uint64_t reserved_23           : 1;
+        uint64_t c5_ovrd               : 6;  /**< [ 29: 24](R/W) 5th postcursor override value. */
+        uint64_t c5_ovrd_en            : 1;  /**< [ 30: 30](R/W) Enable use of [C5_OVRD]. */
+        uint64_t reserved_31           : 1;
+        uint64_t c6_ovrd               : 6;  /**< [ 37: 32](R/W) 6th postcursor override value. */
+        uint64_t c6_ovrd_en            : 1;  /**< [ 38: 38](R/W) Enable use of [C6_OVRD]. */
+        uint64_t reserved_39           : 1;
+        uint64_t c7_ovrd               : 6;  /**< [ 45: 40](R/W) 7th postcursor override value. */
+        uint64_t c7_ovrd_en            : 1;  /**< [ 46: 46](R/W) Enable use of [C7_OVRD]. */
+        uint64_t reserved_47           : 1;
+        uint64_t c8_ovrd               : 6;  /**< [ 53: 48](R/W) 8th postcursor override value. */
+        uint64_t c8_ovrd_en            : 1;  /**< [ 54: 54](R/W) Enable use of [C8_OVRD]. */
+        uint64_t reserved_55           : 1;
+        uint64_t c9_ovrd               : 6;  /**< [ 61: 56](R/W) 9th postcursor override value. */
+        uint64_t c9_ovrd_en            : 1;  /**< [ 62: 62](R/W) Enable use of [C9_OVRD]. */
+        uint64_t reserved_63           : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_0_bcfg bdk_gsernx_lanex_rx_0_bcfg_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RX_0_BCFG. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_0_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000c60ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_0_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) bdk_gsernx_lanex_rx_0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) "GSERNX_LANEX_RX_0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_0_bsts
+ *
+ * GSER Lane RX Base Status Register 0
+ * Status registers for postcursor values (either calibration results or
+ * overrides) from c2 through c9. Values in this register are only valid if
+ * GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted (indicating DFE adaptation has
+ * completed), or if the corresponding CSR override enable is asserted.
+ */
+union bdk_gsernx_lanex_rx_0_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_0_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t c9                    : 6;  /**< [ 61: 56](RO/H) 9th postcursor value. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t c8                    : 6;  /**< [ 53: 48](RO/H) 8th postcursor value. */
+        uint64_t reserved_46_47        : 2;
+        uint64_t c7                    : 6;  /**< [ 45: 40](RO/H) 7th postcursor value. */
+        uint64_t reserved_38_39        : 2;
+        uint64_t c6                    : 6;  /**< [ 37: 32](RO/H) 6th postcursor value. */
+        uint64_t reserved_30_31        : 2;
+        uint64_t c5                    : 6;  /**< [ 29: 24](RO/H) 5th postcursor value. */
+        uint64_t reserved_22_23        : 2;
+        uint64_t c4                    : 6;  /**< [ 21: 16](RO/H) 4th postcursor value. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t c3                    : 6;  /**< [ 13:  8](RO/H) 3rd postcursor value. */
+        uint64_t reserved_6_7          : 2;
+        uint64_t c2                    : 6;  /**< [  5:  0](RO/H) 2nd postcursor value. */
+#else /* Word 0 - Little Endian */
+        uint64_t c2                    : 6;  /**< [  5:  0](RO/H) 2nd postcursor value. */
+        uint64_t reserved_6_7          : 2;
+        uint64_t c3                    : 6;  /**< [ 13:  8](RO/H) 3rd postcursor value. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t c4                    : 6;  /**< [ 21: 16](RO/H) 4th postcursor value. */
+        uint64_t reserved_22_23        : 2;
+        uint64_t c5                    : 6;  /**< [ 29: 24](RO/H) 5th postcursor value. */
+        uint64_t reserved_30_31        : 2;
+        uint64_t c6                    : 6;  /**< [ 37: 32](RO/H) 6th postcursor value. */
+        uint64_t reserved_38_39        : 2;
+        uint64_t c7                    : 6;  /**< [ 45: 40](RO/H) 7th postcursor value. */
+        uint64_t reserved_46_47        : 2;
+        uint64_t c8                    : 6;  /**< [ 53: 48](RO/H) 8th postcursor value. */
+        uint64_t reserved_54_55        : 2;
+        uint64_t c9                    : 6;  /**< [ 61: 56](RO/H) 9th postcursor value. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_0_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_0_bsts bdk_gsernx_lanex_rx_0_bsts_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RX_0_BSTS. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_0_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_0_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001650ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_0_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) bdk_gsernx_lanex_rx_0_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) "GSERNX_LANEX_RX_0_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_0_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_10_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 10
+ * Configuration registers for LMS adaptation. Deadband increment settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_10_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_10_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_60_63        : 4;
+        uint64_t ctlelte_deadband_inc  : 12; /**< [ 59: 48](R/W) CTLELTE adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t ctlez_deadband_inc    : 12; /**< [ 47: 36](R/W) CTLEZ adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t ctle_deadband_inc     : 12; /**< [ 35: 24](R/W) CTLE adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t dfe_deadband_inc      : 12; /**< [ 23: 12](R/W) Coeff adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t vga_deadband_inc      : 12; /**< [ 11:  0](R/W) VGA adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+#else /* Word 0 - Little Endian */
+        uint64_t vga_deadband_inc      : 12; /**< [ 11:  0](R/W) VGA adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t dfe_deadband_inc      : 12; /**< [ 23: 12](R/W) Coeff adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t ctle_deadband_inc     : 12; /**< [ 35: 24](R/W) CTLE adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t ctlez_deadband_inc    : 12; /**< [ 47: 36](R/W) CTLEZ adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t ctlelte_deadband_inc  : 12; /**< [ 59: 48](R/W) CTLELTE adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t reserved_60_63        : 4;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_10_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_10_bcfg bdk_gsernx_lanex_rx_10_bcfg_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RX_10_BCFG. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_10_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_10_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000d00ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_10_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) bdk_gsernx_lanex_rx_10_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) "GSERNX_LANEX_RX_10_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_10_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_10_bsts
+ *
+ * GSER Lane RX Base Status Register 10
+ * Status registers for BLWC LMS adaptation. Current BLWC Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_10_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_10_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t blwc_subrate_now      : 16; /**< [ 63: 48](RO/H) BLWC subrate_now counter value. Only valid when
+                                                                 GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t blwc_upv_count        : 16; /**< [ 43: 28](RO/H) BLWC up-vote counter value. Only valid when
+                                                                 GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+        uint64_t blwc_adapt_status     : 1;  /**< [ 27: 27](RO/H) BLWC adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t blwc_adapt_count      : 15; /**< [ 26: 12](RO/H) BLWC adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+                                                                 Only valid when GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+        uint64_t blwc_deadband_now     : 12; /**< [ 11:  0](RO/H) Current 12-bit integer value of BLWC adaptation deadband
+                                                                 setting. Note that the 8 fraction bits of the accumulator are not
+                                                                 reported. Only valid when GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t blwc_deadband_now     : 12; /**< [ 11:  0](RO/H) Current 12-bit integer value of BLWC adaptation deadband
+                                                                 setting. Note that the 8 fraction bits of the accumulator are not
+                                                                 reported. Only valid when GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+        uint64_t blwc_adapt_count      : 15; /**< [ 26: 12](RO/H) BLWC adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+                                                                 Only valid when GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+        uint64_t blwc_adapt_status     : 1;  /**< [ 27: 27](RO/H) BLWC adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t blwc_upv_count        : 16; /**< [ 43: 28](RO/H) BLWC up-vote counter value. Only valid when
+                                                                 GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t blwc_subrate_now      : 16; /**< [ 63: 48](RO/H) BLWC subrate_now counter value. Only valid when
+                                                                 GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS] is clear. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_10_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_10_bsts bdk_gsernx_lanex_rx_10_bsts_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RX_10_BSTS. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_10_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_10_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900016f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_10_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) bdk_gsernx_lanex_rx_10_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) "GSERNX_LANEX_RX_10_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_10_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_11_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 11
+ * Configuration registers for Offset Compensation.
+ */
+union bdk_gsernx_lanex_rx_11_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_11_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63        : 48;
+        uint64_t afe_oscomp_delay      : 8;  /**< [ 15:  8](R/W) Start delay for the AFE offset compensation, after DFE offset
+                                                                 compensation completes. */
+        uint64_t dfe_oscomp_delay      : 8;  /**< [  7:  0](R/W) Start delay for the DFE offset compensation. */
+#else /* Word 0 - Little Endian */
+        uint64_t dfe_oscomp_delay      : 8;  /**< [  7:  0](R/W) Start delay for the DFE offset compensation. */
+        uint64_t afe_oscomp_delay      : 8;  /**< [ 15:  8](R/W) Start delay for the AFE offset compensation, after DFE offset
+                                                                 compensation completes. */
+        uint64_t reserved_16_63        : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_11_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_11_bcfg bdk_gsernx_lanex_rx_11_bcfg_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RX_11_BCFG. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_11_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_11_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000d10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_11_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) bdk_gsernx_lanex_rx_11_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) "GSERNX_LANEX_RX_11_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_11_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_11_bsts
+ *
+ * GSER Lane RX Base Status Register 11
+ * Status registers for PREVGA_GN LMS adaptation. Current PREVGA_GN Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_11_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_11_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t prevga_gn_subrate_now : 16; /**< [ 63: 48](RO/H) PREVGA_GN subrate_now counter value. Only valid when
+                                                                 GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t prevga_gn_upv_count   : 16; /**< [ 43: 28](RO/H) PREVGA_GN up-vote counter value. Only valid when
+                                                                 GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+        uint64_t prevga_gn_adapt_status : 1; /**< [ 27: 27](RO/H) PREVGA_GN adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t prevga_gn_adapt_count : 15; /**< [ 26: 12](RO/H) PREVGA_GN adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+                                                                 Only valid when GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+        uint64_t prevga_gn_deadband_now : 12;/**< [ 11:  0](RO/H) Current 12-bit integer value of PREVGA_GN adaptation deadband
+                                                                 setting. Note that the 8 fraction bits of the accumulator are not
+                                                                 reported. Only valid when GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t prevga_gn_deadband_now : 12;/**< [ 11:  0](RO/H) Current 12-bit integer value of PREVGA_GN adaptation deadband
+                                                                 setting. Note that the 8 fraction bits of the accumulator are not
+                                                                 reported. Only valid when GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+        uint64_t prevga_gn_adapt_count : 15; /**< [ 26: 12](RO/H) PREVGA_GN adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+                                                                 Only valid when GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+        uint64_t prevga_gn_adapt_status : 1; /**< [ 27: 27](RO/H) PREVGA_GN adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t prevga_gn_upv_count   : 16; /**< [ 43: 28](RO/H) PREVGA_GN up-vote counter value. Only valid when
+                                                                 GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+        uint64_t reserved_44_47        : 4;
+        uint64_t prevga_gn_subrate_now : 16; /**< [ 63: 48](RO/H) PREVGA_GN subrate_now counter value. Only valid when
+                                                                 GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is clear. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_11_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_11_bsts bdk_gsernx_lanex_rx_11_bsts_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RX_11_BSTS. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_11_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_11_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001700ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_11_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) bdk_gsernx_lanex_rx_11_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) "GSERNX_LANEX_RX_11_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_11_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_12_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 12
+ * Configuration registers for AFE Offset Adaptation.
+ */
+union bdk_gsernx_lanex_rx_12_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_12_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_52_63        : 12;
+        uint64_t afeos_leak_sgn        : 1;  /**< [ 51: 51](R/W) AFEOS leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t afeos_deadband        : 12; /**< [ 50: 39](R/W) AFE OS adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t afeos_deadband_inc    : 12; /**< [ 38: 27](R/W) AFE OS adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t afeos_leak            : 3;  /**< [ 26: 24](R/W) AFEOS adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t reserved_19_23        : 5;
+        uint64_t afeos_mu              : 3;  /**< [ 18: 16](R/W) AFEOS adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t reserved_15           : 1;
+        uint64_t afeos_timer_max       : 15; /**< [ 14:  0](R/W) AFEOS adaptation timer maximum count value.
+                                                                 15-bit field, maximum value 0x7FFF. */
+#else /* Word 0 - Little Endian */
+        uint64_t afeos_timer_max       : 15; /**< [ 14:  0](R/W) AFEOS adaptation timer maximum count value.
+                                                                 15-bit field, maximum value 0x7FFF. */
+        uint64_t reserved_15           : 1;
+        uint64_t afeos_mu              : 3;  /**< [ 18: 16](R/W) AFEOS adaptation mu parameter setting.
+                                                                 0x0 = 1/16.
+                                                                 0x1 = 1/8.
+                                                                 0x2 = 1/4.
+                                                                 0x3 = 1/2.
+                                                                 0x4 = 1.
+                                                                 0x5 = 2.
+                                                                 0x6 = 4.
+                                                                 0x7 = 8. */
+        uint64_t reserved_19_23        : 5;
+        uint64_t afeos_leak            : 3;  /**< [ 26: 24](R/W) AFEOS adaptation leak parameter setting.
+                                                                 0x0 = 1/128.
+                                                                 0x1 = 1/64.
+                                                                 0x2 = 1/32.
+                                                                 0x3 = 1/16.
+                                                                 0x4 = 1/8.
+                                                                 0x5 = 1/4.
+                                                                 0x6 = 1/2.
+                                                                 0x7 = Disabled. */
+        uint64_t afeos_deadband_inc    : 12; /**< [ 38: 27](R/W) AFE OS adaptation deadband increment setting.
+                                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+        uint64_t afeos_deadband        : 12; /**< [ 50: 39](R/W) AFE OS adaptation deadband settings.
+                                                                 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+        uint64_t afeos_leak_sgn        : 1;  /**< [ 51: 51](R/W) AFEOS leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t reserved_52_63        : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_12_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_12_bcfg bdk_gsernx_lanex_rx_12_bcfg_t;
+
+/* Compute the RSL CSR address of GSERN(a)_LANE(b)_RX_12_BCFG. Valid on
+   CN9XXX for a<=7 and b<=4; otherwise reports a fatal CSR lookup error. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_12_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_12_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000d20ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_12_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) bdk_gsernx_lanex_rx_12_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) "GSERNX_LANEX_RX_12_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_12_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_13_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 13
+ * Configuration registers for AFE LMS adaptation
+ * Adaptation controls for Subrate parameters.
+ */
+union bdk_gsernx_lanex_rx_13_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_13_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t afeos_subrate_scale : 3; /**< [ 34: 32](R/W) AFE subrate now counter scaling value for comparison against the up vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t afeos_subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the starting value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t afeos_subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t afeos_subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t afeos_subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the starting value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t afeos_subrate_scale : 3; /**< [ 34: 32](R/W) AFE subrate now counter scaling value for comparison against the up vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_13_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_13_bcfg bdk_gsernx_lanex_rx_13_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_13_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_13_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_13_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_13_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) bdk_gsernx_lanex_rx_13_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) "GSERNX_LANEX_RX_13_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_13_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_14_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 14
+ * This register configures LMS adaptation.
+ */
+union bdk_gsernx_lanex_rx_14_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_14_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_44_63 : 20;
+ uint64_t c6_c15_limit_hi : 6; /**< [ 43: 38](R/W) C6 to C15 postcursor limit high. */
+ uint64_t c6_c15_limit_lo : 6; /**< [ 37: 32](R/W) C6 to C15 postcursor limit low. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t dfe_c1_deadband : 12; /**< [ 23: 12](R/W) DFE C1 adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t dfe_c1_deadband_inc : 12; /**< [ 11: 0](R/W) DFE C1 adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_c1_deadband_inc : 12; /**< [ 11: 0](R/W) DFE C1 adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t dfe_c1_deadband : 12; /**< [ 23: 12](R/W) DFE C1 adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t c6_c15_limit_lo : 6; /**< [ 37: 32](R/W) C6 to C15 postcursor limit low. */
+ uint64_t c6_c15_limit_hi : 6; /**< [ 43: 38](R/W) C6 to C15 postcursor limit high. */
+ uint64_t reserved_44_63 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_14_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_14_bcfg bdk_gsernx_lanex_rx_14_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_14_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_14_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_14_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d40ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_14_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) bdk_gsernx_lanex_rx_14_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) "GSERNX_LANEX_RX_14_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_14_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_15_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 15
+ * This register configures LMS adaptation.
+ */
+union bdk_gsernx_lanex_rx_15_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_15_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_62_63 : 2;
+ uint64_t c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+#else /* Word 0 - Little Endian */
+ uint64_t c1_limit_lo : 6; /**< [ 5: 0](R/W) C1 postcursor limit low. */
+ uint64_t c2_limit_lo : 6; /**< [ 11: 6](R/W) C2 postcursor limit low. */
+ uint64_t c3_limit_lo : 6; /**< [ 17: 12](R/W) C3 postcursor limit low. */
+ uint64_t c4_limit_lo : 6; /**< [ 23: 18](R/W) C4 postcursor limit low. */
+ uint64_t c5_limit_lo : 6; /**< [ 29: 24](R/W) C5 postcursor limit low. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t c1_limit_hi : 6; /**< [ 37: 32](R/W) C1 postcursor limit high. */
+ uint64_t c2_limit_hi : 6; /**< [ 43: 38](R/W) C2 postcursor limit high. */
+ uint64_t c3_limit_hi : 6; /**< [ 49: 44](R/W) C3 postcursor limit high. */
+ uint64_t c4_limit_hi : 6; /**< [ 55: 50](R/W) C4 postcursor limit high. */
+ uint64_t c5_limit_hi : 6; /**< [ 61: 56](R/W) C5 postcursor limit high. */
+ uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_15_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_15_bcfg bdk_gsernx_lanex_rx_15_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_15_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_15_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_15_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d50ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_15_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) bdk_gsernx_lanex_rx_15_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) "GSERNX_LANEX_RX_15_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_15_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_16_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 16
+ * Override registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_16_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_16_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t ctlez_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [CTLEZ_DEADBAND_NOW_OVRD]. */
+ uint64_t ctlez_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) CTLEZ adaptation deadband now override. */
+ uint64_t ctle_deadband_now_ovrd_en : 1;/**< [ 38: 38](R/W) Enable use of [CTLE_DEADBAND_NOW_OVRD]. */
+ uint64_t ctle_deadband_now_ovrd : 12;/**< [ 37: 26](R/W) CTLE adaptation deadband now override. */
+ uint64_t dfe_deadband_now_ovrd_en : 1;/**< [ 25: 25](R/W) Enable use of [DFE_DEADBAND_NOW_OVRD]. */
+ uint64_t dfe_deadband_now_ovrd : 12; /**< [ 24: 13](R/W) Coeff Adaptation deadband now override. */
+ uint64_t vga_deadband_now_ovrd_en : 1;/**< [ 12: 12](R/W) Enable use of [VGA_DEADBAND_NOW_OVRD]. */
+ uint64_t vga_deadband_now_ovrd : 12; /**< [ 11: 0](R/W) VGA adaptation deadband now override. */
+#else /* Word 0 - Little Endian */
+ uint64_t vga_deadband_now_ovrd : 12; /**< [ 11: 0](R/W) VGA adaptation deadband now override. */
+ uint64_t vga_deadband_now_ovrd_en : 1;/**< [ 12: 12](R/W) Enable use of [VGA_DEADBAND_NOW_OVRD]. */
+ uint64_t dfe_deadband_now_ovrd : 12; /**< [ 24: 13](R/W) Coeff Adaptation deadband now override. */
+ uint64_t dfe_deadband_now_ovrd_en : 1;/**< [ 25: 25](R/W) Enable use of [DFE_DEADBAND_NOW_OVRD]. */
+ uint64_t ctle_deadband_now_ovrd : 12;/**< [ 37: 26](R/W) CTLE adaptation deadband now override. */
+ uint64_t ctle_deadband_now_ovrd_en : 1;/**< [ 38: 38](R/W) Enable use of [CTLE_DEADBAND_NOW_OVRD]. */
+ uint64_t ctlez_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) CTLEZ adaptation deadband now override. */
+ uint64_t ctlez_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [CTLEZ_DEADBAND_NOW_OVRD]. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_16_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_16_bcfg bdk_gsernx_lanex_rx_16_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_16_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_16_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_16_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d60ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_16_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) bdk_gsernx_lanex_rx_16_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) "GSERNX_LANEX_RX_16_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_16_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_17_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 17
+ * Override registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_17_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_17_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t blwc_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [BLWC_DEADBAND_NOW_OVRD]. */
+ uint64_t blwc_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) BLWC adaptation deadband now override. */
+ uint64_t dfe_c1_deadband_now_ovrd_en : 1;/**< [ 38: 38](R/W) Enable use of [DFE_C1_DEADBAND_NOW_OVRD]. */
+ uint64_t dfe_c1_deadband_now_ovrd : 12;/**< [ 37: 26](R/W) DFE C1 Adaptation deadband now override. */
+ uint64_t afeos_deadband_now_ovrd_en : 1;/**< [ 25: 25](R/W) Enable use of [AFEOS_DEADBAND_NOW_OVRD]. */
+ uint64_t afeos_deadband_now_ovrd : 12;/**< [ 24: 13](R/W) AFE OS adaptation deadband now override. */
+ uint64_t ctlelte_deadband_now_ovrd_en : 1;/**< [ 12: 12](R/W) Enable use of [CTLELTE_DEADBAND_NOW_OVRD]. */
+ uint64_t ctlelte_deadband_now_ovrd : 12;/**< [ 11: 0](R/W) CTLELTE adaptation deadband now override. */
+#else /* Word 0 - Little Endian */
+ uint64_t ctlelte_deadband_now_ovrd : 12;/**< [ 11: 0](R/W) CTLELTE adaptation deadband now override. */
+ uint64_t ctlelte_deadband_now_ovrd_en : 1;/**< [ 12: 12](R/W) Enable use of [CTLELTE_DEADBAND_NOW_OVRD]. */
+ uint64_t afeos_deadband_now_ovrd : 12;/**< [ 24: 13](R/W) AFE OS adaptation deadband now override. */
+ uint64_t afeos_deadband_now_ovrd_en : 1;/**< [ 25: 25](R/W) Enable use of [AFEOS_DEADBAND_NOW_OVRD]. */
+ uint64_t dfe_c1_deadband_now_ovrd : 12;/**< [ 37: 26](R/W) DFE C1 Adaptation deadband now override. */
+ uint64_t dfe_c1_deadband_now_ovrd_en : 1;/**< [ 38: 38](R/W) Enable use of [DFE_C1_DEADBAND_NOW_OVRD]. */
+ uint64_t blwc_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) BLWC adaptation deadband now override. */
+ uint64_t blwc_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [BLWC_DEADBAND_NOW_OVRD]. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_17_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_17_bcfg bdk_gsernx_lanex_rx_17_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_17_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_17_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_17_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d70ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_17_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) bdk_gsernx_lanex_rx_17_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) "GSERNX_LANEX_RX_17_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_17_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_18_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 18
+ * Override registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_18_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_18_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t blwc_subrate_now_ovrd_en : 1;/**< [ 50: 50](R/W) Enable use of [BLWC_SUBRATE_NOW_OVRD]. */
+ uint64_t afeos_subrate_now_ovrd_en : 1;/**< [ 49: 49](R/W) Enable use of [AFEOS_SUBRATE_NOW_OVRD]. */
+ uint64_t subrate_now_ovrd_en : 1; /**< [ 48: 48](R/W) Enable use of [SUBRATE_NOW_OVRD]. */
+ uint64_t blwc_subrate_now_ovrd : 16; /**< [ 47: 32](R/W) BLWC Subrate_Now counter override value. */
+ uint64_t afeos_subrate_now_ovrd : 16;/**< [ 31: 16](R/W) AFEOS Subrate_Now counter override value. */
+ uint64_t subrate_now_ovrd : 16; /**< [ 15: 0](R/W) Subrate_Now counter override value. */
+#else /* Word 0 - Little Endian */
+ uint64_t subrate_now_ovrd : 16; /**< [ 15: 0](R/W) Subrate_Now counter override value. */
+ uint64_t afeos_subrate_now_ovrd : 16;/**< [ 31: 16](R/W) AFEOS Subrate_Now counter override value. */
+ uint64_t blwc_subrate_now_ovrd : 16; /**< [ 47: 32](R/W) BLWC Subrate_Now counter override value. */
+ uint64_t subrate_now_ovrd_en : 1; /**< [ 48: 48](R/W) Enable use of [SUBRATE_NOW_OVRD]. */
+ uint64_t afeos_subrate_now_ovrd_en : 1;/**< [ 49: 49](R/W) Enable use of [AFEOS_SUBRATE_NOW_OVRD]. */
+ uint64_t blwc_subrate_now_ovrd_en : 1;/**< [ 50: 50](R/W) Enable use of [BLWC_SUBRATE_NOW_OVRD]. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_18_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_18_bcfg bdk_gsernx_lanex_rx_18_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_18_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_18_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_18_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d80ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_18_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) bdk_gsernx_lanex_rx_18_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) "GSERNX_LANEX_RX_18_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_18_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_19_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 19
+ * Configuration registers for BLWC adaptation.
+ */
+union bdk_gsernx_lanex_rx_19_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_19_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t blwc_leak_sgn : 1; /**< [ 56: 56](R/W) BLWC leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+ uint64_t blwc_updn_len : 5; /**< [ 55: 51](R/W) Accumulation length for BLWC drift up/down control. Range is 1 to 20. */
+ uint64_t blwc_deadband : 12; /**< [ 50: 39](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t blwc_deadband_inc : 12; /**< [ 38: 27](R/W) BLWC adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t blwc_leak : 3; /**< [ 26: 24](R/W) BLWC adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t reserved_19_23 : 5;
+ uint64_t blwc_mu : 3; /**< [ 18: 16](R/W) BLWC adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t reserved_15 : 1;
+ uint64_t blwc_timer_max : 15; /**< [ 14: 0](R/W) BLWC adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+#else /* Word 0 - Little Endian */
+ uint64_t blwc_timer_max : 15; /**< [ 14: 0](R/W) BLWC adaptation timer maximum count value.
+ 15-bit field, maximum value 0x7FFF. */
+ uint64_t reserved_15 : 1;
+ uint64_t blwc_mu : 3; /**< [ 18: 16](R/W) BLWC adaptation mu parameter setting.
+ 0x0 = 1/16.
+ 0x1 = 1/8.
+ 0x2 = 1/4.
+ 0x3 = 1/2.
+ 0x4 = 1.
+ 0x5 = 2.
+ 0x6 = 4.
+ 0x7 = 8. */
+ uint64_t reserved_19_23 : 5;
+ uint64_t blwc_leak : 3; /**< [ 26: 24](R/W) BLWC adaptation leak parameter setting.
+ 0x0 = 1/128.
+ 0x1 = 1/64.
+ 0x2 = 1/32.
+ 0x3 = 1/16.
+ 0x4 = 1/8.
+ 0x5 = 1/4.
+ 0x6 = 1/2.
+ 0x7 = Disabled. */
+ uint64_t blwc_deadband_inc : 12; /**< [ 38: 27](R/W) BLWC adaptation deadband increment setting.
+ 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
+ uint64_t blwc_deadband : 12; /**< [ 50: 39](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t blwc_updn_len : 5; /**< [ 55: 51](R/W) Accumulation length for BLWC drift up/down control. Range is 1 to 20. */
+ uint64_t blwc_leak_sgn : 1; /**< [ 56: 56](R/W) BLWC leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_19_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_19_bcfg bdk_gsernx_lanex_rx_19_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_19_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_19_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_19_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000d90ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_19_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) bdk_gsernx_lanex_rx_19_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) "GSERNX_LANEX_RX_19_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_19_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_1_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 1
+ * Register controls for postcursor overrides from c10 through c15, and BLWC gain.
+ * Each override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ */
+union bdk_gsernx_lanex_rx_1_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t prevga_gn_ovrd_en : 1; /**< [ 56: 56](R/W) Enable use of [PREVGA_GN_OVRD]. */
+ uint64_t prevga_gn_ovrd : 3; /**< [ 55: 53](R/W) PREVGA_GN gain value override. */
+ uint64_t blwc_ovrd_en : 1; /**< [ 52: 52](R/W) Enable use of [BLWC_OVRD]. */
+ uint64_t blwc_ovrd : 5; /**< [ 51: 47](R/W) BLWC gain value override. */
+ uint64_t c15_ovrd_en : 1; /**< [ 46: 46](R/W) Enable use of [C15_OVRD]. */
+ uint64_t c15_ovrd : 6; /**< [ 45: 40](R/W) 15th postcursor value override. */
+ uint64_t reserved_39 : 1;
+ uint64_t c14_ovrd_en : 1; /**< [ 38: 38](R/W) Enable use of [C14_OVRD]. */
+ uint64_t c14_ovrd : 6; /**< [ 37: 32](R/W) 14th postcursor value override. */
+ uint64_t reserved_31 : 1;
+ uint64_t c13_ovrd_en : 1; /**< [ 30: 30](R/W) Enable use of [C13_OVRD]. */
+ uint64_t c13_ovrd : 6; /**< [ 29: 24](R/W) 13th postcursor value override. */
+ uint64_t reserved_23 : 1;
+ uint64_t c12_ovrd_en : 1; /**< [ 22: 22](R/W) Enable use of [C12_OVRD]. */
+ uint64_t c12_ovrd : 6; /**< [ 21: 16](R/W) 12th postcursor value override. */
+ uint64_t reserved_15 : 1;
+ uint64_t c11_ovrd_en : 1; /**< [ 14: 14](R/W) Enable use of [C11_OVRD]. */
+ uint64_t c11_ovrd : 6; /**< [ 13: 8](R/W) 11th postcursor value override. */
+ uint64_t reserved_7 : 1;
+ uint64_t c10_ovrd_en : 1; /**< [ 6: 6](R/W) Enable use of [C10_OVRD]. */
+ uint64_t c10_ovrd : 6; /**< [ 5: 0](R/W) 10th postcursor value override. */
+#else /* Word 0 - Little Endian */
+ uint64_t c10_ovrd : 6; /**< [ 5: 0](R/W) 10th postcursor value override. */
+ uint64_t c10_ovrd_en : 1; /**< [ 6: 6](R/W) Enable use of [C10_OVRD]. */
+ uint64_t reserved_7 : 1;
+ uint64_t c11_ovrd : 6; /**< [ 13: 8](R/W) 11th postcursor value override. */
+ uint64_t c11_ovrd_en : 1; /**< [ 14: 14](R/W) Enable use of [C11_OVRD]. */
+ uint64_t reserved_15 : 1;
+ uint64_t c12_ovrd : 6; /**< [ 21: 16](R/W) 12th postcursor value override. */
+ uint64_t c12_ovrd_en : 1; /**< [ 22: 22](R/W) Enable use of [C12_OVRD]. */
+ uint64_t reserved_23 : 1;
+ uint64_t c13_ovrd : 6; /**< [ 29: 24](R/W) 13th postcursor value override. */
+ uint64_t c13_ovrd_en : 1; /**< [ 30: 30](R/W) Enable use of [C13_OVRD]. */
+ uint64_t reserved_31 : 1;
+ uint64_t c14_ovrd : 6; /**< [ 37: 32](R/W) 14th postcursor value override. */
+ uint64_t c14_ovrd_en : 1; /**< [ 38: 38](R/W) Enable use of [C14_OVRD]. */
+ uint64_t reserved_39 : 1;
+ uint64_t c15_ovrd : 6; /**< [ 45: 40](R/W) 15th postcursor value override. */
+ uint64_t c15_ovrd_en : 1; /**< [ 46: 46](R/W) Enable use of [C15_OVRD]. */
+ uint64_t blwc_ovrd : 5; /**< [ 51: 47](R/W) BLWC gain value override. */
+ uint64_t blwc_ovrd_en : 1; /**< [ 52: 52](R/W) Enable use of [BLWC_OVRD]. */
+ uint64_t prevga_gn_ovrd : 3; /**< [ 55: 53](R/W) PREVGA_GN gain value override. */
+ uint64_t prevga_gn_ovrd_en : 1; /**< [ 56: 56](R/W) Enable use of [PREVGA_GN_OVRD]. */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_1_bcfg bdk_gsernx_lanex_rx_1_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_1_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000c70ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) bdk_gsernx_lanex_rx_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) "GSERNX_LANEX_RX_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_1_bsts
+ *
+ * GSER Lane RX Base Status Register 1
+ * Status registers for postcursor values (either calibration results or
+ * overrides) from c10 through c15. Values in this register are only valid
+ * if GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted (indicating DFE adaptation
+ * has completed), or if the corresponding CSR override enable is asserted.
+ */
+union bdk_gsernx_lanex_rx_1_bsts
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_1_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_46_63 : 18;
+ uint64_t c15 : 6; /**< [ 45: 40](RO/H) 15th postcursor value. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t c14 : 6; /**< [ 37: 32](RO/H) 14th postcursor value. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t c13 : 6; /**< [ 29: 24](RO/H) 13th postcursor value. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t c12 : 6; /**< [ 21: 16](RO/H) 12th postcursor value. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t c11 : 6; /**< [ 13: 8](RO/H) 11th postcursor value. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t c10 : 6; /**< [ 5: 0](RO/H) 10th postcursor value. */
+#else /* Word 0 - Little Endian */
+ uint64_t c10 : 6; /**< [ 5: 0](RO/H) 10th postcursor value. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t c11 : 6; /**< [ 13: 8](RO/H) 11th postcursor value. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t c12 : 6; /**< [ 21: 16](RO/H) 12th postcursor value. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t c13 : 6; /**< [ 29: 24](RO/H) 13th postcursor value. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t c14 : 6; /**< [ 37: 32](RO/H) 14th postcursor value. */
+ uint64_t reserved_38_39 : 2;
+ uint64_t c15 : 6; /**< [ 45: 40](RO/H) 15th postcursor value. */
+ uint64_t reserved_46_63 : 18;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_1_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_1_bsts bdk_gsernx_lanex_rx_1_bsts_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_1_BSTS.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_1_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_1_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001660ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_1_BSTS", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) bdk_gsernx_lanex_rx_1_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) "GSERNX_LANEX_RX_1_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_1_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_20_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 20
+ * Configuration registers for BLWC LMS adaptation
+ * Adaptation controls for Subrate parameters.
+ */
+union bdk_gsernx_lanex_rx_20_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_20_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t blwc_subrate_scale : 3; /**< [ 34: 32](R/W) BLWC subrate now counter scaling value for comparison against the up vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t blwc_subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t blwc_subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+ uint64_t blwc_subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+ uint64_t blwc_subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t blwc_subrate_scale : 3; /**< [ 34: 32](R/W) BLWC subrate now counter scaling value for comparison against the up vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_20_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_20_bcfg bdk_gsernx_lanex_rx_20_bcfg_t;
+
+/* Compute the physical RSL address of GSERN(a)_LANE(b)_RX_20_BCFG.
+ * Valid on CN9XXX for a <= 7 and b <= 4; other indices report a fatal
+ * CSR lookup error via __bdk_csr_fatal() (assumed non-returning). */
+static inline uint64_t BDK_GSERNX_LANEX_RX_20_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_20_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000da0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_20_BCFG", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) bdk_gsernx_lanex_rx_20_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) "GSERNX_LANEX_RX_20_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_20_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_21_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 21
+ * Configuration registers for PREVGA_GN LMS adaptation
+ * Adaptation controls for Subrate parameters.
+ */
+union bdk_gsernx_lanex_rx_21_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_rx_21_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_52_63 : 12;
+ uint64_t prevga_gn_subrate_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [PREVGA_GN_SUBRATE_NOW_OVRD]. */
+ uint64_t prevga_gn_subrate_now_ovrd : 16;/**< [ 50: 35](R/W) PREVGA_GN Subrate_Now counter override value. */
+ uint64_t prevga_gn_subrate_scale : 3;/**< [ 34: 32](R/W) PREVGA_GN subrate now counter scaling value for comparison against the up vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t prevga_gn_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t prevga_gn_subrate_fin : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+ uint64_t prevga_gn_subrate_fin : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+ uint64_t prevga_gn_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t prevga_gn_subrate_scale : 3;/**< [ 34: 32](R/W) PREVGA_GN subrate now counter scaling value for comparison against the up vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t prevga_gn_subrate_now_ovrd : 16;/**< [ 50: 35](R/W) PREVGA_GN Subrate_Now counter override value. */
+ uint64_t prevga_gn_subrate_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [PREVGA_GN_SUBRATE_NOW_OVRD]. */
+ uint64_t reserved_52_63 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_21_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_21_bcfg bdk_gsernx_lanex_rx_21_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_21_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_21_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000db0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_21_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) bdk_gsernx_lanex_rx_21_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) "GSERNX_LANEX_RX_21_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_21_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_rx_22_bcfg
 *
 * GSER Lane RX Base Configuration Register 22
 * Override registers for LMS adaptation. Deadband settings for adaptation.
 * Only the PREVGA_GN deadband override and its enable (bits [51:39]) are
 * implemented; all remaining bits of this register are reserved.
 */
union bdk_gsernx_lanex_rx_22_bcfg
{
    uint64_t u;
    struct bdk_gsernx_lanex_rx_22_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_52_63 : 12;
        uint64_t prevga_gn_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [PREVGA_GN_DEADBAND_NOW_OVRD]. */
        uint64_t prevga_gn_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) PREVGA_GN adaptation deadband now override. */
        uint64_t reserved_0_38 : 39;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0_38 : 39;
        uint64_t prevga_gn_deadband_now_ovrd : 12;/**< [ 50: 39](R/W) PREVGA_GN adaptation deadband now override. */
        uint64_t prevga_gn_deadband_now_ovrd_en : 1;/**< [ 51: 51](R/W) Enable use of [PREVGA_GN_DEADBAND_NOW_OVRD]. */
        uint64_t reserved_52_63 : 12;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_22_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rx_22_bcfg bdk_gsernx_lanex_rx_22_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_22_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_22_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000dc0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_22_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) bdk_gsernx_lanex_rx_22_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) "GSERNX_LANEX_RX_22_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_22_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_rx_23_bcfg
 *
 * GSER Lane RX Base Configuration Register 23
 * Configuration registers for PREVGA_GN gain adaptation.
 * Holds the mu, leak, deadband, and timer parameters for the PREVGA_GN
 * adaptation loop; the encodings for each parameter are listed per field.
 */
union bdk_gsernx_lanex_rx_23_bcfg
{
    uint64_t u;
    struct bdk_gsernx_lanex_rx_23_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_52_63 : 12;
        uint64_t prevga_gn_leak_sgn : 1; /**< [ 51: 51](R/W) PREVGA_GN leak sign. 0 = Positive (add). 1 = Negative (subtract). */
        uint64_t prevga_gn_deadband : 12; /**< [ 50: 39](R/W) PREVGA_GN adaptation deadband settings. Typically a value less than 0x0FF is used. */
        uint64_t prevga_gn_deadband_inc : 12;/**< [ 38: 27](R/W) PREVGA_GN adaptation deadband increment setting.
                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
        uint64_t prevga_gn_leak : 3; /**< [ 26: 24](R/W) PREVGA_GN adaptation leak parameter setting.
                                                 0x0 = 1/128.
                                                 0x1 = 1/64.
                                                 0x2 = 1/32.
                                                 0x3 = 1/16.
                                                 0x4 = 1/8.
                                                 0x5 = 1/4.
                                                 0x6 = 1/2.
                                                 0x7 = Disabled. */
        uint64_t reserved_19_23 : 5;
        uint64_t prevga_gn_mu : 3; /**< [ 18: 16](R/W) PREVGA_GN adaptation mu parameter setting.
                                                 0x0 = 1/16.
                                                 0x1 = 1/8.
                                                 0x2 = 1/4.
                                                 0x3 = 1/2.
                                                 0x4 = 1.
                                                 0x5 = 2.
                                                 0x6 = 4.
                                                 0x7 = 8. */
        uint64_t reserved_15 : 1;
        uint64_t prevga_gn_timer_max : 15; /**< [ 14: 0](R/W) PREVGA_GN adaptation timer maximum count value. */
#else /* Word 0 - Little Endian */
        uint64_t prevga_gn_timer_max : 15; /**< [ 14: 0](R/W) PREVGA_GN adaptation timer maximum count value. */
        uint64_t reserved_15 : 1;
        uint64_t prevga_gn_mu : 3; /**< [ 18: 16](R/W) PREVGA_GN adaptation mu parameter setting.
                                                 0x0 = 1/16.
                                                 0x1 = 1/8.
                                                 0x2 = 1/4.
                                                 0x3 = 1/2.
                                                 0x4 = 1.
                                                 0x5 = 2.
                                                 0x6 = 4.
                                                 0x7 = 8. */
        uint64_t reserved_19_23 : 5;
        uint64_t prevga_gn_leak : 3; /**< [ 26: 24](R/W) PREVGA_GN adaptation leak parameter setting.
                                                 0x0 = 1/128.
                                                 0x1 = 1/64.
                                                 0x2 = 1/32.
                                                 0x3 = 1/16.
                                                 0x4 = 1/8.
                                                 0x5 = 1/4.
                                                 0x6 = 1/2.
                                                 0x7 = Disabled. */
        uint64_t prevga_gn_deadband_inc : 12;/**< [ 38: 27](R/W) PREVGA_GN adaptation deadband increment setting.
                                                 12-bit field with 4 integer bits and 8 fraction bits (unsigned). */
        uint64_t prevga_gn_deadband : 12; /**< [ 50: 39](R/W) PREVGA_GN adaptation deadband settings. Typically a value less than 0x0FF is used. */
        uint64_t prevga_gn_leak_sgn : 1; /**< [ 51: 51](R/W) PREVGA_GN leak sign. 0 = Positive (add). 1 = Negative (subtract). */
        uint64_t reserved_52_63 : 12;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_23_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rx_23_bcfg bdk_gsernx_lanex_rx_23_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_23_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_23_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000dd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_23_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) bdk_gsernx_lanex_rx_23_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) "GSERNX_LANEX_RX_23_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_23_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_rx_24_bcfg
 *
 * GSER Lane RX Base Configuration Register 24
 * Configuration registers for DFE offset compensation timer.
 * While [DFE_OSCOMP_TIMER_EN] is set, the timer counts up to
 * [DFE_OSCOMP_TIMER_MAX], triggers DFE offset compensation, resets to
 * zero, and repeats.
 */
union bdk_gsernx_lanex_rx_24_bcfg
{
    uint64_t u;
    struct bdk_gsernx_lanex_rx_24_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t dfe_oscomp_timer_en : 1; /**< [ 63: 63](R/W) Enable for DFE offset compensation timer. When set, allows DFE offset
                                                 compensation timer to trigger DFE offset compensation upon timer expiration. */
        uint64_t reserved_32_62 : 31;
        uint64_t dfe_oscomp_timer_max : 32; /**< [ 31: 0](R/W) Maximum value of the DFE offset compensation Timer. When the timer reaches the
                                                 value set by this field, the DFE offset compensation process is triggered. Also,
                                                 when the timer reaches this value, the timer is reset to zero and allowed to
                                                 begin counting again. */
#else /* Word 0 - Little Endian */
        uint64_t dfe_oscomp_timer_max : 32; /**< [ 31: 0](R/W) Maximum value of the DFE offset compensation Timer. When the timer reaches the
                                                 value set by this field, the DFE offset compensation process is triggered. Also,
                                                 when the timer reaches this value, the timer is reset to zero and allowed to
                                                 begin counting again. */
        uint64_t reserved_32_62 : 31;
        uint64_t dfe_oscomp_timer_en : 1; /**< [ 63: 63](R/W) Enable for DFE offset compensation timer. When set, allows DFE offset
                                                 compensation timer to trigger DFE offset compensation upon timer expiration. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_24_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rx_24_bcfg bdk_gsernx_lanex_rx_24_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_24_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_24_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000de0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_24_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) bdk_gsernx_lanex_rx_24_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) "GSERNX_LANEX_RX_24_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_24_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_rx_2_bcfg
 *
 * GSER Lane RX Base Configuration Register 2
 * Register controls for first postcursor overrides of even/odd paths. Each
 * override setting has a corresponding enable bit which will cause the
 * calibration control logic to use the override register setting instead
 * of the calibration result.
 * Overrides cover the even and odd variants of the Q, I, X, and E paths.
 */
union bdk_gsernx_lanex_rx_2_bcfg
{
    uint64_t u;
    struct bdk_gsernx_lanex_rx_2_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_63 : 1;
        uint64_t c1_1e_ovrd_en : 1; /**< [ 62: 62](R/W) Enable use of [C1_1E_OVRD]. */
        uint64_t c1_1e_ovrd : 6; /**< [ 61: 56](R/W) First postcursor value on odd E path override. */
        uint64_t reserved_55 : 1;
        uint64_t c1_0e_ovrd_en : 1; /**< [ 54: 54](R/W) Enable use of [C1_0E_OVRD]. */
        uint64_t c1_0e_ovrd : 6; /**< [ 53: 48](R/W) First postcursor value on even E path override. */
        uint64_t reserved_47 : 1;
        uint64_t c1_1x_ovrd_en : 1; /**< [ 46: 46](R/W) Enable use of [C1_1X_OVRD]. */
        uint64_t c1_1x_ovrd : 6; /**< [ 45: 40](R/W) First postcursor value on odd X path override. */
        uint64_t reserved_39 : 1;
        uint64_t c1_0x_ovrd_en : 1; /**< [ 38: 38](R/W) Enable use of [C1_0X_OVRD]. */
        uint64_t c1_0x_ovrd : 6; /**< [ 37: 32](R/W) First postcursor value on even X path override. */
        uint64_t reserved_31 : 1;
        uint64_t c1_1i_ovrd_en : 1; /**< [ 30: 30](R/W) Enable use of [C1_1I_OVRD]. */
        uint64_t c1_1i_ovrd : 6; /**< [ 29: 24](R/W) First postcursor value on odd I path override. */
        uint64_t reserved_23 : 1;
        uint64_t c1_0i_ovrd_en : 1; /**< [ 22: 22](R/W) Enable use of [C1_0I_OVRD]. */
        uint64_t c1_0i_ovrd : 6; /**< [ 21: 16](R/W) First postcursor value on even I path override. */
        uint64_t reserved_15 : 1;
        uint64_t c1_1q_ovrd_en : 1; /**< [ 14: 14](R/W) Enable use of [C1_1Q_OVRD]. */
        uint64_t c1_1q_ovrd : 6; /**< [ 13: 8](R/W) First postcursor value on odd Q path override. */
        uint64_t reserved_7 : 1;
        uint64_t c1_0q_ovrd_en : 1; /**< [ 6: 6](R/W) Enable use of [C1_0Q_OVRD]. */
        uint64_t c1_0q_ovrd : 6; /**< [ 5: 0](R/W) First postcursor value on even Q path override. */
#else /* Word 0 - Little Endian */
        uint64_t c1_0q_ovrd : 6; /**< [ 5: 0](R/W) First postcursor value on even Q path override. */
        uint64_t c1_0q_ovrd_en : 1; /**< [ 6: 6](R/W) Enable use of [C1_0Q_OVRD]. */
        uint64_t reserved_7 : 1;
        uint64_t c1_1q_ovrd : 6; /**< [ 13: 8](R/W) First postcursor value on odd Q path override. */
        uint64_t c1_1q_ovrd_en : 1; /**< [ 14: 14](R/W) Enable use of [C1_1Q_OVRD]. */
        uint64_t reserved_15 : 1;
        uint64_t c1_0i_ovrd : 6; /**< [ 21: 16](R/W) First postcursor value on even I path override. */
        uint64_t c1_0i_ovrd_en : 1; /**< [ 22: 22](R/W) Enable use of [C1_0I_OVRD]. */
        uint64_t reserved_23 : 1;
        uint64_t c1_1i_ovrd : 6; /**< [ 29: 24](R/W) First postcursor value on odd I path override. */
        uint64_t c1_1i_ovrd_en : 1; /**< [ 30: 30](R/W) Enable use of [C1_1I_OVRD]. */
        uint64_t reserved_31 : 1;
        uint64_t c1_0x_ovrd : 6; /**< [ 37: 32](R/W) First postcursor value on even X path override. */
        uint64_t c1_0x_ovrd_en : 1; /**< [ 38: 38](R/W) Enable use of [C1_0X_OVRD]. */
        uint64_t reserved_39 : 1;
        uint64_t c1_1x_ovrd : 6; /**< [ 45: 40](R/W) First postcursor value on odd X path override. */
        uint64_t c1_1x_ovrd_en : 1; /**< [ 46: 46](R/W) Enable use of [C1_1X_OVRD]. */
        uint64_t reserved_47 : 1;
        uint64_t c1_0e_ovrd : 6; /**< [ 53: 48](R/W) First postcursor value on even E path override. */
        uint64_t c1_0e_ovrd_en : 1; /**< [ 54: 54](R/W) Enable use of [C1_0E_OVRD]. */
        uint64_t reserved_55 : 1;
        uint64_t c1_1e_ovrd : 6; /**< [ 61: 56](R/W) First postcursor value on odd E path override. */
        uint64_t c1_1e_ovrd_en : 1; /**< [ 62: 62](R/W) Enable use of [C1_1E_OVRD]. */
        uint64_t reserved_63 : 1;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_2_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rx_2_bcfg bdk_gsernx_lanex_rx_2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000c80ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) bdk_gsernx_lanex_rx_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) "GSERNX_LANEX_RX_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_2_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_rx_2_bsts
 *
 * GSER Lane RX Base Status Register 2
 * Status registers for first postcursor values (either calibration
 * results or overrides) of even/odd paths. Values in this register are
 * only valid if GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted (indicating DFE
 * adaptation has completed), or if the corresponding CSR override enable
 * is asserted.
 * Values cover the even and odd variants of the Q, I, X, and E paths.
 */
union bdk_gsernx_lanex_rx_2_bsts
{
    uint64_t u;
    struct bdk_gsernx_lanex_rx_2_bsts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_62_63 : 2;
        uint64_t c1_1e : 6; /**< [ 61: 56](RO/H) First postcursor value on odd E path. */
        uint64_t reserved_54_55 : 2;
        uint64_t c1_0e : 6; /**< [ 53: 48](RO/H) First postcursor value on even E path. */
        uint64_t reserved_46_47 : 2;
        uint64_t c1_1x : 6; /**< [ 45: 40](RO/H) First postcursor value on odd X path. */
        uint64_t reserved_38_39 : 2;
        uint64_t c1_0x : 6; /**< [ 37: 32](RO/H) First postcursor value on even X path. */
        uint64_t reserved_30_31 : 2;
        uint64_t c1_1i : 6; /**< [ 29: 24](RO/H) First postcursor value on odd I path. */
        uint64_t reserved_22_23 : 2;
        uint64_t c1_0i : 6; /**< [ 21: 16](RO/H) First postcursor value on even I path. */
        uint64_t reserved_14_15 : 2;
        uint64_t c1_1q : 6; /**< [ 13: 8](RO/H) First postcursor value on odd Q path. */
        uint64_t reserved_6_7 : 2;
        uint64_t c1_0q : 6; /**< [ 5: 0](RO/H) First postcursor value on even Q path. */
#else /* Word 0 - Little Endian */
        uint64_t c1_0q : 6; /**< [ 5: 0](RO/H) First postcursor value on even Q path. */
        uint64_t reserved_6_7 : 2;
        uint64_t c1_1q : 6; /**< [ 13: 8](RO/H) First postcursor value on odd Q path. */
        uint64_t reserved_14_15 : 2;
        uint64_t c1_0i : 6; /**< [ 21: 16](RO/H) First postcursor value on even I path. */
        uint64_t reserved_22_23 : 2;
        uint64_t c1_1i : 6; /**< [ 29: 24](RO/H) First postcursor value on odd I path. */
        uint64_t reserved_30_31 : 2;
        uint64_t c1_0x : 6; /**< [ 37: 32](RO/H) First postcursor value on even X path. */
        uint64_t reserved_38_39 : 2;
        uint64_t c1_1x : 6; /**< [ 45: 40](RO/H) First postcursor value on odd X path. */
        uint64_t reserved_46_47 : 2;
        uint64_t c1_0e : 6; /**< [ 53: 48](RO/H) First postcursor value on even E path. */
        uint64_t reserved_54_55 : 2;
        uint64_t c1_1e : 6; /**< [ 61: 56](RO/H) First postcursor value on odd E path. */
        uint64_t reserved_62_63 : 2;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_2_bsts_s cn; */
};
typedef union bdk_gsernx_lanex_rx_2_bsts bdk_gsernx_lanex_rx_2_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_2_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_2_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001670ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_2_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) bdk_gsernx_lanex_rx_2_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) "GSERNX_LANEX_RX_2_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_2_BSTS(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_rx_3_bcfg
 *
 * GSER Lane RX Base Configuration Register 3
 * Register controls for calibration muxes and switch enable overrides.
 * Some bits in this register are override controls (*_OVRD). Each
 * override setting has a corresponding enable which will cause the
 * calibration logic to use the override register setting instead of the
 * calibration result.
 */
union bdk_gsernx_lanex_rx_3_bcfg
{
    uint64_t u;
    struct bdk_gsernx_lanex_rx_3_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_60_63 : 4;
        uint64_t cali1_odd_ovrd_en : 1; /**< [ 59: 59](R/W) Enable use of [CALI1_ODD_OVRD]. */
        uint64_t cali1_even_ovrd_en : 1; /**< [ 58: 58](R/W) Enable use of [CALI1_EVEN_OVRD]. */
        uint64_t cali0_odd_ovrd_en : 1; /**< [ 57: 57](R/W) Enable use of [CALI0_ODD_OVRD]. */
        uint64_t cali0_even_ovrd_en : 1; /**< [ 56: 56](R/W) Enable use of [CALI0_EVEN_OVRD]. */
        uint64_t cali1_odd_ovrd : 8; /**< [ 55: 48](R/W) Input calibration switch enable for speculation path 1
                                                 in odd paths override. */
        uint64_t cali1_even_ovrd : 8; /**< [ 47: 40](R/W) Input calibration switch enable for speculation path 1
                                                 in even paths override. */
        uint64_t cali0_odd_ovrd : 8; /**< [ 39: 32](R/W) Input calibration switch enable for speculation path 0
                                                 in odd paths override. */
        uint64_t cali0_even_ovrd : 8; /**< [ 31: 24](R/W) Input calibration switch enable for speculation path 0
                                                 in even paths override. */
        uint64_t reserved_20_23 : 4;
        uint64_t calsel_odd_ovrd_en : 1; /**< [ 19: 19](R/W) Enable use of [CALSEL_ODD_OVRD]. */
        uint64_t calsel_even_ovrd_en : 1; /**< [ 18: 18](R/W) Enable use of [CALSEL_EVEN_OVRD]. */
        uint64_t calo_odd_ovrd_en : 1; /**< [ 17: 17](R/W) Enable use of [CALO_ODD_OVRD]. */
        uint64_t calo_even_ovrd_en : 1; /**< [ 16: 16](R/W) Enable use of [CALO_EVEN_OVRD]. */
        uint64_t calsel_odd_ovrd : 4; /**< [ 15: 12](R/W) Odd calibration speculation mux override value. */
        uint64_t calsel_even_ovrd : 4; /**< [ 11: 8](R/W) Even calibration speculation mux override value. */
        uint64_t calo_odd_ovrd : 4; /**< [ 7: 4](R/W) Odd Slicer output calibration mux control override value. */
        uint64_t calo_even_ovrd : 4; /**< [ 3: 0](R/W) Even Slicer output calibration mux control override value. */
#else /* Word 0 - Little Endian */
        uint64_t calo_even_ovrd : 4; /**< [ 3: 0](R/W) Even Slicer output calibration mux control override value. */
        uint64_t calo_odd_ovrd : 4; /**< [ 7: 4](R/W) Odd Slicer output calibration mux control override value. */
        uint64_t calsel_even_ovrd : 4; /**< [ 11: 8](R/W) Even calibration speculation mux override value. */
        uint64_t calsel_odd_ovrd : 4; /**< [ 15: 12](R/W) Odd calibration speculation mux override value. */
        uint64_t calo_even_ovrd_en : 1; /**< [ 16: 16](R/W) Enable use of [CALO_EVEN_OVRD]. */
        uint64_t calo_odd_ovrd_en : 1; /**< [ 17: 17](R/W) Enable use of [CALO_ODD_OVRD]. */
        uint64_t calsel_even_ovrd_en : 1; /**< [ 18: 18](R/W) Enable use of [CALSEL_EVEN_OVRD]. */
        uint64_t calsel_odd_ovrd_en : 1; /**< [ 19: 19](R/W) Enable use of [CALSEL_ODD_OVRD]. */
        uint64_t reserved_20_23 : 4;
        uint64_t cali0_even_ovrd : 8; /**< [ 31: 24](R/W) Input calibration switch enable for speculation path 0
                                                 in even paths override. */
        uint64_t cali0_odd_ovrd : 8; /**< [ 39: 32](R/W) Input calibration switch enable for speculation path 0
                                                 in odd paths override. */
        uint64_t cali1_even_ovrd : 8; /**< [ 47: 40](R/W) Input calibration switch enable for speculation path 1
                                                 in even paths override. */
        uint64_t cali1_odd_ovrd : 8; /**< [ 55: 48](R/W) Input calibration switch enable for speculation path 1
                                                 in odd paths override. */
        uint64_t cali0_even_ovrd_en : 1; /**< [ 56: 56](R/W) Enable use of [CALI0_EVEN_OVRD]. */
        uint64_t cali0_odd_ovrd_en : 1; /**< [ 57: 57](R/W) Enable use of [CALI0_ODD_OVRD]. */
        uint64_t cali1_even_ovrd_en : 1; /**< [ 58: 58](R/W) Enable use of [CALI1_EVEN_OVRD]. */
        uint64_t cali1_odd_ovrd_en : 1; /**< [ 59: 59](R/W) Enable use of [CALI1_ODD_OVRD]. */
        uint64_t reserved_60_63 : 4;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_3_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rx_3_bcfg bdk_gsernx_lanex_rx_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000c90ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) bdk_gsernx_lanex_rx_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) "GSERNX_LANEX_RX_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_3_BCFG(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_rx_3_bsts
 *
 * GSER Lane RX Base Status Register 3
 * Status registers for calibration muxes and switch enables (either
 * calibration results or override values). Values in this register are only valid if
 * GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted (indicating DFE adaptation has
 * completed), or if the corresponding CSR override enable is asserted.
 */
union bdk_gsernx_lanex_rx_3_bsts
{
    uint64_t u;
    struct bdk_gsernx_lanex_rx_3_bsts_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_56_63 : 8;
        uint64_t cali1_odd : 8; /**< [ 55: 48](RO/H) Input calibration switch enable for speculation path 1
                                                 in odd paths. */
        uint64_t cali1_even : 8; /**< [ 47: 40](RO/H) Input calibration switch enable for speculation path 1
                                                 in even paths. */
        uint64_t cali0_odd : 8; /**< [ 39: 32](RO/H) Input calibration switch enable for speculation path 0
                                                 in odd paths. */
        uint64_t cali0_even : 8; /**< [ 31: 24](RO/H) Input calibration switch enable for speculation path 0
                                                 in even paths. */
        uint64_t reserved_16_23 : 8;
        uint64_t calsel_odd : 4; /**< [ 15: 12](RO/H) Odd calibration speculation mux. */
        uint64_t calsel_even : 4; /**< [ 11: 8](RO/H) Even calibration speculation mux. */
        uint64_t calo_odd : 4; /**< [ 7: 4](RO/H) Odd slicer output calibration mux control. */
        uint64_t calo_even : 4; /**< [ 3: 0](RO/H) Even slicer output calibration mux control. */
#else /* Word 0 - Little Endian */
        uint64_t calo_even : 4; /**< [ 3: 0](RO/H) Even slicer output calibration mux control. */
        uint64_t calo_odd : 4; /**< [ 7: 4](RO/H) Odd slicer output calibration mux control. */
        uint64_t calsel_even : 4; /**< [ 11: 8](RO/H) Even calibration speculation mux. */
        uint64_t calsel_odd : 4; /**< [ 15: 12](RO/H) Odd calibration speculation mux. */
        uint64_t reserved_16_23 : 8;
        uint64_t cali0_even : 8; /**< [ 31: 24](RO/H) Input calibration switch enable for speculation path 0
                                                 in even paths. */
        uint64_t cali0_odd : 8; /**< [ 39: 32](RO/H) Input calibration switch enable for speculation path 0
                                                 in odd paths. */
        uint64_t cali1_even : 8; /**< [ 47: 40](RO/H) Input calibration switch enable for speculation path 1
                                                 in even paths. */
        uint64_t cali1_odd : 8; /**< [ 55: 48](RO/H) Input calibration switch enable for speculation path 1
                                                 in odd paths. */
        uint64_t reserved_56_63 : 8;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_3_bsts_s cn; */
};
typedef union bdk_gsernx_lanex_rx_3_bsts bdk_gsernx_lanex_rx_3_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_3_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_3_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001680ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_3_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) bdk_gsernx_lanex_rx_3_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) "GSERNX_LANEX_RX_3_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_3_BSTS(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) gsern#_lane#_rx_4_bcfg
 *
 * GSER Lane RX Base Configuration Register 4
 * Register controls for VGA, CTLE, and OS_AFE overrides.
 * Some bits in this register are override controls (*_OVRD). Each
 * override setting has a corresponding enable which will cause the
 * calibration logic to use the override register setting instead of the
 * calibration result.
 */
union bdk_gsernx_lanex_rx_4_bcfg
{
    uint64_t u;
    struct bdk_gsernx_lanex_rx_4_bcfg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_62_63 : 2;
        uint64_t edgesel_even_ovrd_en : 1; /**< [ 61: 61](R/W) Enable use of [EDGESEL_EVEN_OVRD]. */
        uint64_t edgesel_even_ovrd : 1; /**< [ 60: 60](R/W) EDGESEL_EVEN override value. */
        uint64_t edgesel_odd_ovrd_en : 1; /**< [ 59: 59](R/W) Enable use of [EDGESEL_ODD_OVRD]. */
        uint64_t edgesel_odd_ovrd : 1; /**< [ 58: 58](R/W) EDGESEL_ODD override value. */
        uint64_t en_os_afe_ovrd_en : 1; /**< [ 57: 57](R/W) Enable use of [EN_OS_AFE_OVRD]. */
        uint64_t en_os_afe_ovrd : 1; /**< [ 56: 56](R/W) OS_AFE_EN override value. */
        uint64_t reserved_55 : 1;
        uint64_t os_afe_odd_ovrd_en : 1; /**< [ 54: 54](R/W) Enable use of [OS_AFE_ODD_OVRD]. */
        uint64_t os_afe_odd_ovrd : 6; /**< [ 53: 48](R/W) OS_AFE_ODD offset override value. */
        uint64_t reserved_47 : 1;
        uint64_t os_afe_even_ovrd_en : 1; /**< [ 46: 46](R/W) Enable use of [OS_AFE_EVEN_OVRD]. */
        uint64_t os_afe_even_ovrd : 6; /**< [ 45: 40](R/W) OS_AFE_EVEN offset override value. */
        uint64_t reserved_37_39 : 3;
        uint64_t ctle_lte_zero_ovrd_en : 1; /**< [ 36: 36](R/W) CTLE LTE zero frequency override enable.
                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
        uint64_t ctle_lte_zero_ovrd : 4; /**< [ 35: 32](R/W) CTLE LTE zero frequency override value. */
        uint64_t reserved_29_31 : 3;
        uint64_t ctle_lte_gain_ovrd_en : 1; /**< [ 28: 28](R/W) Enable use of [CTLE_LTE_GAIN_OVRD]. */
        uint64_t ctle_lte_gain_ovrd : 4; /**< [ 27: 24](R/W) CTLE LTE DC gain override value. */
        uint64_t reserved_21_23 : 3;
        uint64_t ctle_zero_ovrd_en : 1; /**< [ 20: 20](R/W) Enable use of [CTLE_ZERO_OVRD]. */
        uint64_t ctle_zero_ovrd : 4; /**< [ 19: 16](R/W) CTLE zero frequency override value. */
        uint64_t reserved_13_15 : 3;
        uint64_t ctle_gain_ovrd_en : 1; /**< [ 12: 12](R/W) Enable use of [CTLE_GAIN_OVRD]. */
        uint64_t ctle_gain_ovrd : 4; /**< [ 11: 8](R/W) CTLE DC gain override value. */
        uint64_t reserved_5_7 : 3;
        uint64_t vga_gain_ovrd_en : 1; /**< [ 4: 4](R/W) Enable use of [VGA_GAIN_OVRD]. */
        uint64_t vga_gain_ovrd : 4; /**< [ 3: 0](R/W) VGA DC gain override value. */
#else /* Word 0 - Little Endian */
        uint64_t vga_gain_ovrd : 4; /**< [ 3: 0](R/W) VGA DC gain override value. */
        uint64_t vga_gain_ovrd_en : 1; /**< [ 4: 4](R/W) Enable use of [VGA_GAIN_OVRD]. */
        uint64_t reserved_5_7 : 3;
        uint64_t ctle_gain_ovrd : 4; /**< [ 11: 8](R/W) CTLE DC gain override value. */
        uint64_t ctle_gain_ovrd_en : 1; /**< [ 12: 12](R/W) Enable use of [CTLE_GAIN_OVRD]. */
        uint64_t reserved_13_15 : 3;
        uint64_t ctle_zero_ovrd : 4; /**< [ 19: 16](R/W) CTLE zero frequency override value. */
        uint64_t ctle_zero_ovrd_en : 1; /**< [ 20: 20](R/W) Enable use of [CTLE_ZERO_OVRD]. */
        uint64_t reserved_21_23 : 3;
        uint64_t ctle_lte_gain_ovrd : 4; /**< [ 27: 24](R/W) CTLE LTE DC gain override value. */
        uint64_t ctle_lte_gain_ovrd_en : 1; /**< [ 28: 28](R/W) Enable use of [CTLE_LTE_GAIN_OVRD]. */
        uint64_t reserved_29_31 : 3;
        uint64_t ctle_lte_zero_ovrd : 4; /**< [ 35: 32](R/W) CTLE LTE zero frequency override value. */
        uint64_t ctle_lte_zero_ovrd_en : 1; /**< [ 36: 36](R/W) CTLE LTE zero frequency override enable.
                                                 By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
                                                 will be set equal to CTLE_ZERO within the RX adaptation FSM. */
        uint64_t reserved_37_39 : 3;
        uint64_t os_afe_even_ovrd : 6; /**< [ 45: 40](R/W) OS_AFE_EVEN offset override value. */
        uint64_t os_afe_even_ovrd_en : 1; /**< [ 46: 46](R/W) Enable use of [OS_AFE_EVEN_OVRD]. */
        uint64_t reserved_47 : 1;
        uint64_t os_afe_odd_ovrd : 6; /**< [ 53: 48](R/W) OS_AFE_ODD offset override value. */
        uint64_t os_afe_odd_ovrd_en : 1; /**< [ 54: 54](R/W) Enable use of [OS_AFE_ODD_OVRD]. */
        uint64_t reserved_55 : 1;
        uint64_t en_os_afe_ovrd : 1; /**< [ 56: 56](R/W) OS_AFE_EN override value. */
        uint64_t en_os_afe_ovrd_en : 1; /**< [ 57: 57](R/W) Enable use of [EN_OS_AFE_OVRD]. */
        uint64_t edgesel_odd_ovrd : 1; /**< [ 58: 58](R/W) EDGESEL_ODD override value. */
        uint64_t edgesel_odd_ovrd_en : 1; /**< [ 59: 59](R/W) Enable use of [EDGESEL_ODD_OVRD]. */
        uint64_t edgesel_even_ovrd : 1; /**< [ 60: 60](R/W) EDGESEL_EVEN override value. */
        uint64_t edgesel_even_ovrd_en : 1; /**< [ 61: 61](R/W) Enable use of [EDGESEL_EVEN_OVRD]. */
        uint64_t reserved_62_63 : 2;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_gsernx_lanex_rx_4_bcfg_s cn; */
};
typedef union bdk_gsernx_lanex_rx_4_bcfg bdk_gsernx_lanex_rx_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_4_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR exists on CN9XXX only, for GSERN block a in [0..7] and lane b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+        return 0x87e090000ca0ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+    /* Wrong chip model or out-of-range index: report a fatal CSR lookup error. */
+    __bdk_csr_fatal("GSERNX_LANEX_RX_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) bdk_gsernx_lanex_rx_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) "GSERNX_LANEX_RX_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_4_bsts
+ *
+ * GSER Lane RX Base Status Register 4
+ * Status registers for VGA, CTLE, and OS_AFE values
+ * (either calibration results or overrides).
+ */
+/* Bit fields are declared twice (big- then little-endian order) so that the
+   in-memory layout matches the hardware bit numbering on either host. */
+union bdk_gsernx_lanex_rx_4_bsts
+{
+    uint64_t u; /**< Entire register as one 64-bit value. */
+    struct bdk_gsernx_lanex_rx_4_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t blwc : 5; /**< [ 63: 59](RO/H) BLWC. This field is only valid if GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS]
+            is deasserted (indicating BLWC adaptation has completed), or if the
+            corresponding CSR override enable is asserted. */
+        uint64_t reserved_57_58 : 2;
+        uint64_t en_os_afe : 1; /**< [ 56: 56](RO/H) AFE offset compensation enable value in-use. This field is only
+            valid if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE
+            offset adaptation has completed), or if the corresponding CSR
+            override enable is asserted. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os_afe_odd : 6; /**< [ 53: 48](RO/H) AFE odd offset compensation value in-use. This field is only valid
+            if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE offset
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os_afe_even : 6; /**< [ 45: 40](RO/H) AFE even offset compensation value in-use. This field is only valid
+            if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE offset
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_36_39 : 4;
+        uint64_t ctle_lte_zero : 4; /**< [ 35: 32](RO/H) CTLE LTE zero frequency. This field is only valid if
+            GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is deasserted (indicating VGA
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_28_31 : 4;
+        uint64_t ctle_lte_gain : 4; /**< [ 27: 24](RO/H) CTLE LTE DC gain. This field is only valid if
+            GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_20_23 : 4;
+        uint64_t ctle_zero : 4; /**< [ 19: 16](RO/H) CTLE zero frequency. This field is only valid if
+            GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t ctle_gain : 4; /**< [ 11: 8](RO/H) CTLE DC gain. This field is only valid if
+            GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_7 : 1;
+        uint64_t prevga_gn : 3; /**< [ 6: 4](RO/H) Pre-VGA gain. This field is only valid if
+            GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is deasserted (indicating Pre-VGA
+            gain adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t vga_gain : 4; /**< [ 3: 0](RO/H) VGA DC gain. This field is only valid if GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS]
+            is deasserted (indicating VGA adaptation has completed), or if the
+            corresponding CSR override enable is asserted. */
+#else /* Word 0 - Little Endian */
+        uint64_t vga_gain : 4; /**< [ 3: 0](RO/H) VGA DC gain. This field is only valid if GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS]
+            is deasserted (indicating VGA adaptation has completed), or if the
+            corresponding CSR override enable is asserted. */
+        uint64_t prevga_gn : 3; /**< [ 6: 4](RO/H) Pre-VGA gain. This field is only valid if
+            GSERN()_LANE()_RX_11_BSTS[PREVGA_GN_ADAPT_STATUS] is deasserted (indicating Pre-VGA
+            gain adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_7 : 1;
+        uint64_t ctle_gain : 4; /**< [ 11: 8](RO/H) CTLE DC gain. This field is only valid if
+            GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t ctle_zero : 4; /**< [ 19: 16](RO/H) CTLE zero frequency. This field is only valid if
+            GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_20_23 : 4;
+        uint64_t ctle_lte_gain : 4; /**< [ 27: 24](RO/H) CTLE LTE DC gain. This field is only valid if
+            GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted (indicating VGA
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_28_31 : 4;
+        uint64_t ctle_lte_zero : 4; /**< [ 35: 32](RO/H) CTLE LTE zero frequency. This field is only valid if
+            GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is deasserted (indicating VGA
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_36_39 : 4;
+        uint64_t os_afe_even : 6; /**< [ 45: 40](RO/H) AFE even offset compensation value in-use. This field is only valid
+            if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE offset
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os_afe_odd : 6; /**< [ 53: 48](RO/H) AFE odd offset compensation value in-use. This field is only valid
+            if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE offset
+            adaptation has completed), or if the corresponding CSR override
+            enable is asserted. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t en_os_afe : 1; /**< [ 56: 56](RO/H) AFE offset compensation enable value in-use. This field is only
+            valid if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] is asserted (indicating AFE
+            offset adaptation has completed), or if the corresponding CSR
+            override enable is asserted. */
+        uint64_t reserved_57_58 : 2;
+        uint64_t blwc : 5; /**< [ 63: 59](RO/H) BLWC. This field is only valid if GSERN()_LANE()_RX_10_BSTS[BLWC_ADAPT_STATUS]
+            is deasserted (indicating BLWC adaptation has completed), or if the
+            corresponding CSR override enable is asserted. */
+#endif /* Word 0 - End */
+    } s; /**< Per-field (bit-field) view of the register. */
+    /* struct bdk_gsernx_lanex_rx_4_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_4_bsts bdk_gsernx_lanex_rx_4_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_4_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_4_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR exists on CN9XXX only, for GSERN block a in [0..7] and lane b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+        return 0x87e090001690ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+    /* Wrong chip model or out-of-range index: report a fatal CSR lookup error. */
+    __bdk_csr_fatal("GSERNX_LANEX_RX_4_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) bdk_gsernx_lanex_rx_4_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) "GSERNX_LANEX_RX_4_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_4_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_5_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 5
+ * Adaptation parameters for DFE coefficients.
+ */
+/* Bit fields are declared twice (big- then little-endian order) so that the
+   in-memory layout matches the hardware bit numbering on either host. */
+union bdk_gsernx_lanex_rx_5_bcfg
+{
+    uint64_t u; /**< Entire register as one 64-bit value. */
+    struct bdk_gsernx_lanex_rx_5_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_63 : 1;
+        uint64_t ctle_leak_sgn : 1; /**< [ 62: 62](R/W) CTLE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t ctlez_leak_sgn : 1; /**< [ 61: 61](R/W) CTLE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t dfe_c1_leak_sgn : 1; /**< [ 60: 60](R/W) DFE C1 leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t vga_leak_sgn : 1; /**< [ 59: 59](R/W) VGA leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t dfe_c1_leak : 3; /**< [ 58: 56](R/W) DFE C1 Gain adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t dfe_c1_mu : 3; /**< [ 55: 53](R/W) DFE C1 adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t vga_leak : 3; /**< [ 52: 50](R/W) VGA gain adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t vga_mu : 3; /**< [ 49: 47](R/W) VGA adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t vga_timer_max : 15; /**< [ 46: 32](R/W) VGA adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+        uint64_t reserved_22_31 : 10;
+        uint64_t dfe_leak_sgn : 1; /**< [ 21: 21](R/W) DFE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t dfe_leak : 3; /**< [ 20: 18](R/W) DFE adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t dfe_mu : 3; /**< [ 17: 15](R/W) DFE adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t dfe_timer_max : 15; /**< [ 14: 0](R/W) DFE adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+#else /* Word 0 - Little Endian */
+        uint64_t dfe_timer_max : 15; /**< [ 14: 0](R/W) DFE adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+        uint64_t dfe_mu : 3; /**< [ 17: 15](R/W) DFE adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t dfe_leak : 3; /**< [ 20: 18](R/W) DFE adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t dfe_leak_sgn : 1; /**< [ 21: 21](R/W) DFE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t reserved_22_31 : 10;
+        uint64_t vga_timer_max : 15; /**< [ 46: 32](R/W) VGA adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+        uint64_t vga_mu : 3; /**< [ 49: 47](R/W) VGA adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t vga_leak : 3; /**< [ 52: 50](R/W) VGA gain adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t dfe_c1_mu : 3; /**< [ 55: 53](R/W) DFE C1 adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t dfe_c1_leak : 3; /**< [ 58: 56](R/W) DFE C1 Gain adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t vga_leak_sgn : 1; /**< [ 59: 59](R/W) VGA leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t dfe_c1_leak_sgn : 1; /**< [ 60: 60](R/W) DFE C1 leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t ctlez_leak_sgn : 1; /**< [ 61: 61](R/W) CTLE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t ctle_leak_sgn : 1; /**< [ 62: 62](R/W) CTLE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+    } s; /**< Per-field (bit-field) view of the register. */
+    /* struct bdk_gsernx_lanex_rx_5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_5_bcfg bdk_gsernx_lanex_rx_5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_5_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR exists on CN9XXX only, for GSERN block a in [0..7] and lane b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+        return 0x87e090000cb0ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+    /* Wrong chip model or out-of-range index: report a fatal CSR lookup error. */
+    __bdk_csr_fatal("GSERNX_LANEX_RX_5_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) bdk_gsernx_lanex_rx_5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) "GSERNX_LANEX_RX_5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_5_bsts
+ *
+ * GSER Lane RX Base Status Register 5
+ * Status registers for VGA, CTLE, and DFE adaptation.
+ */
+/* Bit fields are declared twice (big- then little-endian order) so that the
+   in-memory layout matches the hardware bit numbering on either host. */
+union bdk_gsernx_lanex_rx_5_bsts
+{
+    uint64_t u; /**< Entire register as one 64-bit value. */
+    struct bdk_gsernx_lanex_rx_5_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t ctlez_adapt_count : 15; /**< [ 63: 49](RO/H) CTLEZ adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is deasserted. */
+        uint64_t ctlez_adapt_status : 1; /**< [ 48: 48](RO/H) CTLEZ adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t ctle_adapt_count : 15; /**< [ 47: 33](RO/H) CTLE adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted. */
+        uint64_t ctle_adapt_status : 1; /**< [ 32: 32](RO/H) CTLE adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t dfe_adapt_count : 15; /**< [ 31: 17](RO/H) DFE adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted. */
+        uint64_t dfe_adapt_status : 1; /**< [ 16: 16](RO/H) DFE adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t vga_adapt_count : 15; /**< [ 15: 1](RO/H) VGA Gain adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS] is deasserted. */
+        uint64_t vga_adapt_status : 1; /**< [ 0: 0](RO/H) VGA Gain adaptation status. When 0, training is inactive. When 1, training is active. */
+#else /* Word 0 - Little Endian */
+        uint64_t vga_adapt_status : 1; /**< [ 0: 0](RO/H) VGA Gain adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t vga_adapt_count : 15; /**< [ 15: 1](RO/H) VGA Gain adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS] is deasserted. */
+        uint64_t dfe_adapt_status : 1; /**< [ 16: 16](RO/H) DFE adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t dfe_adapt_count : 15; /**< [ 31: 17](RO/H) DFE adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted. */
+        uint64_t ctle_adapt_status : 1; /**< [ 32: 32](RO/H) CTLE adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t ctle_adapt_count : 15; /**< [ 47: 33](RO/H) CTLE adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is deasserted. */
+        uint64_t ctlez_adapt_status : 1; /**< [ 48: 48](RO/H) CTLEZ adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t ctlez_adapt_count : 15; /**< [ 63: 49](RO/H) CTLEZ adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is deasserted. */
+#endif /* Word 0 - End */
+    } s; /**< Per-field (bit-field) view of the register. */
+    /* struct bdk_gsernx_lanex_rx_5_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_5_bsts bdk_gsernx_lanex_rx_5_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_5_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_5_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR exists on CN9XXX only, for GSERN block a in [0..7] and lane b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+        return 0x87e0900016a0ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+    /* Wrong chip model or out-of-range index: report a fatal CSR lookup error. */
+    __bdk_csr_fatal("GSERNX_LANEX_RX_5_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) bdk_gsernx_lanex_rx_5_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) "GSERNX_LANEX_RX_5_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_5_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_6_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 6
+ * Adaptation controls for DFE CTLE and CTLEZ parameter.
+ */
+/* Bit fields are declared twice (big- then little-endian order) so that the
+   in-memory layout matches the hardware bit numbering on either host. */
+union bdk_gsernx_lanex_rx_6_bcfg
+{
+    uint64_t u; /**< Entire register as one 64-bit value. */
+    struct bdk_gsernx_lanex_rx_6_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t ctlelte_leak_sgn : 1; /**< [ 63: 63](R/W) CTLELTE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+        uint64_t ctlelte_leak : 3; /**< [ 62: 60](R/W) CTLELTE adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t ctlelte_mu : 3; /**< [ 59: 57](R/W) CTLELTE adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t ctlelte_timer_max : 15; /**< [ 56: 42](R/W) CTLELTE adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+        uint64_t ctlez_leak : 3; /**< [ 41: 39](R/W) CTLEZ adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t ctlez_mu : 3; /**< [ 38: 36](R/W) CTLEZ adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t ctlez_timer_max : 15; /**< [ 35: 21](R/W) CTLEZ adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+        uint64_t ctle_leak : 3; /**< [ 20: 18](R/W) DFE CTLE adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t ctle_mu : 3; /**< [ 17: 15](R/W) DFE CTLE adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t ctle_timer_max : 15; /**< [ 14: 0](R/W) DFE CTLE adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+#else /* Word 0 - Little Endian */
+        uint64_t ctle_timer_max : 15; /**< [ 14: 0](R/W) DFE CTLE adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+        uint64_t ctle_mu : 3; /**< [ 17: 15](R/W) DFE CTLE adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t ctle_leak : 3; /**< [ 20: 18](R/W) DFE CTLE adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t ctlez_timer_max : 15; /**< [ 35: 21](R/W) CTLEZ adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+        uint64_t ctlez_mu : 3; /**< [ 38: 36](R/W) CTLEZ adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t ctlez_leak : 3; /**< [ 41: 39](R/W) CTLEZ adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t ctlelte_timer_max : 15; /**< [ 56: 42](R/W) CTLELTE adaptation timer maximum count value.
+            15-bit field, maximum value 0x7FFF. */
+        uint64_t ctlelte_mu : 3; /**< [ 59: 57](R/W) CTLELTE adaptation mu parameter setting.
+            0x0 = 1/16.
+            0x1 = 1/8.
+            0x2 = 1/4.
+            0x3 = 1/2.
+            0x4 = 1.
+            0x5 = 2.
+            0x6 = 4.
+            0x7 = 8. */
+        uint64_t ctlelte_leak : 3; /**< [ 62: 60](R/W) CTLELTE adaptation leak parameter setting.
+            0x0 = 1/128.
+            0x1 = 1/64.
+            0x2 = 1/32.
+            0x3 = 1/16.
+            0x4 = 1/8.
+            0x5 = 1/4.
+            0x6 = 1/2.
+            0x7 = Disabled. */
+        uint64_t ctlelte_leak_sgn : 1; /**< [ 63: 63](R/W) CTLELTE leak sign. 0 = Positive (add). 1 = Negative (subtract). */
+#endif /* Word 0 - End */
+    } s; /**< Per-field (bit-field) view of the register. */
+    /* struct bdk_gsernx_lanex_rx_6_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_6_bcfg bdk_gsernx_lanex_rx_6_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_6_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_6_BCFG(unsigned long a, unsigned long b)
+{
+    /* CSR exists on CN9XXX only, for GSERN block a in [0..7] and lane b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+        return 0x87e090000cc0ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+    /* Wrong chip model or out-of-range index: report a fatal CSR lookup error. */
+    __bdk_csr_fatal("GSERNX_LANEX_RX_6_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) bdk_gsernx_lanex_rx_6_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) "GSERNX_LANEX_RX_6_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_6_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_6_bsts
+ *
+ * GSER Lane RX Base Status Register 6
+ * Status registers for LMS adaptation.
+ */
+/* Bit fields are declared twice (big- then little-endian order) so that the
+   in-memory layout matches the hardware bit numbering on either host. */
+union bdk_gsernx_lanex_rx_6_bsts
+{
+    uint64_t u; /**< Entire register as one 64-bit value. */
+    struct bdk_gsernx_lanex_rx_6_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63 : 16;
+        uint64_t ctlelte_adapt_count : 15; /**< [ 47: 33](RO/H) CTLELTE adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+        uint64_t ctlelte_adapt_status : 1; /**< [ 32: 32](RO/H) CTLELTE adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t subrate_now : 16; /**< [ 31: 16](RO/H) Subrate_Now counter value. Only valid when
+            GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+        uint64_t upv_count : 16; /**< [ 15: 0](RO/H) UPV (Up-Vote) counter value. Only valid when
+            GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+#else /* Word 0 - Little Endian */
+        uint64_t upv_count : 16; /**< [ 15: 0](RO/H) UPV (Up-Vote) counter value. Only valid when
+            GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+        uint64_t subrate_now : 16; /**< [ 31: 16](RO/H) Subrate_Now counter value. Only valid when
+            GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+        uint64_t ctlelte_adapt_status : 1; /**< [ 32: 32](RO/H) CTLELTE adaptation status. When 0, training is inactive. When 1, training is active. */
+        uint64_t ctlelte_adapt_count : 15; /**< [ 47: 33](RO/H) CTLELTE adaptation timer count value. Only valid when
+            GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is deasserted. */
+        uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+    } s; /**< Per-field (bit-field) view of the register. */
+    /* struct bdk_gsernx_lanex_rx_6_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_6_bsts bdk_gsernx_lanex_rx_6_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_6_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_6_BSTS(unsigned long a, unsigned long b)
+{
+    /* CSR exists on CN9XXX only, for GSERN block a in [0..7] and lane b in [0..4]. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+        return 0x87e0900016b0ll + (a & 0x7) * 0x1000000ll + (b & 0x7) * 0x10000ll;
+    /* Wrong chip model or out-of-range index: report a fatal CSR lookup error. */
+    __bdk_csr_fatal("GSERNX_LANEX_RX_6_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) bdk_gsernx_lanex_rx_6_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) "GSERNX_LANEX_RX_6_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_6_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_7_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 7
+ * Adaptation reset/mode for the DFE.
+ */
+union bdk_gsernx_lanex_rx_7_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_7_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t gain_diff_max : 4; /**< [ 27: 24](R/W) Gain Difference Maximum Value. This value is used in the correlation function
+ for the Pre-VGA Gain and VGA Gain adaptation.
+ The gain difference maximum value is used to manage the adapation rates of these
+ two parameters (Pre-VGA Gain and VGA Gain). */
+ uint64_t prevga_gn_upv_rst : 1; /**< [ 23: 23](R/W) PREVGA_GN UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using PREVGA_GN adaptation subrate gear-shifting.
+ When enabled, the gear-shifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t prevga_gn_subrate_rst : 1; /**< [ 22: 22](R/W) PREVGA_GN subrate counter reset. The subrate counter controls the interval between LMS
+ updates.
+ When 1, the counter is reset. When 0, the counter increments to the value
+ controlled by GSERN()_LANE()_RX_21_BCFG[PREVGA_GN_SUBRATE_INIT] and
+ GSERN()_LANE()_RX_21_BCFG[PREVGA_GN_SUBRATE_FIN]. */
+ uint64_t prevga_gn_rst : 2; /**< [ 21: 20](R/W) PREVGA_GN adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t blwc_upv_rst : 1; /**< [ 19: 19](R/W) BLWC UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using BLWC adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t blwc_subrate_rst : 1; /**< [ 18: 18](R/W) BLWC subrate counter reset. The subrate counter controls the interval between LMS updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the BLWC_SUBRATE_INIT and BLWC_SUBRATE_FINAL registers. */
+ uint64_t blwc_rst : 2; /**< [ 17: 16](R/W) BLWC adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t afeos_upv_rst : 1; /**< [ 15: 15](R/W) AFEOS UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using AFEOS adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t afeos_subrate_rst : 1; /**< [ 14: 14](R/W) AFEOS subrate counter reset. The subrate counter controls the interval between LMS
+ updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the AFEOS_SUBRATE_INIT and AFEOS_SUBRATE_FINAL registers. */
+ uint64_t afeos_rst : 2; /**< [ 13: 12](R/W) AFE offset adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t upv_rst : 1; /**< [ 11: 11](R/W) UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t subrate_rst : 1; /**< [ 10: 10](R/W) Subrate counter reset. The subrate counter controls the interval between LMS updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the SUBRATE INIT and SUBRATE_FINAL registers. */
+ uint64_t ctlelte_rst : 2; /**< [ 9: 8](R/W) CTLELTE adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctlez_rst : 2; /**< [ 7: 6](R/W) CTLEZ adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t vga_rst : 2; /**< [ 5: 4](R/W) VGA Gain adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctle_rst : 2; /**< [ 3: 2](R/W) CTLE/CTLEZ adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t dfe_rst : 2; /**< [ 1: 0](R/W) DFE adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_rst : 2; /**< [ 1: 0](R/W) DFE adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctle_rst : 2; /**< [ 3: 2](R/W) CTLE/CTLEZ adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t vga_rst : 2; /**< [ 5: 4](R/W) VGA Gain adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctlez_rst : 2; /**< [ 7: 6](R/W) CTLEZ adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t ctlelte_rst : 2; /**< [ 9: 8](R/W) CTLELTE adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t subrate_rst : 1; /**< [ 10: 10](R/W) Subrate counter reset. The subrate counter controls the interval between LMS updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the SUBRATE INIT and SUBRATE_FINAL registers. */
+ uint64_t upv_rst : 1; /**< [ 11: 11](R/W) UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t afeos_rst : 2; /**< [ 13: 12](R/W) AFE offset adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t afeos_subrate_rst : 1; /**< [ 14: 14](R/W) AFEOS subrate counter reset. The subrate counter controls the interval between LMS
+ updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the AFEOS_SUBRATE_INIT and AFEOS_SUBRATE_FINAL registers. */
+ uint64_t afeos_upv_rst : 1; /**< [ 15: 15](R/W) AFEOS UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using AFEOS adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t blwc_rst : 2; /**< [ 17: 16](R/W) BLWC adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t blwc_subrate_rst : 1; /**< [ 18: 18](R/W) BLWC subrate counter reset. The subrate counter controls the interval between LMS updates.
+ When 1, the counter is reset. When 0, the counter increments to the value controlled by
+ the BLWC_SUBRATE_INIT and BLWC_SUBRATE_FINAL registers. */
+ uint64_t blwc_upv_rst : 1; /**< [ 19: 19](R/W) BLWC UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using BLWC adaptation subrate gearshifting.
+ When enabled, the gearshifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t prevga_gn_rst : 2; /**< [ 21: 20](R/W) PREVGA_GN adaptation reset/mode setting.
+ 0x0 = Reset.
+ 0x1 = Run once adaptation.
+ 0x2 = Pause adaptation.
+ 0x3 = Run continuous adaptation. */
+ uint64_t prevga_gn_subrate_rst : 1; /**< [ 22: 22](R/W) PREVGA_GN subrate counter reset. The subrate counter controls the interval between LMS
+ updates.
+ When 1, the counter is reset. When 0, the counter increments to the value
+ controlled by GSERN()_LANE()_RX_21_BCFG[PREVGA_GN_SUBRATE_INIT] and
+ GSERN()_LANE()_RX_21_BCFG[PREVGA_GN_SUBRATE_FIN]. */
+ uint64_t prevga_gn_upv_rst : 1; /**< [ 23: 23](R/W) PREVGA_GN UPV count reset. Set to zero before running the receiver reset state
+ machine to bring the receiver up using PREVGA_GN adaptation subrate gear-shifting.
+ When enabled, the gear-shifting function can increment the current subrate
+ when the UPV count equals the current subrate (scaled). May be set to 1 if
+ gearshifting is not used. */
+ uint64_t gain_diff_max : 4; /**< [ 27: 24](R/W) Gain Difference Maximum Value. This value is used in the correlation function
+ for the Pre-VGA Gain and VGA Gain adaptation.
+ The gain difference maximum value is used to manage the adapation rates of these
+ two parameters (Pre-VGA Gain and VGA Gain). */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_7_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_7_bcfg bdk_gsernx_lanex_rx_7_bcfg_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_7_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_7_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000cd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_7_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) bdk_gsernx_lanex_rx_7_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) "GSERNX_LANEX_RX_7_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_7_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_7_bsts
+ *
+ * GSER Lane RX Base Status Register 7
+ * Configuration registers for LMS adaptation. Current Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_7_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_7_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ctlelte_deadband_now : 12; /**< [ 59: 48](RO/H) Current 12-bit integer value of CTLELTE adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is
+ asserted. */
+ uint64_t ctlez_deadband_now : 12; /**< [ 47: 36](RO/H) Current 12-bit integer value of CTLEZ adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is
+ deasserted. */
+ uint64_t ctle_deadband_now : 12; /**< [ 35: 24](RO/H) Current 12-bit integer value of CTLE adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is
+ deasserted. */
+ uint64_t dfe_deadband_now : 12; /**< [ 23: 12](RO/H) Current 12-bit integer value of Coeff Adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted. */
+ uint64_t vga_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of VGA adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS] is deasserted. */
+#else /* Word 0 - Little Endian */
+ uint64_t vga_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of VGA adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[VGA_ADAPT_STATUS] is deasserted. */
+ uint64_t dfe_deadband_now : 12; /**< [ 23: 12](RO/H) Current 12-bit integer value of Coeff Adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is deasserted. */
+ uint64_t ctle_deadband_now : 12; /**< [ 35: 24](RO/H) Current 12-bit integer value of CTLE adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[CTLE_ADAPT_STATUS] is
+ deasserted. */
+ uint64_t ctlez_deadband_now : 12; /**< [ 47: 36](RO/H) Current 12-bit integer value of CTLEZ adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[CTLEZ_ADAPT_STATUS] is
+ deasserted. */
+ uint64_t ctlelte_deadband_now : 12; /**< [ 59: 48](RO/H) Current 12-bit integer value of CTLELTE adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_6_BSTS[CTLELTE_ADAPT_STATUS] is
+ asserted. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_7_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_7_bsts bdk_gsernx_lanex_rx_7_bsts_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_7_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_7_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900016c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_7_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) bdk_gsernx_lanex_rx_7_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) "GSERNX_LANEX_RX_7_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_7_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_8_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 8
+ * Configuration registers for LMS adaptation
+ * Adaptation controls for Subrate parameters.
+ */
+union bdk_gsernx_lanex_rx_8_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_8_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t dfe_edgemode_ovrd : 1; /**< [ 49: 49](R/W) 0 = Selects non-transition bits for DFE adaptation.
+ 1 = Selects transition bits for DFE adaptation.
+
+ It applies the mode to the I, Q, and X paths.
+ GSERN()_LANE()_EYE_CTL_2[CAPTURE_EDGEMODE] sets the E path. */
+ uint64_t dfe_edgemode_ovrd_en : 1; /**< [ 48: 48](R/W) 0 = DFE state machine controls DFE edge mode select.
+ Currently, the DFE FSM will time interleave between both
+ edge modes (i.e. 50% non-transition, 50% transition).
+
+ 1 = [DFE_EDGEMODE_OVRD] controls DFE edge mode select. */
+ uint64_t reserved_35_47 : 13;
+ uint64_t subrate_scale : 3; /**< [ 34: 32](R/W) Subrate now counter scaling value for compare against Up Vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the starting value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set [SUBRATE_INIT] = [SUBRATE_FINAL] if subrate gearshifting is not
+ enabled. */
+ uint64_t subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the final value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set [SUBRATE_INIT] = [SUBRATE_FINAL] if subrate gearshifting is not
+ enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t subrate_final : 16; /**< [ 15: 0](R/W) Subrate counter final value. Sets the final value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set [SUBRATE_INIT] = [SUBRATE_FINAL] if subrate gearshifting is not
+ enabled. */
+ uint64_t subrate_init : 16; /**< [ 31: 16](R/W) Subrate counter initial value. Sets the starting value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set [SUBRATE_INIT] = [SUBRATE_FINAL] if subrate gearshifting is not
+ enabled. */
+ uint64_t subrate_scale : 3; /**< [ 34: 32](R/W) Subrate now counter scaling value for compare against Up Vote counter.
+ 0x0 = 1/32.
+ 0x1 = 1/16.
+ 0x2 = 3/32.
+ 0x3 = 1/8.
+ 0x4 = 3/16.
+ 0x5 = 1/4.
+ 0x6 = 3/8.
+ 0x7 = 1/2. */
+ uint64_t reserved_35_47 : 13;
+ uint64_t dfe_edgemode_ovrd_en : 1; /**< [ 48: 48](R/W) 0 = DFE state machine controls DFE edge mode select.
+ Currently, the DFE FSM will time interleave between both
+ edge modes (i.e. 50% non-transition, 50% transition).
+
+ 1 = [DFE_EDGEMODE_OVRD] controls DFE edge mode select. */
+ uint64_t dfe_edgemode_ovrd : 1; /**< [ 49: 49](R/W) 0 = Selects non-transition bits for DFE adaptation.
+ 1 = Selects transition bits for DFE adaptation.
+
+ It applies the mode to the I, Q, and X paths.
+ GSERN()_LANE()_EYE_CTL_2[CAPTURE_EDGEMODE] sets the E path. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_8_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_8_bcfg bdk_gsernx_lanex_rx_8_bcfg_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_8_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_8_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000ce0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_8_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) bdk_gsernx_lanex_rx_8_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) "GSERNX_LANEX_RX_8_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_8_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_8_bsts
+ *
+ * GSER Lane RX Base Status Register 8
+ * Status registers for AFEOS LMS adaptation. Current AFEOS Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_8_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_8_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t afeos_subrate_now : 16; /**< [ 63: 48](RO/H) AFEOS subrate_now counter value. Only valid when
+ GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t afeos_upv_count : 16; /**< [ 43: 28](RO/H) AFE up-vote counter value. Only valid when
+ GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+ uint64_t afeos_adapt_status : 1; /**< [ 27: 27](RO/H) AFEOS adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t afeos_adapt_count : 15; /**< [ 26: 12](RO/H) AFEOS adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+ Only valid when GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+ uint64_t afeos_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of AFEOS adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+#else /* Word 0 - Little Endian */
+ uint64_t afeos_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of AFEOS adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+ uint64_t afeos_adapt_count : 15; /**< [ 26: 12](RO/H) AFEOS adaptation timer current count value. 15-bit field, maximum value 0x7FFF.
+ Only valid when GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+ uint64_t afeos_adapt_status : 1; /**< [ 27: 27](RO/H) AFEOS adaptation status. When 0, training is inactive. When 1, training is active. */
+ uint64_t afeos_upv_count : 16; /**< [ 43: 28](RO/H) AFE up-vote counter value. Only valid when
+ GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t afeos_subrate_now : 16; /**< [ 63: 48](RO/H) AFEOS subrate_now counter value. Only valid when
+ GSERN()_LANE()_RX_8_BSTS[AFEOS_ADAPT_STATUS] is clear. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_8_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_8_bsts bdk_gsernx_lanex_rx_8_bsts_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_8_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_8_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900016d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_8_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) bdk_gsernx_lanex_rx_8_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) "GSERNX_LANEX_RX_8_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_8_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_9_bcfg
+ *
+ * GSER Lane RX Base Configuration Register 9
+ * Configuration registers for LMS adaptation. Deadband settings for adaptation.
+ */
+union bdk_gsernx_lanex_rx_9_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_9_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ctlelte_deadband : 12; /**< [ 59: 48](R/W) CTLELTE adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t ctlez_deadband : 12; /**< [ 47: 36](R/W) CTLEZ adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t ctle_deadband : 12; /**< [ 35: 24](R/W) CTLE adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t dfe_deadband : 12; /**< [ 23: 12](R/W) Coeff adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t vga_deadband : 12; /**< [ 11: 0](R/W) VGA adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+#else /* Word 0 - Little Endian */
+ uint64_t vga_deadband : 12; /**< [ 11: 0](R/W) VGA adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t dfe_deadband : 12; /**< [ 23: 12](R/W) Coeff adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t ctle_deadband : 12; /**< [ 35: 24](R/W) CTLE adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t ctlez_deadband : 12; /**< [ 47: 36](R/W) CTLEZ adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t ctlelte_deadband : 12; /**< [ 59: 48](R/W) CTLELTE adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_9_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_9_bcfg bdk_gsernx_lanex_rx_9_bcfg_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_9_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_9_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000cf0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_9_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) bdk_gsernx_lanex_rx_9_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) "GSERNX_LANEX_RX_9_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_9_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_9_bsts
+ *
+ * GSER Lane RX Base Status Register 9
+ * Status registers for DFE LMS adaptation.
+ */
+union bdk_gsernx_lanex_rx_9_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_9_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_12_63 : 52;
+ uint64_t dfe_c1_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of Coeff adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is clear. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_c1_deadband_now : 12; /**< [ 11: 0](RO/H) Current 12-bit integer value of Coeff adaptation deadband
+ setting. Note that the 8 fraction bits of the accumulator are not
+ reported. Only valid when GSERN()_LANE()_RX_5_BSTS[DFE_ADAPT_STATUS] is clear. */
+ uint64_t reserved_12_63 : 52;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_9_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_9_bsts bdk_gsernx_lanex_rx_9_bsts_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_9_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_9_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900016e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_9_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) bdk_gsernx_lanex_rx_9_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) "GSERNX_LANEX_RX_9_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_9_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idle_cal_cfg
+ *
+ * GSER Lane RX Idle Offset Dynamic ReCalibration Control Register
+ * Idle dynamic recalibration FSM control register. Used to configure the duration,
+ * frequency, and modes for the dynamic recalibration of the idle offset. Also,
+ * allows for enable/disable of this feature.
+ */
+union bdk_gsernx_lanex_rx_idle_cal_cfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_idle_cal_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t idle_recal_disable : 1; /**< [ 63: 63](R/W) Single bit for enabling or disabling the recalibration of idle offset. (This
+ bit does not affect the initial calibration of the idle offset).
+ 0 = Allow idle recalibration to run.
+ 1 = Disable dynamic recalibration of the idle offset. */
+ uint64_t idle_recal_oob_mode_disable : 1;/**< [ 62: 62](R/W) Single bit for enabling or disabling the dynamic recalibration OOB delay feature.
+ This feature allows us to push out any idle offset recalibration when any OOB
+ activity has been detected on the idle signal.
+ 0 = Allow idle recalibration to detect OOB transactions and delay recalibration.
+ 1 = Disable OOB transaction detection and do NOT delay recalibration. */
+ uint64_t idle_oob_adder_counter_clear : 1;/**< [ 61: 61](R/W) This bit, once set to high, forces the counter counting the number of OOB caused
+ delays to 8'h00. This is a static clear signal and has to be asserted to enable
+ the counter to resume counting. The count is in terms of the number of
+ RECALIBRATION_OOB_COUNT_ADDER increments.
+ 0 = Allow [OOB_DELAY_ADDER_COUNT] to increment.
+ 1 = Forces [OOB_DELAY_ADDER_COUNT] to 0x0.
+
+ Internal:
+ FIXME no such field RECALIBRATION_OOB_COUNT_ADDER then remove above exempt attribute. */
+ uint64_t reserved_40_60 : 21;
+ uint64_t max_oob_adder_count : 8; /**< [ 39: 32](R/W) Maximum number of OOB forced pushouts of the idle recalibrations allowed. If the
+ number of pushouts matches this number, the idle offset is forced to recalibrate
+ regardless of the state of the link. */
+ uint64_t oob_delay_adder_count : 32; /**< [ 31: 0](R/W) Number of svc_clk ticks allowed to delay the idle recalibration. Default is equal to
+ 1 second based on a 10 ns service clock cycle time. */
+#else /* Word 0 - Little Endian */
+ uint64_t oob_delay_adder_count : 32; /**< [ 31: 0](R/W) Number of svc_clk ticks allowed to delay the idle recalibration. Default is equal to
+ 1 second based on a 10 ns service clock cycle time. */
+ uint64_t max_oob_adder_count : 8; /**< [ 39: 32](R/W) Maximum number of OOB forced pushouts of the idle recalibrations allowed. If the
+ number of pushouts matches this number, the idle offset is forced to recalibrate
+ regardless of the state of the link. */
+ uint64_t reserved_40_60 : 21;
+ uint64_t idle_oob_adder_counter_clear : 1;/**< [ 61: 61](R/W) This bit, once set to high, forces the counter counting the number of OOB caused
+ delays to 8'h00. This is a static clear signal and has to be asserted to enable
+ the counter to resume counting. The count is in terms of the number of
+ RECALIBRATION_OOB_COUNT_ADDER increments.
+ 0 = Allow [OOB_DELAY_ADDER_COUNT] to increment.
+ 1 = Forces [OOB_DELAY_ADDER_COUNT] to 0x0.
+
+ Internal:
+ FIXME no such field RECALIBRATION_OOB_COUNT_ADDER then remove above exempt attribute. */
+ uint64_t idle_recal_oob_mode_disable : 1;/**< [ 62: 62](R/W) Single bit for enabling or disabling the dynamic recalibration OOB delay feature.
+ This feature allows us to push out any idle offset recalibration when any OOB
+ activity has been detected on the idle signal.
+ 0 = Allow idle recalibration to detect OOB transactions and delay recalibration.
+ 1 = Disable OOB transaction detection and do NOT delay recalibration. */
+ uint64_t idle_recal_disable : 1; /**< [ 63: 63](R/W) Single bit for enabling or disabling the recalibration of idle offset. (This
+ bit does not affect the initial calibration of the idle offset).
+ 0 = Allow idle recalibration to run.
+ 1 = Disable dynamic recalibration of the idle offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_idle_cal_cfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idle_cal_cfg bdk_gsernx_lanex_rx_idle_cal_cfg_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001530ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_IDLE_CAL_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) bdk_gsernx_lanex_rx_idle_cal_cfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) "GSERNX_LANEX_RX_IDLE_CAL_CFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLE_CAL_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idle_recal_cnt
+ *
+ * GSER Lane RX Idle Duration Count Before ReCalibration Register
+ * Count used to specify the duration of time between idle offset recalibrations.
+ */
+union bdk_gsernx_lanex_rx_idle_recal_cnt
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_idle_recal_cnt_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t idle_recal_duration_count : 48;/**< [ 47: 0](R/W) Number of svc_clk ticks to specify the delay between idle recalibration
+ triggers. Default is equal to
+ 1 min based on a 10ns svc_clk cycle time. */
+#else /* Word 0 - Little Endian */
+ uint64_t idle_recal_duration_count : 48;/**< [ 47: 0](R/W) Number of svc_clk ticks to specify the delay between idle recalibration
+ triggers. Default is equal to
+ 1 min based on a 10ns svc_clk cycle time. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_idle_recal_cnt_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idle_recal_cnt bdk_gsernx_lanex_rx_idle_recal_cnt_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001540ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_IDLE_RECAL_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) bdk_gsernx_lanex_rx_idle_recal_cnt_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) "GSERNX_LANEX_RX_IDLE_RECAL_CNT"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLE_RECAL_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idledet_1_bcfg
+ *
+ * GSER Lane RX Idle Detection Filter Control Register 1
+ * Parameters controlling the digital filter of the analog receiver's raw idle
+ * signal. Setting all fields to 1, i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_rx_idledet_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_idledet_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reset_filter : 1; /**< [ 63: 63](R/W) Reset for the digital filter of the analog receiver's raw idle signal. Set the
+ other fields in this register as desired before releasing [RESET_FILTER]. Note
+ that while the filter is in reset, the filter output will be high, indicating
+ idle.
+ 0 = Allow filter to run.
+ 1 = Hold filter in reset. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reset_filter : 1; /**< [ 63: 63](R/W) Reset for the digital filter of the analog receiver's raw idle signal. Set the
+ other fields in this register as desired before releasing [RESET_FILTER]. Note
+ that while the filter is in reset, the filter output will be high, indicating
+ idle.
+ 0 = Allow filter to run.
+ 1 = Hold filter in reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_idledet_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idledet_1_bcfg bdk_gsernx_lanex_rx_idledet_1_bcfg_t;
+
+/* Return the CSR physical address for GSERN(a) lane (b); calls
+ __bdk_csr_fatal() if (a,b) is out of range for this chip model. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001100ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_IDLEDET_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) bdk_gsernx_lanex_rx_idledet_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) "GSERNX_LANEX_RX_IDLEDET_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLEDET_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idledet_2_bcfg
+ *
+ * GSER Lane RX Idle Detection Filter Control Register 2
+ * Parameters controlling the digital filter of the analog receiver's raw idle
+ * signal. Setting all fields to 1, i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_rx_idledet_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_idledet_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t frc_en : 1; /**< [ 55: 55](R/W) Force enable.
+ 0 = Use the filter output based on the input from the analog idle detector.
+ 1 = Force the output of the digital idle filter to the value specified by
+ [FRC_VAL]. */
+ uint64_t frc_val : 1; /**< [ 54: 54](R/W) When [FRC_EN] is set to 1, this will be the value forced at the output of the
+ digital idle filter. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 1. */
+ uint64_t frc_val : 1; /**< [ 54: 54](R/W) When [FRC_EN] is set to 1, this will be the value forced at the output of the
+ digital idle filter. */
+ uint64_t frc_en : 1; /**< [ 55: 55](R/W) Force enable.
+ 0 = Use the filter output based on the input from the analog idle detector.
+ 1 = Force the output of the digital idle filter to the value specified by
+ [FRC_VAL]. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_idledet_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idledet_2_bcfg bdk_gsernx_lanex_rx_idledet_2_bcfg_t;
+
+/* Address of GSERN(a)_LANE(b)_RX_IDLEDET_2_BCFG (CN9XXX only; a<=7, b<=4).
+   NOTE(review): relies on __bdk_csr_fatal() not returning -- TODO confirm. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001110ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_IDLEDET_2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) bdk_gsernx_lanex_rx_idledet_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) "GSERNX_LANEX_RX_IDLEDET_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLEDET_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_idledet_bsts
+ *
+ * GSER Lane RX Base Idle Status Register
+ * Status register for receiver idle detection status.
+ */
+union bdk_gsernx_lanex_rx_idledet_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_idledet_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t idle : 1; /**< [ 0: 0](RO/H) One indicates that the receiver idle detection circuit has detected no input
+ data stream. Valid results can be expected anytime after the custom receiver
+ power-up and reset-exit sequence is complete. This is the output of the digital
+ idle detection filter. */
+#else /* Word 0 - Little Endian */
+ uint64_t idle : 1; /**< [ 0: 0](RO/H) One indicates that the receiver idle detection circuit has detected no input
+ data stream. Valid results can be expected anytime after the custom receiver
+ power-up and reset-exit sequence is complete. This is the output of the digital
+ idle detection filter. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_idledet_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_idledet_bsts bdk_gsernx_lanex_rx_idledet_bsts_t;
+
+/* Address of GSERN(a)_LANE(b)_RX_IDLEDET_BSTS (CN9XXX only; a<=7, b<=4).
+   NOTE(review): relies on __bdk_csr_fatal() not returning -- TODO confirm. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001120ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_IDLEDET_BSTS", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) bdk_gsernx_lanex_rx_idledet_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) "GSERNX_LANEX_RX_IDLEDET_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_IDLEDET_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_0_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Value Settings Register 0
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) are set.
+ */
+union bdk_gsernx_lanex_rx_itrim_0_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_0_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trim15_ovrd : 4; /**< [ 63: 60](R/W) Override setting for bits 87..84 of 180b ir25_trim. */
+ uint64_t trim14_ovrd : 4; /**< [ 59: 56](R/W) Override setting for bits 83..80 of 180b ir25_trim. */
+ uint64_t trim13_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 79..76 of 180b ir25_trim. */
+ uint64_t trim12_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 75..72 of 180b ir25_trim. */
+ uint64_t trim11_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 71..68 of 180b ir25_trim. */
+ uint64_t trim10_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 67..64 of 180b ir25_trim. */
+ uint64_t trim9_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 63..60 of 180b ir25_trim. */
+ uint64_t trim8_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 59..56 of 180b ir25_trim. */
+ uint64_t trim7_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 55..52 of 180b ir25_trim. */
+ uint64_t trim6_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 51..48 of 180b ir25_trim. */
+ uint64_t trim5_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 47..44 of 180b ir25_trim. */
+ uint64_t trim4_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 43..40 of 180b ir25_trim. */
+ uint64_t trim3_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 39..36 of 180b ir25_trim. */
+ uint64_t trim2_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 35..32 of 180b ir25_trim. */
+ uint64_t trim1_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 31..28 of 180b ir25_trim. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t trim1_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 31..28 of 180b ir25_trim. */
+ uint64_t trim2_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 35..32 of 180b ir25_trim. */
+ uint64_t trim3_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 39..36 of 180b ir25_trim. */
+ uint64_t trim4_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 43..40 of 180b ir25_trim. */
+ uint64_t trim5_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 47..44 of 180b ir25_trim. */
+ uint64_t trim6_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 51..48 of 180b ir25_trim. */
+ uint64_t trim7_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 55..52 of 180b ir25_trim. */
+ uint64_t trim8_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 59..56 of 180b ir25_trim. */
+ uint64_t trim9_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 63..60 of 180b ir25_trim. */
+ uint64_t trim10_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 67..64 of 180b ir25_trim. */
+ uint64_t trim11_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 71..68 of 180b ir25_trim. */
+ uint64_t trim12_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 75..72 of 180b ir25_trim. */
+ uint64_t trim13_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 79..76 of 180b ir25_trim. */
+ uint64_t trim14_ovrd : 4; /**< [ 59: 56](R/W) Override setting for bits 83..80 of 180b ir25_trim. */
+ uint64_t trim15_ovrd : 4; /**< [ 63: 60](R/W) Override setting for bits 87..84 of 180b ir25_trim. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_0_bcfg bdk_gsernx_lanex_rx_itrim_0_bcfg_t;
+
+/* Address of GSERN(a)_LANE(b)_RX_ITRIM_0_BCFG (CN9XXX only; a<=7, b<=4).
+   NOTE(review): relies on __bdk_csr_fatal() not returning -- TODO confirm. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001a80ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_0_BCFG", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_0_bsts
+ *
+ * GSER Lane Receiver Ir25 Trim Settings Register 0
+ * These are the ir25_trim settings in use. ir25_trim settings are in groups of 4 bits.
+ */
+union bdk_gsernx_lanex_rx_itrim_0_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_0_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trim15 : 4; /**< [ 63: 60](RO/H) Setting for bits 87..84 of 180b ir25_trim. */
+ uint64_t trim14 : 4; /**< [ 59: 56](RO/H) Setting for bits 83..80 of 180b ir25_trim. */
+ uint64_t trim13 : 4; /**< [ 55: 52](RO/H) Setting for bits 79..76 of 180b ir25_trim. */
+ uint64_t trim12 : 4; /**< [ 51: 48](RO/H) Setting for bits 75..72 of 180b ir25_trim. */
+ uint64_t trim11 : 4; /**< [ 47: 44](RO/H) Setting for bits 71..68 of 180b ir25_trim. */
+ uint64_t trim10 : 4; /**< [ 43: 40](RO/H) Setting for bits 67..64 of 180b ir25_trim. */
+ uint64_t trim9 : 4; /**< [ 39: 36](RO/H) Setting for bits 63..60 of 180b ir25_trim. */
+ uint64_t trim8 : 4; /**< [ 35: 32](RO/H) Setting for bits 59..56 of 180b ir25_trim. */
+ uint64_t trim7 : 4; /**< [ 31: 28](RO/H) Setting for bits 55..52 of 180b ir25_trim. */
+ uint64_t trim6 : 4; /**< [ 27: 24](RO/H) Setting for bits 51..48 of 180b ir25_trim. */
+ uint64_t trim5 : 4; /**< [ 23: 20](RO/H) Setting for bits 47..44 of 180b ir25_trim. */
+ uint64_t trim4 : 4; /**< [ 19: 16](RO/H) Setting for bits 43..40 of 180b ir25_trim. */
+ uint64_t trim3 : 4; /**< [ 15: 12](RO/H) Setting for bits 39..36 of 180b ir25_trim. */
+ uint64_t trim2 : 4; /**< [ 11: 8](RO/H) Setting for bits 35..32 of 180b ir25_trim. */
+ uint64_t trim1 : 4; /**< [ 7: 4](RO/H) Setting for bits 31..28 of 180b ir25_trim. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t trim1 : 4; /**< [ 7: 4](RO/H) Setting for bits 31..28 of 180b ir25_trim. */
+ uint64_t trim2 : 4; /**< [ 11: 8](RO/H) Setting for bits 35..32 of 180b ir25_trim. */
+ uint64_t trim3 : 4; /**< [ 15: 12](RO/H) Setting for bits 39..36 of 180b ir25_trim. */
+ uint64_t trim4 : 4; /**< [ 19: 16](RO/H) Setting for bits 43..40 of 180b ir25_trim. */
+ uint64_t trim5 : 4; /**< [ 23: 20](RO/H) Setting for bits 47..44 of 180b ir25_trim. */
+ uint64_t trim6 : 4; /**< [ 27: 24](RO/H) Setting for bits 51..48 of 180b ir25_trim. */
+ uint64_t trim7 : 4; /**< [ 31: 28](RO/H) Setting for bits 55..52 of 180b ir25_trim. */
+ uint64_t trim8 : 4; /**< [ 35: 32](RO/H) Setting for bits 59..56 of 180b ir25_trim. */
+ uint64_t trim9 : 4; /**< [ 39: 36](RO/H) Setting for bits 63..60 of 180b ir25_trim. */
+ uint64_t trim10 : 4; /**< [ 43: 40](RO/H) Setting for bits 67..64 of 180b ir25_trim. */
+ uint64_t trim11 : 4; /**< [ 47: 44](RO/H) Setting for bits 71..68 of 180b ir25_trim. */
+ uint64_t trim12 : 4; /**< [ 51: 48](RO/H) Setting for bits 75..72 of 180b ir25_trim. */
+ uint64_t trim13 : 4; /**< [ 55: 52](RO/H) Setting for bits 79..76 of 180b ir25_trim. */
+ uint64_t trim14 : 4; /**< [ 59: 56](RO/H) Setting for bits 83..80 of 180b ir25_trim. */
+ uint64_t trim15 : 4; /**< [ 63: 60](RO/H) Setting for bits 87..84 of 180b ir25_trim. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_0_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_0_bsts bdk_gsernx_lanex_rx_itrim_0_bsts_t;
+
+/* Address of GSERN(a)_LANE(b)_RX_ITRIM_0_BSTS (CN9XXX only; a<=7, b<=4).
+   NOTE(review): relies on __bdk_csr_fatal() not returning -- TODO confirm. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001bd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_0_BSTS", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) bdk_gsernx_lanex_rx_itrim_0_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) "GSERNX_LANEX_RX_ITRIM_0_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_0_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_1_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Value Settings Register 1
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) are set.
+ */
+union bdk_gsernx_lanex_rx_itrim_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trim31_ovrd : 4; /**< [ 63: 60](R/W) Override setting for bits 179..176 of 180b ir25_trim. */
+ uint64_t trim30_ovrd : 4; /**< [ 59: 56](R/W) Override setting for bits 175..172 of 180b ir25_trim. */
+ uint64_t trim29_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 171..168 of 180b ir25_trim. */
+ uint64_t trim28_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 167..164 of 180b ir25_trim. */
+ uint64_t trim27_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 163..160 of 180b ir25_trim. */
+ uint64_t trim26_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 159..156 of 180b ir25_trim. */
+ uint64_t trim25_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 155..152 of 180b ir25_trim. */
+ uint64_t trim24_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 151..148 of 180b ir25_trim. */
+ uint64_t trim23_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 147..144 of 180b ir25_trim. */
+ uint64_t trim22_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 143..140 of 180b ir25_trim. */
+ uint64_t trim21_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 139..136 of 180b ir25_trim. */
+ uint64_t trim20_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 135..132 of 180b ir25_trim. */
+ uint64_t trim19_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 131..128 of 180b ir25_trim. */
+ uint64_t trim18_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 127..124 of 180b ir25_trim. */
+ uint64_t trim17_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 123..120 of 180b ir25_trim. */
+ uint64_t trim16_ovrd : 4; /**< [ 3: 0](R/W) Override setting for bits 119..116 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim16_ovrd : 4; /**< [ 3: 0](R/W) Override setting for bits 119..116 of 180b ir25_trim. */
+ uint64_t trim17_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 123..120 of 180b ir25_trim. */
+ uint64_t trim18_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 127..124 of 180b ir25_trim. */
+ uint64_t trim19_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 131..128 of 180b ir25_trim. */
+ uint64_t trim20_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 135..132 of 180b ir25_trim. */
+ uint64_t trim21_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 139..136 of 180b ir25_trim. */
+ uint64_t trim22_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 143..140 of 180b ir25_trim. */
+ uint64_t trim23_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 147..144 of 180b ir25_trim. */
+ uint64_t trim24_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 151..148 of 180b ir25_trim. */
+ uint64_t trim25_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 155..152 of 180b ir25_trim. */
+ uint64_t trim26_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 159..156 of 180b ir25_trim. */
+ uint64_t trim27_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 163..160 of 180b ir25_trim. */
+ uint64_t trim28_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 167..164 of 180b ir25_trim. */
+ uint64_t trim29_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 171..168 of 180b ir25_trim. */
+ uint64_t trim30_ovrd : 4; /**< [ 59: 56](R/W) Override setting for bits 175..172 of 180b ir25_trim. */
+ uint64_t trim31_ovrd : 4; /**< [ 63: 60](R/W) Override setting for bits 179..176 of 180b ir25_trim. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_1_bcfg bdk_gsernx_lanex_rx_itrim_1_bcfg_t;
+
+/* Address of GSERN(a)_LANE(b)_RX_ITRIM_1_BCFG (CN9XXX only; a<=7, b<=4).
+   NOTE(review): relies on __bdk_csr_fatal() not returning -- TODO confirm. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001a90ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_1_bsts
+ *
+ * GSER Lane Receiver Ir25 Trim Settings Register 1
+ * These are the ir25_trim settings in use. ir25_trim settings are in groups of 4 bits.
+ */
+union bdk_gsernx_lanex_rx_itrim_1_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_1_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trim31 : 4; /**< [ 63: 60](RO/H) Setting for bits 179..176 of 180b ir25_trim. */
+ uint64_t trim30 : 4; /**< [ 59: 56](RO/H) Setting for bits 175..172 of 180b ir25_trim. */
+ uint64_t trim29 : 4; /**< [ 55: 52](RO/H) Setting for bits 171..168 of 180b ir25_trim. */
+ uint64_t trim28 : 4; /**< [ 51: 48](RO/H) Setting for bits 167..164 of 180b ir25_trim. */
+ uint64_t trim27 : 4; /**< [ 47: 44](RO/H) Setting for bits 163..160 of 180b ir25_trim. */
+ uint64_t trim26 : 4; /**< [ 43: 40](RO/H) Setting for bits 159..156 of 180b ir25_trim. */
+ uint64_t trim25 : 4; /**< [ 39: 36](RO/H) Setting for bits 155..152 of 180b ir25_trim. */
+ uint64_t trim24 : 4; /**< [ 35: 32](RO/H) Setting for bits 151..148 of 180b ir25_trim. */
+ uint64_t trim23 : 4; /**< [ 31: 28](RO/H) Setting for bits 147..144 of 180b ir25_trim. */
+ uint64_t trim22 : 4; /**< [ 27: 24](RO/H) Setting for bits 143..140 of 180b ir25_trim. */
+ uint64_t trim21 : 4; /**< [ 23: 20](RO/H) Setting for bits 139..136 of 180b ir25_trim. */
+ uint64_t trim20 : 4; /**< [ 19: 16](RO/H) Setting for bits 135..132 of 180b ir25_trim. */
+ uint64_t trim19 : 4; /**< [ 15: 12](RO/H) Setting for bits 131..128 of 180b ir25_trim. */
+ uint64_t trim18 : 4; /**< [ 11: 8](RO/H) Setting for bits 127..124 of 180b ir25_trim. */
+ uint64_t trim17 : 4; /**< [ 7: 4](RO/H) Setting for bits 123..120 of 180b ir25_trim. */
+ uint64_t trim16 : 4; /**< [ 3: 0](RO/H) Setting for bits 119..116 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim16 : 4; /**< [ 3: 0](RO/H) Setting for bits 119..116 of 180b ir25_trim. */
+ uint64_t trim17 : 4; /**< [ 7: 4](RO/H) Setting for bits 123..120 of 180b ir25_trim. */
+ uint64_t trim18 : 4; /**< [ 11: 8](RO/H) Setting for bits 127..124 of 180b ir25_trim. */
+ uint64_t trim19 : 4; /**< [ 15: 12](RO/H) Setting for bits 131..128 of 180b ir25_trim. */
+ uint64_t trim20 : 4; /**< [ 19: 16](RO/H) Setting for bits 135..132 of 180b ir25_trim. */
+ uint64_t trim21 : 4; /**< [ 23: 20](RO/H) Setting for bits 139..136 of 180b ir25_trim. */
+ uint64_t trim22 : 4; /**< [ 27: 24](RO/H) Setting for bits 143..140 of 180b ir25_trim. */
+ uint64_t trim23 : 4; /**< [ 31: 28](RO/H) Setting for bits 147..144 of 180b ir25_trim. */
+ uint64_t trim24 : 4; /**< [ 35: 32](RO/H) Setting for bits 151..148 of 180b ir25_trim. */
+ uint64_t trim25 : 4; /**< [ 39: 36](RO/H) Setting for bits 155..152 of 180b ir25_trim. */
+ uint64_t trim26 : 4; /**< [ 43: 40](RO/H) Setting for bits 159..156 of 180b ir25_trim. */
+ uint64_t trim27 : 4; /**< [ 47: 44](RO/H) Setting for bits 163..160 of 180b ir25_trim. */
+ uint64_t trim28 : 4; /**< [ 51: 48](RO/H) Setting for bits 167..164 of 180b ir25_trim. */
+ uint64_t trim29 : 4; /**< [ 55: 52](RO/H) Setting for bits 171..168 of 180b ir25_trim. */
+ uint64_t trim30 : 4; /**< [ 59: 56](RO/H) Setting for bits 175..172 of 180b ir25_trim. */
+ uint64_t trim31 : 4; /**< [ 63: 60](RO/H) Setting for bits 179..176 of 180b ir25_trim. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_1_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_1_bsts bdk_gsernx_lanex_rx_itrim_1_bsts_t;
+
+/* Address of GSERN(a)_LANE(b)_RX_ITRIM_1_BSTS (CN9XXX only; a<=7, b<=4).
+   NOTE(review): relies on __bdk_csr_fatal() not returning -- TODO confirm. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001be0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_1_BSTS", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) bdk_gsernx_lanex_rx_itrim_1_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) "GSERNX_LANEX_RX_ITRIM_1_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_1_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_2_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Value Settings Register 2
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) are set.
+ */
+union bdk_gsernx_lanex_rx_itrim_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_2_bcfg_s
+ {
+ /* NOTE(review): unlike registers 0/1, the trim-to-ir25_trim mapping here
+ interleaves low bit groups (3..0, 7..4, ...) with high bit groups
+ (91..88, 95..92, ...). This matches the generated CSR description and
+ is not a transcription error. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t trim45_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 27..24 of 180b ir25_trim. */
+ uint64_t trim44_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 115..112 of 180b ir25_trim. */
+ uint64_t trim43_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 23..20 of 180b ir25_trim. */
+ uint64_t trim42_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 111..108 of 180b ir25_trim. */
+ uint64_t trim41_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 19..16 of 180b ir25_trim. */
+ uint64_t trim40_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 107..104 of 180b ir25_trim. */
+ uint64_t trim39_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 15..12 of 180b ir25_trim. */
+ uint64_t trim38_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 103..100 of 180b ir25_trim. */
+ uint64_t trim37_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 11..8 of 180b ir25_trim. */
+ uint64_t trim36_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 99..96 of 180b ir25_trim. */
+ uint64_t trim35_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 7..4 of 180b ir25_trim. */
+ uint64_t trim34_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 95..92 of 180b ir25_trim. */
+ uint64_t trim33_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 3..0 of 180b ir25_trim. */
+ uint64_t trim32_ovrd : 4; /**< [ 3: 0](R/W) Override setting for bits 91..88 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim32_ovrd : 4; /**< [ 3: 0](R/W) Override setting for bits 91..88 of 180b ir25_trim. */
+ uint64_t trim33_ovrd : 4; /**< [ 7: 4](R/W) Override setting for bits 3..0 of 180b ir25_trim. */
+ uint64_t trim34_ovrd : 4; /**< [ 11: 8](R/W) Override setting for bits 95..92 of 180b ir25_trim. */
+ uint64_t trim35_ovrd : 4; /**< [ 15: 12](R/W) Override setting for bits 7..4 of 180b ir25_trim. */
+ uint64_t trim36_ovrd : 4; /**< [ 19: 16](R/W) Override setting for bits 99..96 of 180b ir25_trim. */
+ uint64_t trim37_ovrd : 4; /**< [ 23: 20](R/W) Override setting for bits 11..8 of 180b ir25_trim. */
+ uint64_t trim38_ovrd : 4; /**< [ 27: 24](R/W) Override setting for bits 103..100 of 180b ir25_trim. */
+ uint64_t trim39_ovrd : 4; /**< [ 31: 28](R/W) Override setting for bits 15..12 of 180b ir25_trim. */
+ uint64_t trim40_ovrd : 4; /**< [ 35: 32](R/W) Override setting for bits 107..104 of 180b ir25_trim. */
+ uint64_t trim41_ovrd : 4; /**< [ 39: 36](R/W) Override setting for bits 19..16 of 180b ir25_trim. */
+ uint64_t trim42_ovrd : 4; /**< [ 43: 40](R/W) Override setting for bits 111..108 of 180b ir25_trim. */
+ uint64_t trim43_ovrd : 4; /**< [ 47: 44](R/W) Override setting for bits 23..20 of 180b ir25_trim. */
+ uint64_t trim44_ovrd : 4; /**< [ 51: 48](R/W) Override setting for bits 115..112 of 180b ir25_trim. */
+ uint64_t trim45_ovrd : 4; /**< [ 55: 52](R/W) Override setting for bits 27..24 of 180b ir25_trim. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_2_bcfg bdk_gsernx_lanex_rx_itrim_2_bcfg_t;
+
+/* Address of GSERN(a)_LANE(b)_RX_ITRIM_2_BCFG (CN9XXX only; a<=7, b<=4).
+   NOTE(review): relies on __bdk_csr_fatal() not returning -- TODO confirm. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001aa0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_2_bsts
+ *
+ * GSER Lane Receiver Ir25 Trim Settings Register 2
+ * These are the ir25_trim settings in use. ir25_trim settings are in groups of 4 bits.
+ */
+union bdk_gsernx_lanex_rx_itrim_2_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_itrim_2_bsts_s
+ {
+ /* NOTE(review): the trim-to-ir25_trim mapping interleaves low bit groups
+ (3..0, 7..4, ...) with high bit groups (91..88, 95..92, ...), mirroring
+ the _bcfg register above; matches the generated CSR description. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_56_63 : 8;
+ uint64_t trim45 : 4; /**< [ 55: 52](RO/H) Setting for bits 27..24 of 180b ir25_trim. */
+ uint64_t trim44 : 4; /**< [ 51: 48](RO/H) Setting for bits 115..112 of 180b ir25_trim. */
+ uint64_t trim43 : 4; /**< [ 47: 44](RO/H) Setting for bits 23..20 of 180b ir25_trim. */
+ uint64_t trim42 : 4; /**< [ 43: 40](RO/H) Setting for bits 111..108 of 180b ir25_trim. */
+ uint64_t trim41 : 4; /**< [ 39: 36](RO/H) Setting for bits 19..16 of 180b ir25_trim. */
+ uint64_t trim40 : 4; /**< [ 35: 32](RO/H) Setting for bits 107..104 of 180b ir25_trim. */
+ uint64_t trim39 : 4; /**< [ 31: 28](RO/H) Setting for bits 15..12 of 180b ir25_trim. */
+ uint64_t trim38 : 4; /**< [ 27: 24](RO/H) Setting for bits 103..100 of 180b ir25_trim. */
+ uint64_t trim37 : 4; /**< [ 23: 20](RO/H) Setting for bits 11..8 of 180b ir25_trim. */
+ uint64_t trim36 : 4; /**< [ 19: 16](RO/H) Setting for bits 99..96 of 180b ir25_trim. */
+ uint64_t trim35 : 4; /**< [ 15: 12](RO/H) Setting for bits 7..4 of 180b ir25_trim. */
+ uint64_t trim34 : 4; /**< [ 11: 8](RO/H) Setting for bits 95..92 of 180b ir25_trim. */
+ uint64_t trim33 : 4; /**< [ 7: 4](RO/H) Setting for bits 3..0 of 180b ir25_trim. */
+ uint64_t trim32 : 4; /**< [ 3: 0](RO/H) Setting for bits 91..88 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+ uint64_t trim32 : 4; /**< [ 3: 0](RO/H) Setting for bits 91..88 of 180b ir25_trim. */
+ uint64_t trim33 : 4; /**< [ 7: 4](RO/H) Setting for bits 3..0 of 180b ir25_trim. */
+ uint64_t trim34 : 4; /**< [ 11: 8](RO/H) Setting for bits 95..92 of 180b ir25_trim. */
+ uint64_t trim35 : 4; /**< [ 15: 12](RO/H) Setting for bits 7..4 of 180b ir25_trim. */
+ uint64_t trim36 : 4; /**< [ 19: 16](RO/H) Setting for bits 99..96 of 180b ir25_trim. */
+ uint64_t trim37 : 4; /**< [ 23: 20](RO/H) Setting for bits 11..8 of 180b ir25_trim. */
+ uint64_t trim38 : 4; /**< [ 27: 24](RO/H) Setting for bits 103..100 of 180b ir25_trim. */
+ uint64_t trim39 : 4; /**< [ 31: 28](RO/H) Setting for bits 15..12 of 180b ir25_trim. */
+ uint64_t trim40 : 4; /**< [ 35: 32](RO/H) Setting for bits 107..104 of 180b ir25_trim. */
+ uint64_t trim41 : 4; /**< [ 39: 36](RO/H) Setting for bits 19..16 of 180b ir25_trim. */
+ uint64_t trim42 : 4; /**< [ 43: 40](RO/H) Setting for bits 111..108 of 180b ir25_trim. */
+ uint64_t trim43 : 4; /**< [ 47: 44](RO/H) Setting for bits 23..20 of 180b ir25_trim. */
+ uint64_t trim44 : 4; /**< [ 51: 48](RO/H) Setting for bits 115..112 of 180b ir25_trim. */
+ uint64_t trim45 : 4; /**< [ 55: 52](RO/H) Setting for bits 27..24 of 180b ir25_trim. */
+ uint64_t reserved_56_63 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_itrim_2_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_2_bsts bdk_gsernx_lanex_rx_itrim_2_bsts_t;
+
+/* Address of GSERN(a)_LANE(b)_RX_ITRIM_2_BSTS (CN9XXX only; a<=7, b<=4).
+   NOTE(review): relies on __bdk_csr_fatal() not returning -- TODO confirm. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001bf0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_2_BSTS", 2, a, b, 0, 0);
+}
+
+/* Companion metadata macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) bdk_gsernx_lanex_rx_itrim_2_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) "GSERNX_LANEX_RX_ITRIM_2_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_2_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_3_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Enables Register 0
+ * Enables in this register allow the corresponding override value setting to take
+ * effect.
+ * NOTE(review): the title says "Register 0" while the CSR name is _3_bcfg;
+ * presumably the enable registers are numbered independently of the value
+ * registers -- confirm against the hardware reference manual.
+ */
+union bdk_gsernx_lanex_rx_itrim_3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_itrim_3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_61_63 : 3;
+        uint64_t trim15_ovrd_en : 1; /**< [ 60: 60](R/W) Override enable for bits 87..84 of 180b ir25_trim. */
+        uint64_t reserved_57_59 : 3;
+        uint64_t trim14_ovrd_en : 1; /**< [ 56: 56](R/W) Override enable for bits 83..80 of 180b ir25_trim. */
+        uint64_t reserved_53_55 : 3;
+        uint64_t trim13_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 79..76 of 180b ir25_trim. */
+        uint64_t reserved_49_51 : 3;
+        uint64_t trim12_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 75..72 of 180b ir25_trim. */
+        uint64_t reserved_45_47 : 3;
+        uint64_t trim11_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 71..68 of 180b ir25_trim. */
+        uint64_t reserved_41_43 : 3;
+        uint64_t trim10_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 67..64 of 180b ir25_trim. */
+        uint64_t reserved_37_39 : 3;
+        uint64_t trim9_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 63..60 of 180b ir25_trim. */
+        uint64_t reserved_33_35 : 3;
+        uint64_t trim8_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 59..56 of 180b ir25_trim. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t trim7_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 55..52 of 180b ir25_trim. */
+        uint64_t reserved_25_27 : 3;
+        uint64_t trim6_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 51..48 of 180b ir25_trim. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t trim5_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 47..44 of 180b ir25_trim. */
+        uint64_t reserved_17_19 : 3;
+        uint64_t trim4_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 43..40 of 180b ir25_trim. */
+        uint64_t reserved_13_15 : 3;
+        uint64_t trim3_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 39..36 of 180b ir25_trim. */
+        uint64_t reserved_9_11 : 3;
+        uint64_t trim2_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 35..32 of 180b ir25_trim. */
+        uint64_t reserved_5_7 : 3;
+        uint64_t trim1_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 31..28 of 180b ir25_trim. */
+        uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_3 : 4;
+        uint64_t trim1_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 31..28 of 180b ir25_trim. */
+        uint64_t reserved_5_7 : 3;
+        uint64_t trim2_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 35..32 of 180b ir25_trim. */
+        uint64_t reserved_9_11 : 3;
+        uint64_t trim3_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 39..36 of 180b ir25_trim. */
+        uint64_t reserved_13_15 : 3;
+        uint64_t trim4_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 43..40 of 180b ir25_trim. */
+        uint64_t reserved_17_19 : 3;
+        uint64_t trim5_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 47..44 of 180b ir25_trim. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t trim6_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 51..48 of 180b ir25_trim. */
+        uint64_t reserved_25_27 : 3;
+        uint64_t trim7_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 55..52 of 180b ir25_trim. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t trim8_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 59..56 of 180b ir25_trim. */
+        uint64_t reserved_33_35 : 3;
+        uint64_t trim9_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 63..60 of 180b ir25_trim. */
+        uint64_t reserved_37_39 : 3;
+        uint64_t trim10_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 67..64 of 180b ir25_trim. */
+        uint64_t reserved_41_43 : 3;
+        uint64_t trim11_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 71..68 of 180b ir25_trim. */
+        uint64_t reserved_45_47 : 3;
+        uint64_t trim12_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 75..72 of 180b ir25_trim. */
+        uint64_t reserved_49_51 : 3;
+        uint64_t trim13_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 79..76 of 180b ir25_trim. */
+        uint64_t reserved_53_55 : 3;
+        uint64_t trim14_ovrd_en : 1; /**< [ 56: 56](R/W) Override enable for bits 83..80 of 180b ir25_trim. */
+        uint64_t reserved_57_59 : 3;
+        uint64_t trim15_ovrd_en : 1; /**< [ 60: 60](R/W) Override enable for bits 87..84 of 180b ir25_trim. */
+        uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_itrim_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_3_bcfg bdk_gsernx_lanex_rx_itrim_3_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with a<=7, b<=4; fatal otherwise. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001ab0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_4_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Enables Register 1
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) are set.
+ * NOTE(review): trim16..trim31 here map to contiguous 4-bit groups
+ * (ir25_trim bits 116..179) per the per-field comments below.
+ */
+union bdk_gsernx_lanex_rx_itrim_4_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_itrim_4_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_61_63 : 3;
+        uint64_t trim31_ovrd_en : 1; /**< [ 60: 60](R/W) Override enable for bits 179..176 of 180b ir25_trim. */
+        uint64_t reserved_57_59 : 3;
+        uint64_t trim30_ovrd_en : 1; /**< [ 56: 56](R/W) Override enable for bits 175..172 of 180b ir25_trim. */
+        uint64_t reserved_53_55 : 3;
+        uint64_t trim29_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 171..168 of 180b ir25_trim. */
+        uint64_t reserved_49_51 : 3;
+        uint64_t trim28_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 167..164 of 180b ir25_trim. */
+        uint64_t reserved_45_47 : 3;
+        uint64_t trim27_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 163..160 of 180b ir25_trim. */
+        uint64_t reserved_41_43 : 3;
+        uint64_t trim26_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 159..156 of 180b ir25_trim. */
+        uint64_t reserved_37_39 : 3;
+        uint64_t trim25_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 155..152 of 180b ir25_trim. */
+        uint64_t reserved_33_35 : 3;
+        uint64_t trim24_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 151..148 of 180b ir25_trim. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t trim23_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 147..144 of 180b ir25_trim. */
+        uint64_t reserved_25_27 : 3;
+        uint64_t trim22_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 143..140 of 180b ir25_trim. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t trim21_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 139..136 of 180b ir25_trim. */
+        uint64_t reserved_17_19 : 3;
+        uint64_t trim20_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 135..132 of 180b ir25_trim. */
+        uint64_t reserved_13_15 : 3;
+        uint64_t trim19_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 131..128 of 180b ir25_trim. */
+        uint64_t reserved_9_11 : 3;
+        uint64_t trim18_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 127..124 of 180b ir25_trim. */
+        uint64_t reserved_5_7 : 3;
+        uint64_t trim17_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 123..120 of 180b ir25_trim. */
+        uint64_t reserved_1_3 : 3;
+        uint64_t trim16_ovrd_en : 1; /**< [ 0: 0](R/W) Override enable for bits 119..116 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+        uint64_t trim16_ovrd_en : 1; /**< [ 0: 0](R/W) Override enable for bits 119..116 of 180b ir25_trim. */
+        uint64_t reserved_1_3 : 3;
+        uint64_t trim17_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 123..120 of 180b ir25_trim. */
+        uint64_t reserved_5_7 : 3;
+        uint64_t trim18_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 127..124 of 180b ir25_trim. */
+        uint64_t reserved_9_11 : 3;
+        uint64_t trim19_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 131..128 of 180b ir25_trim. */
+        uint64_t reserved_13_15 : 3;
+        uint64_t trim20_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 135..132 of 180b ir25_trim. */
+        uint64_t reserved_17_19 : 3;
+        uint64_t trim21_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 139..136 of 180b ir25_trim. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t trim22_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 143..140 of 180b ir25_trim. */
+        uint64_t reserved_25_27 : 3;
+        uint64_t trim23_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 147..144 of 180b ir25_trim. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t trim24_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 151..148 of 180b ir25_trim. */
+        uint64_t reserved_33_35 : 3;
+        uint64_t trim25_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 155..152 of 180b ir25_trim. */
+        uint64_t reserved_37_39 : 3;
+        uint64_t trim26_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 159..156 of 180b ir25_trim. */
+        uint64_t reserved_41_43 : 3;
+        uint64_t trim27_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 163..160 of 180b ir25_trim. */
+        uint64_t reserved_45_47 : 3;
+        uint64_t trim28_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 167..164 of 180b ir25_trim. */
+        uint64_t reserved_49_51 : 3;
+        uint64_t trim29_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 171..168 of 180b ir25_trim. */
+        uint64_t reserved_53_55 : 3;
+        uint64_t trim30_ovrd_en : 1; /**< [ 56: 56](R/W) Override enable for bits 175..172 of 180b ir25_trim. */
+        uint64_t reserved_57_59 : 3;
+        uint64_t trim31_ovrd_en : 1; /**< [ 60: 60](R/W) Override enable for bits 179..176 of 180b ir25_trim. */
+        uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_itrim_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_4_bcfg bdk_gsernx_lanex_rx_itrim_4_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with a<=7, b<=4; fatal otherwise. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001ac0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_4_BCFG(a,b) (a),(b),-1,-1
+
<<<<<<<<<<
+/**
+ * Register (RSL) gsern#_lane#_rx_itrim_5_bcfg
+ *
+ * GSER Lane Receiver Ir25 Trim Override Enables Register 2
+ * ir25_trim override settings are in groups of 4 bits. These only take
+ * effect when the corresponding enable bit(s) are set.
+ * NOTE(review): per the per-field comments, even/odd trim indices here map to
+ * non-contiguous halves of the 180-bit ir25_trim vector (mirroring the
+ * _2_bsts status register layout) -- confirm against the hardware manual.
+ */
+union bdk_gsernx_lanex_rx_itrim_5_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_itrim_5_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_53_63 : 11;
+        uint64_t trim45_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 27..24 of 180b ir25_trim. */
+        uint64_t reserved_49_51 : 3;
+        uint64_t trim44_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 115..112 of 180b ir25_trim. */
+        uint64_t reserved_45_47 : 3;
+        uint64_t trim43_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 23..20 of 180b ir25_trim. */
+        uint64_t reserved_41_43 : 3;
+        uint64_t trim42_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 111..108 of 180b ir25_trim. */
+        uint64_t reserved_37_39 : 3;
+        uint64_t trim41_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 19..16 of 180b ir25_trim. */
+        uint64_t reserved_33_35 : 3;
+        uint64_t trim40_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 107..104 of 180b ir25_trim. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t trim39_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 15..12 of 180b ir25_trim. */
+        uint64_t reserved_25_27 : 3;
+        uint64_t trim38_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 103..100 of 180b ir25_trim. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t trim37_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 11..8 of 180b ir25_trim. */
+        uint64_t reserved_17_19 : 3;
+        uint64_t trim36_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 99..96 of 180b ir25_trim. */
+        uint64_t reserved_13_15 : 3;
+        uint64_t trim35_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 7..4 of 180b ir25_trim. */
+        uint64_t reserved_9_11 : 3;
+        uint64_t trim34_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 95..92 of 180b ir25_trim. */
+        uint64_t reserved_5_7 : 3;
+        uint64_t trim33_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 3..0 of 180b ir25_trim. */
+        uint64_t reserved_1_3 : 3;
+        uint64_t trim32_ovrd_en : 1; /**< [ 0: 0](R/W) Override enable for bits 91..88 of 180b ir25_trim. */
+#else /* Word 0 - Little Endian */
+        uint64_t trim32_ovrd_en : 1; /**< [ 0: 0](R/W) Override enable for bits 91..88 of 180b ir25_trim. */
+        uint64_t reserved_1_3 : 3;
+        uint64_t trim33_ovrd_en : 1; /**< [ 4: 4](R/W) Override enable for bits 3..0 of 180b ir25_trim. */
+        uint64_t reserved_5_7 : 3;
+        uint64_t trim34_ovrd_en : 1; /**< [ 8: 8](R/W) Override enable for bits 95..92 of 180b ir25_trim. */
+        uint64_t reserved_9_11 : 3;
+        uint64_t trim35_ovrd_en : 1; /**< [ 12: 12](R/W) Override enable for bits 7..4 of 180b ir25_trim. */
+        uint64_t reserved_13_15 : 3;
+        uint64_t trim36_ovrd_en : 1; /**< [ 16: 16](R/W) Override enable for bits 99..96 of 180b ir25_trim. */
+        uint64_t reserved_17_19 : 3;
+        uint64_t trim37_ovrd_en : 1; /**< [ 20: 20](R/W) Override enable for bits 11..8 of 180b ir25_trim. */
+        uint64_t reserved_21_23 : 3;
+        uint64_t trim38_ovrd_en : 1; /**< [ 24: 24](R/W) Override enable for bits 103..100 of 180b ir25_trim. */
+        uint64_t reserved_25_27 : 3;
+        uint64_t trim39_ovrd_en : 1; /**< [ 28: 28](R/W) Override enable for bits 15..12 of 180b ir25_trim. */
+        uint64_t reserved_29_31 : 3;
+        uint64_t trim40_ovrd_en : 1; /**< [ 32: 32](R/W) Override enable for bits 107..104 of 180b ir25_trim. */
+        uint64_t reserved_33_35 : 3;
+        uint64_t trim41_ovrd_en : 1; /**< [ 36: 36](R/W) Override enable for bits 19..16 of 180b ir25_trim. */
+        uint64_t reserved_37_39 : 3;
+        uint64_t trim42_ovrd_en : 1; /**< [ 40: 40](R/W) Override enable for bits 111..108 of 180b ir25_trim. */
+        uint64_t reserved_41_43 : 3;
+        uint64_t trim43_ovrd_en : 1; /**< [ 44: 44](R/W) Override enable for bits 23..20 of 180b ir25_trim. */
+        uint64_t reserved_45_47 : 3;
+        uint64_t trim44_ovrd_en : 1; /**< [ 48: 48](R/W) Override enable for bits 115..112 of 180b ir25_trim. */
+        uint64_t reserved_49_51 : 3;
+        uint64_t trim45_ovrd_en : 1; /**< [ 52: 52](R/W) Override enable for bits 27..24 of 180b ir25_trim. */
+        uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_itrim_5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_itrim_5_bcfg bdk_gsernx_lanex_rx_itrim_5_bcfg_t;
+
+/* Address accessor: valid only on CN9XXX with a<=7, b<=4; fatal otherwise. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001ad0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_ITRIM_5_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) bdk_gsernx_lanex_rx_itrim_5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) "GSERNX_LANEX_RX_ITRIM_5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ITRIM_5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_margin_dbg_cnt
+ *
+ * GSER Lane RX Margining Debug Control Register
+ * CSR based control of Phy initiated read/write operations to the PEM. This is a
+ * debug field that can be used to check the results of an RX Margining sequence.
+ * The expectation is that the PEM FSM will initiate the transactions and the results
+ * will be placed in MAC/PEM CSRs using the p2m_mesage_bus. However, ability to
+ * read/write these registers into the processor is not clear from Synopsys's MAC
+ * spec. As such, this feature was added to allow an RSL read/write of these registers.
+ * Protocol is Ready & Done based. A transaction is updated in the CSR registers and the
+ * Ready bit is set high. Once it is set high, the mbus_fsm will execute the transaction
+ * and assert the Done bit when done or when results are available in
+ * GSERN()_LANE()_RX_MARGIN_DBG_OBS.
+ */
+union bdk_gsernx_lanex_rx_margin_dbg_cnt
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_margin_dbg_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t ready : 1; /**< [ 63: 63](R/W) Handshake bit to indicate there is a valid request from the RSL bus to transact
+ on the message bus. Setting this bit triggers the mbus_fsm to execute the
+ transaction. Once a transaction is done, this bit has to be cleared before
+ another transaction is issued.
+ 0 = No mbus transactions are outstanding.
+ 1 = An mbus transaction is outstanding. */
+        uint64_t write_commit : 1; /**< [ 62: 62](R/W) This bit indicates to the mbus transactor whether the write operation is a
+ committed write or an uncommitted write. When doing a read, this bit is a
+ don't care.
+ 0 = If executing a write, this write operation is a not-committed type.
+ 1 = If executing a write, this write operation is a committed type. */
+        uint64_t read_writen : 1; /**< [ 61: 61](R/W) This bit indicates if we are doing a read or write operation.
+ 0 = Performing a write operation.
+ 1 = Performing a read operation. */
+        uint64_t reserved_20_60 : 41;
+        uint64_t address : 12; /**< [ 19: 8](R/W) The 12-bit field of address to be sent to the MAC/PEM if we are performing either
+ a read or write operation. */
+        uint64_t data : 8; /**< [ 7: 0](R/W) The 8-bit field of Data to be sent to the MAC/PEM if we are performing a write operation. */
+#else /* Word 0 - Little Endian */
+        uint64_t data : 8; /**< [ 7: 0](R/W) The 8-bit field of Data to be sent to the MAC/PEM if we are performing a write operation. */
+        uint64_t address : 12; /**< [ 19: 8](R/W) The 12-bit field of address to be sent to the MAC/PEM if we are performing either
+ a read or write operation. */
+        uint64_t reserved_20_60 : 41;
+        uint64_t read_writen : 1; /**< [ 61: 61](R/W) This bit indicates if we are doing a read or write operation.
+ 0 = Performing a write operation.
+ 1 = Performing a read operation. */
+        uint64_t write_commit : 1; /**< [ 62: 62](R/W) This bit indicates to the mbus transactor whether the write operation is a
+ committed write or an uncommitted write. When doing a read, this bit is a
+ don't care.
+ 0 = If executing a write, this write operation is a not-committed type.
+ 1 = If executing a write, this write operation is a committed type. */
+        uint64_t ready : 1; /**< [ 63: 63](R/W) Handshake bit to indicate there is a valid request from the RSL bus to transact
+ on the message bus. Setting this bit triggers the mbus_fsm to execute the
+ transaction. Once a transaction is done, this bit has to be cleared before
+ another transaction is issued.
+ 0 = No mbus transactions are outstanding.
+ 1 = An mbus transaction is outstanding. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_margin_dbg_cnt_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_margin_dbg_cnt bdk_gsernx_lanex_rx_margin_dbg_cnt_t;
+
+/* Address accessor: valid only on CN9XXX with a<=7, b<=4; fatal otherwise. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001220ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_MARGIN_DBG_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) bdk_gsernx_lanex_rx_margin_dbg_cnt_t
+#define bustype_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) "GSERNX_LANEX_RX_MARGIN_DBG_CNT"
+#define device_bar_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_MARGIN_DBG_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_margin_dbg_obs
+ *
+ * GSER Lane RX Margining Debug Result Register
+ * Observes the results of an mbus_messaging transaction. The results are expected to be
+ * valid only when the Done bit is asserted.
+ */
+union bdk_gsernx_lanex_rx_margin_dbg_obs
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_margin_dbg_obs_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t done : 1; /**< [ 63: 63](RO/H) Done bit indicating that the outstanding transaction on the mbus
+ has finished and if there are results that are expected, they will
+ be presented to this register. The results are not sticky, so a copy
+ needs to be moved out of this register to another location before
+ de-asserting the READY bit in GSERN()_LANE()_RX_MARGIN_DBG_CNT.
+ De-asserting the READY bit will force this bit low again and remove
+ the data being presented to this CSR's inputs. */
+        uint64_t reserved_20_62 : 43;
+        uint64_t address : 12; /**< [ 19: 8](RO/H) Observed Address a read was completed against or location of the write operation being executed. */
+        uint64_t data : 8; /**< [ 7: 0](RO/H) Observed Data read back from the MAC/PEM at the completion of the read operation */
+#else /* Word 0 - Little Endian */
+        uint64_t data : 8; /**< [ 7: 0](RO/H) Observed Data read back from the MAC/PEM at the completion of the read operation */
+        uint64_t address : 12; /**< [ 19: 8](RO/H) Observed Address a read was completed against or location of the write operation being executed. */
+        uint64_t reserved_20_62 : 43;
+        uint64_t done : 1; /**< [ 63: 63](RO/H) Done bit indicating that the outstanding transaction on the mbus
+ has finished and if there are results that are expected, they will
+ be presented to this register. The results are not sticky, so a copy
+ needs to be moved out of this register to another location before
+ de-asserting the READY bit in GSERN()_LANE()_RX_MARGIN_DBG_CNT.
+ De-asserting the READY bit will force this bit low again and remove
+ the data being presented to this CSR's inputs. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_margin_dbg_obs_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_margin_dbg_obs bdk_gsernx_lanex_rx_margin_dbg_obs_t;
+
+/* Address accessor: valid only on CN9XXX with a<=7, b<=4; fatal otherwise. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001230ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_MARGIN_DBG_OBS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) bdk_gsernx_lanex_rx_margin_dbg_obs_t
+#define bustype_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) "GSERNX_LANEX_RX_MARGIN_DBG_OBS"
+#define device_bar_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_MARGIN_DBG_OBS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_margin_phy_cnt
+ *
+ * GSER Lane RX Margining Overrides of Phy MBUS margining bits Register
+ * Can override existing values generated by the RX Margining FSM. This feature will
+ * allow the RSL interface to provide its own values to the MAC/PEM Phy CSRs for the
+ * mbus interface. This is strictly a debug method for sending the mbus CSRs in the
+ * phy to the MAC/PEM in a predictable method.
+ */
+union bdk_gsernx_lanex_rx_margin_phy_cnt
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_margin_phy_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t override_margining_fsm : 1; /**< [ 63: 63](R/W) The bit that when asserted to 1'b1, will enable the values of this register to
+ replace the values generated by the RX Margining FSM. */
+        uint64_t sample_count_reset : 1; /**< [ 62: 62](R/W) Resets the sample count register for the RX Margining FSM. */
+        uint64_t error_count_reset : 1; /**< [ 61: 61](R/W) Resets the error count register for the RX Margining FSM. */
+        uint64_t margin_voltage_timing : 1; /**< [ 60: 60](R/W) Sets which type of margining to perform. 1'b0 for timing, 1'b1 for voltage. */
+        uint64_t start_margining : 1; /**< [ 59: 59](R/W) Enables margining FSM to operate. */
+        uint64_t margin_direction : 1; /**< [ 58: 58](R/W) Sets the direction of the margining.
+ For timing, a 1'b0 steps to the left a 1'b1 steps to the right.
+ For voltage, 1'b0 steps voltage up and 1'b1 steps voltage down. */
+        uint64_t margin_offset : 7; /**< [ 57: 51](R/W) Margining offset for the sample point. */
+        uint64_t reserved_48_50 : 3;
+        uint64_t sample_count_ovr : 40; /**< [ 47: 8](R/W) Margining sample count size. Default is 1K samples, but can be updated to any
+ value within the 40-bit length. */
+        uint64_t elastic_buffer_depth : 8; /**< [ 7: 0](R/W) Sets the margining buffer depth. Feature is not used */
+#else /* Word 0 - Little Endian */
+        uint64_t elastic_buffer_depth : 8; /**< [ 7: 0](R/W) Sets the margining buffer depth. Feature is not used */
+        uint64_t sample_count_ovr : 40; /**< [ 47: 8](R/W) Margining sample count size. Default is 1K samples, but can be updated to any
+ value within the 40-bit length. */
+        uint64_t reserved_48_50 : 3;
+        uint64_t margin_offset : 7; /**< [ 57: 51](R/W) Margining offset for the sample point. */
+        uint64_t margin_direction : 1; /**< [ 58: 58](R/W) Sets the direction of the margining.
+ For timing, a 1'b0 steps to the left a 1'b1 steps to the right.
+ For voltage, 1'b0 steps voltage up and 1'b1 steps voltage down. */
+        uint64_t start_margining : 1; /**< [ 59: 59](R/W) Enables margining FSM to operate. */
+        uint64_t margin_voltage_timing : 1; /**< [ 60: 60](R/W) Sets which type of margining to perform. 1'b0 for timing, 1'b1 for voltage. */
+        uint64_t error_count_reset : 1; /**< [ 61: 61](R/W) Resets the error count register for the RX Margining FSM. */
+        uint64_t sample_count_reset : 1; /**< [ 62: 62](R/W) Resets the sample count register for the RX Margining FSM. */
+        uint64_t override_margining_fsm : 1; /**< [ 63: 63](R/W) The bit that when asserted to 1'b1, will enable the values of this register to
+ replace the values generated by the RX Margining FSM. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_margin_phy_cnt_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_margin_phy_cnt bdk_gsernx_lanex_rx_margin_phy_cnt_t;
+
+/* Address accessor: valid only on CN9XXX with a<=7, b<=4; fatal otherwise. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001330ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_MARGIN_PHY_CNT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) bdk_gsernx_lanex_rx_margin_phy_cnt_t
+#define bustype_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) "GSERNX_LANEX_RX_MARGIN_PHY_CNT"
+#define device_bar_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_MARGIN_PHY_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_margin_phy_obs
+ *
+ * GSER Lane RX Margining Observe of Phy MBUS margining bits Register
+ * Observes the status of phy mbus CSRs. The results are expected to be changed by the
+ * margining FSM. This is strictly an observe path to the mbus CSRs in the phy.
+ */
+union bdk_gsernx_lanex_rx_margin_phy_obs
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_margin_phy_obs_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t margin_nak : 1; /**< [ 63: 63](RO/H) Asserted when the margining setup is out of range for the margining hardware to
+ perform. */
+        uint64_t margin_status : 1; /**< [ 62: 62](RO/H) Indicates the status of the margining FSM. If asserted, then there is an open
+ Receiver Margining transaction being executed. */
+        uint64_t elastic_buffer_status : 1; /**< [ 61: 61](RO/H) Indicates the status of the elastic buffer. This feature is not supported and
+ will always return 0. */
+        uint64_t reserved_15_60 : 46;
+        uint64_t sample_count : 7; /**< [ 14: 8](RO/H) Observed Address a read was completed against or location of the write operation being executed. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t error_count : 6; /**< [ 5: 0](RO/H) Observed Data read back from the MAC/PEM at the completion of the read operation */
+#else /* Word 0 - Little Endian */
+        uint64_t error_count : 6; /**< [ 5: 0](RO/H) Observed Data read back from the MAC/PEM at the completion of the read operation */
+        uint64_t reserved_6_7 : 2;
+        uint64_t sample_count : 7; /**< [ 14: 8](RO/H) Observed Address a read was completed against or location of the write operation being executed. */
+        uint64_t reserved_15_60 : 46;
+        uint64_t elastic_buffer_status : 1; /**< [ 61: 61](RO/H) Indicates the status of the elastic buffer. This feature is not supported and
+ will always return 0. */
+        uint64_t margin_status : 1; /**< [ 62: 62](RO/H) Indicates the status of the margining FSM. If asserted, then there is an open
+ Receiver Margining transaction being executed. */
+        uint64_t margin_nak : 1; /**< [ 63: 63](RO/H) Asserted when the margining setup is out of range for the margining hardware to
+ perform. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_margin_phy_obs_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_margin_phy_obs bdk_gsernx_lanex_rx_margin_phy_obs_t;
+
+/* Address accessor: valid only on CN9XXX with a<=7, b<=4; fatal otherwise. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001430ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_MARGIN_PHY_OBS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) bdk_gsernx_lanex_rx_margin_phy_obs_t
+#define bustype_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) "GSERNX_LANEX_RX_MARGIN_PHY_OBS"
+#define device_bar_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_MARGIN_PHY_OBS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_1_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 1 Register
+ * Register controls for offset overrides from os0_0 through os3_1. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ *
+ * Instanced per GSERN module (index a, 0..7) and per lane (index b, 0..4);
+ * see the address function below.
+ */
+union bdk_gsernx_lanex_rx_os_1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t os3_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS3_1_OVRD]. */
+        uint64_t reserved_62 : 1;
+        uint64_t os3_1_ovrd : 6; /**< [ 61: 56](R/W) os3_1 offset compensation override bits. */
+        uint64_t os3_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS3_0_OVRD]. */
+        uint64_t reserved_54 : 1;
+        uint64_t os3_0_ovrd : 6; /**< [ 53: 48](R/W) os3_0 offset compensation override bits. */
+        uint64_t os2_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS2_1_OVRD]. */
+        uint64_t reserved_46 : 1;
+        uint64_t os2_1_ovrd : 6; /**< [ 45: 40](R/W) os2_1 offset compensation override bits. */
+        uint64_t os2_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS2_0_OVRD]. */
+        uint64_t reserved_38 : 1;
+        uint64_t os2_0_ovrd : 6; /**< [ 37: 32](R/W) os2_0 offset compensation override bits. */
+        uint64_t os1_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS1_1_OVRD]. */
+        uint64_t reserved_30 : 1;
+        uint64_t os1_1_ovrd : 6; /**< [ 29: 24](R/W) os1_1 offset compensation override bits. */
+        uint64_t os1_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS1_0_OVRD]. */
+        uint64_t reserved_22 : 1;
+        uint64_t os1_0_ovrd : 6; /**< [ 21: 16](R/W) os1_0 offset compensation override bits. */
+        uint64_t os0_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS0_1_OVRD]. */
+        uint64_t reserved_14 : 1;
+        uint64_t os0_1_ovrd : 6; /**< [ 13: 8](R/W) os0_1 offset compensation override bits. */
+        uint64_t os0_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS0_0_OVRD]. */
+        uint64_t reserved_6 : 1;
+        uint64_t os0_0_ovrd : 6; /**< [ 5: 0](R/W) os0_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os0_0_ovrd : 6; /**< [ 5: 0](R/W) os0_0 offset compensation override bits. */
+        uint64_t reserved_6 : 1;
+        uint64_t os0_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS0_0_OVRD]. */
+        uint64_t os0_1_ovrd : 6; /**< [ 13: 8](R/W) os0_1 offset compensation override bits. */
+        uint64_t reserved_14 : 1;
+        uint64_t os0_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS0_1_OVRD]. */
+        uint64_t os1_0_ovrd : 6; /**< [ 21: 16](R/W) os1_0 offset compensation override bits. */
+        uint64_t reserved_22 : 1;
+        uint64_t os1_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS1_0_OVRD]. */
+        uint64_t os1_1_ovrd : 6; /**< [ 29: 24](R/W) os1_1 offset compensation override bits. */
+        uint64_t reserved_30 : 1;
+        uint64_t os1_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS1_1_OVRD]. */
+        uint64_t os2_0_ovrd : 6; /**< [ 37: 32](R/W) os2_0 offset compensation override bits. */
+        uint64_t reserved_38 : 1;
+        uint64_t os2_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS2_0_OVRD]. */
+        uint64_t os2_1_ovrd : 6; /**< [ 45: 40](R/W) os2_1 offset compensation override bits. */
+        uint64_t reserved_46 : 1;
+        uint64_t os2_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS2_1_OVRD]. */
+        uint64_t os3_0_ovrd : 6; /**< [ 53: 48](R/W) os3_0 offset compensation override bits. */
+        uint64_t reserved_54 : 1;
+        uint64_t os3_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS3_0_OVRD]. */
+        uint64_t os3_1_ovrd : 6; /**< [ 61: 56](R/W) os3_1 offset compensation override bits. */
+        uint64_t reserved_62 : 1;
+        uint64_t os3_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS3_1_OVRD]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_1_bcfg bdk_gsernx_lanex_rx_os_1_bcfg_t;
+
+/* CSR address; fatal on non-CN9XXX models or out-of-range indices. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001800ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) bdk_gsernx_lanex_rx_os_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) "GSERNX_LANEX_RX_OS_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_1_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 1 Register
+ * Status for offset settings actually in use (either calibration results
+ * or overrides) from os0_0 through os3_1. Results in all fields of this
+ * register are valid only if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] and
+ * GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] are asserted or if the corresponding
+ * override enable bit is asserted.
+ *
+ * Read-only companion of GSERN()_LANE()_RX_OS_1_BCFG; same a/b indexing.
+ */
+union bdk_gsernx_lanex_rx_os_1_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_1_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63 : 2;
+        uint64_t os3_1 : 6; /**< [ 61: 56](RO/H) os3_1 offset compensation override bits. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os3_0 : 6; /**< [ 53: 48](RO/H) os3_0 offset compensation override bits. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os2_1 : 6; /**< [ 45: 40](RO/H) os2_1 offset compensation override bits. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os2_0 : 6; /**< [ 37: 32](RO/H) os2_0 offset compensation override bits. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os1_1 : 6; /**< [ 29: 24](RO/H) os1_1 offset compensation override bits. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os1_0 : 6; /**< [ 21: 16](RO/H) os1_0 offset compensation override bits. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os0_1 : 6; /**< [ 13: 8](RO/H) os0_1 offset compensation override bits. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os0_0 : 6; /**< [ 5: 0](RO/H) os0_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os0_0 : 6; /**< [ 5: 0](RO/H) os0_0 offset compensation override bits. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os0_1 : 6; /**< [ 13: 8](RO/H) os0_1 offset compensation override bits. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os1_0 : 6; /**< [ 21: 16](RO/H) os1_0 offset compensation override bits. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os1_1 : 6; /**< [ 29: 24](RO/H) os1_1 offset compensation override bits. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os2_0 : 6; /**< [ 37: 32](RO/H) os2_0 offset compensation override bits. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os2_1 : 6; /**< [ 45: 40](RO/H) os2_1 offset compensation override bits. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os3_0 : 6; /**< [ 53: 48](RO/H) os3_0 offset compensation override bits. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os3_1 : 6; /**< [ 61: 56](RO/H) os3_1 offset compensation override bits. */
+        uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_1_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_1_bsts bdk_gsernx_lanex_rx_os_1_bsts_t;
+
+/* CSR address; fatal on non-CN9XXX models or out-of-range indices. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_1_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_1_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001940ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_1_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) bdk_gsernx_lanex_rx_os_1_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) "GSERNX_LANEX_RX_OS_1_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_1_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_2_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 2 Register
+ * Register controls for offset overrides from os4_0 through os7_1. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ *
+ * Instanced per GSERN module (index a, 0..7) and per lane (index b, 0..4);
+ * see the address function below.
+ */
+union bdk_gsernx_lanex_rx_os_2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t os7_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS7_1_OVRD]. */
+        uint64_t reserved_62 : 1;
+        uint64_t os7_1_ovrd : 6; /**< [ 61: 56](R/W) os7_1 offset compensation override bits. */
+        uint64_t os7_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS7_0_OVRD]. */
+        uint64_t reserved_54 : 1;
+        uint64_t os7_0_ovrd : 6; /**< [ 53: 48](R/W) os7_0 offset compensation override bits. */
+        uint64_t os6_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS6_1_OVRD]. */
+        uint64_t reserved_46 : 1;
+        uint64_t os6_1_ovrd : 6; /**< [ 45: 40](R/W) os6_1 offset compensation override bits. */
+        uint64_t os6_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS6_0_OVRD]. */
+        uint64_t reserved_38 : 1;
+        uint64_t os6_0_ovrd : 6; /**< [ 37: 32](R/W) os6_0 offset compensation override bits. */
+        uint64_t os5_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS5_1_OVRD]. */
+        uint64_t reserved_30 : 1;
+        uint64_t os5_1_ovrd : 6; /**< [ 29: 24](R/W) os5_1 offset compensation override bits. */
+        uint64_t os5_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS5_0_OVRD]. */
+        uint64_t reserved_22 : 1;
+        uint64_t os5_0_ovrd : 6; /**< [ 21: 16](R/W) os5_0 offset compensation override bits. */
+        uint64_t os4_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS4_1_OVRD]. */
+        uint64_t reserved_14 : 1;
+        uint64_t os4_1_ovrd : 6; /**< [ 13: 8](R/W) os4_1 offset compensation override bits. */
+        uint64_t os4_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS4_0_OVRD]. */
+        uint64_t reserved_6 : 1;
+        uint64_t os4_0_ovrd : 6; /**< [ 5: 0](R/W) os4_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os4_0_ovrd : 6; /**< [ 5: 0](R/W) os4_0 offset compensation override bits. */
+        uint64_t reserved_6 : 1;
+        uint64_t os4_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS4_0_OVRD]. */
+        uint64_t os4_1_ovrd : 6; /**< [ 13: 8](R/W) os4_1 offset compensation override bits. */
+        uint64_t reserved_14 : 1;
+        uint64_t os4_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS4_1_OVRD]. */
+        uint64_t os5_0_ovrd : 6; /**< [ 21: 16](R/W) os5_0 offset compensation override bits. */
+        uint64_t reserved_22 : 1;
+        uint64_t os5_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS5_0_OVRD]. */
+        uint64_t os5_1_ovrd : 6; /**< [ 29: 24](R/W) os5_1 offset compensation override bits. */
+        uint64_t reserved_30 : 1;
+        uint64_t os5_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS5_1_OVRD]. */
+        uint64_t os6_0_ovrd : 6; /**< [ 37: 32](R/W) os6_0 offset compensation override bits. */
+        uint64_t reserved_38 : 1;
+        uint64_t os6_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS6_0_OVRD]. */
+        uint64_t os6_1_ovrd : 6; /**< [ 45: 40](R/W) os6_1 offset compensation override bits. */
+        uint64_t reserved_46 : 1;
+        uint64_t os6_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS6_1_OVRD]. */
+        uint64_t os7_0_ovrd : 6; /**< [ 53: 48](R/W) os7_0 offset compensation override bits. */
+        uint64_t reserved_54 : 1;
+        uint64_t os7_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS7_0_OVRD]. */
+        uint64_t os7_1_ovrd : 6; /**< [ 61: 56](R/W) os7_1 offset compensation override bits. */
+        uint64_t reserved_62 : 1;
+        uint64_t os7_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS7_1_OVRD]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_2_bcfg bdk_gsernx_lanex_rx_os_2_bcfg_t;
+
+/* CSR address; fatal on non-CN9XXX models or out-of-range indices. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001810ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) bdk_gsernx_lanex_rx_os_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) "GSERNX_LANEX_RX_OS_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_2_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 2 Register
+ * Status for offset settings actually in use (either calibration results
+ * or overrides) from os4_0 through os7_1. Results in all fields of this
+ * register are valid only if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] and
+ * GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] are asserted or if the corresponding
+ * override enable bit is asserted.
+ *
+ * Read-only companion of GSERN()_LANE()_RX_OS_2_BCFG; same a/b indexing.
+ */
+union bdk_gsernx_lanex_rx_os_2_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_2_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63 : 2;
+        uint64_t os7_1 : 6; /**< [ 61: 56](RO/H) os7_1 offset compensation override bits. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os7_0 : 6; /**< [ 53: 48](RO/H) os7_0 offset compensation override bits. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os6_1 : 6; /**< [ 45: 40](RO/H) os6_1 offset compensation override bits. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os6_0 : 6; /**< [ 37: 32](RO/H) os6_0 offset compensation override bits. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os5_1 : 6; /**< [ 29: 24](RO/H) os5_1 offset compensation override bits. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os5_0 : 6; /**< [ 21: 16](RO/H) os5_0 offset compensation override bits. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os4_1 : 6; /**< [ 13: 8](RO/H) os4_1 offset compensation override bits. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os4_0 : 6; /**< [ 5: 0](RO/H) os4_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os4_0 : 6; /**< [ 5: 0](RO/H) os4_0 offset compensation override bits. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os4_1 : 6; /**< [ 13: 8](RO/H) os4_1 offset compensation override bits. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os5_0 : 6; /**< [ 21: 16](RO/H) os5_0 offset compensation override bits. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os5_1 : 6; /**< [ 29: 24](RO/H) os5_1 offset compensation override bits. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os6_0 : 6; /**< [ 37: 32](RO/H) os6_0 offset compensation override bits. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os6_1 : 6; /**< [ 45: 40](RO/H) os6_1 offset compensation override bits. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os7_0 : 6; /**< [ 53: 48](RO/H) os7_0 offset compensation override bits. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os7_1 : 6; /**< [ 61: 56](RO/H) os7_1 offset compensation override bits. */
+        uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_2_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_2_bsts bdk_gsernx_lanex_rx_os_2_bsts_t;
+
+/* CSR address; fatal on non-CN9XXX models or out-of-range indices. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_2_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_2_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001950ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_2_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) bdk_gsernx_lanex_rx_os_2_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) "GSERNX_LANEX_RX_OS_2_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_2_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_3_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 3 Register
+ * Register controls for offset overrides from os8_0 through os11_1. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ *
+ * Instanced per GSERN module (index a, 0..7) and per lane (index b, 0..4);
+ * see the address function below.
+ */
+union bdk_gsernx_lanex_rx_os_3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t os11_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS11_1_OVRD]. */
+        uint64_t reserved_62 : 1;
+        uint64_t os11_1_ovrd : 6; /**< [ 61: 56](R/W) os11_1 offset compensation override bits. */
+        uint64_t os11_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS11_0_OVRD]. */
+        uint64_t reserved_54 : 1;
+        uint64_t os11_0_ovrd : 6; /**< [ 53: 48](R/W) os11_0 offset compensation override bits. */
+        uint64_t os10_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS10_1_OVRD]. */
+        uint64_t reserved_46 : 1;
+        uint64_t os10_1_ovrd : 6; /**< [ 45: 40](R/W) os10_1 offset compensation override bits. */
+        uint64_t os10_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS10_0_OVRD]. */
+        uint64_t reserved_38 : 1;
+        uint64_t os10_0_ovrd : 6; /**< [ 37: 32](R/W) os10_0 offset compensation override bits. */
+        uint64_t os9_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS9_1_OVRD]. */
+        uint64_t reserved_30 : 1;
+        uint64_t os9_1_ovrd : 6; /**< [ 29: 24](R/W) os9_1 offset compensation override bits. */
+        uint64_t os9_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS9_0_OVRD]. */
+        uint64_t reserved_22 : 1;
+        uint64_t os9_0_ovrd : 6; /**< [ 21: 16](R/W) os9_0 offset compensation override bits. */
+        uint64_t os8_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS8_1_OVRD]. */
+        uint64_t reserved_14 : 1;
+        uint64_t os8_1_ovrd : 6; /**< [ 13: 8](R/W) os8_1 offset compensation override bits. */
+        uint64_t os8_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS8_0_OVRD]. */
+        uint64_t reserved_6 : 1;
+        uint64_t os8_0_ovrd : 6; /**< [ 5: 0](R/W) os8_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os8_0_ovrd : 6; /**< [ 5: 0](R/W) os8_0 offset compensation override bits. */
+        uint64_t reserved_6 : 1;
+        uint64_t os8_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS8_0_OVRD]. */
+        uint64_t os8_1_ovrd : 6; /**< [ 13: 8](R/W) os8_1 offset compensation override bits. */
+        uint64_t reserved_14 : 1;
+        uint64_t os8_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS8_1_OVRD]. */
+        uint64_t os9_0_ovrd : 6; /**< [ 21: 16](R/W) os9_0 offset compensation override bits. */
+        uint64_t reserved_22 : 1;
+        uint64_t os9_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS9_0_OVRD]. */
+        uint64_t os9_1_ovrd : 6; /**< [ 29: 24](R/W) os9_1 offset compensation override bits. */
+        uint64_t reserved_30 : 1;
+        uint64_t os9_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS9_1_OVRD]. */
+        uint64_t os10_0_ovrd : 6; /**< [ 37: 32](R/W) os10_0 offset compensation override bits. */
+        uint64_t reserved_38 : 1;
+        uint64_t os10_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS10_0_OVRD]. */
+        uint64_t os10_1_ovrd : 6; /**< [ 45: 40](R/W) os10_1 offset compensation override bits. */
+        uint64_t reserved_46 : 1;
+        uint64_t os10_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS10_1_OVRD]. */
+        uint64_t os11_0_ovrd : 6; /**< [ 53: 48](R/W) os11_0 offset compensation override bits. */
+        uint64_t reserved_54 : 1;
+        uint64_t os11_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS11_0_OVRD]. */
+        uint64_t os11_1_ovrd : 6; /**< [ 61: 56](R/W) os11_1 offset compensation override bits. */
+        uint64_t reserved_62 : 1;
+        uint64_t os11_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS11_1_OVRD]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_3_bcfg bdk_gsernx_lanex_rx_os_3_bcfg_t;
+
+/* CSR address; fatal on non-CN9XXX models or out-of-range indices. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001820ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) bdk_gsernx_lanex_rx_os_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) "GSERNX_LANEX_RX_OS_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_3_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 3 Register
+ * Status for offset settings actually in use (either calibration results
+ * or overrides) from os8_0 through os11_1. Results in all fields of this
+ * register are valid only if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] and
+ * GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] are asserted or if the corresponding
+ * override enable bit is asserted.
+ *
+ * Read-only companion of GSERN()_LANE()_RX_OS_3_BCFG; same a/b indexing.
+ */
+union bdk_gsernx_lanex_rx_os_3_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_3_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63 : 2;
+        uint64_t os11_1 : 6; /**< [ 61: 56](RO/H) os11_1 offset compensation override bits. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os11_0 : 6; /**< [ 53: 48](RO/H) os11_0 offset compensation override bits. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os10_1 : 6; /**< [ 45: 40](RO/H) os10_1 offset compensation override bits. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os10_0 : 6; /**< [ 37: 32](RO/H) os10_0 offset compensation override bits. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os9_1 : 6; /**< [ 29: 24](RO/H) os9_1 offset compensation override bits. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os9_0 : 6; /**< [ 21: 16](RO/H) os9_0 offset compensation override bits. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os8_1 : 6; /**< [ 13: 8](RO/H) os8_1 offset compensation override bits. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os8_0 : 6; /**< [ 5: 0](RO/H) os8_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os8_0 : 6; /**< [ 5: 0](RO/H) os8_0 offset compensation override bits. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os8_1 : 6; /**< [ 13: 8](RO/H) os8_1 offset compensation override bits. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os9_0 : 6; /**< [ 21: 16](RO/H) os9_0 offset compensation override bits. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os9_1 : 6; /**< [ 29: 24](RO/H) os9_1 offset compensation override bits. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os10_0 : 6; /**< [ 37: 32](RO/H) os10_0 offset compensation override bits. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os10_1 : 6; /**< [ 45: 40](RO/H) os10_1 offset compensation override bits. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os11_0 : 6; /**< [ 53: 48](RO/H) os11_0 offset compensation override bits. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os11_1 : 6; /**< [ 61: 56](RO/H) os11_1 offset compensation override bits. */
+        uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_3_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_3_bsts bdk_gsernx_lanex_rx_os_3_bsts_t;
+
+/* CSR address; fatal on non-CN9XXX models or out-of-range indices. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_3_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_3_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001960ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_3_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) bdk_gsernx_lanex_rx_os_3_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) "GSERNX_LANEX_RX_OS_3_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_3_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_4_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 4 Register
+ * Register controls for offset overrides from os12_0 through os15_1. Each
+ * override setting has a corresponding enable bit which will cause the
+ * calibration control logic to use the override register setting instead
+ * of the calibration result.
+ *
+ * Instanced per GSERN module (index a, 0..7) and per lane (index b, 0..4);
+ * see the address function below.
+ */
+union bdk_gsernx_lanex_rx_os_4_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_4_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t os15_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS15_1_OVRD]. */
+        uint64_t reserved_62 : 1;
+        uint64_t os15_1_ovrd : 6; /**< [ 61: 56](R/W) os15_1 offset compensation override bits. */
+        uint64_t os15_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS15_0_OVRD]. */
+        uint64_t reserved_54 : 1;
+        uint64_t os15_0_ovrd : 6; /**< [ 53: 48](R/W) os15_0 offset compensation override bits. */
+        uint64_t os14_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS14_1_OVRD]. */
+        uint64_t reserved_46 : 1;
+        uint64_t os14_1_ovrd : 6; /**< [ 45: 40](R/W) os14_1 offset compensation override bits. */
+        uint64_t os14_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS14_0_OVRD]. */
+        uint64_t reserved_38 : 1;
+        uint64_t os14_0_ovrd : 6; /**< [ 37: 32](R/W) os14_0 offset compensation override bits. */
+        uint64_t os13_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS13_1_OVRD]. */
+        uint64_t reserved_30 : 1;
+        uint64_t os13_1_ovrd : 6; /**< [ 29: 24](R/W) os13_1 offset compensation override bits. */
+        uint64_t os13_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS13_0_OVRD]. */
+        uint64_t reserved_22 : 1;
+        uint64_t os13_0_ovrd : 6; /**< [ 21: 16](R/W) os13_0 offset compensation override bits. */
+        uint64_t os12_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS12_1_OVRD]. */
+        uint64_t reserved_14 : 1;
+        uint64_t os12_1_ovrd : 6; /**< [ 13: 8](R/W) os12_1 offset compensation override bits. */
+        uint64_t os12_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS12_0_OVRD]. */
+        uint64_t reserved_6 : 1;
+        uint64_t os12_0_ovrd : 6; /**< [ 5: 0](R/W) os12_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os12_0_ovrd : 6; /**< [ 5: 0](R/W) os12_0 offset compensation override bits. */
+        uint64_t reserved_6 : 1;
+        uint64_t os12_0_ovrd_en : 1; /**< [ 7: 7](R/W) Enable use of [OS12_0_OVRD]. */
+        uint64_t os12_1_ovrd : 6; /**< [ 13: 8](R/W) os12_1 offset compensation override bits. */
+        uint64_t reserved_14 : 1;
+        uint64_t os12_1_ovrd_en : 1; /**< [ 15: 15](R/W) Enable use of [OS12_1_OVRD]. */
+        uint64_t os13_0_ovrd : 6; /**< [ 21: 16](R/W) os13_0 offset compensation override bits. */
+        uint64_t reserved_22 : 1;
+        uint64_t os13_0_ovrd_en : 1; /**< [ 23: 23](R/W) Enable use of [OS13_0_OVRD]. */
+        uint64_t os13_1_ovrd : 6; /**< [ 29: 24](R/W) os13_1 offset compensation override bits. */
+        uint64_t reserved_30 : 1;
+        uint64_t os13_1_ovrd_en : 1; /**< [ 31: 31](R/W) Enable use of [OS13_1_OVRD]. */
+        uint64_t os14_0_ovrd : 6; /**< [ 37: 32](R/W) os14_0 offset compensation override bits. */
+        uint64_t reserved_38 : 1;
+        uint64_t os14_0_ovrd_en : 1; /**< [ 39: 39](R/W) Enable use of [OS14_0_OVRD]. */
+        uint64_t os14_1_ovrd : 6; /**< [ 45: 40](R/W) os14_1 offset compensation override bits. */
+        uint64_t reserved_46 : 1;
+        uint64_t os14_1_ovrd_en : 1; /**< [ 47: 47](R/W) Enable use of [OS14_1_OVRD]. */
+        uint64_t os15_0_ovrd : 6; /**< [ 53: 48](R/W) os15_0 offset compensation override bits. */
+        uint64_t reserved_54 : 1;
+        uint64_t os15_0_ovrd_en : 1; /**< [ 55: 55](R/W) Enable use of [OS15_0_OVRD]. */
+        uint64_t os15_1_ovrd : 6; /**< [ 61: 56](R/W) os15_1 offset compensation override bits. */
+        uint64_t reserved_62 : 1;
+        uint64_t os15_1_ovrd_en : 1; /**< [ 63: 63](R/W) Enable use of [OS15_1_OVRD]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_4_bcfg bdk_gsernx_lanex_rx_os_4_bcfg_t;
+
+/* CSR address; fatal on non-CN9XXX models or out-of-range indices. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_4_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001830ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_4_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) bdk_gsernx_lanex_rx_os_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) "GSERNX_LANEX_RX_OS_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_4_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 4 Register
+ * Status for offset settings actually in use (either calibration results
+ * or overrides) from os12_0 through os15_1. Results in all fields of this
+ * register are valid only if GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS] and
+ * GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] are asserted or if the corresponding
+ * override enable bit is asserted.
+ *
+ * Read-only companion of GSERN()_LANE()_RX_OS_4_BCFG; same a/b indexing.
+ */
+union bdk_gsernx_lanex_rx_os_4_bsts
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_rx_os_4_bsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63 : 2;
+        uint64_t os15_1 : 6; /**< [ 61: 56](RO/H) os15_1 offset compensation override bits. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os15_0 : 6; /**< [ 53: 48](RO/H) os15_0 offset compensation override bits. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os14_1 : 6; /**< [ 45: 40](RO/H) os14_1 offset compensation override bits. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os14_0 : 6; /**< [ 37: 32](RO/H) os14_0 offset compensation override bits. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os13_1 : 6; /**< [ 29: 24](RO/H) os13_1 offset compensation override bits. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os13_0 : 6; /**< [ 21: 16](RO/H) os13_0 offset compensation override bits. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os12_1 : 6; /**< [ 13: 8](RO/H) os12_1 offset compensation override bits. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os12_0 : 6; /**< [ 5: 0](RO/H) os12_0 offset compensation override bits. */
+#else /* Word 0 - Little Endian */
+        uint64_t os12_0 : 6; /**< [ 5: 0](RO/H) os12_0 offset compensation override bits. */
+        uint64_t reserved_6_7 : 2;
+        uint64_t os12_1 : 6; /**< [ 13: 8](RO/H) os12_1 offset compensation override bits. */
+        uint64_t reserved_14_15 : 2;
+        uint64_t os13_0 : 6; /**< [ 21: 16](RO/H) os13_0 offset compensation override bits. */
+        uint64_t reserved_22_23 : 2;
+        uint64_t os13_1 : 6; /**< [ 29: 24](RO/H) os13_1 offset compensation override bits. */
+        uint64_t reserved_30_31 : 2;
+        uint64_t os14_0 : 6; /**< [ 37: 32](RO/H) os14_0 offset compensation override bits. */
+        uint64_t reserved_38_39 : 2;
+        uint64_t os14_1 : 6; /**< [ 45: 40](RO/H) os14_1 offset compensation override bits. */
+        uint64_t reserved_46_47 : 2;
+        uint64_t os15_0 : 6; /**< [ 53: 48](RO/H) os15_0 offset compensation override bits. */
+        uint64_t reserved_54_55 : 2;
+        uint64_t os15_1 : 6; /**< [ 61: 56](RO/H) os15_1 offset compensation override bits. */
+        uint64_t reserved_62_63 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_rx_os_4_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_4_bsts bdk_gsernx_lanex_rx_os_4_bsts_t;
+
+/* CSR address; fatal on non-CN9XXX models or out-of-range indices. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_4_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_4_BSTS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090001970ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_RX_OS_4_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) bdk_gsernx_lanex_rx_os_4_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) "GSERNX_LANEX_RX_OS_4_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_4_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_5_bcfg
+ *
+ * GSER Lane Receiver Offset Control Group 5 Register
+ * This register contains controls for triggering RX offset compensation state machines.
+ */
+union bdk_gsernx_lanex_rx_os_5_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_os_5_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t run_eye_oscal : 1; /**< [ 54: 54](R/W) Enables eye (doute) DFE offset compensation to run at the correct
+ point in the hardware-driven reset sequence if asserted when the eye data path
+ bringup sequence begins. If deasserted when the eye data path bringup sequence
+ is run, this bit may be asserted later under software control prior to
+ performing eye measurements. */
+ uint64_t reserved_53 : 1;
+ uint64_t c1_e_adjust : 5; /**< [ 52: 48](R/W) Adjust value magnitude for the error slice in the E path. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t c1_i_adjust : 5; /**< [ 44: 40](R/W) Adjust value magnitude for the error slice in the I path. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t c1_q_adjust : 5; /**< [ 36: 32](R/W) Adjust value magnitude for the error slice in the Q path. */
+ uint64_t offset_comp_en : 1; /**< [ 31: 31](R/W) Enable AFE and DFE offset compensation to run at the
+ correct point in the hardware-driven reset sequence if asserted when
+ the reset sequence begins. If deasserted when the hardware-driven
+ reset sequence is run, this bit should be asserted later, once,
+ under software control to initiate AFE and DFE offset compensation
+ in a pure software-driven bringup. This bit field affects both AFE
+ and DFE offset compensation training. */
+ uint64_t binsrch_margin : 3; /**< [ 30: 28](R/W) Binary Search Noise Margin. This value is added to the binary search difference
+ count value. This bit field affects the binary search engine for IR TRIM.
+ 0x0 = 13'h000
+ 0x1 = 13'h020
+ 0x2 = 13'h040
+ 0x3 = 13'h080
+ 0x4 = 13'h100
+ 0x5 = 13'h200
+ 0x6 = 13'h400
+ 0x7 = 13'h800 (use with caution, may cause difference count overflow) */
+ uint64_t binsrch_wait : 10; /**< [ 27: 18](R/W) Number of clock cycles to wait after changing the offset code.
+ It is used to allow adjustments in wait time due to changes in the service clock
+ frequency.
+ This bit field affects the binary search engines for DFE/AFE offset and IR TRIM. */
+ uint64_t binsrch_acclen : 2; /**< [ 17: 16](R/W) Number of words to include in the binary search accumulation. This bit field
+ affects the binary search engines for DFE/AFE offset and IR TRIM.
+ 0x0 = 16 words.
+ 0x1 = 32 words.
+ 0x2 = 64 words.
+ 0x3 = 128 words. */
+ uint64_t settle_wait : 4; /**< [ 15: 12](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t ir_trim_early_iter_max : 5; /**< [ 9: 5](R/W) Early IR TRIM Iteration Count Max. Controls the number of iterations
+ to perform during the Early IR trim. If set to 0, no iterations are done
+ and Early IR TRIM is skipped. Valid range 0 to 31. Note that
+ GSERN()_LANE()_RST_CNT4_BCFG[DFE_AFE_OSCAL_WAIT] must be increased to allow for
+ iterations. */
+ uint64_t ir_trim_comp_en : 1; /**< [ 4: 4](R/W) Enable IR TRIM compensation to run at the correct
+ point in the hardware-driven reset sequence if asserted when the
+ reset sequence begins. This bit field affects only IR trim compensation. */
+ uint64_t ir_trim_trigger : 1; /**< [ 3: 3](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the IR trim compensation FSM to run. Note that this is
+ a debug-only feature. */
+ uint64_t idle_offset_trigger : 1; /**< [ 2: 2](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the IDLE offset compensation training FSM to run. Note
+ that this is a debug-only feature. */
+ uint64_t afe_offset_trigger : 1; /**< [ 1: 1](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the AFE offset compensation training FSM to run. Note
+ that this is a debug-only feature and should not be performed while
+ transferring data on the serial link. Note also that only one of the
+ offset compensation training engines can be run at a time. To
+ trigger both DFE offset compensation and AFE offset compensation,
+ they must be run sequentially with the CSR write to trigger the
+ second in the sequence waiting until the first has completed
+ (indicated in GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] or
+ GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS]). */
+ uint64_t dfe_offset_trigger : 1; /**< [ 0: 0](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the DFE offset compensation training FSM to run. Note
+ that only one of the offset compensation training engines can be run
+ at a time. To trigger both DFE offset compensation and AFE offset
+ compensation, they must be run sequentially with the CSR write to
+ the second in the sequence waiting until the first has completed
+ (indicated in GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] or
+ GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS]). */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_offset_trigger : 1; /**< [ 0: 0](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the DFE offset compensation training FSM to run. Note
+ that only one of the offset compensation training engines can be run
+ at a time. To trigger both DFE offset compensation and AFE offset
+ compensation, they must be run sequentially with the CSR write to
+ the second in the sequence waiting until the first has completed
+ (indicated in GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] or
+ GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS]). */
+ uint64_t afe_offset_trigger : 1; /**< [ 1: 1](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the AFE offset compensation training FSM to run. Note
+ that this is a debug-only feature and should not be performed while
+ transferring data on the serial link. Note also that only one of the
+ offset compensation training engines can be run at a time. To
+ trigger both DFE offset compensation and AFE offset compensation,
+ they must be run sequentially with the CSR write to trigger the
+ second in the sequence waiting until the first has completed
+ (indicated in GSERN()_LANE()_RX_OS_5_BSTS[DFE_OFFSET_STATUS] or
+ GSERN()_LANE()_RX_OS_5_BSTS[AFE_OFFSET_STATUS]). */
+ uint64_t idle_offset_trigger : 1; /**< [ 2: 2](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the IDLE offset compensation training FSM to run. Note
+ that this is a debug-only feature. */
+ uint64_t ir_trim_trigger : 1; /**< [ 3: 3](R/W) Writing this bit to a logic 1 when the previous value was logic 0
+ will cause the IR trim compensation FSM to run. Note that this is
+ a debug-only feature. */
+ uint64_t ir_trim_comp_en : 1; /**< [ 4: 4](R/W) Enable IR TRIM compensation to run at the correct
+ point in the hardware-driven reset sequence if asserted when the
+ reset sequence begins. This bit field affects only IR trim compensation. */
+ uint64_t ir_trim_early_iter_max : 5; /**< [ 9: 5](R/W) Early IR TRIM Iteration Count Max. Controls the number of iterations
+ to perform during the Early IR trim. If set to 0, no iterations are done
+ and Early IR TRIM is skipped. Valid range 0 to 31. Note that
+ GSERN()_LANE()_RST_CNT4_BCFG[DFE_AFE_OSCAL_WAIT] must be increased to allow for
+ iterations. */
+ uint64_t reserved_10_11 : 2;
+ uint64_t settle_wait : 4; /**< [ 15: 12](R/W) Number of clock cycles for the DFE adaptation to wait after changing the
+ adjusted C1 values before resuming accumulation. */
+ uint64_t binsrch_acclen : 2; /**< [ 17: 16](R/W) Number of words to include in the binary search accumulation. This bit field
+ affects the binary search engines for DFE/AFE offset and IR TRIM.
+ 0x0 = 16 words.
+ 0x1 = 32 words.
+ 0x2 = 64 words.
+ 0x3 = 128 words. */
+ uint64_t binsrch_wait : 10; /**< [ 27: 18](R/W) Number of clock cycles to wait after changing the offset code.
+ It is used to allow adjustments in wait time due to changes in the service clock
+ frequency.
+ This bit field affects the binary search engines for DFE/AFE offset and IR TRIM. */
+ uint64_t binsrch_margin : 3; /**< [ 30: 28](R/W) Binary Search Noise Margin. This value is added to the binary search difference
+ count value. This bit field affects the binary search engine for IR TRIM.
+ 0x0 = 13'h000
+ 0x1 = 13'h020
+ 0x2 = 13'h040
+ 0x3 = 13'h080
+ 0x4 = 13'h100
+ 0x5 = 13'h200
+ 0x6 = 13'h400
+ 0x7 = 13'h800 (use with caution, may cause difference count overflow) */
+ uint64_t offset_comp_en : 1; /**< [ 31: 31](R/W) Enable AFE and DFE offset compensation to run at the
+ correct point in the hardware-driven reset sequence if asserted when
+ the reset sequence begins. If deasserted when the hardware-driven
+ reset sequence is run, this bit should be asserted later, once,
+ under software control to initiate AFE and DFE offset compensation
+ in a pure software-driven bringup. This bit field affects both AFE
+ and DFE offset compensation training. */
+ uint64_t c1_q_adjust : 5; /**< [ 36: 32](R/W) Adjust value magnitude for the error slice in the Q path. */
+ uint64_t reserved_37_39 : 3;
+ uint64_t c1_i_adjust : 5; /**< [ 44: 40](R/W) Adjust value magnitude for the error slice in the I path. */
+ uint64_t reserved_45_47 : 3;
+ uint64_t c1_e_adjust : 5; /**< [ 52: 48](R/W) Adjust value magnitude for the error slice in the E path. */
+ uint64_t reserved_53 : 1;
+ uint64_t run_eye_oscal : 1; /**< [ 54: 54](R/W) Enables eye (doute) DFE offset compensation to run at the correct
+ point in the hardware-driven reset sequence if asserted when the eye data path
+ bringup sequence begins. If deasserted when the eye data path bringup sequence
+ is run, this bit may be asserted later under software control prior to
+ performing eye measurements. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_os_5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_5_bcfg bdk_gsernx_lanex_rx_os_5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_5_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001840ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_OS_5_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) bdk_gsernx_lanex_rx_os_5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) "GSERNX_LANEX_RX_OS_5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_os_5_bsts
+ *
+ * GSER Lane Receiver Offset Status Group 5 Register
+ * Status for the RX offset compensation state machines (completion flags,
+ * valid indicators, and idle-detect calibration results).
+ */
+union bdk_gsernx_lanex_rx_os_5_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_os_5_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t idle : 1; /**< [ 24: 24](RO/H) For diagnostic use only.
+ Internal:
+ A copy of GSERN()_LANE()_RX_IDLEDET_BSTS[IDLE] for verification convenience. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t idle_offset_valid : 1; /**< [ 17: 17](R/W1C/H) Valid indicator for the IDLE offset calibration values. This bit gets set when
+ idle offset calibration
+ completes, and may be cleared by software write to 1. (NOTE(review): generated
+ text said "DFE", apparently copy/pasted from [DFE_OFFSETS_VALID]; field name
+ indicates IDLE offset -- confirm against the HRM.) */
+ uint64_t dfe_offsets_valid : 1; /**< [ 16: 16](R/W1C/H) Valid indicator for the DFE Offset calibration values. This bit gets set when
+ DFE offset calibration
+ completes, and may be cleared by software write to 1. */
+ uint64_t idle_os : 6; /**< [ 15: 10](RO/H) Value for the IDLE detect offset currently in use. This field may differ from
+ [IDLE_OS_CAL] if idle hysteresis is enabled. This field is only valid when the
+ idle detect offset calibration is not running. */
+ uint64_t idle_os_cal : 6; /**< [ 9: 4](RO/H) Result of IDLE detect offset calibration. This field is only valid when the idle
+ detect offset calibration is not running. */
+ uint64_t ir_trim_status : 1; /**< [ 3: 3](RO/H) When 1, indicates that the IR TRIM compensation FSM has completed operations.
+ Cleared to 0 by hardware when the IR TRIM compensation training FSM is triggered by software
+ or state machines. */
+ uint64_t idle_offset_status : 1; /**< [ 2: 2](RO/H) When 1, indicates that the IDLE offset compensation training FSM has completed operations.
+ Cleared to 0 by hardware when the IDLE offset compensation training FSM is triggered by software,
+ hardware timers, or state machines. */
+ uint64_t afe_offset_status : 1; /**< [ 1: 1](RO/H) When 1, indicates that the AFE offset compensation training FSM has completed operations.
+ Cleared to 0 by hardware when the AFE offset compensation training FSM is triggered by software,
+ hardware timers, or state machines. */
+ uint64_t dfe_offset_status : 1; /**< [ 0: 0](RO/H) When 1, indicates that the DFE offset compensation training FSM has completed operations.
+ Cleared to 0 by hardware when the DFE offset compensation training FSM is triggered by software,
+ hardware timers, or state machines. */
+#else /* Word 0 - Little Endian */
+ uint64_t dfe_offset_status : 1; /**< [ 0: 0](RO/H) When 1, indicates that the DFE offset compensation training FSM has completed operations.
+ Cleared to 0 by hardware when the DFE offset compensation training FSM is triggered by software,
+ hardware timers, or state machines. */
+ uint64_t afe_offset_status : 1; /**< [ 1: 1](RO/H) When 1, indicates that the AFE offset compensation training FSM has completed operations.
+ Cleared to 0 by hardware when the AFE offset compensation training FSM is triggered by software,
+ hardware timers, or state machines. */
+ uint64_t idle_offset_status : 1; /**< [ 2: 2](RO/H) When 1, indicates that the IDLE offset compensation training FSM has completed operations.
+ Cleared to 0 by hardware when the IDLE offset compensation training FSM is triggered by software,
+ hardware timers, or state machines. */
+ uint64_t ir_trim_status : 1; /**< [ 3: 3](RO/H) When 1, indicates that the IR TRIM compensation FSM has completed operations.
+ Cleared to 0 by hardware when the IR TRIM compensation training FSM is triggered by software
+ or state machines. */
+ uint64_t idle_os_cal : 6; /**< [ 9: 4](RO/H) Result of IDLE detect offset calibration. This field is only valid when the idle
+ detect offset calibration is not running. */
+ uint64_t idle_os : 6; /**< [ 15: 10](RO/H) Value for the IDLE detect offset currently in use. This field may differ from
+ [IDLE_OS_CAL] if idle hysteresis is enabled. This field is only valid when the
+ idle detect offset calibration is not running. */
+ uint64_t dfe_offsets_valid : 1; /**< [ 16: 16](R/W1C/H) Valid indicator for the DFE Offset calibration values. This bit gets set when
+ DFE offset calibration
+ completes, and may be cleared by software write to 1. */
+ uint64_t idle_offset_valid : 1; /**< [ 17: 17](R/W1C/H) Valid indicator for the IDLE offset calibration values. This bit gets set when
+ idle offset calibration
+ completes, and may be cleared by software write to 1. (NOTE(review): generated
+ text said "DFE", apparently copy/pasted from [DFE_OFFSETS_VALID]; field name
+ indicates IDLE offset -- confirm against the HRM.) */
+ uint64_t reserved_18_23 : 6;
+ uint64_t idle : 1; /**< [ 24: 24](RO/H) For diagnostic use only.
+ Internal:
+ A copy of GSERN()_LANE()_RX_IDLEDET_BSTS[IDLE] for verification convenience. */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_os_5_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_os_5_bsts bdk_gsernx_lanex_rx_os_5_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_5_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_OS_5_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090001980ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_OS_5_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) bdk_gsernx_lanex_rx_os_5_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) "GSERNX_LANEX_RX_OS_5_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_OS_5_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_qac_bcfg
+ *
+ * GSER Lane RX Quadrature Corrector Base Configuration Register
+ * Static controls for the quadrature corrector in the receiver. All fields
+ * must be set prior to exiting reset.
+ */
+union bdk_gsernx_lanex_rx_qac_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_qac_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_42_63 : 22;
+ uint64_t cdr_qac_selq : 1; /**< [ 41: 41](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t cdr_qac_sele : 1; /**< [ 40: 40](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t reserved_35_39 : 5;
+ uint64_t qac_cntset_q : 3; /**< [ 34: 32](R/W) Programmable counter depth for QAC corrector value for the doutq
+ path. The 3-bit encoding represents an integration time with 12-7 bit
+ counter. The counter stops counting until it saturates or reaches
+ 0. If [EN_QAC_Q] is clear, this register is not used. If
+ [EN_QAC_Q] is set, this correction value will be output to the
+ CDR loop. Set this field prior to exiting reset. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t qac_cntset_e : 3; /**< [ 26: 24](R/W) Programmable counter depth for QAC corrector value for the doute
+ path. The 3-bit encoding represents an integration time with 12-7 bit
+ counter. The counter stops counting until it saturates or reaches
+ 0. If [EN_QAC_E] is clear, this register is not used. If
+ [EN_QAC_E] is set, this correction value will be output to the
+ CDR loop. Set this field prior to exiting reset. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t qac_ref_qoffs : 6; /**< [ 21: 16](R/W) Target value for the phase relationship between the i-path (leading)
+ and the q-path (trailing). The range is zero to 180 degrees in 64
+ steps, i.e., 2.8571 degrees per step. Used only when the QAC filter
+ is enabled and selected. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t qac_ref_eoffs : 6; /**< [ 13: 8](R/W) Target value for the phase relationship between the i-path (leading)
+ and the e-path (trailing). The range is zero to 180 degrees in 64
+ steps, i.e., 2.8571 degrees per step. Used only when the QAC filter
+ is enabled and selected. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t en_qac_e : 1; /**< [ 1: 1](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If it's asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t en_qac_q : 1; /**< [ 0: 0](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If it's asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t en_qac_q : 1; /**< [ 0: 0](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If it's asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t en_qac_e : 1; /**< [ 1: 1](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If it's asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t qac_ref_eoffs : 6; /**< [ 13: 8](R/W) Target value for the phase relationship between the i-path (leading)
+ and the e-path (trailing). The range is zero to 180 degrees in 64
+ steps, i.e., 2.8571 degrees per step. Used only when the QAC filter
+ is enabled and selected. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t qac_ref_qoffs : 6; /**< [ 21: 16](R/W) Target value for the phase relationship between the i-path (leading)
+ and the q-path (trailing). The range is zero to 180 degrees in 64
+ steps, i.e., 2.8571 degrees per step. Used only when the QAC filter
+ is enabled and selected. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t qac_cntset_e : 3; /**< [ 26: 24](R/W) Programmable counter depth for QAC corrector value for the doute
+ path. The 3-bit encoding represents an integration time with 12-7 bit
+ counter. The counter stops counting until it saturates or reaches
+ 0. If [EN_QAC_E] is clear, this register is not used. If
+ [EN_QAC_E] is set, this correction value will be output to the
+ CDR loop. Set this field prior to exiting reset. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t qac_cntset_q : 3; /**< [ 34: 32](R/W) Programmable counter depth for QAC corrector value for the doutq
+ path. The 3-bit encoding represents an integration time with 12-7 bit
+ counter. The counter stops counting until it saturates or reaches
+ 0. If [EN_QAC_Q] is clear, this register is not used. If
+ [EN_QAC_Q] is set, this correction value will be output to the
+ CDR loop. Set this field prior to exiting reset. */
+ uint64_t reserved_35_39 : 5;
+ uint64_t cdr_qac_sele : 1; /**< [ 40: 40](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t cdr_qac_selq : 1; /**< [ 41: 41](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t reserved_42_63 : 22;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_qac_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_qac_bcfg bdk_gsernx_lanex_rx_qac_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_QAC_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_QAC_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000ee0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_QAC_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) bdk_gsernx_lanex_rx_qac_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) "GSERNX_LANEX_RX_QAC_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_QAC_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_qac_bsts
+ *
+ * GSER Lane RX Quadrature Corrector Base Status Register
+ * Quadrature corrector outputs captured in a CSR register; results should be close to
+ * GSERN()_LANE()_RX_QAC_BCFG[QAC_REF_EOFFS] and
+ * GSERN()_LANE()_RX_QAC_BCFG[QAC_REF_QOFFS] when the QAC is in use and stable.
+ * All fields in this register are read-only hardware snapshots (RO/H).
+ */
+union bdk_gsernx_lanex_rx_qac_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_qac_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_22_63 : 42;
+ uint64_t qac_qoffs : 6; /**< [ 21: 16](RO/H) Quadrature filter control output for the phase relationship between
+ the i-path (leading) and the q-path (trailing). The range is zero
+ to 180 degrees in 64 steps, i.e., 2.8571 degrees per step. Valid only
+ when the QAC filter is enabled and selected. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t qac_eoffs : 6; /**< [ 13: 8](RO/H) Quadrature filter control output for the phase relationship between
+ the i-path (leading) and the e-path (trailing). The range is zero
+ to 180 degrees in 64 steps, i.e., 2.8571 degrees per step. Valid only
+ when the QAC filter is enabled and selected. */
+ uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_7 : 8;
+ uint64_t qac_eoffs : 6; /**< [ 13: 8](RO/H) Quadrature filter control output for the phase relationship between
+ the i-path (leading) and the e-path (trailing). The range is zero
+ to 180 degrees in 64 steps, i.e., 2.8571 degrees per step. Valid only
+ when the QAC filter is enabled and selected. */
+ uint64_t reserved_14_15 : 2;
+ uint64_t qac_qoffs : 6; /**< [ 21: 16](RO/H) Quadrature filter control output for the phase relationship between
+ the i-path (leading) and the q-path (trailing). The range is zero
+ to 180 degrees in 64 steps, i.e., 2.8571 degrees per step. Valid only
+ when the QAC filter is enabled and selected. */
+ uint64_t reserved_22_63 : 42;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_qac_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_qac_bsts bdk_gsernx_lanex_rx_qac_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_QAC_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_RX_QAC_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000ef0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_RX_QAC_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) bdk_gsernx_lanex_rx_qac_bsts_t
+#define bustype_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) "GSERNX_LANEX_RX_QAC_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_QAC_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_rx_st_bcfg
+ *
+ * GSER Lane RX Static Base Configuration Register
+ * This register controls for static RX settings that do not need FSM overrides.
+ */
+union bdk_gsernx_lanex_rx_st_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_rx_st_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t rxcdrfsmi : 1; /**< [ 48: 48](R/W) Set to provide the RX interpolator with the RX CDR load I
+ clock (rxcdrldi). deassert (low) to provide the interpolator with
+ the RX CDR load Q clock (rxcdrldq). This bit is ignored when
+ txcdrdfsm is asserted (high), which set the RX interpolator
+ and CDR FSM to use the TX clock (txcdrld).
+
+ Internal:
+ (For initial testing, assert rxcdrfsmi, but if we have trouble
+ meeting timing, we can deassert this signal to provide some
+ additional timing margin from the last flops in the RX CDR FSM to
+ the flops interpolator.) */
+ uint64_t reserved_42_47 : 6;
+ uint64_t rx_dcc_iboost : 1; /**< [ 41: 41](R/W) Set to assert the iboost control bit of the
+ receiver duty cycle correcter. Should be programmed as desired before
+ sequencing the receiver reset state machine. Differs
+ from [RX_DCC_LOWF] in the data rate range that it is set at. */
+ uint64_t rx_dcc_lowf : 1; /**< [ 40: 40](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t reserved_35_39 : 5;
+ uint64_t bstuff : 1; /**< [ 34: 34](R/W) Set to place custom receive pipe in bit-stuffing
+ mode. Only the odd bits in the half-rate DFE outputs are passed to
+ the cdrout* and dout* pipe outputs; the odd bits are duplicated to
+ fill up the expected data path width. */
+ uint64_t rx_idle_lowf : 2; /**< [ 33: 32](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t idle_os_bitlen : 2; /**< [ 31: 30](R/W) Number of bits to accumulate for IDLE detect offset calibration, measured in
+ cycles of the 100 MHz system service clock.
+ 0x0 = 5 cycles.
+ 0x1 = 30 cycles.
+ 0x2 = 60 cycles.
+ 0x3 = 250 cycles. */
+ uint64_t idle_os_ovrd_en : 1; /**< [ 29: 29](R/W) Enable use of [IDLE_OS_OVRD]. */
+ uint64_t refset : 5; /**< [ 28: 24](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as
+ idle.
+ 0x0 = Threshold (refp-refn) is 23 mV.
+ 0x1 = Threshold (refp-refn) is 27.4 mV.
+ 0x2 = Threshold (refp-refn) is 31.8 mV.
+ 0x3 = Threshold (refp-refn) is 36.2 mV.
+ 0x4 = Threshold (refp-refn) is 40.6 mV.
+ 0x5 = Threshold (refp-refn) is 45 mV.
+ 0x6 = Threshold (refp-refn) is 49.4 mV.
+ 0x7 = Threshold (refp-refn) is 53.8 mV.
+ 0x8 = Threshold (refp-refn) is 58.2 mV.
+ 0x9 = Threshold (refp-refn) is 62.6 mV.
+ 0xA = Threshold (refp-refn) is 67 mV.
+ 0xB = Threshold (refp-refn) is 71.4 mV.
+ 0xC = Threshold (refp-refn) is 75.8 mV.
+ 0xD = Threshold (refp-refn) is 80.2 mV.
+ 0xE = Threshold (refp-refn) is 84.6 mV.
+ 0xF = Threshold (refp-refn) is 89 mV.
+ 0x10 = Threshold (refp-refn) is 55 mV.
+ 0x11 = Threshold (refp-refn) is 62.9 mV.
+ 0x12 = Threshold (refp-refn) is 70.8 mV.
+ 0x13 = Threshold (refp-refn) is 78.7 mV.
+ 0x14 = Threshold (refp-refn) is 86.6 mV.
+ 0x15 = Threshold (refp-refn) is 94.5 mV.
+ 0x16 = Threshold (refp-refn) is 102.4 mV.
+ 0x17 = Threshold (refp-refn) is 110.3 mV.
+ 0x18 = Threshold (refp-refn) is 118.2 mV.
+ 0x19 = Threshold (refp-refn) is 126.1 mV.
+ 0x1A = Threshold (refp-refn) is 134 mV.
+ 0x1B = Threshold (refp-refn) is 141.9 mV.
+ 0x1C = Threshold (refp-refn) is 149.8 mV.
+ 0x1D = Threshold (refp-refn) is 157.7 mV.
+ 0x1E = Threshold (refp-refn) is 165.6 mV.
+ 0x1F = Threshold (refp-refn) is 173.5 mV. */
+ uint64_t idle_os_ovrd : 6; /**< [ 23: 18](R/W) Override value for the IDLE detect offset calibration. As with the
+ other offset DACs in the RX, the MSB sets the sign, and the 5 LSBs
+ are binary-encoded magnitudes. */
+ uint64_t en_idle_cal : 1; /**< [ 17: 17](R/W) Set to put the idle detector into calibration mode. */
+ uint64_t rxelecidle : 1; /**< [ 16: 16](R/W) Set to place the CDR finite state machine into a reset state so it does not try
+ to track clock or data and starts from a reset state when the CDR finite state
+ machine begins or resumes operation. deassert (low) to allow the CDR FSM to run. */
+ uint64_t rxcdrhold : 1; /**< [ 15: 15](R/W) Set to place the CDR finite state machine (FSM) into a hold state so it does not
+ try to track clock or data, which would not normally be present during
+ electrical idle. The CDR FSM state is preserved, provided [RXELECIDLE] is not
+ asserted, so the CDR FSM resumes operation with the same settings in effect
+ prior to entering the hold state. deassert (low) to allow the CDR FSM to run. */
+ uint64_t rxcdrramp : 1; /**< [ 14: 14](R/W) For diagnostic use only.
+ Internal:
+ For lab characterization use only. Set to 1 to cause the CDR FSM to ramp the 1st
+ order state by [INC1], independent of voter, & hold the 2nd order state. */
+ uint64_t reserved_13 : 1;
+ uint64_t en_sh_lb : 1; /**< [ 12: 12](R/W) Enable for shallow loopback mode within RX. Used when in shallow loopback
+ mode to mux the CDR receive clock onto the transmit data path clock
+ to ensure that the clock frequencies are matched (to prevent data overrun).
+ This signal should be enabled along with GSERN()_LANE()_PLL_2_BCFG[SHLB_EN] for
+ the PLL. */
+ uint64_t erc : 4; /**< [ 11: 8](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. Set as follows:
+ \<pre\>
+ if (data_period \>= 500ps) erc = 4'h1;
+ else if (data_period \>= 407ps) erc = 4'h2;
+ else if (data_period \>= 333ps) erc = 4'h3;
+ else if (data_period \>= 167ps) erc = 4'h4;
+ else if (data_period \>= 166ps) erc = 4'h5;
+ else if (data_period \>= 100ps) erc = 4'h7;
+ else if (data_period \>= 85ps) erc = 4'h8;
+ else if (data_period \>= 80ps) erc = 4'h9;
+ else if (data_period \>= 62ps) erc = 4'hA;
+ else if (data_period \>= 55ps) erc = 4'hB;
+ else if (data_period \>= 50ps) erc = 4'hC;
+ else if (data_period \>= 45ps) erc = 4'hD;
+ else if (data_period \>= 38ps) erc = 4'hE;
+ else erc = 4'hF;
+ \</pre\> */
+ uint64_t term : 2; /**< [ 7: 6](R/W) Termination voltage control. Setting to 0x1 (VSSA) is typically appropriate for
+ PCIe channels. For channels without a series board capacitor the typical setting
+ would be 0x0 (floating).
+ 0x0 = Floating.
+ 0x1 = VSSA.
+ 0x2 = VDDA.
+ 0x3 = VSSA. */
+ uint64_t en_rt85 : 1; /**< [ 5: 5](R/W) Enable 85 Ohm termination in the receiver. */
+ uint64_t en_lb : 1; /**< [ 4: 4](R/W) Enable for near-end TX loopback path. */
+ uint64_t en_rterm : 1; /**< [ 3: 3](R/W) For debug use only. Set to one to enable the receiver's termination circuit
+ during bringup. Setting to zero will turn off receiver termination. */
+ uint64_t reserved_0_2 : 3;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_2 : 3;
+ uint64_t en_rterm : 1; /**< [ 3: 3](R/W) For debug use only. Set to one to enable the receiver's termination circuit
+ during bringup. Setting to zero will turn off receiver termination. */
+ uint64_t en_lb : 1; /**< [ 4: 4](R/W) Enable for near-end TX loopback path. */
+ uint64_t en_rt85 : 1; /**< [ 5: 5](R/W) Enable 85 Ohm termination in the receiver. */
+ uint64_t term : 2; /**< [ 7: 6](R/W) Termination voltage control. Setting to 0x1 (VSSA) is typically appropriate for
+ PCIe channels. For channels without a series board capacitor the typical setting
+ would be 0x0 (floating).
+ 0x0 = Floating.
+ 0x1 = VSSA.
+ 0x2 = VDDA.
+ 0x3 = VSSA. */
+ uint64_t erc : 4; /**< [ 11: 8](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. Set as follows:
+ \<pre\>
+ if (data_period \>= 500ps) erc = 4'h1;
+ else if (data_period \>= 407ps) erc = 4'h2;
+ else if (data_period \>= 333ps) erc = 4'h3;
+ else if (data_period \>= 167ps) erc = 4'h4;
+ else if (data_period \>= 166ps) erc = 4'h5;
+ else if (data_period \>= 100ps) erc = 4'h7;
+ else if (data_period \>= 85ps) erc = 4'h8;
+ else if (data_period \>= 80ps) erc = 4'h9;
+ else if (data_period \>= 62ps) erc = 4'hA;
+ else if (data_period \>= 55ps) erc = 4'hB;
+ else if (data_period \>= 50ps) erc = 4'hC;
+ else if (data_period \>= 45ps) erc = 4'hD;
+ else if (data_period \>= 38ps) erc = 4'hE;
+ else erc = 4'hF;
+ \</pre\> */
+ uint64_t en_sh_lb : 1; /**< [ 12: 12](R/W) Enable for shallow loopback mode within RX. Used when in shallow loopback
+ mode to mux the CDR receive clock onto the transmit data path clock
+ to ensure that the clock frequencies are matched (to prevent data overrun).
+ This signal should be enabled along with GSERN()_LANE()_PLL_2_BCFG[SHLB_EN] for
+ the PLL. */
+ uint64_t reserved_13 : 1;
+ uint64_t rxcdrramp : 1; /**< [ 14: 14](R/W) For diagnostic use only.
+ Internal:
+ For lab characterization use only. Set to 1 to cause the CDR FSM to ramp the 1st
+ order state by [INC1], independent of voter, & hold the 2nd order state. */
+ uint64_t rxcdrhold : 1; /**< [ 15: 15](R/W) Set to place the CDR finite state machine (FSM) into a hold state so it does not
+ try to track clock or data, which would not normally be present during
+ electrical idle. The CDR FSM state is preserved, provided [RXELECIDLE] is not
+ asserted, so the CDR FSM resumes operation with the same settings in effect
+ prior to entering the hold state. deassert (low) to allow the CDR FSM to run. */
+ uint64_t rxelecidle : 1; /**< [ 16: 16](R/W) Set to place the CDR finite state machine into a reset state so it does not try
+ to track clock or data and starts from a reset state when the CDR finite state
+ machine begins or resumes operation. deassert (low) to allow the CDR FSM to run. */
+ uint64_t en_idle_cal : 1; /**< [ 17: 17](R/W) Set to put the idle detector into calibration mode. */
+ uint64_t idle_os_ovrd : 6; /**< [ 23: 18](R/W) Override value for the IDLE detect offset calibration. As with the
+ other offset DACs in the RX, the MSB sets the sign, and the 5 LSBs
+ are binary-encoded magnitudes. */
+ uint64_t refset : 5; /**< [ 28: 24](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as
+ idle.
+ 0x0 = Threshold (refp-refn) is 23 mV.
+ 0x1 = Threshold (refp-refn) is 27.4 mV.
+ 0x2 = Threshold (refp-refn) is 31.8 mV.
+ 0x3 = Threshold (refp-refn) is 36.2 mV.
+ 0x4 = Threshold (refp-refn) is 40.6 mV.
+ 0x5 = Threshold (refp-refn) is 45 mV.
+ 0x6 = Threshold (refp-refn) is 49.4 mV.
+ 0x7 = Threshold (refp-refn) is 53.8 mV.
+ 0x8 = Threshold (refp-refn) is 58.2 mV.
+ 0x9 = Threshold (refp-refn) is 62.6 mV.
+ 0xA = Threshold (refp-refn) is 67 mV.
+ 0xB = Threshold (refp-refn) is 71.4 mV.
+ 0xC = Threshold (refp-refn) is 75.8 mV.
+ 0xD = Threshold (refp-refn) is 80.2 mV.
+ 0xE = Threshold (refp-refn) is 84.6 mV.
+ 0xF = Threshold (refp-refn) is 89 mV.
+ 0x10 = Threshold (refp-refn) is 55 mV.
+ 0x11 = Threshold (refp-refn) is 62.9 mV.
+ 0x12 = Threshold (refp-refn) is 70.8 mV.
+ 0x13 = Threshold (refp-refn) is 78.7 mV.
+ 0x14 = Threshold (refp-refn) is 86.6 mV.
+ 0x15 = Threshold (refp-refn) is 94.5 mV.
+ 0x16 = Threshold (refp-refn) is 102.4 mV.
+ 0x17 = Threshold (refp-refn) is 110.3 mV.
+ 0x18 = Threshold (refp-refn) is 118.2 mV.
+ 0x19 = Threshold (refp-refn) is 126.1 mV.
+ 0x1A = Threshold (refp-refn) is 134 mV.
+ 0x1B = Threshold (refp-refn) is 141.9 mV.
+ 0x1C = Threshold (refp-refn) is 149.8 mV.
+ 0x1D = Threshold (refp-refn) is 157.7 mV.
+ 0x1E = Threshold (refp-refn) is 165.6 mV.
+ 0x1F = Threshold (refp-refn) is 173.5 mV. */
+ uint64_t idle_os_ovrd_en : 1; /**< [ 29: 29](R/W) Enable use of [IDLE_OS_OVRD]. */
+ uint64_t idle_os_bitlen : 2; /**< [ 31: 30](R/W) Number of bits to accumulate for IDLE detect offset calibration, measured in
+ cycles of the 100 MHz system service clock.
+ 0x0 = 5 cycles.
+ 0x1 = 30 cycles.
+ 0x2 = 60 cycles.
+ 0x3 = 250 cycles. */
+ uint64_t rx_idle_lowf : 2; /**< [ 33: 32](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t bstuff : 1; /**< [ 34: 34](R/W) Set to place custom receive pipe in bit-stuffing
+ mode. Only the odd bits in the half-rate DFE outputs are passed to
+ the cdrout* and dout* pipe outputs; the odd bits are duplicated to
+ fill up the expected data path width. */
+ uint64_t reserved_35_39 : 5;
+ uint64_t rx_dcc_lowf : 1; /**< [ 40: 40](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t rx_dcc_iboost : 1; /**< [ 41: 41](R/W) Set to assert the iboost control bit of the
+ receiver duty cycle correcter. Should be programmed as desired before
+ sequencing the receiver reset state machine. Differs
+ from [RX_DCC_LOWF] in the data rate range that it is set at. */
+ uint64_t reserved_42_47 : 6;
+ uint64_t rxcdrfsmi : 1; /**< [ 48: 48](R/W) Set to provide the RX interpolator with the RX CDR load I
+ clock (rxcdrldi). deassert (low) to provide the interpolator with
+ the RX CDR load Q clock (rxcdrldq). This bit is ignored when
+ txcdrdfsm is asserted (high), which set the RX interpolator
+ and CDR FSM to use the TX clock (txcdrld).
+
+ Internal:
+ (For initial testing, assert rxcdrfsmi, but if we have trouble
+ meeting timing, we can deassert this signal to provide some
+ additional timing margin from the last flops in the RX CDR FSM to
+ the flops interpolator.) */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_rx_st_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_rx_st_bcfg bdk_gsernx_lanex_rx_st_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_RX_ST_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the RSL address of GSERN(a)_LANE(b)_RX_ST_BCFG. Only valid on
+   CN9XXX with a <= 7 and b <= 4; any other combination is reported fatal. */
+static inline uint64_t BDK_GSERNX_LANEX_RX_ST_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t addr = 0x87e090000ff0ll;     /* register base */
+        addr += ((a) & 0x7) * 0x1000000ll;    /* GSERN module stride */
+        addr += ((b) & 0x7) * 0x10000ll;      /* lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_RX_ST_BCFG", 2, a, b, 0, 0);
+}
+
+/* Standard BDK CSR accessor metadata for GSERN(a)_LANE(b)_RX_ST_BCFG:
+   C type, bus type, printable name, BAR, bus number and argument list. */
+#define typedef_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) bdk_gsernx_lanex_rx_st_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) "GSERNX_LANEX_RX_ST_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_RX_ST_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_phy2_bcfg
+ *
+ * GSER Lane SATA Control 2 Register
+ * Control settings for SATA PHY functionality.
+ */
+union bdk_gsernx_lanex_sata_phy2_bcfg
+{
+ uint64_t u; /* Whole-register view of the 64-bit CSR. */
+ struct bdk_gsernx_lanex_sata_phy2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ /* NOTE: the big- and little-endian branches below must remain exact
+    mirrors of each other (same fields, reversed declaration order). */
+ uint64_t dev_align_count : 16; /**< [ 63: 48](R/W) Count in service clock cycles representing the duration of ALIGNp primitives
+ received at each speed from the far end Device during the rate negotiation
+ process.
+ Reset value is set to yield a 54.61ns duration. */
+ uint64_t reserved_43_47 : 5;
+ uint64_t cdr_lock_wait : 11; /**< [ 42: 32](R/W) Maximum wait count in service clock cycles required after detecting a received
+ signal or after completing a Receiver reset before the SATA aligner begins to
+ scan for 8B10B symbol alignment.
+ Reset value is set to 5us based on analysis of worst case SSC scenarios. */
+ uint64_t do_afeos_final : 4; /**< [ 31: 28](R/W) Set to one to allow AFEOS adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_AFEOS_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlelte_final : 4; /**< [ 27: 24](R/W) Set to one to allow CTLELTE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLELTE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlez_final : 4; /**< [ 23: 20](R/W) Set to one to allow CTLEZ adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLEZ_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctle_final : 4; /**< [ 19: 16](R/W) Set to one to allow CTLE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_dfe_final : 4; /**< [ 15: 12](R/W) Set to one to allow DFE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_DFE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_vga_final : 4; /**< [ 11: 8](R/W) Set to one to allow VGA adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_VGA_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_blwc_final : 4; /**< [ 7: 4](R/W) Set to one to allow BLWC adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_BLWC_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_prevga_gn_final : 4; /**< [ 3: 0](R/W) Set to one to allow PREVGA_GN adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_PREVGA_GN_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t do_prevga_gn_final : 4; /**< [ 3: 0](R/W) Set to one to allow PREVGA_GN adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_PREVGA_GN_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_blwc_final : 4; /**< [ 7: 4](R/W) Set to one to allow BLWC adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_BLWC_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_vga_final : 4; /**< [ 11: 8](R/W) Set to one to allow VGA adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_VGA_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_dfe_final : 4; /**< [ 15: 12](R/W) Set to one to allow DFE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_DFE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctle_final : 4; /**< [ 19: 16](R/W) Set to one to allow CTLE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlez_final : 4; /**< [ 23: 20](R/W) Set to one to allow CTLEZ adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLEZ_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlelte_final : 4; /**< [ 27: 24](R/W) Set to one to allow CTLELTE adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_CTLELTE_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t do_afeos_final : 4; /**< [ 31: 28](R/W) Set to one to allow AFEOS adaptation to keep running continuously during the final
+ phase of adaptation when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted,
+ GSERN()_LANE()_SATA_PHY_BCFG[DO_AFEOS_ADPT] is set and the SATA lane is operating
+ at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA Gen1.
+ \<1\> = SATA Gen2.
+ \<2\> = SATA Gen3.
+ \<3\> = Reserved. */
+ uint64_t cdr_lock_wait : 11; /**< [ 42: 32](R/W) Maximum wait count in service clock cycles required after detecting a received
+ signal or after completing a Receiver reset before the SATA aligner begins to
+ scan for 8B10B symbol alignment.
+ Reset value is set to 5us based on analysis of worst case SSC scenarios. */
+ uint64_t reserved_43_47 : 5;
+ uint64_t dev_align_count : 16; /**< [ 63: 48](R/W) Count in service clock cycles representing the duration of ALIGNp primitives
+ received at each speed from the far end Device during the rate negotiation
+ process.
+ Reset value is set to yield a 54.61ns duration. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_phy2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_phy2_bcfg bdk_gsernx_lanex_sata_phy2_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the RSL address of GSERN(a)_LANE(b)_SATA_PHY2_BCFG. Only valid on
+   CN9XXX with a <= 7 and b <= 4; any other combination is reported fatal. */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 7) && (b <= 4))
+    {
+        uint64_t addr = 0x87e090002bb0ll;     /* register base */
+        addr += ((a) & 0x7) * 0x1000000ll;    /* GSERN module stride */
+        addr += ((b) & 0x7) * 0x10000ll;      /* lane stride */
+        return addr;
+    }
+    __bdk_csr_fatal("GSERNX_LANEX_SATA_PHY2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Standard BDK CSR accessor metadata for GSERN(a)_LANE(b)_SATA_PHY2_BCFG:
+   C type, bus type, printable name, BAR, bus number and argument list. */
+#define typedef_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) bdk_gsernx_lanex_sata_phy2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) "GSERNX_LANEX_SATA_PHY2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_PHY2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_phy_bcfg
+ *
+ * GSER Lane SATA Control Register
+ * Control settings for SATA PHY functionality.
+ */
+union bdk_gsernx_lanex_sata_phy_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_phy_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t do_afeos_adpt : 4; /**< [ 63: 60](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlelte_adpt : 4; /**< [ 59: 56](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlez_adpt : 4; /**< [ 55: 52](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctle_adpt : 4; /**< [ 51: 48](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_dfe_adpt : 4; /**< [ 47: 44](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_vga_adpt : 4; /**< [ 43: 40](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_blwc_adpt : 4; /**< [ 39: 36](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_prevga_gn_adpt : 4; /**< [ 35: 32](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_dp_width_sel : 4; /**< [ 31: 28](R/W) Cleared to select a 20 bit and set to select a 40 bit Rx and Tx Data Path Width
+ in the PCS Lite Layer.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1 (default 40 bits).
+ \<1\> = SATA gen2 (default 20 bits).
+ \<2\> = SATA gen3 (default 20 bits).
+ \<3\> = Reserved. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t inhibit_power_change : 1; /**< [ 25: 25](R/W) Inhibit SATA power state changes in response to pX_partial, pX_slumber and
+ pX_phy_devslp inputs. */
+ uint64_t frc_unalgn_rxelecidle : 1; /**< [ 24: 24](R/W) Enables use of negated pX_sig_det to force the RX PHY into unalign state. */
+ uint64_t sata_bitstuff_tx_en : 4; /**< [ 23: 20](R/W) Set to duplicate the first 20 bits of TX data before
+ alignment & ordering for lower data rates. This could be PCS TX
+ data, PRBS data, or shallow-loopback RX data depending on mode.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_bitstuff_rx_drop_even : 4;/**< [ 19: 16](R/W) Tells the PCS lite receive datapath to drop even bits
+ in the vector of received data from the PMA when [SATA_BITSTUFF_RX_EN] is
+ set:
+ 0 = Drop bits 1, 3, 5, 7, ...
+ 1 = Drop bits 0, 2, 4, 6, ...
+
+ This bit is also used in the eye monitor to mask out the dropped
+ bits when counting mismatches.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_bitstuff_rx_en : 4; /**< [ 15: 12](R/W) Set to expect duplicates on the PMA RX data and drop bits after
+ alignment & ordering for PCS layer to consume. The drop ordering is
+ determined by [SATA_BITSTUFF_RX_DROP_EVEN]. This value must only be changed
+ while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t rx_squelch_on_idle : 1; /**< [ 11: 11](R/W) Receive data squelch on idle. When idle detection is signaled
+ to the SATA control with the negation of phy_sig_det, the parallel
+ receive data will be set to all 0's regardless of the output of the
+ CDR. */
+ uint64_t comma_thr : 7; /**< [ 10: 4](R/W) COMMA detection threshold. The receive aligner must see this many
+ COMMA characters at the same rotation before declaring symbol
+ alignment. */
+ uint64_t error_thr : 4; /**< [ 3: 0](R/W) Error threshold. The receive aligner must see this many COMMA
+ characters at a different rotation than currently in use before
+ declaring loss of symbol alignment. */
+#else /* Word 0 - Little Endian */
+ uint64_t error_thr : 4; /**< [ 3: 0](R/W) Error threshold. The receive aligner must see this many COMMA
+ characters at a different rotation than currently in use before
+ declaring loss of symbol alignment. */
+ uint64_t comma_thr : 7; /**< [ 10: 4](R/W) COMMA detection threshold. The receive aligner must see this many
+ COMMA characters at the same rotation before declaring symbol
+ alignment. */
+ uint64_t rx_squelch_on_idle : 1; /**< [ 11: 11](R/W) Receive data squelch on idle. When idle detection is signaled
+ to the SATA control with the negation of phy_sig_det, the parallel
+ receive data will be set to all 0's regardless of the output of the
+ CDR. */
+ uint64_t sata_bitstuff_rx_en : 4; /**< [ 15: 12](R/W) Set to expect duplicates on the PMA RX data and drop bits after
+ alignment & ordering for PCS layer to consume. The drop ordering is
+ determined by [SATA_BITSTUFF_RX_DROP_EVEN]. This value must only be changed
+ while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_bitstuff_rx_drop_even : 4;/**< [ 19: 16](R/W) Tells the PCS lite receive datapath to drop even bits
+ in the vector of received data from the PMA when [SATA_BITSTUFF_RX_EN] is
+ set:
+ 0 = Drop bits 1, 3, 5, 7, ...
+ 1 = Drop bits 0, 2, 4, 6, ...
+
+ This bit is also used in the eye monitor to mask out the dropped
+ bits when counting mismatches.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t sata_bitstuff_tx_en : 4; /**< [ 23: 20](R/W) Set to duplicate the first 20 bits of TX data before
+ alignment & ordering for lower data rates. This could be PCS TX
+ data, PRBS data, or shallow-loopback RX data depending on mode.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t frc_unalgn_rxelecidle : 1; /**< [ 24: 24](R/W) Enables use of negated pX_sig_det to force the RX PHY into unalign state. */
+ uint64_t inhibit_power_change : 1; /**< [ 25: 25](R/W) Inhibit SATA power state changes in response to pX_partial, pX_slumber and
+ pX_phy_devslp inputs. */
+ uint64_t reserved_26_27 : 2;
+ uint64_t sata_dp_width_sel : 4; /**< [ 31: 28](R/W) Cleared to select a 20 bit and set to select a 40 bit Rx and Tx Data Path Width
+ in the PCS Lite Layer.
+ This value must only be changed while lite layer is in reset.
+ \<0\> = SATA gen1 (default 40 bits).
+ \<1\> = SATA gen2 (default 20 bits).
+ \<2\> = SATA gen3 (default 20 bits).
+ \<3\> = Reserved. */
+ uint64_t do_prevga_gn_adpt : 4; /**< [ 35: 32](R/W) Set to one to allow the adaptation reset state machine to trigger PREVGA_GN adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_blwc_adpt : 4; /**< [ 39: 36](R/W) Set to one to allow the adaptation reset state machine to trigger BLWC adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_vga_adpt : 4; /**< [ 43: 40](R/W) Set to one to allow the adaptation reset state machine to trigger VGA adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_dfe_adpt : 4; /**< [ 47: 44](R/W) Set to one to allow the adaptation reset state machine to trigger DFE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctle_adpt : 4; /**< [ 51: 48](R/W) Set to one to allow the adaptation reset state machine to trigger CTLE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlez_adpt : 4; /**< [ 55: 52](R/W) Set to one to allow the adaptation reset state machine to trigger CTLEZ adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_ctlelte_adpt : 4; /**< [ 59: 56](R/W) Set to one to allow the adaptation reset state machine to trigger CTLELTE adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+ uint64_t do_afeos_adpt : 4; /**< [ 63: 60](R/W) Set to one to allow the adaptation reset state machine to trigger AFEOS adaptation
+ when GSERN()_LANE()_RST2_BCFG[RST_ADPT_RST_SM] is deasserted and the SATA lane is
+ operating at the corresponding rate. The individual bits are mapped as follows:
+ \<0\> = SATA gen1.
+ \<1\> = SATA gen2.
+ \<2\> = SATA gen3.
+ \<3\> = Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_phy_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_phy_bcfg bdk_gsernx_lanex_sata_phy_bcfg_t;
+
+/* Return the physical address of GSERN(a)_LANE(b)_SATA_PHY_BCFG.
+ a = GSERN block index (valid 0..7), b = lane index (valid 0..4).
+ Base 0x87e090002b30, stride 0x1000000 per GSERN block and 0x10000 per
+ lane. Indices are masked to 3 bits only after the explicit range check,
+ so the mask never truncates a valid argument. Out-of-range arguments or
+ an unsupported model fall through to __bdk_csr_fatal() (presumably
+ noreturn -- the missing return after it relies on that; confirm). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002b30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_PHY_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the BDK generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) bdk_gsernx_lanex_sata_phy_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) "GSERNX_LANEX_SATA_PHY_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_PHY_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_phy_bsts
+ *
+ * GSER Lane SATA PCS Status Register
+ * Error Status for SATA PHY functionality.
+ */
+union bdk_gsernx_lanex_sata_phy_bsts
+{
+ /* [u] is the raw 64-bit CSR value; [s] overlays it with bit-fields. */
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_phy_bsts_s
+ {
+ /* Both preprocessor arms declare identical fields at identical bit
+ positions; only the declaration order flips with host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t align_error : 1; /**< [ 0: 0](R/W1C/H) Alignment error.
+ The receive 8B10B aligner has detected an error. An error is
+ declared if GSERN()_LANE()_SATA_PHY_BCFG[ERROR_THR]
+ COMMA characters are detected at a 10 bit rotation that does not match
+ the active rotation. The COMMAs do not have to all be at the same rotation. */
+#else /* Word 0 - Little Endian */
+ uint64_t align_error : 1; /**< [ 0: 0](R/W1C/H) Alignment error.
+ The receive 8B10B aligner has detected an error. An error is
+ declared if GSERN()_LANE()_SATA_PHY_BCFG[ERROR_THR]
+ COMMA characters are detected at a 10 bit rotation that does not match
+ the active rotation. The COMMAs do not have to all be at the same rotation. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_phy_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_phy_bsts bdk_gsernx_lanex_sata_phy_bsts_t;
+
+/* Return the physical address of GSERN(a)_LANE(b)_SATA_PHY_BSTS.
+ a = GSERN block index (valid 0..7), b = lane index (valid 0..4).
+ Indices are masked to 3 bits only after the range check; failures
+ go to __bdk_csr_fatal() (presumably noreturn -- confirm). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_PHY_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002fb0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_PHY_BSTS", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the BDK generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) bdk_gsernx_lanex_sata_phy_bsts_t
+#define bustype_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) "GSERNX_LANEX_SATA_PHY_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_PHY_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq1_1_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during SATA gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq1_1_bcfg
+{
+ /* [u] is the raw 64-bit CSR value; [s] overlays it with bit-fields. */
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq1_1_bcfg_s
+ {
+ /* Both preprocessor arms declare identical fields at identical bit
+ positions; only the declaration order flips with host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t sata_g1_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t sata_g1_en_qac_e : 1; /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g1_en_qac_q : 1; /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g1_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g1_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g1_eoffs : 7; /**< [ 44: 38](R/W) E interp state offset. */
+ uint64_t sata_g1_qoffs : 7; /**< [ 37: 31](R/W) Q interp state offset. */
+ uint64_t sata_g1_inc2 : 6; /**< [ 30: 25](R/W) 2nd order loop inc. */
+ uint64_t sata_g1_inc1 : 6; /**< [ 24: 19](R/W) 1st order loop inc. */
+ uint64_t sata_g1_erc : 4; /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. */
+ uint64_t sata_g1_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t sata_g1_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t sata_g1_ctle_lte_zero_ovrd : 4;/**< [ 12: 9](R/W) CTLE LTE zero frequency override value. */
+ uint64_t reserved_0_8 : 9;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_8 : 9;
+ uint64_t sata_g1_ctle_lte_zero_ovrd : 4;/**< [ 12: 9](R/W) CTLE LTE zero frequency override value. */
+ uint64_t sata_g1_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t sata_g1_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t sata_g1_erc : 4; /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. */
+ uint64_t sata_g1_inc1 : 6; /**< [ 24: 19](R/W) 1st order loop inc. */
+ uint64_t sata_g1_inc2 : 6; /**< [ 30: 25](R/W) 2nd order loop inc. */
+ uint64_t sata_g1_qoffs : 7; /**< [ 37: 31](R/W) Q interp state offset. */
+ uint64_t sata_g1_eoffs : 7; /**< [ 44: 38](R/W) E interp state offset. */
+ uint64_t sata_g1_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g1_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g1_en_qac_q : 1; /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g1_en_qac_e : 1; /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g1_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq1_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq1_1_bcfg bdk_gsernx_lanex_sata_rxeq1_1_bcfg_t;
+
+/* Return the physical address of GSERN(a)_LANE(b)_SATA_RXEQ1_1_BCFG.
+ a = GSERN block index (valid 0..7), b = lane index (valid 0..4).
+ Indices are masked to 3 bits only after the range check; failures
+ go to __bdk_csr_fatal() (presumably noreturn -- confirm). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e00ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ1_1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the BDK generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq1_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ1_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ1_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq1_2_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during SATA gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq1_2_bcfg
+{
+ /* [u] is the raw 64-bit CSR value; [s] overlays it with bit-fields. */
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq1_2_bcfg_s
+ {
+ /* Both preprocessor arms declare identical fields at identical bit
+ positions; only the declaration order flips with host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sata_g1_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_blwc_subrate_final : 16;/**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g1_blwc_subrate_final : 16;/**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+ uint64_t sata_g1_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq1_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq1_2_bcfg bdk_gsernx_lanex_sata_rxeq1_2_bcfg_t;
+
+/* Return the physical address of GSERN(a)_LANE(b)_SATA_RXEQ1_2_BCFG.
+ a = GSERN block index (valid 0..7), b = lane index (valid 0..4).
+ Indices are masked to 3 bits only after the range check; failures
+ go to __bdk_csr_fatal() (presumably noreturn -- confirm). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ1_2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the BDK generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq1_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ1_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ1_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq1_3_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during SATA Gen1 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq1_3_bcfg
+{
+ /* [u] is the raw 64-bit CSR value; [s] overlays it with bit-fields. */
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq1_3_bcfg_s
+ {
+ /* Both preprocessor arms declare identical fields at identical bit
+ positions; only the declaration order flips with host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sata_g1_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g1_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g1_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq1_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq1_3_bcfg bdk_gsernx_lanex_sata_rxeq1_3_bcfg_t;
+
+/* Return the physical address of GSERN(a)_LANE(b)_SATA_RXEQ1_3_BCFG.
+ a = GSERN block index (valid 0..7), b = lane index (valid 0..4).
+ Indices are masked to 3 bits only after the range check; failures
+ go to __bdk_csr_fatal() (presumably noreturn -- confirm). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e20ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ1_3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the BDK generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq1_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ1_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ1_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq2_1_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during SATA gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq2_1_bcfg
+{
+ /* [u] is the raw 64-bit CSR value; [s] overlays it with bit-fields. */
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq2_1_bcfg_s
+ {
+ /* Both preprocessor arms declare identical fields at identical bit
+ positions; only the declaration order flips with host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t sata_g2_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t sata_g2_en_qac_e : 1; /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g2_en_qac_q : 1; /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g2_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g2_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g2_eoffs : 7; /**< [ 44: 38](R/W) E interp state offset. */
+ uint64_t sata_g2_qoffs : 7; /**< [ 37: 31](R/W) Q interp state offset. */
+ uint64_t sata_g2_inc2 : 6; /**< [ 30: 25](R/W) 2nd order loop inc. */
+ uint64_t sata_g2_inc1 : 6; /**< [ 24: 19](R/W) 1st order loop inc. */
+ uint64_t sata_g2_erc : 4; /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. */
+ uint64_t sata_g2_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t sata_g2_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t sata_g2_ctle_lte_zero_ovrd : 4;/**< [ 12: 9](R/W) CTLE LTE zero frequency override value. */
+ uint64_t reserved_0_8 : 9;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_8 : 9;
+ uint64_t sata_g2_ctle_lte_zero_ovrd : 4;/**< [ 12: 9](R/W) CTLE LTE zero frequency override value. */
+ uint64_t sata_g2_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t sata_g2_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t sata_g2_erc : 4; /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. */
+ uint64_t sata_g2_inc1 : 6; /**< [ 24: 19](R/W) 1st order loop inc. */
+ uint64_t sata_g2_inc2 : 6; /**< [ 30: 25](R/W) 2nd order loop inc. */
+ uint64_t sata_g2_qoffs : 7; /**< [ 37: 31](R/W) Q interp state offset. */
+ uint64_t sata_g2_eoffs : 7; /**< [ 44: 38](R/W) E interp state offset. */
+ uint64_t sata_g2_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g2_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g2_en_qac_q : 1; /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g2_en_qac_e : 1; /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g2_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq2_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq2_1_bcfg bdk_gsernx_lanex_sata_rxeq2_1_bcfg_t;
+
+/* Return the physical address of GSERN(a)_LANE(b)_SATA_RXEQ2_1_BCFG.
+ a = GSERN block index (valid 0..7), b = lane index (valid 0..4).
+ Indices are masked to 3 bits only after the range check; failures
+ go to __bdk_csr_fatal() (presumably noreturn -- confirm). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ2_1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the BDK generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq2_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ2_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ2_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq2_2_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during SATA gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq2_2_bcfg
+{
+ /* [u] is the raw 64-bit CSR value; [s] overlays it with bit-fields. */
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq2_2_bcfg_s
+ {
+ /* Both preprocessor arms declare identical fields at identical bit
+ positions; only the declaration order flips with host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sata_g2_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_blwc_subrate_final : 16;/**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g2_blwc_subrate_final : 16;/**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+ uint64_t sata_g2_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq2_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq2_2_bcfg bdk_gsernx_lanex_sata_rxeq2_2_bcfg_t;
+
+/* Return the physical address of GSERN(a)_LANE(b)_SATA_RXEQ2_2_BCFG.
+ a = GSERN block index (valid 0..7), b = lane index (valid 0..4).
+ Indices are masked to 3 bits only after the range check; failures
+ go to __bdk_csr_fatal() (presumably noreturn -- confirm). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e40ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ2_2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the BDK generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq2_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ2_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ2_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq2_3_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during SATA Gen2 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq2_3_bcfg
+{
+ /* [u] is the raw 64-bit CSR value; [s] overlays it with bit-fields. */
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq2_3_bcfg_s
+ {
+ /* Both preprocessor arms declare identical fields at identical bit
+ positions; only the declaration order flips with host endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sata_g2_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g2_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g2_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq2_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq2_3_bcfg bdk_gsernx_lanex_sata_rxeq2_3_bcfg_t;
+
+/* Return the physical address of GSERN(a)_LANE(b)_SATA_RXEQ2_3_BCFG.
+ a = GSERN block index (valid 0..7), b = lane index (valid 0..4).
+ Indices are masked to 3 bits only after the range check; failures
+ go to __bdk_csr_fatal() (presumably noreturn -- confirm). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e50ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ2_3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the BDK generic CSR access macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq2_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ2_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ2_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq3_1_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Equalizer Control Register 1
+ * Parameters controlling the custom receiver equalization during SATA gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ *
+ * NOTE(review): [TX_CTRL_SEL] in the description of an RX-equalizer register
+ * looks copied from the TX-side text; presumably the RX mux select is meant --
+ * confirm against the CN9XXX HRM.
+ */
+union bdk_gsernx_lanex_sata_rxeq3_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq3_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t sata_g3_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t sata_g3_en_qac_e : 1; /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g3_en_qac_q : 1; /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g3_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g3_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g3_eoffs : 7; /**< [ 44: 38](R/W) E interp state offset. */
+ uint64_t sata_g3_qoffs : 7; /**< [ 37: 31](R/W) Q interp state offset. */
+ uint64_t sata_g3_inc2 : 6; /**< [ 30: 25](R/W) 2nd order loop inc. */
+ uint64_t sata_g3_inc1 : 6; /**< [ 24: 19](R/W) 1st order loop inc. */
+ uint64_t sata_g3_erc : 4; /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. */
+ uint64_t sata_g3_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t sata_g3_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t sata_g3_ctle_lte_zero_ovrd : 4;/**< [ 12: 9](R/W) CTLE LTE zero frequency override value. */
+ uint64_t reserved_0_8 : 9;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_8 : 9;
+ uint64_t sata_g3_ctle_lte_zero_ovrd : 4;/**< [ 12: 9](R/W) CTLE LTE zero frequency override value. */
+ uint64_t sata_g3_ctle_lte_zero_ovrd_en : 1;/**< [ 13: 13](R/W) CTLE LTE zero frequency override enable.
+ By default, the override should be enabled; otherwise, CTLE_LTE_ZERO
+ will be set equal to CTLE_ZERO within the RX adaptation FSM. */
+ uint64_t sata_g3_rx_dcc_lowf : 1; /**< [ 14: 14](R/W) Set to put the RX duty-cycle corrector (DCC) into low frequency mode. Set to 1
+ when operating at data rates below 4 Gbaud. */
+ uint64_t sata_g3_erc : 4; /**< [ 18: 15](R/W) Interpolator edge-rate control. This control is shared between all
+ interpolators in the lane. */
+ uint64_t sata_g3_inc1 : 6; /**< [ 24: 19](R/W) 1st order loop inc. */
+ uint64_t sata_g3_inc2 : 6; /**< [ 30: 25](R/W) 2nd order loop inc. */
+ uint64_t sata_g3_qoffs : 7; /**< [ 37: 31](R/W) Q interp state offset. */
+ uint64_t sata_g3_eoffs : 7; /**< [ 44: 38](R/W) E interp state offset. */
+ uint64_t sata_g3_cdr_qac_sele : 1; /**< [ 45: 45](R/W) Enable use of the QAC corrector for the e-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g3_cdr_qac_selq : 1; /**< [ 46: 46](R/W) Enable use of the QAC corrector for the q-path when the reset state
+ machine timing allows it. */
+ uint64_t sata_g3_en_qac_q : 1; /**< [ 47: 47](R/W) Enable use of QAC digital filter in the doutq datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g3_en_qac_e : 1; /**< [ 48: 48](R/W) Enable use of QAC digital filter in the doute datapath. If the
+ enable is deasserted, the filter will output the [QAC_REFSET]
+ value. If its asserted, it will determine the current phase and use
+ [QAC_REFSET] & [QAC_CNTSET] to output a correction value. Set prior to
+ exiting reset. */
+ uint64_t sata_g3_blwc_deadband : 12; /**< [ 60: 49](R/W) BLWC adaptation deadband settings.
+ 12-bit field to match accumulator, but typically a value less than 0x0FF is used. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq3_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq3_1_bcfg bdk_gsernx_lanex_sata_rxeq3_1_bcfg_t;
+
+/* Address accessor: valid on CN9XXX for a<=7, b<=4 only; other arguments
+   trigger a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e60ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ3_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq3_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ3_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ3_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq3_2_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Equalizer Control Register 2
+ * Parameters controlling the custom receiver equalization during SATA gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq3_2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq3_2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sata_g3_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_blwc_subrate_final : 16;/**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g3_blwc_subrate_final : 16;/**< [ 15: 0](R/W) Subrate counter final value. Sets the ending value for the LMS update interval, if subrate
+ gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled.
+ Subrate counter final value. */
+ uint64_t sata_g3_blwc_subrate_init : 16;/**< [ 31: 16](R/W) Subrate counter initial value. Sets the initial value for the LMS update interval, if
+ subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_prevga_gn_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_prevga_gn_subrate_fin : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FIN if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq3_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq3_2_bcfg bdk_gsernx_lanex_sata_rxeq3_2_bcfg_t;
+
+/* Address accessor: valid on CN9XXX for a<=7, b<=4 only; other arguments
+   trigger a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e70ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ3_2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq3_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ3_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ3_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxeq3_3_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Equalizer Control Register 3
+ * Parameters controlling the custom receiver equalization during SATA Gen3 operation.
+ * These fields will drive the associated control signal when
+ * GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ * is set to 'SATA'.
+ */
+union bdk_gsernx_lanex_sata_rxeq3_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxeq3_3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t sata_g3_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_g3_subrate_init : 16; /**< [ 15: 0](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_subrate_final : 16; /**< [ 31: 16](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_afeos_subrate_init : 16;/**< [ 47: 32](R/W) Subrate counter initial value. Sets the starting value for the LMS update
+ interval, if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+ uint64_t sata_g3_afeos_subrate_final : 16;/**< [ 63: 48](R/W) Subrate counter final value. Sets the final value for the LMS update interval,
+ if subrate gearshifting is enabled.
+ Set SUBRATE_INIT = SUBRATE_FINAL if subrate gearshifting is not enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxeq3_3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxeq3_3_bcfg bdk_gsernx_lanex_sata_rxeq3_3_bcfg_t;
+
+/* Address accessor: valid on CN9XXX for a<=7, b<=4 only; other arguments
+   trigger a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002e80ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXEQ3_3_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) bdk_gsernx_lanex_sata_rxeq3_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) "GSERNX_LANEX_SATA_RXEQ3_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXEQ3_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidl1a_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN1. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_sata_rxidl1a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidl1a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidl1a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidl1a_bcfg bdk_gsernx_lanex_sata_rxidl1a_bcfg_t;
+
+/* Address accessor: valid on CN9XXX for a<=7, b<=4 only; other arguments
+   trigger a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002cc0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDL1A_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) bdk_gsernx_lanex_sata_rxidl1a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDL1A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDL1A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidl2a_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN2. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_sata_rxidl2a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidl2a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidl2a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidl2a_bcfg bdk_gsernx_lanex_sata_rxidl2a_bcfg_t;
+
+/* Address accessor: valid on CN9XXX for a<=7, b<=4 only; other arguments
+   trigger a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002ce0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDL2A_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) bdk_gsernx_lanex_sata_rxidl2a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDL2A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDL2A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidl3a_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Idle Detection Filter Control Register 2
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN3. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_sata_rxidl3a_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidl3a_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+ uint64_t reserved_61 : 1;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_54_55 : 2;
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint64_t l0 : 27; /**< [ 26: 0](R/W) Zeros count leak parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the zeros count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L0=N0 and I0=1 for a simple run-of-N0 zeros to
+ deassert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t l1 : 27; /**< [ 53: 27](R/W) Ones count leak parameter. When a zero in the raw idle signal from the custom
+ macro is encountered, the ones count is decremented by this amount, saturating
+ to a minimum count of zero. (Set L1=N1 and I1=1 for a simple run-of-N1 ones to
+ assert the filter output.) The minimum setting for this field is 0x1. */
+ uint64_t reserved_54_55 : 2;
+ uint64_t refset : 5; /**< [ 60: 56](R/W) Sets the reference voltage swing for idle detection. A voltage swing
+ at the input of the RX less than this amount is defined as idle.
+ (See GSERN()_LANE()_RX_ST_BCFG[REFSET] for bit mapping.) */
+ uint64_t reserved_61 : 1;
+ uint64_t rx_idle_lowf : 2; /**< [ 63: 62](R/W) Control for the receiver's idle detector analog filter
+ bandwidth. The two bits apply at different times.
+ \<0\> = Set to 1 for low bandwidth during normal operation.
+ \<1\> = Set to 1 for low bandwidth during idle offset calibration.
+ The default is 1 during normal operation for large filter capacitance and low
+ bandwidth, and 0 during idle offset calibration to provide faster response. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidl3a_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidl3a_bcfg bdk_gsernx_lanex_sata_rxidl3a_bcfg_t;
+
+/* Address accessor: valid on CN9XXX for a<=7, b<=4 only; other arguments
+   trigger a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002d00ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDL3A_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) bdk_gsernx_lanex_sata_rxidl3a_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDL3A_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDL3A_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidle1_bcfg
+ *
+ * GSER Lane SATA Gen1 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN1. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_sata_rxidle1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidle1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidle1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidle1_bcfg bdk_gsernx_lanex_sata_rxidle1_bcfg_t;
+
+/* Address accessor: valid on CN9XXX for a<=7, b<=4 only; other arguments
+   trigger a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002cb0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDLE1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) bdk_gsernx_lanex_sata_rxidle1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDLE1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDLE1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidle2_bcfg
+ *
+ * GSER Lane SATA Gen2 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN2. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_sata_rxidle2_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_sata_rxidle2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_63 : 1;
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t reserved_54 : 1;
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+ uint64_t n0 : 27; /**< [ 26: 0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+ required to deassert the idle filter output. */
+ uint64_t n1 : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+ required to assert the idle filter output. */
+ uint64_t reserved_54 : 1;
+ uint64_t i0 : 4; /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+ custom macro is encountered, the zeros count is incremented by this amount,
+ saturating to a maximum count of [N0]. */
+ uint64_t i1 : 4; /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+ macro is encountered, the ones count is incremented by this amount, saturating
+ to a maximum of [N1]. */
+ uint64_t reserved_63 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_sata_rxidle2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidle2_bcfg bdk_gsernx_lanex_sata_rxidle2_bcfg_t;
+
+/* Address accessor: valid on CN9XXX for a<=7, b<=4 only; other arguments
+   trigger a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090002cd0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDLE2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) bdk_gsernx_lanex_sata_rxidle2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDLE2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDLE2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_rxidle3_bcfg
+ *
+ * GSER Lane SATA Gen3 RX Idle Detection Filter Control Register
+ * Parameters controlling the analog detection and digital filtering of the receiver's
+ * idle detection logic for SATA GEN3. For the digital filtering, setting all fields to 1,
+ * i.e., N0=N1=I0=I1=L0=L1=1, results in no filtering.
+ */
+union bdk_gsernx_lanex_sata_rxidle3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_sata_rxidle3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_63           : 1;
+        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+                                                                 macro is encountered, the ones count is incremented by this amount, saturating
+                                                                 to a maximum of [N1]. */
+        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+                                                                 custom macro is encountered, the zeros count is incremented by this amount,
+                                                                 saturating to a maximum count of [N0]. */
+        uint64_t reserved_54           : 1;
+        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+                                                                 required to assert the idle filter output. */
+        uint64_t n0                    : 27; /**< [ 26:  0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+                                                                 required to deassert the idle filter output. */
+#else /* Word 0 - Little Endian */
+        uint64_t n0                    : 27; /**< [ 26:  0](R/W) Threshold for the count of zeros in the raw idle signal from the custom macro
+                                                                 required to deassert the idle filter output. */
+        uint64_t n1                    : 27; /**< [ 53: 27](R/W) Threshold for the count of ones in the raw idle signal from the custom macro
+                                                                 required to assert the idle filter output. */
+        uint64_t reserved_54           : 1;
+        uint64_t i0                    : 4;  /**< [ 58: 55](R/W) Zeros count increment parameter. When a zero in the raw idle signal from the
+                                                                 custom macro is encountered, the zeros count is incremented by this amount,
+                                                                 saturating to a maximum count of [N0]. */
+        uint64_t i1                    : 4;  /**< [ 62: 59](R/W) Ones count increment parameter. When a one in the raw idle signal from the custom
+                                                                 macro is encountered, the ones count is incremented by this amount, saturating
+                                                                 to a maximum of [N1]. */
+        uint64_t reserved_63           : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_sata_rxidle3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_rxidle3_bcfg bdk_gsernx_lanex_sata_rxidle3_bcfg_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SATA_RXIDLE3_BCFG. Only CN9XXX
+   with a <= 7 and b <= 4 is valid; other arguments trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002cf0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SATA_RXIDLE3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Lookup entries consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) bdk_gsernx_lanex_sata_rxidle3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) "GSERNX_LANEX_SATA_RXIDLE3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_RXIDLE3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_txdrv1_bcfg
+ *
+ * GSER Lane SATA TX Drive Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values and TX bias/swing for SATA GEN1.
+ */
+union bdk_gsernx_lanex_sata_txdrv1_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_sata_txdrv1_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_30_63        : 34;
+        uint64_t sata_g1_tx_bias       : 6;  /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN1.
+                                                                 Typical values would be:
+                                                                 42 = Nominal 1.0V p-p transmit amplitude.
+                                                                 52 = Nominal 1.2V p-p transmit amplitude. */
+        uint64_t reserved_21_23        : 3;
+        uint64_t sata_g1_cpost         : 5;  /**< [ 20: 16](R/W) SATA GEN1 Cpost value. Combined with the reset values of [SATA_G1_CMAIN] and
+                                                                 [SATA_G1_CPRE] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t sata_g1_cmain         : 6;  /**< [ 13:  8](R/W) SATA GEN1 Cmain value. Combined with the reset values of [SATA_G1_CPOST] and
+                                                                 [SATA_G1_CPRE] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t sata_g1_cpre          : 5;  /**< [  4:  0](R/W) SATA GEN1 Cpre value. Combined with the reset values of [SATA_G1_CPOST] and
+                                                                 [SATA_G1_CMAIN] this yields 3.5 dB TX deemphasis. */
+#else /* Word 0 - Little Endian */
+        uint64_t sata_g1_cpre          : 5;  /**< [  4:  0](R/W) SATA GEN1 Cpre value. Combined with the reset values of [SATA_G1_CPOST] and
+                                                                 [SATA_G1_CMAIN] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t sata_g1_cmain         : 6;  /**< [ 13:  8](R/W) SATA GEN1 Cmain value. Combined with the reset values of [SATA_G1_CPOST] and
+                                                                 [SATA_G1_CPRE] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t sata_g1_cpost         : 5;  /**< [ 20: 16](R/W) SATA GEN1 Cpost value. Combined with the reset values of [SATA_G1_CMAIN] and
+                                                                 [SATA_G1_CPRE] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_21_23        : 3;
+        uint64_t sata_g1_tx_bias       : 6;  /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN1.
+                                                                 Typical values would be:
+                                                                 42 = Nominal 1.0V p-p transmit amplitude.
+                                                                 52 = Nominal 1.2V p-p transmit amplitude. */
+        uint64_t reserved_30_63        : 34;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_sata_txdrv1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_txdrv1_bcfg bdk_gsernx_lanex_sata_txdrv1_bcfg_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SATA_TXDRV1_BCFG. Only CN9XXX
+   with a <= 7 and b <= 4 is valid; other arguments trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002f80ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SATA_TXDRV1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Lookup entries consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) bdk_gsernx_lanex_sata_txdrv1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) "GSERNX_LANEX_SATA_TXDRV1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_TXDRV1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_txdrv2_bcfg
+ *
+ * GSER Lane SATA TX Drive Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values and TX bias/swing for SATA GEN2.
+ */
+union bdk_gsernx_lanex_sata_txdrv2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_sata_txdrv2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_30_63        : 34;
+        uint64_t sata_g2_tx_bias       : 6;  /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN2.
+                                                                 Typical values would be:
+                                                                 42 = Nominal 1.0V p-p transmit amplitude.
+                                                                 52 = Nominal 1.2V p-p transmit amplitude. */
+        uint64_t reserved_21_23        : 3;
+        uint64_t sata_g2_cpost         : 5;  /**< [ 20: 16](R/W) SATA GEN2 Cpost value. Combined with the reset values of [SATA_G2_CMAIN] and
+                                                                 [SATA_G2_CPRE] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t sata_g2_cmain         : 6;  /**< [ 13:  8](R/W) SATA GEN2 Cmain value. Combined with the reset values of [SATA_G2_CPOST] and
+                                                                 [SATA_G2_CPRE] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t sata_g2_cpre          : 5;  /**< [  4:  0](R/W) SATA GEN2 Cpre value. Combined with the reset values of [SATA_G2_CPOST] and
+                                                                 [SATA_G2_CMAIN] this yields 3.5 dB TX deemphasis. */
+#else /* Word 0 - Little Endian */
+        uint64_t sata_g2_cpre          : 5;  /**< [  4:  0](R/W) SATA GEN2 Cpre value. Combined with the reset values of [SATA_G2_CPOST] and
+                                                                 [SATA_G2_CMAIN] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t sata_g2_cmain         : 6;  /**< [ 13:  8](R/W) SATA GEN2 Cmain value. Combined with the reset values of [SATA_G2_CPOST] and
+                                                                 [SATA_G2_CPRE] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t sata_g2_cpost         : 5;  /**< [ 20: 16](R/W) SATA GEN2 Cpost value. Combined with the reset values of [SATA_G2_CMAIN] and
+                                                                 [SATA_G2_CPRE] this yields 3.5 dB TX deemphasis. */
+        uint64_t reserved_21_23        : 3;
+        uint64_t sata_g2_tx_bias       : 6;  /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN2.
+                                                                 Typical values would be:
+                                                                 42 = Nominal 1.0V p-p transmit amplitude.
+                                                                 52 = Nominal 1.2V p-p transmit amplitude. */
+        uint64_t reserved_30_63        : 34;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_sata_txdrv2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_txdrv2_bcfg bdk_gsernx_lanex_sata_txdrv2_bcfg_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SATA_TXDRV2_BCFG. Only CN9XXX
+   with a <= 7 and b <= 4 is valid; other arguments trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002f90ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SATA_TXDRV2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Lookup entries consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) bdk_gsernx_lanex_sata_txdrv2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) "GSERNX_LANEX_SATA_TXDRV2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_TXDRV2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_sata_txdrv3_bcfg
+ *
+ * GSER Lane SATA TX Drive Control Register
+ * TX drive Cpre, Cpost and Cmain Coefficient values and TX bias/swing for SATA GEN3.
+ */
+union bdk_gsernx_lanex_sata_txdrv3_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_sata_txdrv3_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_30_63        : 34;
+        uint64_t sata_g3_tx_bias       : 6;  /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN3.
+                                                                 Typical values would be:
+                                                                 42 = Nominal 1.0V p-p transmit amplitude.
+                                                                 52 = Nominal 1.2V p-p transmit amplitude. */
+        uint64_t reserved_21_23        : 3;
+        uint64_t sata_g3_cpost         : 5;  /**< [ 20: 16](R/W) SATA GEN3 Cpost value. Combined with the reset values of [SATA_G3_CMAIN] and
+                                                                 [SATA_G3_CPRE] this yields 6 dB TX deemphasis. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t sata_g3_cmain         : 6;  /**< [ 13:  8](R/W) SATA GEN3 Cmain value. Combined with the reset values of [SATA_G3_CPOST] and
+                                                                 [SATA_G3_CPRE] this yields 6 dB TX deemphasis. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t sata_g3_cpre          : 5;  /**< [  4:  0](R/W) SATA GEN3 Cpre value. Combined with the reset values of [SATA_G3_CPOST] and
+                                                                 [SATA_G3_CMAIN] this yields 6 dB TX deemphasis. */
+#else /* Word 0 - Little Endian */
+        uint64_t sata_g3_cpre          : 5;  /**< [  4:  0](R/W) SATA GEN3 Cpre value. Combined with the reset values of [SATA_G3_CPOST] and
+                                                                 [SATA_G3_CMAIN] this yields 6 dB TX deemphasis. */
+        uint64_t reserved_5_7          : 3;
+        uint64_t sata_g3_cmain         : 6;  /**< [ 13:  8](R/W) SATA GEN3 Cmain value. Combined with the reset values of [SATA_G3_CPOST] and
+                                                                 [SATA_G3_CPRE] this yields 6 dB TX deemphasis. */
+        uint64_t reserved_14_15        : 2;
+        uint64_t sata_g3_cpost         : 5;  /**< [ 20: 16](R/W) SATA GEN3 Cpost value. Combined with the reset values of [SATA_G3_CMAIN] and
+                                                                 [SATA_G3_CPRE] this yields 6 dB TX deemphasis. */
+        uint64_t reserved_21_23        : 3;
+        uint64_t sata_g3_tx_bias       : 6;  /**< [ 29: 24](R/W) TX bias/swing selection for SATA GEN3.
+                                                                 Typical values would be:
+                                                                 42 = Nominal 1.0V p-p transmit amplitude.
+                                                                 52 = Nominal 1.2V p-p transmit amplitude. */
+        uint64_t reserved_30_63        : 34;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_sata_txdrv3_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_sata_txdrv3_bcfg bdk_gsernx_lanex_sata_txdrv3_bcfg_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SATA_TXDRV3_BCFG. Only CN9XXX
+   with a <= 7 and b <= 4 is valid; other arguments trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090002fa0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SATA_TXDRV3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Lookup entries consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) bdk_gsernx_lanex_sata_txdrv3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) "GSERNX_LANEX_SATA_TXDRV3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SATA_TXDRV3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_0_dat
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Result Register 0
+ */
+union bdk_gsernx_lanex_scope_0_dat
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_scope_0_dat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_41_63        : 23;
+        uint64_t cnt_done              : 1;  /**< [ 40: 40](RO/H) Indicates when the match counter has counted down from
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT] to 0x0.  The error vector will no longer
+                                                                 be updated once the counter is done. To clear the flag a new
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] or GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] toggle
+                                                                 needs to happen. */
+        uint64_t ref_vec               : 40; /**< [ 39:  0](RO/H) Stored doutq that will be used to compare against incoming
+                                                                 doutq. Its value is changed by toggling GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]
+                                                                 low then high, which will save the next doutq received in the PCS
+                                                                 layer as the new reference vector, or by setting
+                                                                 GSERN()_LANE()_SCOPE_CTL_2[REF_VEC_OVRRIDE] and
+                                                                 GSERN()_LANE()_SCOPE_CTL_2[REF_VEC_OVRRIDE_EN].
+                                                                 This field is only valid when GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] is asserted. */
+#else /* Word 0 - Little Endian */
+        uint64_t ref_vec               : 40; /**< [ 39:  0](RO/H) Stored doutq that will be used to compare against incoming
+                                                                 doutq. Its value is changed by toggling GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]
+                                                                 low then high, which will save the next doutq received in the PCS
+                                                                 layer as the new reference vector, or by setting
+                                                                 GSERN()_LANE()_SCOPE_CTL_2[REF_VEC_OVRRIDE] and
+                                                                 GSERN()_LANE()_SCOPE_CTL_2[REF_VEC_OVRRIDE_EN].
+                                                                 This field is only valid when GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] is asserted. */
+        uint64_t cnt_done              : 1;  /**< [ 40: 40](RO/H) Indicates when the match counter has counted down from
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT] to 0x0.  The error vector will no longer
+                                                                 be updated once the counter is done. To clear the flag a new
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] or GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] toggle
+                                                                 needs to happen. */
+        uint64_t reserved_41_63        : 23;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_0_dat_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_0_dat bdk_gsernx_lanex_scope_0_dat_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_0_DAT. Only CN9XXX with
+   a <= 7 and b <= 4 is valid; other arguments trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_0_DAT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_0_DAT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000900ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_0_DAT", 2, a, b, 0, 0);
+}
+
+/* Lookup entries consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) bdk_gsernx_lanex_scope_0_dat_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) "GSERNX_LANEX_SCOPE_0_DAT"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_0_DAT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_1_dat
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Result Register 1
+ */
+union bdk_gsernx_lanex_scope_1_dat
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_scope_1_dat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_40_63        : 24;
+        uint64_t err_vec               : 40; /**< [ 39:  0](RO/H) Error vector that maintains status of mismatches between doutq &
+                                                                 doute. It updates every time there is a match between doutq & the
+                                                                 captured GSERN()_LANE()_SCOPE_0_DAT[REF_VEC]. To clear it a toggle to
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] or GSERN()_LANE()_SCOPE_CTL[CNT_EN] is
+                                                                 needed. This field is only valid when GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] is
+                                                                 set. */
+#else /* Word 0 - Little Endian */
+        uint64_t err_vec               : 40; /**< [ 39:  0](RO/H) Error vector that maintains status of mismatches between doutq &
+                                                                 doute. It updates every time there is a match between doutq & the
+                                                                 captured GSERN()_LANE()_SCOPE_0_DAT[REF_VEC]. To clear it a toggle to
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] or GSERN()_LANE()_SCOPE_CTL[CNT_EN] is
+                                                                 needed. This field is only valid when GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] is
+                                                                 set. */
+        uint64_t reserved_40_63        : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_1_dat_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_1_dat bdk_gsernx_lanex_scope_1_dat_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_1_DAT. Only CN9XXX with
+   a <= 7 and b <= 4 is valid; other arguments trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_1_DAT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_1_DAT(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090000910ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_1_DAT", 2, a, b, 0, 0);
+}
+
+/* Lookup entries consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) bdk_gsernx_lanex_scope_1_dat_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) "GSERNX_LANEX_SCOPE_1_DAT"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_1_DAT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_ctl
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Control Register
+ * Register controls for the PCS layer scope function. Use of this function
+ * requires enabling the doute eye data path in the analog macro, i.e.,
+ * GSERN()_LANE()_RST2_BCFG[LN_RESET_USE_EYE] should be asserted when the lane
+ * reset state machines bring the lane out of reset.
+ */
+union bdk_gsernx_lanex_scope_ctl
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_scope_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_57_63        : 7;
+        uint64_t doutq_ld              : 1;  /**< [ 56: 56](R/W) Set to a doutq value for comparison against incoming
+                                                                 doutq. The incoming stream should guarantee a recurring doutq
+                                                                 pattern to capture valid error vector. This works only on a
+                                                                 positive-edge trigger which means a new value won't be stored until
+                                                                 a 0-\>1 transition happens. Assertion of GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]
+                                                                 also resets the match counter, GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] and
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]. Deassert [DOUTQ_LD] to
+                                                                 enable the match counter to count down and to enable collection of
+                                                                 new data in the error vector (also requires that
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] is clear).
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_50_55        : 6;
+        uint64_t scope_en              : 1;  /**< [ 49: 49](R/W) Set to enable collection of GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]
+                                                                 data. Deassertion stops collection of new mismatch bits, but does
+                                                                 not reset GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]. If
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_EN] is also asserted, collection will stop
+                                                                 when the GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT] is reached. If not using
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT], software can control duration of
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC] data collection through
+                                                                 [SCOPE_EN]. All scope logic is conditionally clocked with the
+                                                                 condition being GSERN()_LANE()_SCOPE_CTL[SCOPE_EN], so deassert this bit
+                                                                 when not used to save power.
+
+                                                                 For diagnostic use only. */
+        uint64_t cnt_rst_n             : 1;  /**< [ 48: 48](R/W) Set low to reset the match counter, the done indicator, and the error
+                                                                 vector. The reset value for the counter is set by
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT]. GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] and
+                                                                 the error vector, GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC], reset to all zeros. Set
+                                                                 this bit high to enable the match counter to count down and to enable collection
+                                                                 of new data in the error vector (also requires that
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] is not set high). Cycle
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] (low then high) to clear the counter and the
+                                                                 error vector, leaving GSERN()_LANE()_SCOPE_0_DAT[REF_VEC] unchanged, enabling
+                                                                 collection of a new error vector under updated receiver settings using the same
+                                                                 reference vector match pattern.
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_41_47        : 7;
+        uint64_t cnt_en                : 1;  /**< [ 40: 40](R/W) Enable use of the match counter to limit the number of doutq to
+                                                                 ref_vec matches over which the doutq to doute mismatch vector is
+                                                                 accumulated. If this bit is not asserted,
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC] accumulation is limited by
+                                                                 GSERN()_LANE()_SCOPE_CTL[SCOPE_EN] and/or GSERN()_LANE()_SCOPE_CTL[CNT_RST_N].
+
+                                                                 For diagnostic use only. */
+        uint64_t cnt_limit             : 40; /**< [ 39:  0](R/W) Limit value the match counter starts decrementing
+                                                                 from. It gets loaded every time a new doutq load happens or a
+                                                                 counter reset happens.
+
+                                                                 For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt_limit             : 40; /**< [ 39:  0](R/W) Limit value the match counter starts decrementing
+                                                                 from. It gets loaded every time a new doutq load happens or a
+                                                                 counter reset happens.
+
+                                                                 For diagnostic use only. */
+        uint64_t cnt_en                : 1;  /**< [ 40: 40](R/W) Enable use of the match counter to limit the number of doutq to
+                                                                 ref_vec matches over which the doutq to doute mismatch vector is
+                                                                 accumulated. If this bit is not asserted,
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC] accumulation is limited by
+                                                                 GSERN()_LANE()_SCOPE_CTL[SCOPE_EN] and/or GSERN()_LANE()_SCOPE_CTL[CNT_RST_N].
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_41_47        : 7;
+        uint64_t cnt_rst_n             : 1;  /**< [ 48: 48](R/W) Set low to reset the match counter, the done indicator, and the error
+                                                                 vector. The reset value for the counter is set by
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT]. GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] and
+                                                                 the error vector, GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC], reset to all zeros. Set
+                                                                 this bit high to enable the match counter to count down and to enable collection
+                                                                 of new data in the error vector (also requires that
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD] is not set high). Cycle
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] (low then high) to clear the counter and the
+                                                                 error vector, leaving GSERN()_LANE()_SCOPE_0_DAT[REF_VEC] unchanged, enabling
+                                                                 collection of a new error vector under updated receiver settings using the same
+                                                                 reference vector match pattern.
+
+                                                                 For diagnostic use only. */
+        uint64_t scope_en              : 1;  /**< [ 49: 49](R/W) Set to enable collection of GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]
+                                                                 data. Deassertion stops collection of new mismatch bits, but does
+                                                                 not reset GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]. If
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_EN] is also asserted, collection will stop
+                                                                 when the GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT] is reached. If not using
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_LIMIT], software can control duration of
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC] data collection through
+                                                                 [SCOPE_EN]. All scope logic is conditionally clocked with the
+                                                                 condition being GSERN()_LANE()_SCOPE_CTL[SCOPE_EN], so deassert this bit
+                                                                 when not used to save power.
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_50_55        : 6;
+        uint64_t doutq_ld              : 1;  /**< [ 56: 56](R/W) Set to a doutq value for comparison against incoming
+                                                                 doutq. The incoming stream should guarantee a recurring doutq
+                                                                 pattern to capture valid error vector. This works only on a
+                                                                 positive-edge trigger which means a new value won't be stored until
+                                                                 a 0-\>1 transition happens. Assertion of GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]
+                                                                 also resets the match counter, GSERN()_LANE()_SCOPE_0_DAT[CNT_DONE] and
+                                                                 GSERN()_LANE()_SCOPE_1_DAT[ERR_VEC]. Deassert [DOUTQ_LD] to
+                                                                 enable the match counter to count down and to enable collection of
+                                                                 new data in the error vector (also requires that
+                                                                 GSERN()_LANE()_SCOPE_CTL[CNT_RST_N] is clear).
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_57_63        : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_ctl_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_ctl bdk_gsernx_lanex_scope_ctl_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_CTL. Only CN9XXX with
+   a <= 7 and b <= 4 is valid; other arguments trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900008d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_CTL", 2, a, b, 0, 0);
+}
+
+/* Lookup entries consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) bdk_gsernx_lanex_scope_ctl_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) "GSERNX_LANEX_SCOPE_CTL"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_ctl_2
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Control Register 2
+ * This register contains control signals to allow loading a specific reference vector
+ * for use in the scope logic instead of capturing the reference vector from the input
+ * data stream.  For diagnostic use only.
+ */
+union bdk_gsernx_lanex_scope_ctl_2
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_scope_ctl_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_42_63        : 22;
+        uint64_t use_doute_cal         : 1;  /**< [ 41: 41](R/W) Set to select doute_cal data (receiver eye calibration path) for
+                                                                 scope comparisons with doutq (receiver normal quadrature path). If
+                                                                 clear, the default will be to use doute (receiver eye path) to
+                                                                 compare with doutq. The bit should be programmed as desired before
+                                                                 writing GSERN()_LANE()_SCOPE_CTL[SCOPE_EN] to one.
+
+                                                                 For diagnostic use only. */
+        uint64_t ref_vec_ovrride_en    : 1;  /**< [ 40: 40](R/W) Enable use of [REF_VEC_OVRRIDE] for the scope logic instead
+                                                                 of capturing the reference vector from the input data stream. This
+                                                                 control has priority over
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]. This field should be
+                                                                 deasserted when the override value, [REF_VEC_OVRRIDE], is
+                                                                 changed. [REF_VEC_OVRRIDE_EN] may be asserted in the same register
+                                                                 write that changes [REF_VEC_OVRRIDE].
+
+                                                                 For diagnostic use only. */
+        uint64_t ref_vec_ovrride       : 40; /**< [ 39:  0](R/W) Selectable reference vector to use for comparison with doutq and doute for the
+                                                                 scope logic as an alternative to capturing the reference vector from the
+                                                                 incoming data stream. When used, this pattern should be recurring in the
+                                                                 incoming data stream to capture valid error vector data, since errors will only
+                                                                 be accumulated in the error vector when doutq matches the reference
+                                                                 vector. [REF_VEC_OVRRIDE_EN] should be deasserted when [REF_VEC_OVRRIDE] is
+                                                                 changed. [REF_VEC_OVRRIDE_EN] may be written to a one in the same register write
+                                                                 that changes [REF_VEC_OVRRIDE]. Note that the bit pattern in [REF_VEC_OVRRIDE]
+                                                                 must match the format produced by the receiver's deserializer for the data path
+                                                                 width in use.
+
+                                                                 For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint64_t ref_vec_ovrride       : 40; /**< [ 39:  0](R/W) Selectable reference vector to use for comparison with doutq and doute for the
+                                                                 scope logic as an alternative to capturing the reference vector from the
+                                                                 incoming data stream. When used, this pattern should be recurring in the
+                                                                 incoming data stream to capture valid error vector data, since errors will only
+                                                                 be accumulated in the error vector when doutq matches the reference
+                                                                 vector. [REF_VEC_OVRRIDE_EN] should be deasserted when [REF_VEC_OVRRIDE] is
+                                                                 changed. [REF_VEC_OVRRIDE_EN] may be written to a one in the same register write
+                                                                 that changes [REF_VEC_OVRRIDE]. Note that the bit pattern in [REF_VEC_OVRRIDE]
+                                                                 must match the format produced by the receiver's deserializer for the data path
+                                                                 width in use.
+
+                                                                 For diagnostic use only. */
+        uint64_t ref_vec_ovrride_en    : 1;  /**< [ 40: 40](R/W) Enable use of [REF_VEC_OVRRIDE] for the scope logic instead
+                                                                 of capturing the reference vector from the input data stream. This
+                                                                 control has priority over
+                                                                 GSERN()_LANE()_SCOPE_CTL[DOUTQ_LD]. This field should be
+                                                                 deasserted when the override value, [REF_VEC_OVRRIDE], is
+                                                                 changed. [REF_VEC_OVRRIDE_EN] may be asserted in the same register
+                                                                 write that changes [REF_VEC_OVRRIDE].
+
+                                                                 For diagnostic use only. */
+        uint64_t use_doute_cal         : 1;  /**< [ 41: 41](R/W) Set to select doute_cal data (receiver eye calibration path) for
+                                                                 scope comparisons with doutq (receiver normal quadrature path). If
+                                                                 clear, the default will be to use doute (receiver eye path) to
+                                                                 compare with doutq. The bit should be programmed as desired before
+                                                                 writing GSERN()_LANE()_SCOPE_CTL[SCOPE_EN] to one.
+
+                                                                 For diagnostic use only. */
+        uint64_t reserved_42_63        : 22;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_scope_ctl_2_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_ctl_2 bdk_gsernx_lanex_scope_ctl_2_t;
+
+/* Return the RSL address of GSERN(a)_LANE(b)_SCOPE_CTL_2. Only CN9XXX with
+   a <= 7 and b <= 4 is valid; other arguments trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL_2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL_2(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900008e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_SCOPE_CTL_2", 2, a, b, 0, 0);
+}
+
+/* Lookup entries consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) bdk_gsernx_lanex_scope_ctl_2_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) "GSERNX_LANEX_SCOPE_CTL_2"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_CTL_2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_scope_ctl_3
+ *
+ * GSER Lane PCS Lite Scope Data Gathering Control Register 3
+ * The four bits in this register allow for shifting either the doutq or
+ * doute_cal data by 1 or 2 UI to allow for an offset in the framing of the
+ * deserialized data between these two data paths in the receiver. Software
+ * will need to iterate eye or scope measurement with identical settings
+ for the quadrature and eye datapaths, adjusting the shift bits in this
+ * register until no differences are accumulated. (Note that shifting both
+ * doutq and doute_cal would typically not be useful, since the resulting
+ * alignment would be the same as if neither were shifted.)
+ */
+union bdk_gsernx_lanex_scope_ctl_3
+{
+ uint64_t u; /* Entire register viewed as one raw 64-bit value. */
+ struct bdk_gsernx_lanex_scope_ctl_3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t dbl_shift_doute : 1; /**< [ 9: 9](R/W) Assert to shift the doute_cal (receiver eye calibration path) data
+ by 2 UI earlier to align with doutq for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. When asserted, the double shift control has priority
+ over the (single) shift control. Program as desired before enabling eye
+ data capture. */
+ uint64_t shift_doute : 1; /**< [ 8: 8](R/W) Assert to shift the doute_cal (receiver eye path) data by 1 UI
+ earlier to align with doutq for eye and scope comparison logic. Only
+ data captured in the eye or scope logic is impacted by this
+ setting. Program as desired before enabling eye data capture. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t dbl_shift_doutq : 1; /**< [ 1: 1](R/W) Assert to shift the doutq (receiver normal quadrature path) data by
+ 2 UI earlier to align with doute_cal for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. When asserted, the double shift control has priority
+ over the (single) shift control. Program as desired before enabling eye
+ data capture. */
+ uint64_t shift_doutq : 1; /**< [ 0: 0](R/W) Assert to shift the doutq (receiver normal quadrature path) data by
+ 1 UI earlier to align with doute_cal for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. Program as desired before enabling eye data capture. */
+#else /* Word 0 - Little Endian */
+ uint64_t shift_doutq : 1; /**< [ 0: 0](R/W) Assert to shift the doutq (receiver normal quadrature path) data by
+ 1 UI earlier to align with doute_cal for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. Program as desired before enabling eye data capture. */
+ uint64_t dbl_shift_doutq : 1; /**< [ 1: 1](R/W) Assert to shift the doutq (receiver normal quadrature path) data by
+ 2 UI earlier to align with doute_cal for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. When asserted, the double shift control has priority
+ over the (single) shift control. Program as desired before enabling eye
+ data capture. */
+ uint64_t reserved_2_7 : 6;
+ uint64_t shift_doute : 1; /**< [ 8: 8](R/W) Assert to shift the doute_cal (receiver eye path) data by 1 UI
+ earlier to align with doutq for eye and scope comparison logic. Only
+ data captured in the eye or scope logic is impacted by this
+ setting. Program as desired before enabling eye data capture. */
+ uint64_t dbl_shift_doute : 1; /**< [ 9: 9](R/W) Assert to shift the doute_cal (receiver eye calibration path) data
+ by 2 UI earlier to align with doutq for eye and scope comparison
+ logic. Only data captured in the eye or scope logic is impacted by
+ this setting. When asserted, the double shift control has priority
+ over the (single) shift control. Program as desired before enabling eye
+ data capture. */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s; /* Bitfield view; identical fields listed in both endian orders above. */
+ /* struct bdk_gsernx_lanex_scope_ctl_3_s cn; */
+};
+typedef union bdk_gsernx_lanex_scope_ctl_3 bdk_gsernx_lanex_scope_ctl_3_t;
+
+/* Return the physical (RSL bus) address of the GSERN(a)_LANE(b)_SCOPE_CTL_3
+ * CSR. Valid only on CN9XXX parts with a <= 7 (GSERN block index) and
+ * b <= 4 (lane index); any other combination is reported through
+ * __bdk_csr_fatal().
+ * NOTE(review): no value is returned after the fatal call, so
+ * __bdk_csr_fatal() is presumably declared noreturn -- confirm in bdk-csr.h. */
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL_3(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SCOPE_CTL_3(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900008f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SCOPE_CTL_3", 2, a, b, 0, 0);
+}
+
+/* Companion attribute macros describing this CSR (value type, bus type,
+ * printable name, PCI BAR, bus number, argument list) for the generic
+ * BDK CSR accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) bdk_gsernx_lanex_scope_ctl_3_t
+#define bustype_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) "GSERNX_LANEX_SCOPE_CTL_3"
+#define device_bar_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SCOPE_CTL_3(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_srcmx_bcfg
+ *
+ * GSER Lane PCS Source Mux Control Register
+ */
+union bdk_gsernx_lanex_srcmx_bcfg
+{
+ uint64_t u; /* Entire register viewed as one raw 64-bit value. */
+ struct bdk_gsernx_lanex_srcmx_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_50_63 : 14;
+ uint64_t en_hldcdrfsm_on_idle : 1; /**< [ 49: 49](R/W) Enable holding the CSR finite state machine when the receiver idle filter
+ detects idle.
+ For diagnostic use only. */
+ uint64_t en_pauseadpt_on_idle : 1; /**< [ 48: 48](R/W) Enable pausing adaptation when the receiver idle filter detects idle.
+ For diagnostic use only. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t trn_tx_cgt_on : 1; /**< [ 43: 43](R/W) Force the clock gate for the training transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t trn_rx_cgt_on : 1; /**< [ 42: 42](R/W) Force the clock gate for the training receive data path clock on.
+ For diagnostic use only. */
+ uint64_t ocx_tx_cgt_on : 1; /**< [ 41: 41](R/W) Force on the clock gate for the OCX interface.
+ For diagnostic use only. */
+ uint64_t ocx_rx_cgt_on : 1; /**< [ 40: 40](R/W) Force on the clock gate for the OCX interface.
+ For diagnostic use only. */
+ uint64_t sata_tx_cgt_on : 1; /**< [ 39: 39](R/W) Force the clock gate for the SATA transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t sata_rx_cgt_on : 1; /**< [ 38: 38](R/W) Force the clock gate for the SATA receive data path clock on.
+ For diagnostic use only. */
+ uint64_t pcie_tx_cgt_on : 1; /**< [ 37: 37](R/W) Force the clock gate for the PCIe transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t pcie_rx_cgt_on : 1; /**< [ 36: 36](R/W) Force the clock gate for the PCIe receive data path clock on.
+ For diagnostic use only. */
+ uint64_t pat_tx_cgt_on : 1; /**< [ 35: 35](R/W) Force the clock gate for the pattern transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t pat_rx_cgt_on : 1; /**< [ 34: 34](R/W) Force the clock gate for the pattern receive data path clock on.
+ For diagnostic use only. */
+ uint64_t cgx_tx_cgt_on : 1; /**< [ 33: 33](R/W) Force the clock gate for the CGX transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t cgx_rx_cgt_on : 1; /**< [ 32: 32](R/W) Force the clock gate for the CGX receive data path clock on.
+ For diagnostic use only. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t txdivclk_mux_sel_ovrride_en : 1;/**< [ 23: 23](R/W) Mux selection override enable for lane txdivclk mux; enables use of
+ [TXDIVCLK_MUX_SEL_OVRRIDE]. This must be set to 1 for all lanes in a multi-lane
+ link.
+ 0 = Use the lane's local txdivclk.
+ 1 = Use [TXDIVCLK_MUX_SEL_OVRRIDE] instead of other sources for control of the
+ lane txdivclk mux. */
+ uint64_t reserved_19_22 : 4;
+ uint64_t txdivclk_mux_sel_ovrride : 3;/**< [ 18: 16](R/W) Mux selection override control for lane txdivclk mux, when enabled by
+ [TXDIVCLK_MUX_SEL_OVRRIDE_EN], the following values apply:
+ 0x0 = Use lane internal txdivclk (e.g. for single-lane links).
+ 0x1 = Use txdivclkx2 (e.g. for 2-lane links).
+ 0x2 = Use txdivclkx4 (e.g. for 4-lane links).
+ 0x3 = Use txdivclkx8 (e.g. for 8-lane links).
+ 0x4 = Use txdivclkx16 (e.g. for 16-lane links).
+ _ else = Reserved. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t tx_ctrl_sel : 5; /**< [ 12: 8](R/W) Lite layer transmit control-settings mux control:
+ 0x0 = no source selected; defaults to idle termination unless CSR overrides are
+ enabled by setting GSERN()_LANE()_TX_DRV_BCFG[EN_TX_DRV].
+ 0x1 = PCIe.
+ 0x2 = CGX.
+ 0x4 = SATA.
+ 0x8 = OCX.
+ 0x10 = Pattern memory generator.
+ _ else = reserved. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t tx_data_sel : 5; /**< [ 4: 0](R/W) Lite layer transmit data mux control:
+ 0x0 = No source selected, e.g., for PRBS testing.
+ 0x1 = PCIe.
+ 0x2 = CGX.
+ 0x4 = SATA.
+ 0x8 = OCX.
+ 0x10 = Pattern memory generator.
+ _ else = reserved. (This is a 1-hot vector.) */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_data_sel : 5; /**< [ 4: 0](R/W) Lite layer transmit data mux control:
+ 0x0 = No source selected, e.g., for PRBS testing.
+ 0x1 = PCIe.
+ 0x2 = CGX.
+ 0x4 = SATA.
+ 0x8 = OCX.
+ 0x10 = Pattern memory generator.
+ _ else = reserved. (This is a 1-hot vector.) */
+ uint64_t reserved_5_7 : 3;
+ uint64_t tx_ctrl_sel : 5; /**< [ 12: 8](R/W) Lite layer transmit control-settings mux control:
+ 0x0 = no source selected; defaults to idle termination unless CSR overrides are
+ enabled by setting GSERN()_LANE()_TX_DRV_BCFG[EN_TX_DRV].
+ 0x1 = PCIe.
+ 0x2 = CGX.
+ 0x4 = SATA.
+ 0x8 = OCX.
+ 0x10 = Pattern memory generator.
+ _ else = reserved. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t txdivclk_mux_sel_ovrride : 3;/**< [ 18: 16](R/W) Mux selection override control for lane txdivclk mux, when enabled by
+ [TXDIVCLK_MUX_SEL_OVRRIDE_EN], the following values apply:
+ 0x0 = Use lane internal txdivclk (e.g. for single-lane links).
+ 0x1 = Use txdivclkx2 (e.g. for 2-lane links).
+ 0x2 = Use txdivclkx4 (e.g. for 4-lane links).
+ 0x3 = Use txdivclkx8 (e.g. for 8-lane links).
+ 0x4 = Use txdivclkx16 (e.g. for 16-lane links).
+ _ else = Reserved. */
+ uint64_t reserved_19_22 : 4;
+ uint64_t txdivclk_mux_sel_ovrride_en : 1;/**< [ 23: 23](R/W) Mux selection override enable for lane txdivclk mux; enables use of
+ [TXDIVCLK_MUX_SEL_OVRRIDE]. This must be set to 1 for all lanes in a multi-lane
+ link.
+ 0 = Use the lane's local txdivclk.
+ 1 = Use [TXDIVCLK_MUX_SEL_OVRRIDE] instead of other sources for control of the
+ lane txdivclk mux. */
+ uint64_t reserved_24_31 : 8;
+ uint64_t cgx_rx_cgt_on : 1; /**< [ 32: 32](R/W) Force the clock gate for the CGX receive data path clock on.
+ For diagnostic use only. */
+ uint64_t cgx_tx_cgt_on : 1; /**< [ 33: 33](R/W) Force the clock gate for the CGX transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t pat_rx_cgt_on : 1; /**< [ 34: 34](R/W) Force the clock gate for the pattern receive data path clock on.
+ For diagnostic use only. */
+ uint64_t pat_tx_cgt_on : 1; /**< [ 35: 35](R/W) Force the clock gate for the pattern transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t pcie_rx_cgt_on : 1; /**< [ 36: 36](R/W) Force the clock gate for the PCIe receive data path clock on.
+ For diagnostic use only. */
+ uint64_t pcie_tx_cgt_on : 1; /**< [ 37: 37](R/W) Force the clock gate for the PCIe transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t sata_rx_cgt_on : 1; /**< [ 38: 38](R/W) Force the clock gate for the SATA receive data path clock on.
+ For diagnostic use only. */
+ uint64_t sata_tx_cgt_on : 1; /**< [ 39: 39](R/W) Force the clock gate for the SATA transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t ocx_rx_cgt_on : 1; /**< [ 40: 40](R/W) Force on the clock gate for the OCX interface.
+ For diagnostic use only. */
+ uint64_t ocx_tx_cgt_on : 1; /**< [ 41: 41](R/W) Force on the clock gate for the OCX interface.
+ For diagnostic use only. */
+ uint64_t trn_rx_cgt_on : 1; /**< [ 42: 42](R/W) Force the clock gate for the training receive data path clock on.
+ For diagnostic use only. */
+ uint64_t trn_tx_cgt_on : 1; /**< [ 43: 43](R/W) Force the clock gate for the training transmit data path clock on.
+ For diagnostic use only. */
+ uint64_t reserved_44_47 : 4;
+ uint64_t en_pauseadpt_on_idle : 1; /**< [ 48: 48](R/W) Enable pausing adaptation when the receiver idle filter detects idle.
+ For diagnostic use only. */
+ uint64_t en_hldcdrfsm_on_idle : 1; /**< [ 49: 49](R/W) Enable holding the CSR finite state machine when the receiver idle filter
+ detects idle.
+ For diagnostic use only. */
+ uint64_t reserved_50_63 : 14;
+#endif /* Word 0 - End */
+ } s; /* Bitfield view; identical fields listed in both endian orders above. */
+ /* struct bdk_gsernx_lanex_srcmx_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_srcmx_bcfg bdk_gsernx_lanex_srcmx_bcfg_t;
+
+/* Return the physical (RSL bus) address of the GSERN(a)_LANE(b)_SRCMX_BCFG
+ * CSR. Valid only on CN9XXX parts with a <= 7 (GSERN block index) and
+ * b <= 4 (lane index); any other combination is reported through
+ * __bdk_csr_fatal().
+ * NOTE(review): no value is returned after the fatal call, so
+ * __bdk_csr_fatal() is presumably declared noreturn -- confirm in bdk-csr.h. */
+static inline uint64_t BDK_GSERNX_LANEX_SRCMX_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_SRCMX_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000a10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_SRCMX_BCFG", 2, a, b, 0, 0);
+}
+
+/* Companion attribute macros describing this CSR (value type, bus type,
+ * printable name, PCI BAR, bus number, argument list) for the generic
+ * BDK CSR accessor macros. */
+#define typedef_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) bdk_gsernx_lanex_srcmx_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) "GSERNX_LANEX_SRCMX_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_SRCMX_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_0_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 0
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_0_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_0_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t txt_post : 5; /**< [ 63: 59](RO/H) After TX BASE-R link training, this is the resultant POST Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_main : 6; /**< [ 58: 53](RO/H) After TX BASE-R link training, this is the resultant MAIN Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_pre : 5; /**< [ 52: 48](RO/H) After TX BASE-R link training, this is the resultant PRE Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_swm : 1; /**< [ 47: 47](R/W) Set when TX BASE-R link training is to be performed under software control. For diagnostic
+ use only. */
+ uint64_t txt_cur_post : 5; /**< [ 46: 42](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C+1) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_main : 6; /**< [ 41: 36](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C0) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_pre : 5; /**< [ 35: 31](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C-1) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_prg : 1; /**< [ 30: 30](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, setting [TXT_CUR_PRG] writes the TX
+ equalizer
+ coefficients in GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRE],
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_MAIN],
+ and GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_POST] registers into the GSER TX equalizer.
+ For diagnostic use only. */
+ uint64_t rxt_adtmout_fast : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ For simulation use only. When set accelerates the link training time-out timer during
+ BASE-R link training. When set shortens the link training time-out timer to time-out
+ after 164 microseconds to facilitate shorter BASE-R training simulations runs.
+ For diagnostic use only. */
+ uint64_t rxt_adtmout_sel : 2; /**< [ 28: 27](R/W) Selects the timeout value for the BASE-R link training time-out timer.
+ This time-out timer value is only valid if
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE]
+ is cleared to 0 and BASE-R hardware training is enabled.
+
+ When GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] is cleared to 0 the link training
+ time-out timer value is set by [RXT_ADTMOUT_SEL] to the values shown.
+ 0x0 = 83.89 milliseconds.
+ 0x1 = 167.77 milliseconds.
+ 0x2 = 335.54 milliseconds.
+ 0x3 = 419.43 milliseconds.
+
+ When GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] is set to 1 the link training
+ time-out timer value is set by [RXT_ADTMOUT_SEL] to the values shown.
+ 0x0 = 81.92 microseconds.
+ 0x1 = 163.84 microseconds.
+ 0x2 = 327.68 microseconds.
+ 0x3 = 655.36 microseconds. */
+ uint64_t rxt_adtmout_disable : 1; /**< [ 26: 26](R/W) For BASE-R links one of the terminating conditions for link training receiver adaptation
+ is a programmable time-out timer. When the receiver adaptation time-out timer
+ expires the link training process is concluded and the link is considered good and
+ the receiver ready status report bit is set in the local device.
+ Note that when BASE-R link training is performed under software control,
+ (GSERN()_LANE()_TRAIN_0_BCFG[RXT_SWM] is set), the receiver adaptation time-out timer is
+ disabled and not used.
+
+ Set this bit to a one to disable the link training receiver adaptation time-out
+ timer during BASE-R link training under hardware control. For diagnostic use only. */
+ uint64_t rxt_eer : 1; /**< [ 25: 25](WO/H) When RX BASE-R link training is being performed under software control,
+ (GSERN()_LANE()_TRAIN_0_BCFG[RXT_SWM] is set), writing this bit initiates an equalization
+ request to the SerDes receiver equalizer. Reading this bit always returns a zero. */
+ uint64_t rxt_esv : 1; /**< [ 24: 24](RO/H) When performing an equalization request ([RXT_EER]), this bit, when set, indicates that
+ the
+ equalization status (RXT_ESM) is valid. When issuing a [RXT_EER] request, it is expected
+ that [RXT_ESV] will get written to zero so that a valid RXT_ESM can be determined. */
+ uint64_t rxt_tx_post_dir : 2; /**< [ 23: 22](RO/H) RX recommended TXPOST direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_tx_main_dir : 2; /**< [ 21: 20](RO/H) RX recommended TXMAIN direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_tx_pre_dir : 2; /**< [ 19: 18](RO/H) RX recommended TXPRE direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t trn_short : 1; /**< [ 17: 17](R/W) Train short. Executes an abbreviated BASE-R training session.
+ For diagnostic use only. */
+ uint64_t ld_receiver_rdy : 1; /**< [ 16: 16](RO/H) At the completion of BASE-R training the local device sets receiver ready. This bit
+ reflects the state of the local device receiver ready status. For Debug use only.
+ This bit is only valid during BASE-R link training and at the conclusion of link
+ training. */
+ uint64_t frz_cdr_en : 1; /**< [ 15: 15](R/W) Freeze CDR enable. In CGX mode when set to a one enables the CGX MAC to
+ Freeze the receiver CDR during BASE-R autonegotiation (AN) and KR training
+ to prevent the RX CDR from locking onto the differential manchester encoded
+ AN and KR training frames. CGX asserts the rx cdr coast signal to the GSER
+ block to freeze the RX CDR. Clearing [FRZ_CDR_EN] prevents CGX from freezing
+ the RX CDR.
+ For diagnostic use only. */
+ uint64_t trn_ovrd_en : 1; /**< [ 14: 14](R/W) BASE-R Training Override Enable. Setting [TRN_OVRD_EN] will enable BASE-R training logic
+ for both CGX and OCX. This is a CSR override for the BASE-R training enable signals from
+ the CGX and OCX blocks. Either GSERN()_LANE()_TRAIN_0_BCFG[CFG_CGX] or
+ GSERN()_LANE()_TRAIN_0_BCFG[CFG_OCX] must be set to 1 before [TRN_OVRD_EN] is set to 1. Also
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL] must be programmed to select CGX or OCX mode
+ before [TRN_OVRD_EN] is set to 1.
+ For diagnostic use only. */
+ uint64_t reserved_8_13 : 6;
+ uint64_t cfg_ocx : 1; /**< [ 7: 7](R/W) Configure BASE-R training logic for OCX mode. When [CFG_OCX] is set the
+ Coefficient Update (CU) and Status Report (SR) messaging is reconfigured for
+ the OCX controller. The CU and SR messages must be sent and received in the
+ txdivclk and rxdivclk domains for the OCX controller.
+
+ When [CFG_OCX] is set, the GSERN()_LANE()_TRAIN_0_BCFG[CFG_CGX] field must be
+ cleared to zero. */
+ uint64_t rxt_adjmain : 1; /**< [ 6: 6](R/W) For all link training, this bit determines how the main tap is adjusted at the start
+ of link training. When set the main tap of link partner transmitter peak-to-peak level
+ is adjusted to optimize the AGC of the local device receiver. This is intended to prevent
+ receiver saturation on short or low loss links.
+
+ To perform main tap optimization of the link partner transmitter set this bit prior to
+ enabling link training. */
+ uint64_t rxt_initialize : 1; /**< [ 5: 5](R/W) For all link training, this bit determines how to configure the initialize bit in the
+ coefficient update message that is sent to the far end transmitter of RX training. When
+ set, a request is made that the coefficients be set to its INITIALIZE state. To perform an
+ initialize prior to link training, set this bit prior to performing link training. Note
+ that it is illegal to set both the preset and initialize bits at the same time. */
+ uint64_t rxt_preset : 1; /**< [ 4: 4](R/W) For all link training, this bit determines how to configure the preset bit in the
+ coefficient update message that is sent to the far end transmitter. When set, a one time
+ request is made that the coefficients be set to a state where equalization is turned off.
+
+ To perform a preset, set this bit prior to link training. Link training needs to be
+ disabled to complete the request and get the rxtrain state machine back to idle. Note that
+ it is illegal to set both the preset and initialize bits at the same time. For diagnostic
+ use only. */
+ uint64_t rxt_swm : 1; /**< [ 3: 3](R/W) Set when RX BASE-R link training is to be performed under software control.
+
+ See GSERN()_LANE()_TRAIN_0_BCFG[RXT_EER]. */
+ uint64_t cgx_quad : 1; /**< [ 2: 2](R/W) When set, indicates the QLM is in CGX quad aggregation mode. [CGX_QUAD] must only be
+ set when GSERN()_LANE()_SRCMX_BCFG[TX_DATA_SEL]=CGX is set and
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]=CGX is set and [CGX_DUAL] is clear.
+
+ When [CGX_QUAD] is set, GSER bundles all four lanes for one CGX controller.
+ [CGX_QUAD] must only be set for the XAUI/DXAUI, XLAUI, and CAUI protocols. */
+ uint64_t cgx_dual : 1; /**< [ 1: 1](R/W) When set, indicates the QLM is in CGX dual aggregation mode. [CGX_DUAL] must only be
+ set when GSERN()_LANE()_SRCMX_BCFG[TX_DATA_SEL]=CGX is set and
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]=CGX is set and [CGX_QUAD] is clear.
+
+ When [CGX_DUAL] is set, GSER bundles lanes 0 and 1 for one CGX controller and bundles
+ lanes 2 and 3 for another CGX controller. [CGX_DUAL] must only be set for the RXAUI
+ protocol. */
+ uint64_t cfg_cgx : 1; /**< [ 0: 0](R/W) When set, indicates the BASE-R training logic is in CGX mode. Enables SCLK to the CGX TX
+ and RX
+ data path and the BASE-R TX/RX Training blocks. [CFG_CGX] must be set to one when
+ either GSERN()_LANE()_TRAIN_0_BCFG[CGX_DUAL] or GSERN()_LANE()_TRAIN_0_BCFG[CGX_QUAD]
+ is set.
+
+ When [CFG_CGX] is set, the GSERN()_LANE()_TRAIN_0_BCFG[CFG_OCX] field must be
+ cleared to zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t cfg_cgx : 1; /**< [ 0: 0](R/W) When set, indicates the BASE-R training logic is in CGX mode. Enables SCLK to the CGX TX
+ and RX
+ data path and the BASE-R TX/RX Training blocks. [CFG_CGX] must be set to one when
+ either GSERN()_LANE()_TRAIN_0_BCFG[CGX_DUAL] or GSERN()_LANE()_TRAIN_0_BCFG[CGX_QUAD]
+ is set.
+
+ When [CFG_CGX] is set, the GSERN()_LANE()_TRAIN_0_BCFG[CFG_OCX] field must be
+ cleared to zero. */
+ uint64_t cgx_dual : 1; /**< [ 1: 1](R/W) When set, indicates the QLM is in CGX dual aggregation mode. [CGX_DUAL] must only be
+ set when GSERN()_LANE()_SRCMX_BCFG[TX_DATA_SEL]=CGX is set and
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]=CGX is set and [CGX_QUAD] is clear.
+
+ When [CGX_DUAL] is set, GSER bundles lanes 0 and 1 for one CGX controller and bundles
+ lanes 2 and 3 for another CGX controller. [CGX_DUAL] must only be set for the RXAUI
+ protocol. */
+ uint64_t cgx_quad : 1; /**< [ 2: 2](R/W) When set, indicates the QLM is in CGX quad aggregation mode. [CGX_QUAD] must only be
+ set when GSERN()_LANE()_SRCMX_BCFG[TX_DATA_SEL]=CGX is set and
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]=CGX is set and [CGX_DUAL] is clear.
+
+ When [CGX_QUAD] is set, GSER bundles all four lanes for one CGX controller.
+ [CGX_QUAD] must only be set for the XAUI/DXAUI, XLAUI, and CAUI protocols. */
+ uint64_t rxt_swm : 1; /**< [ 3: 3](R/W) Set when RX BASE-R link training is to be performed under software control.
+
+ See GSERN()_LANE()_TRAIN_0_BCFG[RXT_EER]. */
+ uint64_t rxt_preset : 1; /**< [ 4: 4](R/W) For all link training, this bit determines how to configure the preset bit in the
+ coefficient update message that is sent to the far end transmitter. When set, a one time
+ request is made that the coefficients be set to a state where equalization is turned off.
+
+ To perform a preset, set this bit prior to link training. Link training needs to be
+ disabled to complete the request and get the rxtrain state machine back to idle. Note that
+ it is illegal to set both the preset and initialize bits at the same time. For diagnostic
+ use only. */
+ uint64_t rxt_initialize : 1; /**< [ 5: 5](R/W) For all link training, this bit determines how to configure the initialize bit in the
+ coefficient update message that is sent to the far end transmitter of RX training. When
+ set, a request is made that the coefficients be set to its INITIALIZE state. To perform an
+ initialize prior to link training, set this bit prior to performing link training. Note
+ that it is illegal to set both the preset and initialize bits at the same time. */
+ uint64_t rxt_adjmain : 1; /**< [ 6: 6](R/W) For all link training, this bit determines how the main tap is adjusted at the start
+ of link training. When set the main tap of link partner transmitter peak-to-peak level
+ is adjusted to optimize the AGC of the local device receiver. This is intended to prevent
+ receiver saturation on short or low loss links.
+
+ To perform main tap optimization of the link partner transmitter set this bit prior to
+ enabling link training. */
+ uint64_t cfg_ocx : 1; /**< [ 7: 7](R/W) Configure BASE-R training logic for OCX mode. When [CFG_OCX] is set the
+ Coefficient Update (CU) and Status Report (SR) messaging is reconfigured for
+ the OCX controller. The CU and SR messages must be sent and received in the
+ txdivclk and rxdivclk domains for the OCX controller.
+
+ When [CFG_OCX] is set, the GSERN()_LANE()_TRAIN_0_BCFG[CFG_CGX] field must be
+ cleared to zero. */
+ uint64_t reserved_8_13 : 6;
+ uint64_t trn_ovrd_en : 1; /**< [ 14: 14](R/W) BASE-R Training Override Enable. Setting [TRN_OVRD_EN] will enable BASE-R training logic
+ for both CGX and OCX. This is a CSR override for the BASE-R training enable signals from
+ the CGX and OCX blocks. Either GSERN()_LANE()_TRAIN_0_BCFG[CFG_CGX] or
+ GSERN()_LANE()_TRAIN_0_BCFG[CFG_OCX] must be set to 1 before [TRN_OVRD_EN] is set to 1. Also
+ GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL] must be programmed to select CGX or OCX mode
+ before [TRN_OVRD_EN] is set to 1.
+ For diagnostic use only. */
+ uint64_t frz_cdr_en : 1; /**< [ 15: 15](R/W) Freeze CDR enable. In CGX mode when set to a one enables the CGX MAC to
+ Freeze the receiver CDR during BASE-R autonegotiation (AN) and KR training
+ to prevent the RX CDR from locking onto the differential manchester encoded
+ AN and KR training frames. CGX asserts the rx cdr coast signal to the GSER
+ block to freeze the RX CDR. Clearing [FRZ_CDR_EN] prevents CGX from freezing
+ the RX CDR.
+ For diagnostic use only. */
+ uint64_t ld_receiver_rdy : 1; /**< [ 16: 16](RO/H) At the completion of BASE-R training the local device sets receiver ready. This bit
+ reflects the state of the local device receiver ready status. For Debug use only.
+ This bit is only valid during BASE-R link training and at the conclusion of link
+ training. */
+ uint64_t trn_short : 1; /**< [ 17: 17](R/W) Train short. Executes an abbreviated BASE-R training session.
+ For diagnostic use only. */
+ uint64_t rxt_tx_pre_dir : 2; /**< [ 19: 18](RO/H) RX recommended TXPRE direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_tx_main_dir : 2; /**< [ 21: 20](RO/H) RX recommended TXMAIN direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_tx_post_dir : 2; /**< [ 23: 22](RO/H) RX recommended TXPOST direction change.
+
+ Recommended direction change outputs from the PHY for the link partner transmitter
+ coefficients.
+ 0x0 = Hold.
+ 0x1 = Increment.
+ 0x2 = Decrement.
+ 0x3 = Hold. */
+ uint64_t rxt_esv : 1; /**< [ 24: 24](RO/H) When performing an equalization request ([RXT_EER]), this bit, when set, indicates that
+ the
+ equalization status (RXT_ESM) is valid. When issuing a [RXT_EER] request, it is expected
+ that [RXT_ESV] will get written to zero so that a valid RXT_ESM can be determined. */
+ uint64_t rxt_eer : 1; /**< [ 25: 25](WO/H) When RX BASE-R link training is being performed under software control,
+ (GSERN()_LANE()_TRAIN_0_BCFG[RXT_SWM] is set), writing this bit initiates an equalization
+ request to the SerDes receiver equalizer. Reading this bit always returns a zero. */
+ uint64_t rxt_adtmout_disable : 1; /**< [ 26: 26](R/W) For BASE-R links one of the terminating conditions for link training receiver adaptation
+ is a programmable time-out timer. When the receiver adaptation time-out timer
+ expires the link training process is concluded and the link is considered good and
+ the receiver ready status report bit is set in the local device.
+ Note that when BASE-R link training is performed under software control,
+ (GSERN()_LANE()_TRAIN_0_BCFG[RXT_SWM] is set), the receiver adaptation time-out timer is
+ disabled and not used.
+
+ Set this bit to a one to disable the link training receiver adaptation time-out
+ timer during BASE-R link training under hardware control. For diagnostic use only. */
+ uint64_t rxt_adtmout_sel : 2; /**< [ 28: 27](R/W) Selects the timeout value for the BASE-R link training time-out timer.
+ This time-out timer value is only valid if
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE]
+ is cleared to 0 and BASE-R hardware training is enabled.
+
+ When GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] is cleared to 0 the link training
+ time-out timer value is set by [RXT_ADTMOUT_SEL] to the values shown.
+ 0x0 = 83.89 milliseconds.
+ 0x1 = 167.77 milliseconds.
+ 0x2 = 335.54 milliseconds.
+ 0x3 = 419.43 milliseconds.
+
+ When GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] is set to 1 the link training
+ time-out timer value is set by [RXT_ADTMOUT_SEL] to the values shown.
+ 0x0 = 81.92 microseconds.
+ 0x1 = 163.84 microseconds.
+ 0x2 = 327.68 microseconds.
+ 0x3 = 655.36 microseconds. */
+ uint64_t rxt_adtmout_fast : 1; /**< [ 29: 29](R/W) Reserved.
+ Internal:
+ For simulation use only. When set accelerates the link training time-out timer during
+ BASE-R link training. When set shortens the link training time-out timer to time-out
+ after 164 microseconds to facilitate shorter BASE-R training simulation runs.
+ For diagnostic use only. */
+ uint64_t txt_cur_prg : 1; /**< [ 30: 30](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, setting [TXT_CUR_PRG] writes the TX
+ equalizer
+ coefficients in GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRE],
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_MAIN],
+ and GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_POST] registers into the GSER TX equalizer.
+ For diagnostic use only. */
+ uint64_t txt_cur_pre : 5; /**< [ 35: 31](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C-1) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_main : 6; /**< [ 41: 36](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C0) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_cur_post : 5; /**< [ 46: 42](R/W) When TX BASE-R link training is being performed under software control,
+ e.g. GSERN()_LANE()_TRAIN_0_BCFG[TXT_SWM] is set, this is the (C+1) coefficient
+ update to be written to the SerDes TX Equalizer.
+ The coefficients are written to the TX equalizer when
+ GSERN()_LANE()_TRAIN_0_BCFG[TXT_CUR_PRG] is set to a one.
+ For diagnostic use only. */
+ uint64_t txt_swm : 1; /**< [ 47: 47](R/W) Set when TX BASE-R link training is to be performed under software control. For diagnostic
+ use only. */
+ uint64_t txt_pre : 5; /**< [ 52: 48](RO/H) After TX BASE-R link training, this is the resultant POST Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_main : 6; /**< [ 58: 53](RO/H) After TX BASE-R link training, this is the resultant MAIN Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+ uint64_t txt_post : 5; /**< [ 63: 59](RO/H) After TX BASE-R link training, this is the resultant POST Tap value that was
+ written to the PHY. This field has no meaning if TX BASE-R link training was
+ not performed.
+ For diagnostic use only. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_0_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_0_bcfg bdk_gsernx_lanex_train_0_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_0_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_0_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900031b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_0_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) bdk_gsernx_lanex_train_0_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) "GSERNX_LANEX_TRAIN_0_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_0_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_10_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 10
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_10_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_10_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t l_c1_e_adj_sgn : 1; /**< [ 58: 58](R/W) Sets the lower C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the lower C1_E sampler below the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the lower C1_E sampler above the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_4_BCFG[C1_E_ADJ_STEP] during KR training.
+ For diagnostic use only. */
+ uint64_t u_c1_e_adj_sgn : 1; /**< [ 57: 57](R/W) Sets the upper C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the upper C1_E sampler above the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the upper C1_E sampler below the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_10_BCFG[U_C1_E_ADJ_STEP] for BASE-R training.
+ For diagnostic use only. */
+ uint64_t u_c1_e_adj_step : 5; /**< [ 56: 52](R/W) Sets the C1 E sampler voltage level during eye monitor sampling when
+ GSERN()_LANE()_TRAIN_10_BCFG[FOM_TYPE] is set to one for BASE-R training.
+ Typically [U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15mv above the C1 Q sampler voltage level when
+ computing the FOM using the two step process, e.g. [FOM_TYPE] set to one,
+ with the error slicer level positioned above and below the data slicer
+ level. The error slicer level and position relative to the data slicer
+ is controlled by [U_C1_E_ADJ_STEP] and
+ GSERN()_LANE()_TRAIN_10_BCFG[U_C1_E_ADJ_SGN] for BASE-R training.
+ Steps are in units of 5.08 mV per step.
+ For diagnostic use only. */
+ uint64_t l_c1_e_adj_step : 5; /**< [ 51: 47](R/W) Sets the C1 E sampler voltage level during eye monitor sampling when
+ GSERN()_LANE()_TRAIN_10_BCFG[FOM_TYPE] is set to one for BASE-R training.
+ Typically [U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15mv below the C1 Q sampler voltage level when
+ computing the FOM using the two step process, e.g. [FOM_TYPE] set to one,
+ with the error slicer level positioned above and below the data slicer
+ level. The error slicer level and position relative to the data slicer
+ is controlled by [U_C1_E_ADJ_STEP] and
+ GSERN()_LANE()_TRAIN_10_BCFG[L_C1_E_ADJ_SGN] for BASE-R training.
+ Steps are in units of 5.08 mV per step.
+ For diagnostic use only. */
+ uint64_t fom_type : 1; /**< [ 46: 46](R/W) BASE-R and PCIE training selects the Figure of Merit (FOM) measurement type. For
+ diagnostic use only.
+ 0 = The raw FOM is measured by setting the eye monitor
+ error slicer below the data slicer nominal level and counting the errors
+ for each of the transition ones, non transition ones, transition zeros, and
+ non transition zeros then summing the four error counts, convert to ones
+ complement, then normalize to a 12-bit unsigned integer.
+ 1 = The raw FOM calculation follows the steps above however the
+ eye monitor error measurements is a two step process with the error slicer
+ first set both below the nominal data slicer level and then on the second
+ measurement pass set above the data slicer nominal level.
+
+ Internal:
+ The first FOM method can detect a saturated receiver and stop training
+ if the eye is sufficiently open.
+ The second FOM method returns a lower value for overequalized eyes and
+ is useful for driving the training to a more optimal equalization
+ setting on longer links. */
+ uint64_t trn_fom_thrs_en : 1; /**< [ 45: 45](R/W) BASE-R training when set to 1 enables the FOM threshold value in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] for training convergence
+ detection. When the measured FOM exceeds the value in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 0x1, training
+ will terminate depending on the settings of the training termination
+ condition values set in
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ When BASE-R training converges due to the FOM threshold being met or
+ exceeded GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS] will be set to 1
+ if GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 1.
+ For diagnostic use only. */
+ uint64_t exit_fom_thrs_val : 12; /**< [ 44: 33](R/W) BASE-R training sets the FOM threshold value used for training convergence
+ detection. When the measured FOM exceeds the value in [EXIT_FOM_THRS_VAL]
+ and GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 0x1, training
+ will terminate depending on the settings of the training termination
+ condition values set in
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ Refer to the description for GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN]
+ and GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].
+ For diagnostic use only. */
+ uint64_t ttrk_array_clr : 1; /**< [ 32: 32](R/W) KR training Local Device Tx Equalizer tracking array clear signal. Used to
+ clear the tracking array after KR training has completed.
+ For diagnostic use only. */
+ uint64_t ttrk_array_rd : 1; /**< [ 31: 31](R/W) KR training Local Device Tx Equalizer tracking array index Read signal. Used to
+ readback tap values from the tracking array after KR training has completed.
+ For diagnostic use only. */
+ uint64_t ttrk_array_addr : 7; /**< [ 30: 24](R/W) KR training Local Device Tx Equalizer tracking array index. Used to
+ readback tap values from the tracking array after KR training has completed.
+ For diagnostic use only.
+
+ Internal:
+ During KR training the local device transmitter tap values (C0,C+1,C-1)
+ are stored in the tap tracking array. The array holds up to 128 locations.
+ After KR training completes the array can be read back to determine the
+ training progression of the transmitter taps. This is helpful in debugging
+ KR training convergence problems of the local device transmitter. */
+ uint64_t ttrk_moves : 8; /**< [ 23: 16](RO/H) KR training Local Device Tx Equalizer number of tap adjustments during KR training.
+ For diagnostic use only. */
+ uint64_t ttrk_pre : 5; /**< [ 15: 11](RO/H) KR training Local Device Tx Equalizer Pre (C-1) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_main : 6; /**< [ 10: 5](RO/H) KR training Local Device Tx Equalizer Main (C0) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_post : 5; /**< [ 4: 0](RO/H) KR training Local Device Tx Equalizer Post (C+1) value from the tap tracking array.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t ttrk_post : 5; /**< [ 4: 0](RO/H) KR training Local Device Tx Equalizer Post (C+1) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_main : 6; /**< [ 10: 5](RO/H) KR training Local Device Tx Equalizer Main (C0) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_pre : 5; /**< [ 15: 11](RO/H) KR training Local Device Tx Equalizer Pre (C-1) value from the tap tracking array.
+ For diagnostic use only. */
+ uint64_t ttrk_moves : 8; /**< [ 23: 16](RO/H) KR training Local Device Tx Equalizer number of tap adjustments during KR training.
+ For diagnostic use only. */
+ uint64_t ttrk_array_addr : 7; /**< [ 30: 24](R/W) KR training Local Device Tx Equalizer tracking array index. Used to
+ readback tap values from the tracking array after KR training has completed.
+ For diagnostic use only.
+
+ Internal:
+ During KR training the local device transmitter tap values (C0,C+1,C-1)
+ are stored in the tap tracking array. The array holds up to 128 locations.
+ After KR training completes the array can be read back to determine the
+ training progression of the transmitter taps. This is helpful in debugging
+ KR training convergence problems of the local device transmitter. */
+ uint64_t ttrk_array_rd : 1; /**< [ 31: 31](R/W) KR training Local Device Tx Equalizer tracking array index Read signal. Used to
+ readback tap values from the tracking array after KR training has completed.
+ For diagnostic use only. */
+ uint64_t ttrk_array_clr : 1; /**< [ 32: 32](R/W) KR training Local Device Tx Equalizer tracking array clear signal. Used to
+ clear the tracking array after KR training has completed.
+ For diagnostic use only. */
+ uint64_t exit_fom_thrs_val : 12; /**< [ 44: 33](R/W) BASE-R training sets the FOM threshold value used for training convergence
+ detection. When the measured FOM exceeds the value in [EXIT_FOM_THRS_VAL]
+ and GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 0x1, training
+ will terminate depending on the settings of the training termination
+ condition values set in
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ Refer to the description for GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN]
+ and GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].
+ For diagnostic use only. */
+ uint64_t trn_fom_thrs_en : 1; /**< [ 45: 45](R/W) BASE-R training when set to 1 enables the FOM threshold value in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] for training convergence
+ detection. When the measured FOM exceeds the value in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 0x1, training
+ will terminate depending on the settings of the training termination
+ condition values set in
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ When BASE-R training converges due to the FOM threshold being met or
+ exceeded GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS] will be set to 1
+ if GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 1.
+ For diagnostic use only. */
+ uint64_t fom_type : 1; /**< [ 46: 46](R/W) BASE-R and PCIE training selects the Figure of Merit (FOM) measurement type. For
+ diagnostic use only.
+ 0 = The raw FOM is measured by setting the eye monitor
+ error slicer below the data slicer nominal level and counting the errors
+ for each of the transition ones, non transition ones, transition zeros, and
+ non transition zeros then summing the four error counts, convert to ones
+ complement, then normalize to a 12-bit unsigned integer.
+ 1 = The raw FOM calculation follows the steps above however the
+ eye monitor error measurements is a two step process with the error slicer
+ first set both below the nominal data slicer level and then on the second
+ measurement pass set above the data slicer nominal level.
+
+ Internal:
+ The first FOM method can detect a saturated receiver and stop training
+ if the eye is sufficiently open.
+ The second FOM method returns a lower value for overequalized eyes and
+ is useful for driving the training to a more optimal equalization
+ setting on longer links. */
+ uint64_t l_c1_e_adj_step : 5; /**< [ 51: 47](R/W) Sets the C1 E sampler voltage level during eye monitor sampling when
+ GSERN()_LANE()_TRAIN_10_BCFG[FOM_TYPE] is set to one for BASE-R training.
+ Typically [U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15mv below the C1 Q sampler voltage level when
+ computing the FOM using the two step process, e.g. [FOM_TYPE] set to one,
+ with the error slicer level positioned above and below the data slicer
+ level. The error slicer level and position relative to the data slicer
+ is controlled by [U_C1_E_ADJ_STEP] and
+ GSERN()_LANE()_TRAIN_10_BCFG[L_C1_E_ADJ_SGN] for BASE-R training.
+ Steps are in units of 5.08 mV per step.
+ For diagnostic use only. */
+ uint64_t u_c1_e_adj_step : 5; /**< [ 56: 52](R/W) Sets the C1 E sampler voltage level during eye monitor sampling when
+ GSERN()_LANE()_TRAIN_10_BCFG[FOM_TYPE] is set to one for BASE-R training.
+ Typically [U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15mv above the C1 Q sampler voltage level when
+ computing the FOM using the two step process, e.g. [FOM_TYPE] set to one,
+ with the error slicer level positioned above and below the data slicer
+ level. The error slicer level and position relative to the data slicer
+ is controlled by [U_C1_E_ADJ_STEP] and
+ GSERN()_LANE()_TRAIN_10_BCFG[U_C1_E_ADJ_SGN] for BASE-R training.
+ Steps are in units of 5.08 mV per step.
+ For diagnostic use only. */
+ uint64_t u_c1_e_adj_sgn : 1; /**< [ 57: 57](R/W) Sets the upper C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the upper C1_E sampler above the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the upper C1_E sampler below the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_10_BCFG[U_C1_E_ADJ_STEP] for BASE-R training.
+ For diagnostic use only. */
+ uint64_t l_c1_e_adj_sgn : 1; /**< [ 58: 58](R/W) Sets the lower C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the lower C1_E sampler below the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the lower C1_E sampler above the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_4_BCFG[C1_E_ADJ_STEP] during KR training.
+ For diagnostic use only. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_10_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_10_bcfg bdk_gsernx_lanex_train_10_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_10_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_10_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003250ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_10_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) bdk_gsernx_lanex_train_10_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) "GSERNX_LANEX_TRAIN_10_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_10_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_1_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 1
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_1_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rxt_fom : 12; /**< [ 63: 52](RO/H) Figure of merit. A 12-bit output from the PHY indicating the quality of the
+ received data eye. A higher value indicates better link equalization, with 0x0
+ indicating worst equalization setting and 4095 indicating the best equalization
+ setting. */
+ uint64_t train_tx_rule : 8; /**< [ 51: 44](R/W) BASE-R training TX taps coefficient rule. Sets the upper limit of the permissible
+ range of the combined TX equalizer c(0), c(+1), and c(-1) taps so that the TX equalizer
+ operates within range specified in the 10GBASE-KR standard.
+ The TX coefficient rule requires (pre + post + main) \<= [TRAIN_TX_RULE].
+
+ The allowable range for [TRAIN_TX_RULE] is (24 decimal \<= [TRAIN_TX_RULE] \<= 48
+ decimal).
+ For 10GBASE-KR it is recommended to program [TRAIN_TX_RULE] to 0x30 (48 decimal).
+
+ c(-1) pre TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[PRE_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[PRE_MIN_LIMIT].
+
+ c(0) main TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MIN_LIMIT].
+
+ c(+1) post TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[POST_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[POST_MIN_LIMIT]. */
+ uint64_t trn_rx_nxt_st : 6; /**< [ 43: 38](RO/H) BASE-R training single step next state for the receive training state machine.
+ In single step mode this field holds the value of the next state of the receive
+ training state machine when the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP] bit is
+ set to a one.
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN] must be set to a one to enable single
+ step mode and the GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] must be set to a one
+ to force the receive training state machine to the STOP state.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST].
+
+ For diagnostic use only. */
+ uint64_t trn_ovrd_st : 6; /**< [ 37: 32](R/W) BASE-R training single step override state for the receive training
+ state machine. In single step mode allows for forcing the receive training
+ state machine to a specific state when exiting the STOP state.
+ Refer to the description for GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD].
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_ss_ovrd : 1; /**< [ 31: 31](R/W) BASE-R training single step state override control for the receive training
+ state machine.
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 and the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, indicated by the stop flag GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP]
+ set to one, the next state of the receive state machine, prior to entering the STOP
+ state is indicated by the value in the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST]
+ field. The next state of the receive state machine can be overridden, that is forced
+ to another state other than the next state by setting
+ the desired next state in the GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field and then
+ clearing the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP] to zero. The receive state
+ machine will exit the STOP state and proceed to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t reserved_30 : 1;
+ uint64_t trn_rx_ss_sp : 1; /**< [ 29: 29](RO/H) BASE-R training single step stop flag for the receiver training state machine.
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, the [TRN_RX_SS_SP] flag will be set. Subsequently, writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to zero will cause the receive state machine
+ to exit the STOP state and jump to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST] field.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_ss_st : 1; /**< [ 28: 28](WO/H) BASE-R training single-step start single-step stop.
+ Refer to the description for GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN].
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+
+ For diagnostic use only. */
+ uint64_t trn_ss_en : 1; /**< [ 27: 27](R/W) BASE-R training single step mode enable. When set to a 1 enables single stepping
+ the BASE-R link training receive state machines.
+
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, the [TRN_RX_SS_SP] flag will be set. Subsequently, writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 0 then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 1 will cause the receive state machine
+ to exit the STOP state and jump to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST] field. Alternatively, the receive
+ state machine can be forced to a different state by writing the state value
+ to the GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field then set the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD] to 1 and then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 0 then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 1 to force the receive state machine to the
+ override state and then return to the STOP state.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t rx_train_fsm : 6; /**< [ 26: 21](RO/H) Value of the BASE-R hardware receiver link training state machine state during
+ link training single step mode. The values in this field are only valid when
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN] is set.
+ For diagnostic use only. */
+ uint64_t tx_train_fsm : 5; /**< [ 20: 16](RO/H) Value of the BASE-R hardware transmitter link training state machine state.
+ For diagnostic use only. */
+ uint64_t txt_post_init : 5; /**< [ 15: 11](R/W) During TX BASE-R link training, the TX posttap value that is used
+ when the initialize coefficients update is received. It is also the TX posttap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_main_init : 6; /**< [ 10: 5](R/W) During TX BASE-R link training, the TX swing-tap value that is used
+ when the initialize coefficients update is received. It is also the TX swing-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_pre_init : 5; /**< [ 4: 0](R/W) During TX BASE-R link training, the TX pretap value that is used
+ when the initialize coefficients update is received. It is also the TX pretap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t txt_pre_init : 5; /**< [ 4: 0](R/W) During TX BASE-R link training, the TX pretap value that is used
+ when the initialize coefficients update is received. It is also the TX pretap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_main_init : 6; /**< [ 10: 5](R/W) During TX BASE-R link training, the TX swing-tap value that is used
+ when the initialize coefficients update is received. It is also the TX swing-tap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t txt_post_init : 5; /**< [ 15: 11](R/W) During TX BASE-R link training, the TX posttap value that is used
+ when the initialize coefficients update is received. It is also the TX posttap
+ value used when the BASE-R link training begins.
+ For diagnostic use only. */
+ uint64_t tx_train_fsm : 5; /**< [ 20: 16](RO/H) Value of the BASE-R hardware transmitter link training state machine state.
+ For diagnostic use only. */
+ uint64_t rx_train_fsm : 6; /**< [ 26: 21](RO/H) Value of the BASE-R hardware receiver link training state machine state during
+ link training single step mode. The values in this field are only valid when
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN] is set.
+ For diagnostic use only. */
+ uint64_t trn_ss_en : 1; /**< [ 27: 27](R/W) BASE-R training single step mode enable. When set to a 1 enables single stepping
+ the BASE-R link training receive state machines.
+
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, the [TRN_RX_SS_SP] flag will be set. Subsequently, writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 0 then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 1 will cause the receive state machine
+ to exit the STOP state and jump to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST] field. Alternatively, the receive
+ state machine can be forced to a different state by writing the state value
+ to the GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field then set the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD] to 1 and then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 0 then writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to 1 to force the receive state machine to the
+ override state and then return to the STOP state.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_ss_st : 1; /**< [ 28: 28](WO/H) BASE-R training single-step start single-step stop.
+ Refer to the description for GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN].
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+
+ For diagnostic use only. */
+ uint64_t trn_rx_ss_sp : 1; /**< [ 29: 29](RO/H) BASE-R training single step stop flag for the receiver training state machine.
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, the [TRN_RX_SS_SP] flag will be set. Subsequently, writing
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to zero will cause the receive state machine
+ to exit the STOP state and jump to the state indicated in the
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST] field.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t reserved_30 : 1;
+ uint64_t trn_ss_ovrd : 1; /**< [ 31: 31](R/W) BASE-R training single step state override control for the receive training
+ state machine.
+ When single step mode is enabled by setting GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN]
+ to 1 and the receive state machine is forced to the STOP state by setting
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] to a 1. When the receive state machine enters
+ the STOP state, indicated by the stop flag GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP]
+ set to one, the next state of the receive state machine, prior to entering the STOP
+ state is indicated by the value in the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_NXT_ST]
+ field. The next state of the receive state machine can be overridden, that is forced
+ to another state other than the next state by setting
+ the desired next state in the GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field and then
+ clearing the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP] to zero. The receive state
+ machine will exit the STOP state and proceed to state indicated in [TRN_OVRD_ST]
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST] field.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_OVRD_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_ovrd_st : 6; /**< [ 37: 32](R/W) BASE-R training single step override state for the receive training
+ state machine. In single step mode allows for forcing the receive training
+ state machine to a specific state when exiting the STOP state.
+ Refer to the description for GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD].
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_OVRD],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP].
+
+ For diagnostic use only. */
+ uint64_t trn_rx_nxt_st : 6; /**< [ 43: 38](RO/H) BASE-R training single step next state for the receive training state machine.
+ In single step mode this field holds the value of the next state of the receive
+ training state machine when the GSERN()_LANE()_TRAIN_1_BCFG[TRN_RX_SS_SP] bit is
+ set to a one.
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN] must be set to a one to enable single
+ step mode and the GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST] must be set to a one
+ to force the receive training state machine to the STOP state.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_EN],
+ GSERN()_LANE()_TRAIN_1_BCFG[TRN_SS_ST].
+
+ For diagnostic use only. */
+ uint64_t train_tx_rule : 8; /**< [ 51: 44](R/W) BASE-R training TX taps coefficient rule. Sets the upper limit of the permissible
+ range of the combined TX equalizer c(0), c(+1), and c(-1) taps so that the TX equalizer
+ operates within range specified in the 10GBASE-KR standard.
+ The TX coefficient rule requires (pre + post + main) \<= [TRAIN_TX_RULE].
+
+ The allowable range for [TRAIN_TX_RULE] is (24 decimal \<= [TRAIN_TX_RULE] \<= 48
+ decimal).
+ For 10GBASE-KR it is recommended to program [TRAIN_TX_RULE] to 0x30 (48 decimal).
+
+ c(-1) pre TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[PRE_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[PRE_MIN_LIMIT].
+
+ c(0) main TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MIN_LIMIT].
+
+ c(+1) post TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[POST_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[POST_MIN_LIMIT]. */
+ uint64_t rxt_fom : 12; /**< [ 63: 52](RO/H) Figure of merit. An 11-bit output from the PHY indicating the quality of the
+ received data eye. A higher value indicates better link equalization, with 0x0
+ indicating worst equalization setting and 4095 indicating the best equalization
+ setting. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_1_bcfg bdk_gsernx_lanex_train_1_bcfg_t;
+
+/* Compute the RSL physical address of GSERN(a)_LANE(b)_TRAIN_1_BCFG.
+   Valid only on CN9XXX parts for module index a <= 7 and lane index b <= 4;
+   any other (model, a, b) combination reports a fatal CSR error through
+   __bdk_csr_fatal() (no value is returned on that path). */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_1_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900031c0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_TRAIN_1_BCFG", 2, a, b, 0, 0);
+}
+
+/* Helper macros consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) bdk_gsernx_lanex_train_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) "GSERNX_LANEX_TRAIN_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_2_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 2
+ * This register controls settings for lane training.
+ *
+ * Note: the fields appear twice below, once per byte order; the two
+ * copies must be kept in sync when this (generated) file is edited.
+ */
+union bdk_gsernx_lanex_train_2_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_train_2_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t trn_sat_mv_lmt        : 4; /**< [ 63: 60](R/W) BASE-R training saturated move limit threshold.
+                                                                 See GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT_EN].
+                                                                 For diagnostic use only. */
+        uint64_t trn_sat_mv_lmt_en     : 1; /**< [ 59: 59](R/W) BASE-R training saturated move limit threshold enable. During BASE-R training
+                                                                 if a consecutive number of saturated tap moves specified by
+                                                                 GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT] is met or exceeded training will conclude.
+                                                                 This is to prevent cases where the FOM can no longer be improved and the
+                                                                 link partner TX taps are at their minimum or maximum limits and the algorithm
+                                                                 is attempting to repeatedly move the Tx taps beyond their min/max limits.
+                                                                 If the threshold limit is met or exceeded and [TRN_SAT_MV_LMT_EN] is set to 1
+                                                                 training will terminate and the GSERN()_LANE()_TRAIN_3_BCFG[EXIT_SAT_MV_LMT]
+                                                                 flag will set.
+                                                                 For diagnostic use only. */
+        uint64_t trn_cfg_use_eye_en    : 1; /**< [ 58: 58](R/W) BASE-R and PCIe training when [TRN_CFG_USE_EYE_EN] is set the training state machine
+                                                                 will control the eye monitor block while training is active, then power down the
+                                                                 eye monitor at the conclusion of link training.
+                                                                 For diagnostic use only. */
+        uint64_t trn_rrrpt_en          : 1; /**< [ 57: 57](R/W) BASE-R training when [TRN_RRRPT_EN] is set the training state machine
+                                                                 will repeatedly send Receiver Ready messages to the CGX/OCX MAC every
+                                                                 128 services clocks when training completes. For diagnostic use only. */
+        uint64_t trn_preset_en         : 1; /**< [ 56: 56](R/W) BASE-R training when [TRN_PRESET_EN] is set to one preset the link
+                                                                 partner TX equalizer when training starts. When [TRN_PRESET_EN]
+                                                                 is cleared to zero the link partner TX equalizer will start in the
+                                                                 INITIALIZE state. For BASE-R training it is recommended to
+                                                                 start link training with [TRN_PRESET_EN] set to one. */
+        uint64_t trn_main_en           : 2; /**< [ 55: 54](R/W) BASE-R training decrements the link partner (LP) TX equalizer main (C0) tap
+                                                                 at the start of link training after the PRESET coefficient update has been
+                                                                 issued to the link partner. Used in conjunction with [TRN_MAIN_VAL].
+
+                                                                 0x0 = Disabled, do not decrement LP main C0 tap following PRESET.
+                                                                 0x1 = Decrement LP main C0 tap following PRESET until vga_gain\<3:0\>
+                                                                 is less than or equal to the value in [TRN_MAIN_VAL].
+                                                                 0x2 = Decrement LP main C0 tap following PRESET by the number of
+                                                                 steps in the [TRN_MAIN_VAL].
+                                                                 0x3 = Increment LP main C0 tap at the start of training (PRESET disabled)
+                                                                 by the number of steps in [TRN_MAIN_VAL]. */
+        uint64_t trn_main_val          : 6; /**< [ 53: 48](R/W) BASE-R training decrements the link partner (LP) TX equalizer main (C0) tap
+                                                                 at the start of link training after the PRESET coefficient update has been
+                                                                 issued to the link partner. Used in conjunction with [TRN_MAIN_EN].
+                                                                 See [TRN_MAIN_EN]. */
+        uint64_t max_tap_moves         : 8; /**< [ 47: 40](R/W) BASE-R training sets the maximum number of link partner TX Equalizer Tap moves
+                                                                 allowed. Exceeding the [MAX_TAP_MOVES] forces training to terminate and local
+                                                                 device ready signaled if TRAIN_DONE_MASK[MAX_MOVES] is set.
+
+                                                                 Internal:
+                                                                 FIXME no such register TRAIN_DONE_MASK[MAX_MOVES], then remove above exempt attribute. */
+        uint64_t min_tap_moves         : 8; /**< [ 39: 32](R/W) BASE-R training sets the minimum number of link partner TX Equalizer Tap moves
+                                                                 before training completion (local device ready) is permitted. */
+        uint64_t main_max_limit        : 6; /**< [ 31: 26](R/W) BASE-R training sets the maximum limit of the local device transmitter main (C0) tap
+                                                                 value during KR training. Successive coefficient update message tap increments
+                                                                 will increase the main tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of maximum for the main (C0) tap value.
+                                                                 The allowable range for the main (C0) tap is 0x18 to 0x30. */
+        uint64_t post_max_limit        : 5; /**< [ 25: 21](R/W) BASE-R training sets the maximum limit of the local device transmitter post (C+1) tap
+                                                                 value during KR training. Successive coefficient update message tap increments
+                                                                 will increase the post tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of maximum for the post (C+1) tap value.
+                                                                 The allowable range for the post (C+1) tap is 0 to 0xC. */
+        uint64_t pre_max_limit         : 5; /**< [ 20: 16](R/W) BASE-R training sets the maximum limit of the local device transmitter pre (C-1) tap
+                                                                 value during KR training. Successive coefficient update message tap increments
+                                                                 will increase the pre tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of maximum for the pre (C-1) tap value.
+                                                                 The allowable range for the pre (C-1) tap is 0 to 0x10. */
+        uint64_t main_min_limit        : 6; /**< [ 15: 10](R/W) BASE-R training sets the minimum limit of the local device transmitter main (C0) tap
+                                                                 value during KR training. Successive coefficient update message tap decrements
+                                                                 will decrease the main tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of minimum for the main (C0) tap value.
+                                                                 The allowable range for the main (C0) tap is 0x18 to 0x30. */
+        uint64_t post_min_limit        : 5; /**< [  9:  5](R/W) BASE-R training sets the minimum limit of the local device transmitter post (C+1) tap
+                                                                 value during KR training. Successive coefficient update message tap decrements
+                                                                 will decrease the post tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of minimum for the post (C+1) tap value.
+                                                                 The allowable range for the post (C+1) tap is 0 to 0x10. */
+        uint64_t pre_min_limit         : 5; /**< [  4:  0](R/W) BASE-R training sets the minimum limit of the local device transmitter pre (C-1) tap
+                                                                 value during KR training. Successive coefficient update message tap decrements
+                                                                 will decrease the pre tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of minimum for the pre (C-1) tap value.
+                                                                 The allowable range for the pre (C-1) tap is 0 to 0x10. */
+#else /* Word 0 - Little Endian */
+        uint64_t pre_min_limit         : 5; /**< [  4:  0](R/W) BASE-R training sets the minimum limit of the local device transmitter pre (C-1) tap
+                                                                 value during KR training. Successive coefficient update message tap decrements
+                                                                 will decrease the pre tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of minimum for the pre (C-1) tap value.
+                                                                 The allowable range for the pre (C-1) tap is 0 to 0x10. */
+        uint64_t post_min_limit        : 5; /**< [  9:  5](R/W) BASE-R training sets the minimum limit of the local device transmitter post (C+1) tap
+                                                                 value during KR training. Successive coefficient update message tap decrements
+                                                                 will decrease the post tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of minimum for the post (C+1) tap value.
+                                                                 The allowable range for the post (C+1) tap is 0 to 0x10. */
+        uint64_t main_min_limit        : 6; /**< [ 15: 10](R/W) BASE-R training sets the minimum limit of the local device transmitter main (C0) tap
+                                                                 value during KR training. Successive coefficient update message tap decrements
+                                                                 will decrease the main tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of minimum for the main (C0) tap value.
+                                                                 The allowable range for the main (C0) tap is 0x18 to 0x30. */
+        uint64_t pre_max_limit         : 5; /**< [ 20: 16](R/W) BASE-R training sets the maximum limit of the local device transmitter pre (C-1) tap
+                                                                 value during KR training. Successive coefficient update message tap increments
+                                                                 will increase the pre tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of maximum for the pre (C-1) tap value.
+                                                                 The allowable range for the pre (C-1) tap is 0 to 0x10. */
+        uint64_t post_max_limit        : 5; /**< [ 25: 21](R/W) BASE-R training sets the maximum limit of the local device transmitter post (C+1) tap
+                                                                 value during KR training. Successive coefficient update message tap increments
+                                                                 will increase the post tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of maximum for the post (C+1) tap value.
+                                                                 The allowable range for the post (C+1) tap is 0 to 0xC. */
+        uint64_t main_max_limit        : 6; /**< [ 31: 26](R/W) BASE-R training sets the maximum limit of the local device transmitter main (C0) tap
+                                                                 value during KR training. Successive coefficient update message tap increments
+                                                                 will increase the main tap value until it reaches the value in this field. At
+                                                                 that point the local device TX training state machine will return a status report
+                                                                 of maximum for the main (C0) tap value.
+                                                                 The allowable range for the main (C0) tap is 0x18 to 0x30. */
+        uint64_t min_tap_moves         : 8; /**< [ 39: 32](R/W) BASE-R training sets the minimum number of link partner TX Equalizer Tap moves
+                                                                 before training completion (local device ready) is permitted. */
+        uint64_t max_tap_moves         : 8; /**< [ 47: 40](R/W) BASE-R training sets the maximum number of link partner TX Equalizer Tap moves
+                                                                 allowed. Exceeding the [MAX_TAP_MOVES] forces training to terminate and local
+                                                                 device ready signaled if TRAIN_DONE_MASK[MAX_MOVES] is set.
+
+                                                                 Internal:
+                                                                 FIXME no such register TRAIN_DONE_MASK[MAX_MOVES], then remove above exempt attribute. */
+        uint64_t trn_main_val          : 6; /**< [ 53: 48](R/W) BASE-R training decrements the link partner (LP) TX equalizer main (C0) tap
+                                                                 at the start of link training after the PRESET coefficient update has been
+                                                                 issued to the link partner. Used in conjunction with [TRN_MAIN_EN].
+                                                                 See [TRN_MAIN_EN]. */
+        uint64_t trn_main_en           : 2; /**< [ 55: 54](R/W) BASE-R training decrements the link partner (LP) TX equalizer main (C0) tap
+                                                                 at the start of link training after the PRESET coefficient update has been
+                                                                 issued to the link partner. Used in conjunction with [TRN_MAIN_VAL].
+
+                                                                 0x0 = Disabled, do not decrement LP main C0 tap following PRESET.
+                                                                 0x1 = Decrement LP main C0 tap following PRESET until vga_gain\<3:0\>
+                                                                 is less than or equal to the value in [TRN_MAIN_VAL].
+                                                                 0x2 = Decrement LP main C0 tap following PRESET by the number of
+                                                                 steps in the [TRN_MAIN_VAL].
+                                                                 0x3 = Increment LP main C0 tap at the start of training (PRESET disabled)
+                                                                 by the number of steps in [TRN_MAIN_VAL]. */
+        uint64_t trn_preset_en         : 1; /**< [ 56: 56](R/W) BASE-R training when [TRN_PRESET_EN] is set to one preset the link
+                                                                 partner TX equalizer when training starts. When [TRN_PRESET_EN]
+                                                                 is cleared to zero the link partner TX equalizer will start in the
+                                                                 INITIALIZE state. For BASE-R training it is recommended to
+                                                                 start link training with [TRN_PRESET_EN] set to one. */
+        uint64_t trn_rrrpt_en          : 1; /**< [ 57: 57](R/W) BASE-R training when [TRN_RRRPT_EN] is set the training state machine
+                                                                 will repeatedly send Receiver Ready messages to the CGX/OCX MAC every
+                                                                 128 services clocks when training completes. For diagnostic use only. */
+        uint64_t trn_cfg_use_eye_en    : 1; /**< [ 58: 58](R/W) BASE-R and PCIe training when [TRN_CFG_USE_EYE_EN] is set the training state machine
+                                                                 will control the eye monitor block while training is active, then power down the
+                                                                 eye monitor at the conclusion of link training.
+                                                                 For diagnostic use only. */
+        uint64_t trn_sat_mv_lmt_en     : 1; /**< [ 59: 59](R/W) BASE-R training saturated move limit threshold enable. During BASE-R training
+                                                                 if a consecutive number of saturated tap moves specified by
+                                                                 GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT] is met or exceeded training will conclude.
+                                                                 This is to prevent cases where the FOM can no longer be improved and the
+                                                                 link partner TX taps are at their minimum or maximum limits and the algorithm
+                                                                 is attempting to repeatedly move the Tx taps beyond their min/max limits.
+                                                                 If the threshold limit is met or exceeded and [TRN_SAT_MV_LMT_EN] is set to 1
+                                                                 training will terminate and the GSERN()_LANE()_TRAIN_3_BCFG[EXIT_SAT_MV_LMT]
+                                                                 flag will set.
+                                                                 For diagnostic use only. */
+        uint64_t trn_sat_mv_lmt        : 4; /**< [ 63: 60](R/W) BASE-R training saturated move limit threshold.
+                                                                 See GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT_EN].
+                                                                 For diagnostic use only. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_train_2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_2_bcfg bdk_gsernx_lanex_train_2_bcfg_t;
+
+/* Compute the RSL physical address of GSERN(a)_LANE(b)_TRAIN_2_BCFG.
+   Valid only on CN9XXX parts for module index a <= 7 and lane index b <= 4;
+   any other (model, a, b) combination reports a fatal CSR error through
+   __bdk_csr_fatal() (no value is returned on that path). */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_2_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900031d0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_TRAIN_2_BCFG", 2, a, b, 0, 0);
+}
+
+/* Helper macros consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) bdk_gsernx_lanex_train_2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) "GSERNX_LANEX_TRAIN_2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_3_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 3
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_3_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_3_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t exit_fom_thrs : 1; /**< [ 63: 63](RO/H) BASE-R training exit condition flag indicates the measured FOM
+ was equal to or exceeded the FOM threshold value specified in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] when
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 1.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR] to
+ specify the BASE-R training convergence exit criteria. */
+ uint64_t train_tx_min_rule : 8; /**< [ 62: 55](R/W) BASE-R training TX taps minimum coefficient rule. Sets the lower limit of the permissible
+ range of the TX equalizer c(0), c(+1), and c(-1) taps so that the TX equalizer
+ operates within range specified in the IEEE 802.3-2012 Clause 72 10GBASE-KR
+ and IEEE 802.3bj-2014 Clause 93 100GBASE-KR4.
+ The TX coefficient minimum rule requires (main - pre - post) \>= [TRAIN_TX_MIN_RULE].
+
+ The allowable range for [TRAIN_TX_MIN_RULE] is
+ (6 decimal \<= [TRAIN_TX_MIN_RULE] \<= 16 decimal).
+ For 10GBASE-KR, 40GBASE-KR4 and 100GBASE-KR4 it is recommended to
+ program [TRAIN_TX_MIN_RULE] to 0x6.
+
+ c(-1) pre TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[PRE_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[PRE_MIN_LIMIT].
+
+ c(0) main TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MIN_LIMIT].
+
+ c(+1) post TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[POST_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[POST_MIN_LIMIT]. */
+ uint64_t exit_sat_mv_lmt : 1; /**< [ 54: 54](RO/H) BASE-R training saturated move limit threshold exit flag.
+ See GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT_EN].
+ For diagnostic use only. */
+ uint64_t exit_prbs11_ok : 1; /**< [ 53: 53](RO/H) Training exit condition PRBS11 in the BASE-R KR training frame is
+ error free.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_PRBS11_OK] will be set if the training was terminated
+ because the PRBS11 pattern extracted by the CGX or OCX MAC
+ indicates that the PRBS11 pattern is error free.
+
+ This bit will report the PRBS11 status when BASE-R training
+ completes even if GSERN()_LANE()_TRAIN_3_BCFG[LD_TRAIN_DONE\<21\>
+ or LD_TRAIN_DONE\<26\>] are not set.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled
+ for the [EXIT_PRBS11_OK] status to be reported.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only.
+
+ Internal:
+ FIXME what does LD_TRAIN_DONE refer to, then remove above exempt attribute. */
+ uint64_t exit_delta_ffom : 1; /**< [ 52: 52](RO/H) Training exit condition due to delta filtered FOM.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one the
+ [EXIT_DELTA_FFOM] bit will be set if the training was terminated
+ because the Delta Filtered FOM is within the high and low limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeded the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT]
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_rep_pattern : 1; /**< [ 51: 51](RO/H) Training exit condition repeating TAP moves pattern detected.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_REP_PATTERN] will be set if the training was terminated
+ because the training state machine discovered a repeating tap
+ move pattern. The GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must
+ be set to a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_tmt_timer : 1; /**< [ 50: 50](RO/H) Training timeout timer expired.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_TMT_TIMER] will be set if the training was terminated
+ because the training state machine KR training time-out timer expired.
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_min_tap_moves : 1; /**< [ 49: 49](RO/H) Training exit condition exceeded minimum number of tap moves.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MIN_TAP_MOVES] will be set if the training was terminated
+ because the training state machine exceeded the minimum number of
+ tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES].
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_max_tap_moves : 1; /**< [ 48: 48](RO/H) Training exit condition exceeded maximum number of tap moves.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MAX_TAP_MOVES] will be set if the training was terminated
+ because the training state machine exceeded the maximum number of
+ tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MAX_TAP_MOVES].
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_dffom : 13; /**< [ 47: 35](RO/H) Training exit location delta filtered FOM value. Holds the delta filtered FOM
+ value at the completion of BASE-R training. Number represented in offset binary
+ notation. For diagnostic use only. */
+ uint64_t trn_ntap_mvs : 8; /**< [ 34: 27](RO/H) BASE-R training holds the number of link partner tap moves made during
+ link training. */
+ uint64_t term_prbs11_and : 1; /**< [ 26: 26](R/W) BASE-R training KR training PRBS11 pattern check extracted from the
+ KR training frame is error free. Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled to
+ enable PRBS11 pattern error checking. */
+ uint64_t term_dffom_and : 1; /**< [ 25: 25](R/W) BASE-R training KR training Delta Filtered FOM is within the high
+ and low limits. Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Delta filtered FOM is within the high and low
+ limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeds the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] */
+ uint64_t term_rep_pat_and : 1; /**< [ 24: 24](R/W) BASE-R training KR training taps move repeating pattern detected.
+ Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must be set to
+ a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence. */
+ uint64_t term_tmt_tmr_and : 1; /**< [ 23: 23](R/W) BASE-R training KR training time-out timer expired. Termination
+ AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero. */
+ uint64_t term_min_mvs_and : 1; /**< [ 22: 22](R/W) BASE-R training termination exceeded minimum number of tap moves.
+ Termination AND condition. See description below.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Exceeded minimum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES] sets the minimum
+ number of tap moves. */
+ uint64_t term_prbs11_or : 1; /**< [ 21: 21](R/W) BASE-R training KR training PRBS11 pattern check extracted from the
+ KR training frame is error free. Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled to
+ enable PRBS11 pattern error checking. */
+ uint64_t term_dffom_or : 1; /**< [ 20: 20](R/W) BASE-R training KR training Delta Filtered FOM is within the high
+ and low limits. Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Delta filtered FOM is within the high and low
+ limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeds the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] */
+ uint64_t term_rep_pat_or : 1; /**< [ 19: 19](R/W) BASE-R training KR training taps move repeating pattern detected.
+ Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must be set to
+ a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence. */
+ uint64_t term_tmt_tmr_or : 1; /**< [ 18: 18](R/W) BASE-R training KR training time-out timer expired. Termination
+ OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero. */
+ uint64_t term_max_mvs_or : 1; /**< [ 17: 17](R/W) BASE-R training termination exceeded maximum number of tap moves.
+ Termination OR condition. See description below.
+
+ BASE-R training termination condition register fields. Selects the conditions
+ used to terminate local device KR link training. Setting the associated
+ bit will enable the training termination condition. An AND-OR
+ tree is used to allow setting conditions that must occur together
+ (AND function) or any single condition (OR function) will trigger the
+ BASE-R training termination. AND and OR conditions can be combined.
+
+ \<page\>
+ OR CONDITIONS. Any condition that is true and has a set condition bit will
+ trigger training termination. Conditions with bits that are not set
+ (cleared to zero) are not used to trigger training termination.
+
+ [TERM_MAX_MVS_OR] = Exceeded maximum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MAX_TAP_MOVES] sets the maximum
+ number of tap moves.
+
+ [TERM_TMT_TMR_OR] = KR training time-out timer expired.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_TMT_TMR_OR].
+
+ [TERM_REP_PAT_OR] =Taps move repeating pattern detected.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_REP_PAT_OR].
+
+ [TERM_DFFOM_OR] = Delta Filtered FOM is within the high and low
+ limits.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_DFFOM_OR].
+
+ [TERM_PRBS11_OR] = PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_PRBS11_OR].
+
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR] =
+ Measured FOM equal or exceeds the FOM threshold
+ in GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] during KR
+ training. GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] must also
+ be set to 1.
+ See description in GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ \<page\>
+ AND CONDITIONS. The conditions associated with bits that are set must
+ all be true to trigger training termination. Conditions with bits that
+ are not set (cleared to zero) are not used to trigger training termination.
+
+ [TERM_MIN_MVS_AND] = Exceeded minimum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES] sets the minimum
+ number of tap moves.
+
+ [TERM_TMT_TMR_AND] = KR training time-out timer expired.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_TMT_TMR_AND].
+
+ [TERM_REP_PAT_AND] = Taps move repeating pattern detected.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_REP_PAT_AND].
+
+ [TERM_DFFOM_AND] = Delta Filtered FOM is within the high and low
+ limits.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_DFFOM_AND].
+
+ [TERM_PRBS11_AND] = PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_PRBS11_AND].
+
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] =
+ Measured FOM equal or exceeds the FOM threshold
+ in GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] during KR
+ training. GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] must also
+ be set to 1.
+ See description in GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND]. */
+ uint64_t inv_tx_post_dir : 1; /**< [ 16: 16](R/W) BASE-R training when set reverses the direction of the post tap (C+1)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t inv_tx_main_dir : 1; /**< [ 15: 15](R/W) BASE-R training when set reverses the direction of the main tap (C0)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t inv_tx_pre_dir : 1; /**< [ 14: 14](R/W) BASE-R training when set reverses the direction of the pre tap (C-1)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t trn_post_en : 2; /**< [ 13: 12](R/W) BASE-R training decrements the link partner (LP) TX equalizer post (C+1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_POST_VAL].
+
+ 0x0 = Disabled, do not decrement LP post C+1 tap following PRESET.
+ 0x1 = Reserved, do not use.
+ 0x2 = Decrement LP post C+1 tap following PRESET by the number of
+ steps in the [TRN_POST_VAL].
+ 0x3 = Increment LP post C+1 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_POST_VAL]. */
+ uint64_t trn_post_val : 5; /**< [ 11: 7](R/W) BASE-R training decrements the link partner (LP) TX equalizer post (C+1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. See [TRN_POST_EN]. */
+ uint64_t trn_pre_en : 2; /**< [ 6: 5](R/W) BASE-R training decrements the link partner (LP) TX equalizer pre (C-1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_PRE_VAL].
+
+ 0x0 = Disabled, do not decrement LP pre C-1 tap following PRESET.
+ 0x1 = Reserved, do not use.
+ 0x2 = Decrement LP pre C-1 tap following PRESET by the number of
+ steps in the [TRN_PRE_VAL].
+ 0x3 = Increment LP pre C-1 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_PRE_VAL]. */
+ uint64_t trn_pre_val : 5; /**< [ 4: 0](R/W) BASE-R training decrements the link partner (LP) TX equalizer pre (C-1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_PRE_EN].
+ See [TRN_PRE_EN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t trn_pre_val : 5; /**< [ 4: 0](R/W) BASE-R training decrements the link partner (LP) TX equalizer pre (C-1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_PRE_EN].
+ See [TRN_PRE_EN]. */
+ uint64_t trn_pre_en : 2; /**< [ 6: 5](R/W) BASE-R training decrements the link partner (LP) TX equalizer pre (C-1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_PRE_VAL].
+
+ 0x0 = Disabled, do not decrement LP pre C-1 tap following PRESET.
+ 0x1 = Reserved, do not use.
+ 0x2 = Decrement LP pre C-1 tap following PRESET by the number of
+ steps in the [TRN_PRE_VAL].
+ 0x3 = Increment LP pre C-1 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_PRE_VAL]. */
+ uint64_t trn_post_val : 5; /**< [ 11: 7](R/W) BASE-R training decrements the link partner (LP) TX equalizer post (C+1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. See [TRN_POST_EN]. */
+ uint64_t trn_post_en : 2; /**< [ 13: 12](R/W) BASE-R training decrements the link partner (LP) TX equalizer post (C+1) tap
+ at the start of link training after the PRESET coefficient update has been
+ issued to the link partner. Used in conjunction with [TRN_POST_VAL].
+
+ 0x0 = Disabled, do not decrement LP post C+1 tap following PRESET.
+ 0x1 = Reserved, do not use.
+ 0x2 = Decrement LP post C+1 tap following PRESET by the number of
+ steps in the [TRN_POST_VAL].
+ 0x3 = Increment LP post C+1 tap at the start of training (PRESET disabled)
+ by the number of steps in [TRN_POST_VAL]. */
+ uint64_t inv_tx_pre_dir : 1; /**< [ 14: 14](R/W) BASE-R training when set reverses the direction of the pre tap (C-1)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t inv_tx_main_dir : 1; /**< [ 15: 15](R/W) BASE-R training when set reverses the direction of the main tap (C0)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t inv_tx_post_dir : 1; /**< [ 16: 16](R/W) BASE-R training when set reverses the direction of the post tap (C+1)
+ direction hint in the local transmitter received from the link partner. */
+ uint64_t term_max_mvs_or : 1; /**< [ 17: 17](R/W) BASE-R training termination exceeded maximum number of tap moves.
+ Termination OR condition. See description below.
+
+ BASE-R training termination condition register fields. Selects the conditions
+ used to terminate local device KR link training. Setting the associated
+ bit will enable the training termination condition. An AND-OR
+ tree is used to allow setting conditions that must occur together
+ (AND function) or any single condition (OR function) will trigger the
+ BASE-R training termination. AND and OR conditions can be combined.
+
+ \<page\>
+ OR CONDITIONS. Any condition that is true and has a set condition bit will
+ trigger training termination. Conditions with bits that are not set
+ (cleared to zero) are not used to trigger training termination.
+
+ [TERM_MAX_MVS_OR] = Exceeded maximum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MAX_TAP_MOVES] sets the maximum
+ number of tap moves.
+
+ [TERM_TMT_TMR_OR] = KR training time-out timer expired.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_TMT_TMR_OR].
+
+                                                                 [TERM_REP_PAT_OR] = Taps move repeating pattern detected.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_REP_PAT_OR].
+
+ [TERM_DFFOM_OR] = Delta Filtered FOM is within the high and low
+ limits.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_DFFOM_OR].
+
+ [TERM_PRBS11_OR] = PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_PRBS11_OR].
+
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR] =
+ Measured FOM equal or exceeds the FOM threshold
+ in GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] during KR
+ training. GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] must also
+ be set to 1.
+ See description in GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR].
+
+ \<page\>
+ AND CONDITIONS. The conditions associated with bits that are set must
+ all be true to trigger training termination. Conditions with bits that
+ are not set (cleared to zero) are not used to trigger training termination.
+
+ [TERM_MIN_MVS_AND] = Exceeded minimum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES] sets the minimum
+ number of tap moves.
+
+ [TERM_TMT_TMR_AND] = KR training time-out timer expired.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_TMT_TMR_AND].
+
+ [TERM_REP_PAT_AND] = Taps move repeating pattern detected.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_REP_PAT_AND].
+
+ [TERM_DFFOM_AND] = Delta Filtered FOM is within the high and low
+ limits.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_DFFOM_AND].
+
+ [TERM_PRBS11_AND] = PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ See description in GSERN()_LANE()_TRAIN_3_BCFG[TERM_PRBS11_AND].
+
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] =
+ Measured FOM equal or exceeds the FOM threshold
+ in GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] during KR
+ training. GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] must also
+ be set to 1.
+ See description in GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND]. */
+ uint64_t term_tmt_tmr_or : 1; /**< [ 18: 18](R/W) BASE-R training KR training time-out timer expired. Termination
+ OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero. */
+ uint64_t term_rep_pat_or : 1; /**< [ 19: 19](R/W) BASE-R training KR training taps move repeating pattern detected.
+ Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must be set to
+ a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence. */
+ uint64_t term_dffom_or : 1; /**< [ 20: 20](R/W) BASE-R training KR training Delta Filtered FOM is within the high
+ and low limits. Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Delta filtered FOM is within the high and low
+ limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeds the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] */
+ uint64_t term_prbs11_or : 1; /**< [ 21: 21](R/W) BASE-R training KR training PRBS11 pattern check extracted from the
+ KR training frame is error free. Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled to
+ enable PRBS11 pattern error checking. */
+ uint64_t term_min_mvs_and : 1; /**< [ 22: 22](R/W) BASE-R training termination exceeded minimum number of tap moves.
+ Termination AND condition. See description below.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Exceeded minimum tap moves iterations.
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES] sets the minimum
+ number of tap moves. */
+ uint64_t term_tmt_tmr_and : 1; /**< [ 23: 23](R/W) BASE-R training KR training time-out timer expired. Termination
+ AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero. */
+ uint64_t term_rep_pat_and : 1; /**< [ 24: 24](R/W) BASE-R training KR training taps move repeating pattern detected.
+ Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must be set to
+ a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence. */
+ uint64_t term_dffom_and : 1; /**< [ 25: 25](R/W) BASE-R training KR training Delta Filtered FOM is within the high
+ and low limits. Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Delta filtered FOM is within the high and low
+ limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeds the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] */
+ uint64_t term_prbs11_and : 1; /**< [ 26: 26](R/W) BASE-R training KR training PRBS11 pattern check extracted from the
+ KR training frame is error free. Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ PRBS11 pattern check extracted from the KR training
+ frame is error free.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled to
+ enable PRBS11 pattern error checking. */
+ uint64_t trn_ntap_mvs : 8; /**< [ 34: 27](RO/H) BASE-R training holds the number of link partner tap moves made during
+ link training. */
+ uint64_t exit_dffom : 13; /**< [ 47: 35](RO/H) Training exit location delta filtered FOM value. Holds the delta filtered FOM
+ value at the completion of BASE-R training. Number represented in offset binary
+ notation. For diagnostic use only. */
+ uint64_t exit_max_tap_moves : 1; /**< [ 48: 48](RO/H) Training exit condition exceeded maximum number of tap moves.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MAX_TAP_MOVES] will be set if the training was terminated
+ because the training state machine exceeded the maximum number of
+ tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MAX_TAP_MOVES].
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_min_tap_moves : 1; /**< [ 49: 49](RO/H) Training exit condition exceeded minimum number of tap moves.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MIN_TAP_MOVES] will be set if the training was terminated
+ because the training state machine exceeded the minimum number of
+ tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES].
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_tmt_timer : 1; /**< [ 50: 50](RO/H) Training timeout timer expired.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_MAX_TAP_MOVES] will be set if the training was terminated
+ because the training state machine KR training time-out timer expired.
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_SEL] and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_FAST] select the
+ timeout time in milliseconds/microseconds and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_ADTMOUT_DISABLE] enables
+ the timeout timer when cleared to zero.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_rep_pattern : 1; /**< [ 51: 51](RO/H) Training exit condition repeating TAP moves pattern detected.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_REP_PATTERN] will be set if the training was terminated
+ because the training state machine discovered a repeating tap
+ move pattern. The GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] must
+ be set to a non-zero value and GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN]
+ must be set to a one to enable the repeating tap move pattern
+ matching logic which looks for repeating tap moves to signal
+ training convergence.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_delta_ffom : 1; /**< [ 52: 52](RO/H) Training exit condition due to delta filtered FOM.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one the
+ [EXIT_DELTA_FFOM] bit will be set if the training was terminated
+ because the Delta Filtered FOM is within the high and low limits set by
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT], and
+ GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN]=1, and
+ the number of consecutive tap move iterations in which
+ the Delta Filtered FOM is within the high/low limits
+ exceeded the count in
+ GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT]
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only. */
+ uint64_t exit_prbs11_ok : 1; /**< [ 53: 53](RO/H) Training exit condition PRBS11 in the BASE-R KR training frame is
+ error free.
+ When BASE-R training is concluded, indicated by
+ GSERN()_LANE()_TRAIN_0_BCFG[LD_RECEIVER_RDY] set to one
+ [EXIT_PRBS11_OK] will be set if the training was terminated
+ because the PRBS11 pattern extracted by the CGX or OCX MAC
+ indicates that the PRBS11 pattern is error free.
+
+ This bit will report the PRBS11 status when BASE-R training
+ completes even if GSERN()_LANE()_TRAIN_3_BCFG[LD_TRAIN_DONE\<21\>
+ or LD_TRAIN_DONE\<26\>] are not set.
+ GSERN()_LANE()_TRAIN_4_BCFG[EN_PRBS11_CHK] must be enabled
+ for the [EXIT_PRBS11_OK] status to be reported.
+
+ This bit will be cleared if BASE-R training is re-enabled.
+ For diagnostic use only.
+
+ Internal:
+ FIXME what does LD_TRAIN_DONE refer to, then remove above exempt attribute. */
+ uint64_t exit_sat_mv_lmt : 1; /**< [ 54: 54](RO/H) BASE-R training saturated move limit threshold exit flag.
+ See GSERN()_LANE()_TRAIN_2_BCFG[TRN_SAT_MV_LMT_EN].
+ For diagnostic use only. */
+ uint64_t train_tx_min_rule : 8; /**< [ 62: 55](R/W) BASE-R training TX taps minimum coefficient rule. Sets the lower limit of the permissible
+ range of the TX equalizer c(0), c(+1), and c(-1) taps so that the TX equalizer
+ operates within range specified in the IEEE 802.3-2012 Clause 72 10GBASE-KR
+ and IEEE 802.3bj-2014 Clause 93 100GBASE-KR4.
+ The TX coefficient minimum rule requires (main - pre - post) \>= [TRAIN_TX_MIN_RULE].
+
+ The allowable range for [TRAIN_TX_MIN_RULE] is
+ (6 decimal \<= [TRAIN_TX_MIN_RULE] \<= 16 decimal).
+ For 10GBASE-KR, 40GBASE-KR4 and 100GBASE-KR4 it is recommended to
+ program [TRAIN_TX_MIN_RULE] to 0x6.
+
+ c(-1) pre TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[PRE_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[PRE_MIN_LIMIT].
+
+ c(0) main TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[MAIN_MIN_LIMIT].
+
+ c(+1) post TX tap range is programmed by GSERN()_LANE()_TRAIN_2_BCFG[POST_MAX_LIMIT] and
+ GSERN()_LANE()_TRAIN_2_BCFG[POST_MIN_LIMIT]. */
+ uint64_t exit_fom_thrs : 1; /**< [ 63: 63](RO/H) BASE-R training exit condition flag indicates the measured FOM
+ was equal to or exceeded the FOM threshold value specified in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] when
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] is set to 1.
+
+                                                                 Used in conjunction with
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_AND] and
+ GSERN()_LANE()_TRAIN_4_BCFG[TERM_FOM_THRS_OR] to
+ specify the BASE-R training convergence exit criteria. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_3_bcfg_s cn; */
+};
+/* Convenience typedef for the GSERN()_LANE()_TRAIN_3_BCFG register layout above. */
+typedef union bdk_gsernx_lanex_train_3_bcfg bdk_gsernx_lanex_train_3_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_3_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/*
+ * Return the CSR address of GSERN(a)_LANE(b)_TRAIN_3_BCFG.
+ *
+ * Valid only on CN9XXX models with a <= 7 (GSERN block index) and
+ * b <= 4 (lane index); any other combination falls through to
+ * __bdk_csr_fatal(), which reports the out-of-range register access.
+ *
+ * NOTE(review): the range check allows b <= 4 while the address math
+ * masks b with 0x7 -- presumably only 5 lanes exist per block and the
+ * wider mask is just the generated-code convention; confirm against
+ * the CN9XXX hardware reference manual.
+ */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_3_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e0900031e0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_TRAIN_3_BCFG", 2, a, b, 0, 0);
+}
+
+/* Accessor attributes for GSERNX_LANEX_TRAIN_3_BCFG: C type, bus type,
+ * printable name, device BAR, bus number, and argument list.
+ * NOTE(review): these follow the generated-BDK typedef_/bustype_/basename_/
+ * device_bar_/busnum_/arguments_ naming pattern -- presumably consumed by
+ * the generic BDK_CSR_* access macros; verify against bdk-csr.h. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) bdk_gsernx_lanex_train_3_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) "GSERNX_LANEX_TRAIN_3_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_3_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_4_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 4
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_4_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_4_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t term_fom_thrs_and : 1; /**< [ 63: 63](R/W) BASE-R training termination condition measured FOM equal or
+ exceeds the FOM threshold set in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL].
+ Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Exceeded FOM threshold.
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL] sets the FOM
+ threshold.
+
+ Refer to the description for
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] and
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
+ GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].
+
+ Internal:
+ FIXME no such field GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL], then remove
+ above exempt attribute. */
+ uint64_t term_fom_thrs_or : 1; /**< [ 62: 62](R/W) BASE-R training termination condition measured FOM equal or
+ exceeds the FOM threshold set in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL].
+ Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Exceeded FOM threshold.
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL] sets the FOM
+ threshold.
+
+ Refer to the description for
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] and
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
+ GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].
+
+ Internal:
+ FIXME no such field GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL]. */
+ uint64_t en_prbs11_chk : 1; /**< [ 61: 61](R/W) BASE-R training enables the check for PRBS11 checking for training
+ convergence.
+ 0 = Disables PRBS11 checking.
+ 1 = Enables PRBS11 checking.
+
+ The CGX/OCX MAC extracts the PRBS11 pattern from the KR training frame
+ and checks the PRBS11 pattern for errors. The CGX/MAC signals to the
+ KR training frame if the PRBS11 pattern sampled from the KR training
+ frame is error free or contains errors.
+
+ When [EN_PRBS11_CHK] is set the KR training state machine will
+ sample the PRBS11 status signal from the MAC and if the PRBS11 is
+ error free will use this to signal training convergence and signal
+ receiver ready if this condition is enabled in the
+ GSERN()_LANE()_TRAIN_3_BCFG[LD_TRAIN_DONE\<21\> or LD_TRAIN_DONE\<26\>]
+ training termination condition fields.
+
+ Internal:
+ FIXME what does LD_TRAIN_DONE refer to? */
+ uint64_t en_rev_moves : 1; /**< [ 60: 60](R/W) BASE-R training controls the receiver adaptation algorithm to reverse previous
+ tap moves that resulted in a decrease in the receiver figure of merit
+ (FOM).
+ 0 = Prevents the adaptation algorithm state machine from
+ reversing previous tap moves that resulted in a lower FOM.
+ 1 = Enables the adaptation algorithm state machine
+ to reverse a previous tap move that resulted in a lower FOM value.
+
+ The receiver adaptation algorithm will not reverse previous tap moves until the
+ number of tap moves exceeds the minimum number of tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES]. [EN_REV_MOVES] is normally enabled to
+ improve the adaptation convergence time. */
+ uint64_t tx_tap_stepsize : 1; /**< [ 59: 59](R/W) BASE-R training controls the transmitter Pre/Main/Post step size when a Coefficient Update
+ increment or decrement request is received. When [TX_TAP_STEPSIZE] is zero the
+ transmitter Pre/Main/Post step size is set to +/- 1. When [TX_TAP_STEPSIZE] is set to one
+ the
+ transmitter Pre/Main/Post step size is set to +/- 2. */
+ uint64_t train_rst : 1; /**< [ 58: 58](R/W) Set to force the training engine into reset. Set low to enable link
+ training. */
+ uint64_t train_ovrrd_en : 1; /**< [ 57: 57](R/W) Training engine eye monitor FOM request override enable.
+ If not programmed to PCIe, CGX, or OCX mode via GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ then program [TRAIN_OVRRD_EN] to 1 before using
+ GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ] and
+ GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] to request an RX equalizer
+ evaluation to measure the RX equalizer Figure of Merit (FOM). The 8-bit FOM is
+ returned in GSERN()_LANE()_TRAIN_5_BCFG[FOM] and the raw 12-bit FOM
+ is returned in GSERN()_LANE()_TRAIN_5_BCFG[RAW_FOM].
+ For diagnostic use only. */
+ uint64_t rxt_rev_dir : 1; /**< [ 56: 56](R/W) When set, reverses the direction of the
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_POST_DIR],
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_MAIN_DIR], and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_PRE_DIR]
+ link partner TX tap direction hints. For diagnostic use only. */
+ uint64_t adapt_axis : 3; /**< [ 55: 53](R/W) Sets the number or adaptation axes to use during receiver adaptation.
+ Typically set to 0x7 to enable all three adaptation axes. One-hot encoded.
+
+ Set to 0x1 to only enable axis 1 and disable axis 2 and axis 3.
+ Set to 0x3 to enable axis 1 and axis 2 but disable axis 3.
+ Set to 0x7 to enable axis 1, 2 and 3. (default.)
+ For diagnostic use only. */
+ uint64_t c1_e_adj_step : 5; /**< [ 52: 48](R/W) Reserved.
+ Internal:
+ Functionality moved to GSERN()_LANE()_TRAIN_10_BCFG.L_C1_E_ADJ_STEP */
+ uint64_t eq_eval_ovrrd_req : 1; /**< [ 47: 47](R/W) When set issues a receiver equalization evaluation request when
+ GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] is set.
+ For diagnostic use only. */
+ uint64_t eq_eval_ovrrd_en : 1; /**< [ 46: 46](R/W) When set the RX equalization evaluation request is controlled by
+ GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ].
+ For diagnostic use only. */
+ uint64_t err_cnt_div_ovrrd_val : 4; /**< [ 45: 42](R/W) Error counter divider override value. See table below.
+ Divider is active when the [ERR_CNT_DIV_OVRRD_EN] is set.
+ For diagnostic use only.
+
+ 0x0 = No divider.
+ 0x1 = DIV 2.
+ 0x2 = DIV 4.
+ 0x3 = DIV 8.
+ 0x4 = DIV 16.
+ 0x5 = DIV 32.
+ 0x6 = DIV 64.
+ 0x7 = DIV 128.
+ 0x8 = DIV 256.
+ 0x9 = DIV 512.
+ 0xA = DIV 1024.
+ 0xB = DIV 2048.
+ 0xC = DIV 4096.
+ 0xD = DIV 8192.
+ 0xE = DIV 16384.
+ 0xF = DIV 32768. */
+ uint64_t err_cnt_div_ovrrd_en : 1; /**< [ 41: 41](R/W) Error counter divider override enable.
+ For diagnostic use only. */
+ uint64_t eye_cnt_ovrrd_en : 1; /**< [ 40: 40](R/W) Eye Cycle Count Override Enable. When set the number of eye monitor
+ cycles to sample and count during the BASE-R training
+ figure of merit (FOM) calculation
+ is controlled by GSERN()_LANE()_TRAIN_4_BCFG[EYE_CNT_OVRRD_VAL].
+ For diagnostic use only. */
+ uint64_t eye_cnt_ovrrd_val : 40; /**< [ 39: 0](R/W) Sets the number of eye monitor cycles to sample/count during the BASE-R training
+ figure of merit (FOM) calculation when
+ GSERN()_LANE()_TRAIN_4_BCFG[EYE_CNT_OVRRD_EN]=1.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t eye_cnt_ovrrd_val : 40; /**< [ 39: 0](R/W) Sets the number of eye monitor cycles to sample/count during the BASE-R training
+ figure of merit (FOM) calculation when
+ GSERN()_LANE()_TRAIN_4_BCFG[EYE_CNT_OVRRD_EN]=1.
+ For diagnostic use only. */
+ uint64_t eye_cnt_ovrrd_en : 1; /**< [ 40: 40](R/W) Eye Cycle Count Override Enable. When set the number of eye monitor
+ cycles to sample and count during the BASE-R training
+ figure of merit (FOM) calculation
+ is controlled by GSERN()_LANE()_TRAIN_4_BCFG[EYE_CNT_OVRRD_VAL].
+ For diagnostic use only. */
+ uint64_t err_cnt_div_ovrrd_en : 1; /**< [ 41: 41](R/W) Error counter divider override enable.
+ For diagnostic use only. */
+ uint64_t err_cnt_div_ovrrd_val : 4; /**< [ 45: 42](R/W) Error counter divider override value. See table below.
+ Divider is active when the [ERR_CNT_DIV_OVRRD_EN] is set.
+ For diagnostic use only.
+
+ 0x0 = No divider.
+ 0x1 = DIV 2.
+ 0x2 = DIV 4.
+ 0x3 = DIV 8.
+ 0x4 = DIV 16.
+ 0x5 = DIV 32.
+ 0x6 = DIV 64.
+ 0x7 = DIV 128.
+ 0x8 = DIV 256.
+ 0x9 = DIV 512.
+ 0xA = DIV 1024.
+ 0xB = DIV 2048.
+ 0xC = DIV 4096.
+ 0xD = DIV 8192.
+ 0xE = DIV 16384.
+ 0xF = DIV 32768. */
+ uint64_t eq_eval_ovrrd_en : 1; /**< [ 46: 46](R/W) When set the RX equalization evaluation request is controlled by
+ GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ].
+ For diagnostic use only. */
+ uint64_t eq_eval_ovrrd_req : 1; /**< [ 47: 47](R/W) When set issues a receiver equalization evaluation request when
+ GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] is set.
+ For diagnostic use only. */
+ uint64_t c1_e_adj_step : 5; /**< [ 52: 48](R/W) Reserved.
+ Internal:
+ Functionality moved to GSERN()_LANE()_TRAIN_10_BCFG.L_C1_E_ADJ_STEP */
+ uint64_t adapt_axis : 3; /**< [ 55: 53](R/W) Sets the number or adaptation axes to use during receiver adaptation.
+ Typically set to 0x7 to enable all three adaptation axes. One-hot encoded.
+
+ Set to 0x1 to only enable axis 1 and disable axis 2 and axis 3.
+ Set to 0x3 to enable axis 1 and axis 2 but disable axis 3.
+ Set to 0x7 to enable axis 1, 2 and 3. (default.)
+ For diagnostic use only. */
+ uint64_t rxt_rev_dir : 1; /**< [ 56: 56](R/W) When set, reverses the direction of the
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_POST_DIR],
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_MAIN_DIR], and
+ GSERN()_LANE()_TRAIN_0_BCFG[RXT_TX_PRE_DIR]
+ link partner TX tap direction hints. For diagnostic use only. */
+ uint64_t train_ovrrd_en : 1; /**< [ 57: 57](R/W) Training engine eye monitor FOM request override enable.
+ If not programmed to PCIe, CGX, or OCX mode via GSERN()_LANE()_SRCMX_BCFG[TX_CTRL_SEL]
+ then program [TRAIN_OVRRD_EN] to 1 before using
+ GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ] and
+ GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] to request an RX equalizer
+ evaluation to measure the RX equalizer Figure of Merit (FOM). The 8-bit FOM is
+ returned in GSERN()_LANE()_TRAIN_5_BCFG[FOM] and the raw 12-bit FOM
+ is returned in GSERN()_LANE()_TRAIN_5_BCFG[RAW_FOM].
+ For diagnostic use only. */
+ uint64_t train_rst : 1; /**< [ 58: 58](R/W) Set to force the training engine into reset. Set low to enable link
+ training. */
+ uint64_t tx_tap_stepsize : 1; /**< [ 59: 59](R/W) BASE-R training controls the transmitter Pre/Main/Post step size when a Coefficient Update
+ increment or decrement request is received. When [TX_TAP_STEPSIZE] is zero the
+ transmitter Pre/Main/Post step size is set to +/- 1. When [TX_TAP_STEPSIZE] is set to one
+ the
+ transmitter Pre/Main/Post step size is set to +/- 2. */
+ uint64_t en_rev_moves : 1; /**< [ 60: 60](R/W) BASE-R training controls the receiver adaptation algorithm to reverse previous
+ tap moves that resulted in a decrease in the receiver figure of merit
+ (FOM).
+ 0 = Prevents the adaptation algorithm state machine from
+ reversing previous tap moves that resulted in a lower FOM.
+ 1 = Enables the adaptation algorithm state machine
+ to reverse a previous tap move that resulted in a lower FOM value.
+
+ The receiver adaptation algorithm will not reverse previous tap moves until the
+ number of tap moves exceeds the minimum number of tap moves specified in
+ GSERN()_LANE()_TRAIN_2_BCFG[MIN_TAP_MOVES]. [EN_REV_MOVES] is normally enabled to
+ improve the adaptation convergence time. */
+ uint64_t en_prbs11_chk : 1; /**< [ 61: 61](R/W) BASE-R training enables the check for PRBS11 checking for training
+ convergence.
+ 0 = Disables PRBS11 checking.
+ 1 = Enables PRBS11 checking.
+
+ The CGX/OCX MAC extracts the PRBS11 pattern from the KR training frame
+ and checks the PRBS11 pattern for errors. The CGX/MAC signals to the
+ KR training frame if the PRBS11 pattern sampled from the KR training
+ frame is error free or contains errors.
+
+ When [EN_PRBS11_CHK] is set the KR training state machine will
+ sample the PRBS11 status signal from the MAC and if the PRBS11 is
+ error free will use this to signal training convergence and signal
+ receiver ready if this condition is enabled in the
+ GSERN()_LANE()_TRAIN_3_BCFG[LD_TRAIN_DONE\<21\> or LD_TRAIN_DONE\<26\>]
+ training termination condition fields.
+
+ Internal:
+ FIXME what does LD_TRAIN_DONE refer to? */
+ uint64_t term_fom_thrs_or : 1; /**< [ 62: 62](R/W) BASE-R training termination condition measured FOM equal or
+ exceeds the FOM threshold set in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL].
+ Termination OR condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Exceeded FOM threshold.
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL] sets the FOM
+ threshold.
+
+ Refer to the description for
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] and
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
+ GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].
+
+ Internal:
+ FIXME no such field GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL]. */
+ uint64_t term_fom_thrs_and : 1; /**< [ 63: 63](R/W) BASE-R training termination condition measured FOM equal or
+ exceeds the FOM threshold set in
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL].
+ Termination AND condition.
+ Part of the BASE-R training termination condition register.
+ See the full description of the training termination conditions
+ register in GSERN()_LANE()_TRAIN_3_BCFG[TERM_MAX_MVS_OR].
+
+ Exceeded FOM threshold.
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL] sets the FOM
+ threshold.
+
+ Refer to the description for
+ GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_EN] and
+ GSERN()_LANE()_TRAIN_10_BCFG[EXIT_FOM_THRS_VAL] and
+ GSERN()_LANE()_TRAIN_3_BCFG[EXIT_FOM_THRS].
+
+ Internal:
+ FIXME no such field GSERN()_LANE()_TRAIN_10_BCFG[TRN_FOM_THRS_VAL], then remove
+ above exempt attribute. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_4_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_4_bcfg bdk_gsernx_lanex_train_4_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_4_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the physical CSR address of GSERN(a)_LANE(b)_TRAIN_4_BCFG.
+ * Valid only on CN9XXX parts with module index a <= 7 and lane index b <= 4;
+ * any other model/index combination reports a fatal CSR error via
+ * __bdk_csr_fatal() instead of returning. */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_4_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900031f0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_4_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) bdk_gsernx_lanex_train_4_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) "GSERNX_LANEX_TRAIN_4_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_4_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_5_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 5
+ * This register controls settings for lane training.
+ *
+ * NOTE: the big-endian and little-endian bit-field layouts below describe the
+ * same 64-bit register and must be kept in sync field-for-field.
+ */
+union bdk_gsernx_lanex_train_5_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_5_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pat_exit_cnt : 4; /**< [ 63: 60](R/W) BASE-R training controls the receiver adaptation algorithm training convergence
+ pattern matching logic. As BASE-R training progresses the Pre/Main/Post tap
+ direction change coefficient updates to the link partner start to dither around the
+ optimal tap values. The pattern matching logic looks for repeating patterns of
+ the tap dithering around the optimal value and is used as one metric to determine
+ that BASE-R training has converged and local device can signal receiver ready.
+
+ The [PAT_EXIT_CNT] variable sets the maximum length of the repeating pattern to search
+ for in the pattern matching array. The pattern matching array has twelve elements
+ therefore the maximum value of [PAT_EXIT_CNT] is 0xC. A value of 0x6 has been
+ found to be optimal for recognizing training tap convergence.
+
+ The GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] field is used in conjunction with the
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN] field to control the training convergence
+ pattern matching logic during BASE-R training. */
+ uint64_t pat_match_en : 1; /**< [ 59: 59](R/W) BASE-R training controls the receiver adaptation algorithm. When [PAT_MATCH_EN] is
+ set to one the training convergence pattern matching logic is enabled. The training
+ pattern matching
+ logic tracks the link partner transmitter tap moves and sets a flag when the pattern
+ is found to be repeating in the tap moves tracking array. This is used to help
+ converge training adaptation. When [PAT_MATCH_EN] is cleared to zero the pattern matching
+ logic is disabled and not used to detect training convergence.
+
+ The GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN] field is used in conjunction with the
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] field to control the training convergence
+ pattern matching logic during BASE-R training. */
+ uint64_t fdltfom_hi_lmt : 8; /**< [ 58: 51](R/W) BASE-R training sets the Delta Filtered FOM upper limit for training convergence.
+ Value is a signed twos complement value. */
+ uint64_t fdltfom_lo_lmt : 8; /**< [ 50: 43](R/W) BASE-R training sets the Delta Filtered FOM lower limit for training convergence.
+ Value is a signed twos complement value. */
+ uint64_t inv_post_dir : 1; /**< [ 42: 42](R/W) BASE-R training when set reverses the direction of the post tap (C+1)
+ direction hint from the local device. */
+ uint64_t inv_main_dir : 1; /**< [ 41: 41](R/W) BASE-R training when set reverses the direction of the main tap (C0)
+ direction hint from the local device. */
+ uint64_t inv_pre_dir : 1; /**< [ 40: 40](R/W) BASE-R training when set reverses the direction of the pre tap (C-1)
+ direction hint from the local device. */
+ uint64_t use_ffom : 1; /**< [ 39: 39](R/W) Use filtered figure of merit for BASE-R transmitter adaptation logic.
+ For diagnostic use only.
+ 0 = The BASE-R transmitter adaptation logic uses the unfiltered raw figure
+ of merit FOM for BASE-R Inc/Dec direction hint computation.
+ 1 = The BASE-R transmitter adaptation logic uses the
+ filtered FOM for Inc/Dec direction hint computation. */
+ uint64_t dfom_tc : 3; /**< [ 38: 36](R/W) Delta filtered figure of merit (DFOM) filter time constant. The DFOM is filtered
+ by a cumulative moving average (CMA) filter. [DFOM_TC] sets the time constant
+ of the CMA filter.
+ Selectable time constant options are in the range 0 to 7 which sets the divider value
+ used to scale the summed DFOM input term and the filtered DFOM feedback term. This
+ provides
+ a smoothed delta filtered figure of merit for use by the BASE-R transmitter adaptation
+ logic.
+
+ For diagnostic use only.
+
+ 0x0 = No scaling.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128. */
+ uint64_t ffom_tc : 3; /**< [ 35: 33](R/W) Filtered figure of merit (FFOM) filter time constant. The raw figure of merit (raw FOM)
+ is filtered by a cumulative moving average (CMA) filter. [FFOM_TC] sets the time
+ constant of the CMA filter.
+ Selectable time constant options are in the range 0 to 7 which sets the divider value
+ used to scale the raw FOM input term and the filtered FOM feedback term. This provides
+ a smoothed filtered figure of merit for use by the BASE-R transmitter adaptation logic.
+
+ 0x0 = No scaling.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128.
+
+ For diagnostic use only. */
+ uint64_t eq_eval_ack : 1; /**< [ 32: 32](RO/H) When set indicates a receiver equalization evaluation acknowledgment. Set in
+ response to request when GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] is set
+ and GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ] is set.
+
+ When [EQ_EVAL_ACK] is set, clear GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ]
+ which will in turn clear [EQ_EVAL_ACK] before issuing another RX equalization
+ evaluation request via GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ].
+
+ For diagnostic use only. */
+ uint64_t filtered_fom : 12; /**< [ 31: 20](RO/H) Filtered figure of merit (FOM) from the receiver adaptation logic.
+ For diagnostic use only. */
+ uint64_t raw_fom : 12; /**< [ 19: 8](RO/H) Raw figure of merit (FOM) from the receiver adaptation logic.
+ For diagnostic use only. */
+ uint64_t fom : 8; /**< [ 7: 0](RO/H) Figure of merit (FOM) for PCIe and CGX logic used for link partner TX equalizer
+ adaptation. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t fom : 8; /**< [ 7: 0](RO/H) Figure of merit (FOM) for PCIe and CGX logic used for link partner TX equalizer
+ adaptation. For diagnostic use only. */
+ uint64_t raw_fom : 12; /**< [ 19: 8](RO/H) Raw figure of merit (FOM) from the receiver adaptation logic.
+ For diagnostic use only. */
+ uint64_t filtered_fom : 12; /**< [ 31: 20](RO/H) Filtered figure of merit (FOM) from the receiver adaptation logic.
+ For diagnostic use only. */
+ uint64_t eq_eval_ack : 1; /**< [ 32: 32](RO/H) When set indicates a receiver equalization evaluation acknowledgment. Set in
+ response to request when GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_EN] is set
+ and GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ] is set.
+
+ When [EQ_EVAL_ACK] is set, clear GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ]
+ which will in turn clear [EQ_EVAL_ACK] before issuing another RX equalization
+ evaluation request via GSERN()_LANE()_TRAIN_4_BCFG[EQ_EVAL_OVRRD_REQ].
+
+ For diagnostic use only. */
+ uint64_t ffom_tc : 3; /**< [ 35: 33](R/W) Filtered figure of merit (FFOM) filter time constant. The raw figure of merit (raw FOM)
+ is filtered by a cumulative moving average (CMA) filter. [FFOM_TC] sets the time
+ constant of the CMA filter.
+ Selectable time constant options are in the range 0 to 7 which sets the divider value
+ used to scale the raw FOM input term and the filtered FOM feedback term. This provides
+ a smoothed filtered figure of merit for use by the BASE-R transmitter adaptation logic.
+
+ 0x0 = No scaling.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128.
+
+ For diagnostic use only. */
+ uint64_t dfom_tc : 3; /**< [ 38: 36](R/W) Delta filtered figure of merit (DFOM) filter time constant. The DFOM is filtered
+ by a cumulative moving average (CMA) filter. [DFOM_TC] sets the time constant
+ of the CMA filter.
+ Selectable time constant options are in the range 0 to 7 which sets the divider value
+ used to scale the summed DFOM input term and the filtered DFOM feedback term. This
+ provides
+ a smoothed delta filtered figure of merit for use by the BASE-R transmitter adaptation
+ logic.
+
+ For diagnostic use only.
+
+ 0x0 = No scaling.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128. */
+ uint64_t use_ffom : 1; /**< [ 39: 39](R/W) Use filtered figure of merit for BASE-R transmitter adaptation logic.
+ For diagnostic use only.
+ 0 = The BASE-R transmitter adaptation logic uses the unfiltered raw figure
+ of merit FOM for BASE-R Inc/Dec direction hint computation.
+ 1 = The BASE-R transmitter adaptation logic uses the
+ filtered FOM for Inc/Dec direction hint computation. */
+ uint64_t inv_pre_dir : 1; /**< [ 40: 40](R/W) BASE-R training when set reverses the direction of the pre tap (C-1)
+ direction hint from the local device. */
+ uint64_t inv_main_dir : 1; /**< [ 41: 41](R/W) BASE-R training when set reverses the direction of the main tap (C0)
+ direction hint from the local device. */
+ uint64_t inv_post_dir : 1; /**< [ 42: 42](R/W) BASE-R training when set reverses the direction of the post tap (C+1)
+ direction hint from the local device. */
+ uint64_t fdltfom_lo_lmt : 8; /**< [ 50: 43](R/W) BASE-R training sets the Delta Filtered FOM lower limit for training convergence.
+ Value is a signed twos complement value. */
+ uint64_t fdltfom_hi_lmt : 8; /**< [ 58: 51](R/W) BASE-R training sets the Delta Filtered FOM upper limit for training convergence.
+ Value is a signed twos complement value. */
+ uint64_t pat_match_en : 1; /**< [ 59: 59](R/W) BASE-R training controls the receiver adaptation algorithm. When [PAT_MATCH_EN] is
+ set to one the training convergence pattern matching logic is enabled. The training
+ pattern matching
+ logic tracks the link partner transmitter tap moves and sets a flag when the pattern
+ is found to be repeating in the tap moves tracking array. This is used to help
+ converge training adaptation. When [PAT_MATCH_EN] is cleared to zero the pattern matching
+ logic is disabled and not used to detect training convergence.
+
+ The GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN] field is used in conjunction with the
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] field to control the training convergence
+ pattern matching logic during BASE-R training. */
+ uint64_t pat_exit_cnt : 4; /**< [ 63: 60](R/W) BASE-R training controls the receiver adaptation algorithm training convergence
+ pattern matching logic. As BASE-R training progresses the Pre/Main/Post tap
+ direction change coefficient updates to the link partner start to dither around the
+ optimal tap values. The pattern matching logic looks for repeating patterns of
+ the tap dithering around the optimal value and is used as one metric to determine
+ that BASE-R training has converged and local device can signal receiver ready.
+
+ The [PAT_EXIT_CNT] variable sets the maximum length of the repeating pattern to search
+ for in the pattern matching array. The pattern matching array has twelve elements
+ therefore the maximum value of [PAT_EXIT_CNT] is 0xC. A value of 0x6 has been
+ found to be optimal for recognizing training tap convergence.
+
+ The GSERN()_LANE()_TRAIN_5_BCFG[PAT_EXIT_CNT] field is used in conjunction with the
+ GSERN()_LANE()_TRAIN_5_BCFG[PAT_MATCH_EN] field to control the training convergence
+ pattern matching logic during BASE-R training. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_5_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_5_bcfg bdk_gsernx_lanex_train_5_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_5_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the physical CSR address of GSERN(a)_LANE(b)_TRAIN_5_BCFG.
+ * Valid only on CN9XXX parts with module index a <= 7 and lane index b <= 4;
+ * any other model/index combination reports a fatal CSR error via
+ * __bdk_csr_fatal() instead of returning. */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_5_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003200ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_5_BCFG", 2, a, b, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) bdk_gsernx_lanex_train_5_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) "GSERNX_LANEX_TRAIN_5_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_5_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_6_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 6
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_6_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_train_6_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t frame_err : 1; /**< [ 63: 63](RO/H) Framing error. When set to a one and the
+ GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check indicates that the DOUTE and DOUTQ pipes could
+ not be aligned to produce error free eye monitor data.
+ For diagnostic use only. */
+ uint64_t no_shft_path_gd : 1; /**< [ 62: 62](RO/H) The non-shifted error path completed the framing test without errors.
+ Valid when the GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check.
+ For diagnostic use only. */
+ uint64_t shft_path_gd : 1; /**< [ 61: 61](RO/H) The shifted error path completed the framing test without errors.
+ Valid when the GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check.
+ For diagnostic use only. */
+ uint64_t en_frmoffs_chk : 1; /**< [ 60: 60](R/W) Enable framing offset check. When [EN_FRMOFFS_CHK] is set to a one the training
+ eye monitor state machine checks if framing offset is needed between the receiver
+ DOUTQ and DOUTE pipes. The framing offset check is performed when BASE-R or PCIe
+ Gen3 training is first enabled.
+ The GSERN()_LANE()_TRAIN_6_BCFG[SHFT_PATH_GD] or
+ GSERN()_LANE()_TRAIN_6_BCFG[NO_SHFT_PATH_GD] flag will be set to indicate which
+ framing offset was required. If no framing offset can be found to that produces
+ an error free eye measurement then the GSERN()_LANE()_TRAIN_6_BCFG[FRAME_ERR] flag will
+ be set.
+ For diagnostic use only. */
+ uint64_t en_rxwt_ctr : 1; /**< [ 59: 59](R/W) Enable receiver adaptation wait timer. When [EN_RXWT_CTR] is set to a one the
+ training state machine eye monitor measurement to measure the figure of merit
+ (FOM) is delayed by 10 microseconds to allow the receiver equalizer to adjust
+ to the link partner TX equalizer tap adjustments (BASE-R training and PCIe
+ training) during link training.
+ For diagnostic use only. */
+ uint64_t en_teoffs : 1; /**< [ 58: 58](R/W) Enable E-path QAC time offset adjustment. This is a diagnostic control used
+ to adjust the QAC E-path time offset. Typically the E-path QAC time offset is
+ set to 0.5UI. Setting [EN_TEOFFS] to a one enables the training state machine
+ to adjust the E-path QAC time offset by the value specified in
+ GSERN()_LANE()_TRAIN_6_BCFG[PRG_TEOFFS].
+ For diagnostic use only. */
+ uint64_t prg_teoffs : 6; /**< [ 57: 52](R/W) Programmable E-path QAC time offset. This is a diagnostic control used to set the
+ eye monitor Epath QAC offset. Use to trim the qac_eoffs offset during eye
+ monitor usage when used in BASE-R and PCIE training to measure the RX eye figure of
+ merit (FOM). Typically set to the middle of the eye, e.g. 0.5UI.
+
+ _ Target_eoffs = [PRG_TEOFFS] + (GSERN()_LANE()_RX_QAC_BSTS[QAC_EOFFS]
+ - GSERN()_LANE()_TRAIN_6_BCFG[PRG_TDELTA]).
+ _ [PRG_TEOFFS] = round(0.5UI/(1/63UI) = 6'h20.
+
+ typically but other values can be set for testing purposes.
+ For diagnostic use only.
+
+ Internal:
+ FIXME no such field GSERN()_LANE()_TRAIN_6_BCFG[PRG_TDELTA], then remove above exempt attribute. */
+ uint64_t trn_tst_pat : 2; /**< [ 51: 50](R/W) Training test pattern. This is a diagnostic control used to send a sequence
+ of predetermined cost values to the BASE-R training logic to mimic training of a
+ predetermined channel between the local device and link partner. This is to
+ facilitate BASE-R testing between channels in a manufacturing test environment.
+ When training starts the predetermined set of cost values (raw figure of merit)
+ values will be provided to the BASE-R receiver and used to steer the training
+ logic and tap convergence logic.
+
+ Used only when GSERN()_LANE()_TRAIN_6_BCFG[TRN_TST_PATEN] is set to one.
+ For diagnostic use only.
+
+ 0x0 = Test training pattern with cost cache disabled 32 dB channel.
+ 0x1 = Test training pattern with cost cache enabled 32 dB channel.
+ 0x2 = Test training pattern with cost cache disabled 32 dB channel.
+ 0x3 = Test training pattern with cost cache enabled 8 dB channel. */
+ uint64_t trn_tst_paten : 1; /**< [ 49: 49](R/W) Training test pattern enable. This is a diagnostic control used to send a sequence
+ of predetermined cost values to the BASE-R training logic to mimic training of a
+ predetermined channel between the local device and link partner. This is to
+ facilitate BASE-R testing between channels in a manufacturing test environment.
+ Used in conjunction with GSERN()_LANE()_TRAIN_6_BCFG[TRN_TST_PAT].
+ For diagnostic use only. */
+ uint64_t sav_cost_cache : 1; /**< [ 48: 48](R/W) Save cost cache contents when BASE-R training is completed. This is a diagnostic
+ control used to preserve the cost cache contents after training is complete.
+ When [SAV_COST_CACHE] is set to one the cost cache is not automatically clear at the
+ completion of BASE-R training. When [SAV_COST_CACHE] is cleared to zero the cost
+ cached is cleared when training is complete so that the BASE-R training logic can
+ process a new request for BASE-R training in cases where training is restarted.
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[COST_CACHE_EN] is set to one.
+ For diagnostic use only. */
+ uint64_t ccache_hits_min : 5; /**< [ 47: 43](R/W) Cost cache hits minimum. When BASE-R training is using the cost average cache to
+ improve the gradient estimation process to get more accurate tap moves during the
+ final stages of training convergence [CCACHE_HITS_MIN] sets the minimum number of
+ cache hits that must be accumulate before the cost cache will be used.
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[COST_CACHE_EN] is set to one.
+ For diagnostic use only. */
+ uint64_t cost_cache_en : 1; /**< [ 42: 42](R/W) Cost cache enable. When set BASE-R training will use the cost average cache to
+ improve the gradient estimation process to get more accurate tap moves during
+ the final stages of training convergence. For diagnostic use only. */
+ uint64_t dffom_exit_en : 1; /**< [ 41: 41](R/W) Delta Filtered FOM Exit Enable. When set to one BASE-R training will conclude and local
+ device will signal ready if the Delta Filtered FOM is within the high and low limits
+ specified in the GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT] for the number of tap move iterations
+ specified in the GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] field.
+ For diagnostic use only. */
+ uint64_t delta_ffom_ccnt : 5; /**< [ 40: 36](R/W) Delta Filtered FOM Convergence Count. Used during BASE-R training to specify the
+ number of successive iterations required for the Delta Filtered FOM to be within
+ the high and low limits specified in the GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT] to signal that BASE-R training is converged
+ on the Local Device receiver.
+
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN] is set to a one.
+
+ For diagnostic use only. */
+ uint64_t exit_loc_main : 8; /**< [ 35: 28](RO/H) Training Exit Location Main tap value. Holds the exit location of the LP Main tap
+ at the completion of BASE-R training when training completes.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+ uint64_t exit_loc_post : 8; /**< [ 27: 20](RO/H) Training Exit Location Post tap value. Holds the exit location of the LP Post tap
+ at the completion of BASE-R training completes.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+ uint64_t exit_loc_pre : 8; /**< [ 19: 12](RO/H) Training Exit Location Pre tap value. Holds the exit location of the LP Pre tap
+ at the completion of BASE-R training completes.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+ uint64_t exit_fom_val : 12; /**< [ 11: 0](RO/H) Pattern match logic exit value. Holds the Figure of merit (FOM) at the completion of
+ BASE-R
+ training when training is converged using the pattern matching logic.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t exit_fom_val : 12; /**< [ 11: 0](RO/H) Pattern match logic exit value. Holds the Figure of merit (FOM) at the completion of
+ BASE-R
+ training when training is converged using the pattern matching logic.
+ For diagnostic use only. */
+ uint64_t exit_loc_pre : 8; /**< [ 19: 12](RO/H) Training Exit Location Pre tap value. Holds the exit location of the LP Pre tap
+ at the completion of BASE-R training completes.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+ uint64_t exit_loc_post : 8; /**< [ 27: 20](RO/H) Training Exit Location Post tap value. Holds the exit location of the LP Post tap
+ at the completion of BASE-R training completes.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+ uint64_t exit_loc_main : 8; /**< [ 35: 28](RO/H) Training Exit Location Main tap value. Holds the exit location of the LP Main tap
+ at the completion of BASE-R training when training completes.
+ Number represented in offset binary notation.
+ For diagnostic use only. */
+ uint64_t delta_ffom_ccnt : 5; /**< [ 40: 36](R/W) Delta Filtered FOM Convergence Count. Used during BASE-R training to specify the
+ number of successive iterations required for the Delta Filtered FOM to be within
+ the high and low limits specified in the GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT] to signal that BASE-R training is converged
+ on the Local Device receiver.
+
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[DFFOM_EXIT_EN] is set to a one.
+
+ For diagnostic use only. */
+ uint64_t dffom_exit_en : 1; /**< [ 41: 41](R/W) Delta Filtered FOM Exit Enable. When set to one BASE-R training will conclude and local
+ device will signal ready if the Delta Filtered FOM is within the high and low limits
+ specified in the GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_HI_LMT] and
+ GSERN()_LANE()_TRAIN_5_BCFG[FDLTFOM_LO_LMT] for the number of tap move iterations
+ specified in the GSERN()_LANE()_TRAIN_6_BCFG[DELTA_FFOM_CCNT] field.
+ For diagnostic use only. */
+ uint64_t cost_cache_en : 1; /**< [ 42: 42](R/W) Cost cache enable. When set BASE-R training will use the cost average cache to
+ improve the gradient estimation process to get more accurate tap moves during
+ the final stages of training convergence. For diagnostic use only. */
+ uint64_t ccache_hits_min : 5; /**< [ 47: 43](R/W) Cost cache hits minimum. When BASE-R training is using the cost average cache to
+ improve the gradient estimation process to get more accurate tap moves during the
+ final stages of training convergence [CCACHE_HITS_MIN] sets the minimum number of
+ cache hits that must be accumulate before the cost cache will be used.
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[COST_CACHE_EN] is set to one.
+ For diagnostic use only. */
+ uint64_t sav_cost_cache : 1; /**< [ 48: 48](R/W) Save cost cache contents when BASE-R training is completed. This is a diagnostic
+ control used to preserve the cost cache contents after training is complete.
+ When [SAV_COST_CACHE] is set to one the cost cache is not automatically clear at the
+ completion of BASE-R training. When [SAV_COST_CACHE] is cleared to zero the cost
+ cached is cleared when training is complete so that the BASE-R training logic can
+ process a new request for BASE-R training in cases where training is restarted.
+ Used when GSERN()_LANE()_TRAIN_6_BCFG[COST_CACHE_EN] is set to one.
+ For diagnostic use only. */
+ uint64_t trn_tst_paten : 1; /**< [ 49: 49](R/W) Training test pattern enable. This is a diagnostic control used to send a sequence
+ of predetermined cost values to the BASE-R training logic to mimic training of a
+ predetermined channel between the local device and link partner. This is to
+ facilitate BASE-R testing between channels in a manufacturing test environment.
+ Used in conjunction with GSERN()_LANE()_TRAIN_6_BCFG[TRN_TST_PAT].
+ For diagnostic use only. */
+ uint64_t trn_tst_pat : 2; /**< [ 51: 50](R/W) Training test pattern. This is a diagnostic control used to send a sequence
+ of predetermined cost values to the BASE-R training logic to mimic training of a
+ predetermined channel between the local device and link partner. This is to
+ facilitate BASE-R testing between channels in a manufacturing test environment.
+ When training starts the predetermined set of cost values (raw figure of merit)
+ values will be provided to the BASE-R receiver and used to steer the training
+ logic and tap convergence logic.
+
+ Used only when GSERN()_LANE()_TRAIN_6_BCFG[TRN_TST_PATEN] is set to one.
+ For diagnostic use only.
+
+ 0x0 = Test training pattern with cost cache disabled 32 dB channel.
+ 0x1 = Test training pattern with cost cache enabled 32 dB channel.
+ 0x2 = Test training pattern with cost cache disabled 32 dB channel.
+ 0x3 = Test training pattern with cost cache enabled 8 dB channel. */
+ uint64_t prg_teoffs : 6; /**< [ 57: 52](R/W) Programmable E-path QAC time offset. This is a diagnostic control used to set the
+ eye monitor Epath QAC offset. Use to trim the qac_eoffs offset during eye
+ monitor usage when used in BASE-R and PCIE training to measure the RX eye figure of
+ merit (FOM). Typically set to the middle of the eye, e.g. 0.5UI.
+
+ _ Target_eoffs = [PRG_TEOFFS] + (GSERN()_LANE()_RX_QAC_BSTS[QAC_EOFFS]
+ - GSERN()_LANE()_TRAIN_6_BCFG[PRG_TDELTA]).
+ _ [PRG_TEOFFS] = round(0.5UI/(1/63UI) = 6'h20.
+
+ typically but other values can be set for testing purposes.
+ For diagnostic use only.
+
+ Internal:
+ FIXME no such field GSERN()_LANE()_TRAIN_6_BCFG[PRG_TDELTA], then remove above exempt attribute. */
+ uint64_t en_teoffs : 1; /**< [ 58: 58](R/W) Enable E-path QAC time offset adjustment. This is a diagnostic control used
+ to adjust the QAC E-path time offset. Typically the E-path QAC time offset is
+ set to 0.5UI. Setting [EN_TEOFFS] to a one enables the training state machine
+ to adjust the E-path QAC time offset by the value specified in
+ GSERN()_LANE()_TRAIN_6_BCFG[PRG_TEOFFS].
+ For diagnostic use only. */
+ uint64_t en_rxwt_ctr : 1; /**< [ 59: 59](R/W) Enable receiver adaptation wait timer. When [EN_RXWT_CTR] is set to a one the
+ training state machine eye monitor measurement to measure the figure of merit
+ (FOM) is delayed by 10 microseconds to allow the receiver equalizer to adjust
+ to the link partner TX equalizer tap adjustments (BASE-R training and PCIe
+ training) during link training.
+ For diagnostic use only. */
+ uint64_t en_frmoffs_chk : 1; /**< [ 60: 60](R/W) Enable framing offset check. When [EN_FRMOFFS_CHK] is set to a one the training
+ eye monitor state machine checks if framing offset is needed between the receiver
+ DOUTQ and DOUTE pipes. The framing offset check is performed when BASE-R or PCIe
+ Gen3 training is first enabled.
+ The GSERN()_LANE()_TRAIN_6_BCFG[SHFT_PATH_GD] or
+ GSERN()_LANE()_TRAIN_6_BCFG[NO_SHFT_PATH_GD] flag will be set to indicate which
+ framing offset was required. If no framing offset can be found to that produces
+ an error free eye measurement then the GSERN()_LANE()_TRAIN_6_BCFG[FRAME_ERR] flag will
+ be set.
+ For diagnostic use only. */
+ uint64_t shft_path_gd : 1; /**< [ 61: 61](RO/H) The shifted error path completed the framing test without errors.
+ Valid when the GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check.
+ For diagnostic use only. */
+ uint64_t no_shft_path_gd : 1; /**< [ 62: 62](RO/H) The non-shifted error path completed the framing test without errors.
+ Valid when the GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check.
+ For diagnostic use only. */
+ uint64_t frame_err : 1; /**< [ 63: 63](RO/H) Framing error. When set to a one and the
+ GSERN()_LANE()_TRAIN_6_BCFG[EN_FRMOFFS_CHK] bit is set
+ to a one and the training state machine has completed the framing
+ alignment check indicates that the DOUTE and DOUTQ pipes could
+ not be aligned to produce error free eye monitor data.
+ For diagnostic use only. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_6_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_6_bcfg bdk_gsernx_lanex_train_6_bcfg_t;
+
+/* Return the physical CSR address of GSERN(a)_LANE(b)_TRAIN_6_BCFG for
+   QLM index (a) and lane index (b).  Valid only on CN9XXX with a <= 7 and
+   b <= 4; any other combination reports a fatal CSR error via
+   __bdk_csr_fatal() (which does not return a meaningful address). */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_6_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_6_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003210ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_TRAIN_6_BCFG", 2, a, b, 0, 0);
+}
+
+/* Companion accessor macros used by the generic BDK_CSR_* helpers. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) bdk_gsernx_lanex_train_6_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) "GSERNX_LANEX_TRAIN_6_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_6_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_7_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 7
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_7_bcfg
+{
+    uint64_t u;
+    struct bdk_gsernx_lanex_train_7_bcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t pcie_fasteq_val : 5; /**< [ 63: 59](R/W) Reserved.
+                                                                 Internal:
+                                                                 PCIe fast equalization delay value for simulation.
+                                                                 Used in conjunction with GSERN()_LANE()_TRAIN_7_BCFG[PCIE_FASTEQ]
+                                                                 When testing PCIe Gen3/Gen4 equalization in simulation.
+                                                                 The default value of 0x6 programs the PCIe equalization FOM and
+                                                                 link evaluation direction change request acknowledgement handshake
+                                                                 to 1.6 microseconds to accelerate simulation modeling of the PCIe
+                                                                 Gen3/Gen4 equalization phases 2 and 3.
+                                                                 For simulation use only. */
+        uint64_t pcie_fasteq : 1; /**< [ 58: 58](R/W) Reserved.
+                                                                 Internal:
+                                                                 PCIe fast equalization mode for simulation.
+                                                                 When testing PCIe Gen3/Gen4 equalization in simulation setting [PCIE_FASTEQ]
+                                                                 to 1 will reduce the PCIe equalization response to 1.6 microseconds.
+                                                                 Can be used in conjunction with GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN].
+                                                                 If the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN] is not used the raw FOM
+                                                                 value returned will be zero. Further the [PCIE_FASTEQ] is set the link evaluation
+                                                                 feedback direction change for C(-1), C(0), and C(+1) will indicate no change.
+                                                                 For simulation use only. */
+        uint64_t pcie_dir_eq_done : 1; /**< [ 57: 57](RO/H) PCIe direction change equalization done flag. During PCIe Gen3/Gen4
+                                                                 direction change equalization reflects the state of the direction
+                                                                 equalization done flag. When set to 1 indicates that the current
+                                                                 direction change equalization tap adjustment sequence is complete.
+                                                                 Reset automatically by hardware when PCIe Gen3/Gen4 equalization is
+                                                                 completed. */
+        uint64_t pcie_term_adtmout : 1; /**< [ 56: 56](R/W) PCIe terminate direction change feedback equalization when it reaches
+                                                                 the equalization timeout specified in
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL].
+                                                                 During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode the equalization timeout period is controlled by
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL] and
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST].
+                                                                 When [PCIE_TERM_ADTMOUT] is set and the equalization timeout timer expires
+                                                                 the equalization logic will signal equalization complete on the next
+                                                                 equalization request from the PCIe controller.
+                                                                 The training logic will signal equalization complete by returning
+                                                                 C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+                                                                 also set to No Change. This will signal the termination of
+                                                                 PCIe Gen3/Gen4 equalization direction change feedback mode. */
+        uint64_t pcie_adtmout_fast : 1; /**< [ 55: 55](R/W) Reserved.
+                                                                 Internal:
+                                                                 For simulation use only. When set accelerates the PCIe Gen3/Gen4 direction change
+                                                                 feedback equalization timeout timer period. When set shortens the direction change
+                                                                 equalization time-out timer.
+                                                                 See the description for
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL].
+                                                                 For diagnostic use only. */
+        uint64_t pcie_adtmout_disable : 1; /**< [ 54: 54](R/W) PCIe Gen3/Gen4 direction change feedback equalization timeout timer disable.
+                                                                 When [PCIE_ADTMOUT_DISABLE] is set to 1 the timeout timer that runs during
+                                                                 PCIe Gen3/Gen4 direction change feedback equalization is disabled. When
+                                                                 [PCIE_ADTMOUT_DISABLE] is cleared to 0 the equalization timeout timer is enabled.
+                                                                 The equalization timeout period is controlled by
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL] and
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST].
+                                                                 For diagnostic use only. */
+        uint64_t pcie_adtmout_sel : 2; /**< [ 53: 52](R/W) Selects the timeout value for the PCIe Gen3/Gen4 direction change feedback equalization.
+                                                                 This time-out timer value is only valid if
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_DISABLE]
+                                                                 is cleared to 0.
+
+                                                                 When GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST] is cleared to 0 the link training
+                                                                 time-out timer value is set by [PCIE_ADTMOUT_SEL] to the values shown.
+                                                                 0x0 = 5.24 milliseconds.
+                                                                 0x1 = 10.49 milliseconds.
+                                                                 0x2 = 13.1 milliseconds.
+                                                                 0x3 = 15.73 milliseconds.
+
+                                                                 When GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST] is set to 1 the link training
+                                                                 time-out timer value is set by [PCIE_ADTMOUT_SEL] to the values shown.
+                                                                 0x0 = 81.92 microseconds.
+                                                                 0x1 = 163.84 microseconds.
+                                                                 0x2 = 327.68 microseconds.
+                                                                 0x3 = 655.36 microseconds. */
+        uint64_t pcie_term_max_mvs : 1; /**< [ 51: 51](R/W) PCIe terminate direction change feedback equalization when it reaches
+                                                                 the maximum number of tap moves specified in
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAX_MOVES].
+                                                                 During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode [PCIE_MAX_MOVES] sets the maximum number of tap moves to make
+                                                                 before signaling equalization complete. When [PCIE_TERM_MAX_MVS] is set
+                                                                 to 1 the training logic will signal equalization complete by returning
+                                                                 C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+                                                                 also set to No Change. This will signal the termination of
+                                                                 PCIe Gen3/Gen4 equalization direction change feedback mode. */
+        uint64_t pcie_term_min_mvs : 1; /**< [ 50: 50](R/W) PCIe terminate direction change feedback equalization when it exceeds
+                                                                 the minimum number of tap moves specified in
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MIN_MOVES].
+                                                                 During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode [PCIE_MIN_MOVES] sets the minimum number of tap moves to make
+                                                                 before signaling equalization complete. When [PCIE_TERM_MIN_MVS] is set
+                                                                 to 1 the training logic will signal equalization complete by returning
+                                                                 C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+                                                                 also set to No Change. This will signal the termination of
+                                                                 PCIe Gen3/Gen4 equalization direction change feedback mode. */
+        uint64_t pcie_max_moves : 8; /**< [ 49: 42](R/W) PCIe maximum tap moves. During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode [PCIE_MAX_MOVES] sets the maximum number of tap moves to make
+                                                                 before signaling equalization complete. */
+        uint64_t pcie_min_moves : 8; /**< [ 41: 34](R/W) PCIe minimum tap moves. During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode [PCIE_MIN_MOVES] sets the minimum number of tap moves to make
+                                                                 before signaling equalization complete. */
+        uint64_t pcie_rev_dir_hints : 1; /**< [ 33: 33](R/W) When set, reverses the direction of the
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_POST_DIR],
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAIN_DIR], and
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_PRE_DIR]
+                                                                 Tx tap direction feedback hints. For diagnostic use only. */
+        uint64_t pcie_inv_post_dir : 1; /**< [ 32: 32](R/W) PCIe direction change equalization invert post tap direction.
+                                                                 When set reverses the Increment/Decrement direction
+                                                                 of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_POST_DIR]
+                                                                 Tx tap direction feedback. For diagnostic use only. */
+        uint64_t pcie_inv_main_dir : 1; /**< [ 31: 31](R/W) PCIe direction change equalization invert main tap direction.
+                                                                 When set reverses the Increment/Decrement direction
+                                                                 of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAIN_DIR]
+                                                                 Tx tap direction feedback. For diagnostic use only. */
+        uint64_t pcie_inv_pre_dir : 1; /**< [ 30: 30](R/W) PCIe direction change equalization invert pre tap direction.
+                                                                 When set reverses the Increment/Decrement direction
+                                                                 of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_PRE_DIR]
+                                                                 Tx tap direction feedback. For diagnostic use only. */
+        uint64_t pcie_post_dir : 2; /**< [ 29: 28](RO/H) PCIe direction change equalization post (C+1) tap direction.
+                                                                 During PCIe Gen3/Gen4 link training using direction change equalization
+                                                                 the [PCIE_POST_DIR] field reflects the value of the post (C+1) tap
+                                                                 direction for the link evaluation direction feedback.
+                                                                 0x0 = No change.
+                                                                 0x1 = Increment feedback for each coefficient.
+                                                                 0x2 = Decrement feedback for each coefficient.
+                                                                 0x3 = Reserved. */
+        uint64_t pcie_main_dir : 2; /**< [ 27: 26](RO/H) PCIe direction change equalization main (C0) tap direction.
+                                                                 During PCIe Gen3/Gen4 link training using direction change equalization
+                                                                 the [PCIE_MAIN_DIR] field reflects the value of the main (C0) tap
+                                                                 direction for the link evaluation direction feedback.
+                                                                 0x0 = No change.
+                                                                 0x1 = Increment feedback for each coefficient.
+                                                                 0x2 = Decrement feedback for each coefficient.
+                                                                 0x3 = Reserved.
+
+                                                                 The main direction will always be 0x0 no change. The PCIe
+                                                                 MAC computes the Main (C0) tap direction change. */
+        uint64_t pcie_pre_dir : 2; /**< [ 25: 24](RO/H) PCIe direction change equalization pre (C-1) tap direction.
+                                                                 During PCIe Gen3/Gen4 link training using direction change equalization
+                                                                 the [PCIE_PRE_DIR] field reflects the value of the pre (C-1) tap
+                                                                 direction for the link evaluation direction feedback.
+                                                                 0x0 = No change.
+                                                                 0x1 = Increment feedback for each coefficient.
+                                                                 0x2 = Decrement feedback for each coefficient.
+                                                                 0x3 = Reserved. */
+        uint64_t pcie_tst_array_rdy : 1; /**< [ 23: 23](RO/H) PCIe test FOM array ready. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL].
+
+                                                                 Internal:
+                                                                 PCIe test FOM array ready. For verification diagnostic use only.
+                                                                 All entries of the PCIe test FOM array are cleared following release
+                                                                 of reset. When [PCIE_TST_ARRAY_RDY] is set to 1 the PCIe test FOM
+                                                                 array is ready and can be used for PCIe training testing. Do not
+                                                                 read or write the PCIe test FOM array while [PCIE_TST_ARRAY_RDY] is
+                                                                 cleared to 0. When the GSER QLM is released from reset the
+                                                                 [PCIE_TST_ARRAY_RDY] will transition from 0 to 1 after 128 service
+                                                                 clock cycles. */
+        uint64_t pcie_tst_fom_mode : 1; /**< [ 22: 22](R/W) PCIe test FOM array mode. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL].
+
+                                                                 Internal:
+                                                                 PCIe test FOM array mode. For verification diagnostic use only.
+                                                                 0x0 = Test FOM array is used to load and play back test FOMs for PCIe link
+                                                                 training.
+                                                                 0x1 = Test FOM array is used to capture raw FOMs during link training for
+                                                                 diagnostic verification. */
+        uint64_t pcie_tst_fom_en : 1; /**< [ 21: 21](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL]. */
+        uint64_t pcie_tst_fom_rd : 1; /**< [ 20: 20](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL]. */
+        uint64_t pcie_tst_fom_ld : 1; /**< [ 19: 19](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL]. */
+        uint64_t pcie_tst_fom_addr : 7; /**< [ 18: 12](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL]. */
+        uint64_t pcie_tst_fom_val : 12; /**< [ 11: 0](R/W/H) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 Internal:
+                                                                 Used to load the test raw figure of merit (raw FOM) array with test
+                                                                 FOM values to play back during PCIe Gen3/Gen4 training to check the
+                                                                 training preset selection logic and PCIE training logic.
+                                                                 An 11-bit by 32 word array is used to hold the test raw FOM values.
+                                                                 The array FOM values are initialized by writing the
+                                                                 [PCIE_TST_FOM_ADDR] field with a value
+                                                                 from 0x0 to 0x7F to index a location in the array, then writing the
+                                                                 [PCIE_TST_FOM_VAL] with a 12-bit quantity representing the raw
+                                                                 FOM value to be written to the array location, then writing the
+                                                                 [PCIE_TST_FOM_LD] bit to 1 to write
+                                                                 the raw FOM 12-bit value to the array, and then writing the
+                                                                 [PCIE_TST_FOM_LD] bit to 0 to complete
+                                                                 array write operation.
+                                                                 Before writing the array software should poll the
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_ARRAY_RDY] and wait for
+                                                                 [PCIE_TST_ARRAY_RDY] field to be set to 1 before reading or writing
+                                                                 the test FOM array. Also write
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_MODE] to 0.
+
+                                                                 Each array location is written with the desired raw FOM value following
+                                                                 these steps.
+
+                                                                 After all array locations are written, the array locations can be read
+                                                                 back. Write the [PCIE_TST_FOM_ADDR] to point
+                                                                 to the desired array location, next write
+                                                                 [PCIE_TST_FOM_RD] to 1 to enable read back mode.
+                                                                 Read the [PCIE_TST_FOM_VAL] field to readback the 12-bit test raw FOM
+                                                                 value from the array. Finally write
+                                                                 [PCIE_TST_FOM_RD] to 0 to disable read back mode.
+
+                                                                 To enable the PCI Express Test FOM array during PCIe Gen3/Gen4 link training
+                                                                 write [PCIE_TST_FOM_EN] to 1. Note prior to
+                                                                 writing [PCIE_TST_FOM_EN] to 1, ensure that
+                                                                 [PCIE_TST_FOM_RD] is cleared to 0 and
+                                                                 [PCIE_TST_FOM_LD] is cleared to 0.
+
+                                                                 During PCIe Gen3/Gen4 link training each time a Preset receiver evaluation
+                                                                 request is received the training logic will return the 12-bit raw FOM
+                                                                 from the current test FOM array location to the PIPE PCS logic and then
+                                                                 move to the next test FOM array location. The test FOM array always
+                                                                 starts at location 0x0 and increments to the next location in the FOM
+                                                                 array after each preset evaluation.
+
+                                                                 Related Registers
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_ADDR]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_LD]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_RD]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_MODE]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_ARRAY_RDY] */
+#else /* Word 0 - Little Endian */
+        uint64_t pcie_tst_fom_val : 12; /**< [ 11: 0](R/W/H) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 Internal:
+                                                                 Used to load the test raw figure of merit (raw FOM) array with test
+                                                                 FOM values to play back during PCIe Gen3/Gen4 training to check the
+                                                                 training preset selection logic and PCIE training logic.
+                                                                 An 11-bit by 32 word array is used to hold the test raw FOM values.
+                                                                 The array FOM values are initialized by writing the
+                                                                 [PCIE_TST_FOM_ADDR] field with a value
+                                                                 from 0x0 to 0x7F to index a location in the array, then writing the
+                                                                 [PCIE_TST_FOM_VAL] with a 12-bit quantity representing the raw
+                                                                 FOM value to be written to the array location, then writing the
+                                                                 [PCIE_TST_FOM_LD] bit to 1 to write
+                                                                 the raw FOM 12-bit value to the array, and then writing the
+                                                                 [PCIE_TST_FOM_LD] bit to 0 to complete
+                                                                 array write operation.
+                                                                 Before writing the array software should poll the
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_ARRAY_RDY] and wait for
+                                                                 [PCIE_TST_ARRAY_RDY] field to be set to 1 before reading or writing
+                                                                 the test FOM array. Also write
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_MODE] to 0.
+
+                                                                 Each array location is written with the desired raw FOM value following
+                                                                 these steps.
+
+                                                                 After all array locations are written, the array locations can be read
+                                                                 back. Write the [PCIE_TST_FOM_ADDR] to point
+                                                                 to the desired array location, next write
+                                                                 [PCIE_TST_FOM_RD] to 1 to enable read back mode.
+                                                                 Read the [PCIE_TST_FOM_VAL] field to readback the 12-bit test raw FOM
+                                                                 value from the array. Finally write
+                                                                 [PCIE_TST_FOM_RD] to 0 to disable read back mode.
+
+                                                                 To enable the PCI Express Test FOM array during PCIe Gen3/Gen4 link training
+                                                                 write [PCIE_TST_FOM_EN] to 1. Note prior to
+                                                                 writing [PCIE_TST_FOM_EN] to 1, ensure that
+                                                                 [PCIE_TST_FOM_RD] is cleared to 0 and
+                                                                 [PCIE_TST_FOM_LD] is cleared to 0.
+
+                                                                 During PCIe Gen3/Gen4 link training each time a Preset receiver evaluation
+                                                                 request is received the training logic will return the 12-bit raw FOM
+                                                                 from the current test FOM array location to the PIPE PCS logic and then
+                                                                 move to the next test FOM array location. The test FOM array always
+                                                                 starts at location 0x0 and increments to the next location in the FOM
+                                                                 array after each preset evaluation.
+
+                                                                 Related Registers
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_ADDR]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_LD]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_RD]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_MODE]
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_ARRAY_RDY] */
+        uint64_t pcie_tst_fom_addr : 7; /**< [ 18: 12](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL]. */
+        uint64_t pcie_tst_fom_ld : 1; /**< [ 19: 19](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL]. */
+        uint64_t pcie_tst_fom_rd : 1; /**< [ 20: 20](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL]. */
+        uint64_t pcie_tst_fom_en : 1; /**< [ 21: 21](R/W) PCIe test figure of merit array enable. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL]. */
+        uint64_t pcie_tst_fom_mode : 1; /**< [ 22: 22](R/W) PCIe test FOM array mode. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL].
+
+                                                                 Internal:
+                                                                 PCIe test FOM array mode. For verification diagnostic use only.
+                                                                 0x0 = Test FOM array is used to load and play back test FOMs for PCIe link
+                                                                 training.
+                                                                 0x1 = Test FOM array is used to capture raw FOMs during link training for
+                                                                 diagnostic verification. */
+        uint64_t pcie_tst_array_rdy : 1; /**< [ 23: 23](RO/H) PCIe test FOM array ready. For verification diagnostic use only.
+                                                                 See [PCIE_TST_FOM_VAL].
+
+                                                                 Internal:
+                                                                 PCIe test FOM array ready. For verification diagnostic use only.
+                                                                 All entries of the PCIe test FOM array are cleared following release
+                                                                 of reset. When [PCIE_TST_ARRAY_RDY] is set to 1 the PCIe test FOM
+                                                                 array is ready and can be used for PCIe training testing. Do not
+                                                                 read or write the PCIe test FOM array while [PCIE_TST_ARRAY_RDY] is
+                                                                 cleared to 0. When the GSER QLM is released from reset the
+                                                                 [PCIE_TST_ARRAY_RDY] will transition from 0 to 1 after 128 service
+                                                                 clock cycles. */
+        uint64_t pcie_pre_dir : 2; /**< [ 25: 24](RO/H) PCIe direction change equalization pre (C-1) tap direction.
+                                                                 During PCIe Gen3/Gen4 link training using direction change equalization
+                                                                 the [PCIE_PRE_DIR] field reflects the value of the pre (C-1) tap
+                                                                 direction for the link evaluation direction feedback.
+                                                                 0x0 = No change.
+                                                                 0x1 = Increment feedback for each coefficient.
+                                                                 0x2 = Decrement feedback for each coefficient.
+                                                                 0x3 = Reserved. */
+        uint64_t pcie_main_dir : 2; /**< [ 27: 26](RO/H) PCIe direction change equalization main (C0) tap direction.
+                                                                 During PCIe Gen3/Gen4 link training using direction change equalization
+                                                                 the [PCIE_MAIN_DIR] field reflects the value of the main (C0) tap
+                                                                 direction for the link evaluation direction feedback.
+                                                                 0x0 = No change.
+                                                                 0x1 = Increment feedback for each coefficient.
+                                                                 0x2 = Decrement feedback for each coefficient.
+                                                                 0x3 = Reserved.
+
+                                                                 The main direction will always be 0x0 no change. The PCIe
+                                                                 MAC computes the Main (C0) tap direction change. */
+        uint64_t pcie_post_dir : 2; /**< [ 29: 28](RO/H) PCIe direction change equalization post (C+1) tap direction.
+                                                                 During PCIe Gen3/Gen4 link training using direction change equalization
+                                                                 the [PCIE_POST_DIR] field reflects the value of the post (C+1) tap
+                                                                 direction for the link evaluation direction feedback.
+                                                                 0x0 = No change.
+                                                                 0x1 = Increment feedback for each coefficient.
+                                                                 0x2 = Decrement feedback for each coefficient.
+                                                                 0x3 = Reserved. */
+        uint64_t pcie_inv_pre_dir : 1; /**< [ 30: 30](R/W) PCIe direction change equalization invert pre tap direction.
+                                                                 When set reverses the Increment/Decrement direction
+                                                                 of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_PRE_DIR]
+                                                                 Tx tap direction feedback. For diagnostic use only. */
+        uint64_t pcie_inv_main_dir : 1; /**< [ 31: 31](R/W) PCIe direction change equalization invert main tap direction.
+                                                                 When set reverses the Increment/Decrement direction
+                                                                 of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAIN_DIR]
+                                                                 Tx tap direction feedback. For diagnostic use only. */
+        uint64_t pcie_inv_post_dir : 1; /**< [ 32: 32](R/W) PCIe direction change equalization invert post tap direction.
+                                                                 When set reverses the Increment/Decrement direction
+                                                                 of the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_POST_DIR]
+                                                                 Tx tap direction feedback. For diagnostic use only. */
+        uint64_t pcie_rev_dir_hints : 1; /**< [ 33: 33](R/W) When set, reverses the direction of the
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_POST_DIR],
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAIN_DIR], and
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_PRE_DIR]
+                                                                 Tx tap direction feedback hints. For diagnostic use only. */
+        uint64_t pcie_min_moves : 8; /**< [ 41: 34](R/W) PCIe minimum tap moves. During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode [PCIE_MIN_MOVES] sets the minimum number of tap moves to make
+                                                                 before signaling equalization complete. */
+        uint64_t pcie_max_moves : 8; /**< [ 49: 42](R/W) PCIe maximum tap moves. During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode [PCIE_MAX_MOVES] sets the maximum number of tap moves to make
+                                                                 before signaling equalization complete. */
+        uint64_t pcie_term_min_mvs : 1; /**< [ 50: 50](R/W) PCIe terminate direction change feedback equalization when it exceeds
+                                                                 the minimum number of tap moves specified in
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MIN_MOVES].
+                                                                 During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode [PCIE_MIN_MOVES] sets the minimum number of tap moves to make
+                                                                 before signaling equalization complete. When [PCIE_TERM_MIN_MVS] is set
+                                                                 to 1 the training logic will signal equalization complete by returning
+                                                                 C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+                                                                 also set to No Change. This will signal the termination of
+                                                                 PCIe Gen3/Gen4 equalization direction change feedback mode. */
+        uint64_t pcie_term_max_mvs : 1; /**< [ 51: 51](R/W) PCIe terminate direction change feedback equalization when it reaches
+                                                                 the maximum number of tap moves specified in
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_MAX_MOVES].
+                                                                 During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode [PCIE_MAX_MOVES] sets the maximum number of tap moves to make
+                                                                 before signaling equalization complete. When [PCIE_TERM_MAX_MVS] is set
+                                                                 to 1 the training logic will signal equalization complete by returning
+                                                                 C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+                                                                 also set to No Change. This will signal the termination of
+                                                                 PCIe Gen3/Gen4 equalization direction change feedback mode. */
+        uint64_t pcie_adtmout_sel : 2; /**< [ 53: 52](R/W) Selects the timeout value for the PCIe Gen3/Gen4 direction change feedback equalization.
+                                                                 This time-out timer value is only valid if
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_DISABLE]
+                                                                 is cleared to 0.
+
+                                                                 When GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST] is cleared to 0 the link training
+                                                                 time-out timer value is set by [PCIE_ADTMOUT_SEL] to the values shown.
+                                                                 0x0 = 5.24 milliseconds.
+                                                                 0x1 = 10.49 milliseconds.
+                                                                 0x2 = 13.1 milliseconds.
+                                                                 0x3 = 15.73 milliseconds.
+
+                                                                 When GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST] is set to 1 the link training
+                                                                 time-out timer value is set by [PCIE_ADTMOUT_SEL] to the values shown.
+                                                                 0x0 = 81.92 microseconds.
+                                                                 0x1 = 163.84 microseconds.
+                                                                 0x2 = 327.68 microseconds.
+                                                                 0x3 = 655.36 microseconds. */
+        uint64_t pcie_adtmout_disable : 1; /**< [ 54: 54](R/W) PCIe Gen3/Gen4 direction change feedback equalization timeout timer disable.
+                                                                 When [PCIE_ADTMOUT_DISABLE] is set to 1 the timeout timer that runs during
+                                                                 PCIe Gen3/Gen4 direction change feedback equalization is disabled. When
+                                                                 [PCIE_ADTMOUT_DISABLE] is cleared to 0 the equalization timeout timer is enabled.
+                                                                 The equalization timeout period is controlled by
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL] and
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST].
+                                                                 For diagnostic use only. */
+        uint64_t pcie_adtmout_fast : 1; /**< [ 55: 55](R/W) Reserved.
+                                                                 Internal:
+                                                                 For simulation use only. When set accelerates the PCIe Gen3/Gen4 direction change
+                                                                 feedback equalization timeout timer period. When set shortens the direction change
+                                                                 equalization time-out timer.
+                                                                 See the description for
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL].
+                                                                 For diagnostic use only. */
+        uint64_t pcie_term_adtmout : 1; /**< [ 56: 56](R/W) PCIe terminate direction change feedback equalization when it reaches
+                                                                 the equalization timeout specified in
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL].
+                                                                 During PCIe Gen3/Gen4 equalization direction change
+                                                                 feedback mode the equalization timeout period is controlled by
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_SEL] and
+                                                                 GSERN()_LANE()_TRAIN_7_BCFG[PCIE_ADTMOUT_FAST].
+                                                                 When [PCIE_TERM_ADTMOUT] is set and the equalization timeout timer expires
+                                                                 the equalization logic will signal equalization complete on the next
+                                                                 equalization request from the PCIe controller.
+                                                                 The training logic will signal equalization complete by returning
+                                                                 C(-1) TAP direction change set to No Change and C(+1) TAP direction change
+                                                                 also set to No Change. This will signal the termination of
+                                                                 PCIe Gen3/Gen4 equalization direction change feedback mode. */
+        uint64_t pcie_dir_eq_done : 1; /**< [ 57: 57](RO/H) PCIe direction change equalization done flag. During PCIe Gen3/Gen4
+                                                                 direction change equalization reflects the state of the direction
+                                                                 equalization done flag. When set to 1 indicates that the current
+                                                                 direction change equalization tap adjustment sequence is complete.
+                                                                 Reset automatically by hardware when PCIe Gen3/Gen4 equalization is
+                                                                 completed. */
+        uint64_t pcie_fasteq : 1; /**< [ 58: 58](R/W) Reserved.
+                                                                 Internal:
+                                                                 PCIe fast equalization mode for simulation.
+                                                                 When testing PCIe Gen3/Gen4 equalization in simulation setting [PCIE_FASTEQ]
+                                                                 to 1 will reduce the PCIe equalization response to 1.6 microseconds.
+                                                                 Can be used in conjunction with GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN].
+                                                                 If the GSERN()_LANE()_TRAIN_7_BCFG[PCIE_TST_FOM_EN] is not used the raw FOM
+                                                                 value returned will be zero. Further the [PCIE_FASTEQ] is set the link evaluation
+                                                                 feedback direction change for C(-1), C(0), and C(+1) will indicate no change.
+                                                                 For simulation use only. */
+        uint64_t pcie_fasteq_val : 5; /**< [ 63: 59](R/W) Reserved.
+                                                                 Internal:
+                                                                 PCIe fast equalization delay value for simulation.
+                                                                 Used in conjunction with GSERN()_LANE()_TRAIN_7_BCFG[PCIE_FASTEQ]
+                                                                 When testing PCIe Gen3/Gen4 equalization in simulation.
+                                                                 The default value of 0x6 programs the PCIe equalization FOM and
+                                                                 link evaluation direction change request acknowledgement handshake
+                                                                 to 1.6 microseconds to accelerate simulation modeling of the PCIe
+                                                                 Gen3/Gen4 equalization phases 2 and 3.
+                                                                 For simulation use only. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_gsernx_lanex_train_7_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_7_bcfg bdk_gsernx_lanex_train_7_bcfg_t;
+
+/* Return the physical CSR address of GSERN(a)_LANE(b)_TRAIN_7_BCFG for
+   QLM index (a) and lane index (b).  Valid only on CN9XXX with a <= 7 and
+   b <= 4; any other combination reports a fatal CSR error via
+   __bdk_csr_fatal() (which does not return a meaningful address). */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_7_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_7_BCFG(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+        return 0x87e090003220ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+    __bdk_csr_fatal("GSERNX_LANEX_TRAIN_7_BCFG", 2, a, b, 0, 0);
+}
+
+/* Companion accessor macros used by the generic BDK_CSR_* helpers. */
+#define typedef_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) bdk_gsernx_lanex_train_7_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) "GSERNX_LANEX_TRAIN_7_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_7_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_8_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 8
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_8_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_train_8_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_61_63 : 3;
+ uint64_t pcie_l_c1_e_adj_sgn : 1; /**< [ 60: 60](R/W) Sets the lower C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the lower C1_E sampler below the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the lower C1_E sampler above the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_L_C1_E_ADJ_STEP] during PCIE training.
+ For diagnostic use only. */
+ uint64_t pcie_u_c1_e_adj_sgn : 1; /**< [ 59: 59](R/W) Sets the upper C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the upper C1_E sampler above the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the upper C1_E sampler below the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_U_C1_E_ADJ_STEP] for PCIE training.
+ For diagnostic use only. */
+ uint64_t pcie_u_c1_e_adj_step : 5; /**< [ 58: 54](R/W) Sets the C1 E sampler voltage level during eye monitor sampling.
+ Typically [PCIE_U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15 mV above the C1 Q sampler voltage level.
+ Steps are in units of 5.08 mV per step.
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_U_C1_E_ADJ_SGN] for PCIE training.
+ For diagnostic use only. */
+ uint64_t pcie_adapt_axis : 3; /**< [ 53: 51](R/W) Sets the number of adaptation axes to use during receiver adaptation.
+ Typically set to 0x7 to enable all three adaptation axes. One-hot encoded.
+
+ Set to 0x1 to only enable axis 1 and disable axis 2 and axis 3.
+ Set to 0x3 to enable axis 1 and axis 2 but disable axis 3.
+ Set to 0x7 to enable axis 1, 2 and 3. (default.)
+ For diagnostic use only. */
+ uint64_t pcie_l_c1_e_adj_step : 5; /**< [ 50: 46](R/W) Sets the lower C1 E sampler voltage level during eye monitor sampling.
+ Typically set to 0x2 to position the eye monitor
+ error sampler at ~15mV below the C1 Q sampler voltage level.
+ Steps are in units of 5.08 mV per step.
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_L_C1_E_ADJ_SGN] during PCIE training.
+ For diagnostic use only. */
+ uint64_t pcie_ecnt_div_val : 4; /**< [ 45: 42](R/W) Error counter divider override value. See table below.
+ Divider is active when the [PCIE_ECNT_DIV_EN] is set.
+ For diagnostic use only.
+
+ 0x0 = No divider.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128.
+ 0x8 = Divide by 256.
+ 0x9 = Divide by 512.
+ 0xA = Divide by 1024.
+ 0xB = Divide by 2048.
+ 0xC = Divide by 4096.
+ 0xD = Divide by 8192.
+ 0xE = Divide by 16384.
+ 0xF = Divide by 32768. */
+ uint64_t pcie_ecnt_div_en : 1; /**< [ 41: 41](R/W) Error counter divider override enable.
+ For diagnostic use only. */
+ uint64_t pcie_eye_cnt_en : 1; /**< [ 40: 40](R/W) Eye cycle count enable. When set the number of eye monitor
+ cycles to sample and count during the PCIe Gen3/Gen4 training
+ figure of merit (FOM) calculation
+ is controlled by GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_VAL].
+ For diagnostic use only. */
+ uint64_t pcie_eye_cnt_val : 40; /**< [ 39: 0](R/W) PCIe eye count value Preset FOM. Sets the number of eye monitor cycles to sample/count
+ during the PCIe training figure of merit (FOM) calculation when
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_EN]=1.
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_eye_cnt_val : 40; /**< [ 39: 0](R/W) PCIe eye count value Preset FOM. Sets the number of eye monitor cycles to sample/count
+ during the PCIe training figure of merit (FOM) calculation when
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_EN]=1.
+ For diagnostic use only. */
+ uint64_t pcie_eye_cnt_en : 1; /**< [ 40: 40](R/W) Eye cycle count enable. When set the number of eye monitor
+ cycles to sample and count during the PCIe Gen3/Gen4 training
+ figure of merit (FOM) calculation
+ is controlled by GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_VAL].
+ For diagnostic use only. */
+ uint64_t pcie_ecnt_div_en : 1; /**< [ 41: 41](R/W) Error counter divider override enable.
+ For diagnostic use only. */
+ uint64_t pcie_ecnt_div_val : 4; /**< [ 45: 42](R/W) Error counter divider override value. See table below.
+ Divider is active when the [PCIE_ECNT_DIV_EN] is set.
+ For diagnostic use only.
+
+ 0x0 = No divider.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128.
+ 0x8 = Divide by 256.
+ 0x9 = Divide by 512.
+ 0xA = Divide by 1024.
+ 0xB = Divide by 2048.
+ 0xC = Divide by 4096.
+ 0xD = Divide by 8192.
+ 0xE = Divide by 16384.
+ 0xF = Divide by 32768. */
+ uint64_t pcie_l_c1_e_adj_step : 5; /**< [ 50: 46](R/W) Sets the lower C1 E sampler voltage level during eye monitor sampling.
+ Typically set to 0x2 to position the eye monitor
+ error sampler at ~15mV below the C1 Q sampler voltage level.
+ Steps are in units of 5.08 mV per step.
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_L_C1_E_ADJ_SGN] during PCIE training.
+ For diagnostic use only. */
+ uint64_t pcie_adapt_axis : 3; /**< [ 53: 51](R/W) Sets the number of adaptation axes to use during receiver adaptation.
+ Typically set to 0x7 to enable all three adaptation axes. One-hot encoded.
+
+ Set to 0x1 to only enable axis 1 and disable axis 2 and axis 3.
+ Set to 0x3 to enable axis 1 and axis 2 but disable axis 3.
+ Set to 0x7 to enable axis 1, 2 and 3. (default.)
+ For diagnostic use only. */
+ uint64_t pcie_u_c1_e_adj_step : 5; /**< [ 58: 54](R/W) Sets the C1 E sampler voltage level during eye monitor sampling.
+ Typically [PCIE_U_C1_E_ADJ_STEP] is set to 0x3 to position the eye monitor
+ error sampler at ~15 mV above the C1 Q sampler voltage level.
+ Steps are in units of 5.08 mV per step.
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_U_C1_E_ADJ_SGN] for PCIE training.
+ For diagnostic use only. */
+ uint64_t pcie_u_c1_e_adj_sgn : 1; /**< [ 59: 59](R/W) Sets the upper C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the upper C1_E sampler above the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the upper C1_E sampler below the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_U_C1_E_ADJ_STEP] for PCIE training.
+ For diagnostic use only. */
+ uint64_t pcie_l_c1_e_adj_sgn : 1; /**< [ 60: 60](R/W) Sets the lower C1 E sampler adjustment voltage offset sign.
+ 0 = The offset sign is positive
+ positioning the lower C1_E sampler below the eye C1_Q sampler.
+ 1 = The offset sign is negative
+ positioning the lower C1_E sampler above the eye C1_Q sampler.
+
+ Used in conjunction with
+ GSERN()_LANE()_TRAIN_8_BCFG[PCIE_L_C1_E_ADJ_STEP] during PCIE training.
+ For diagnostic use only. */
+ uint64_t reserved_61_63 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_8_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_8_bcfg bdk_gsernx_lanex_train_8_bcfg_t;
+
+/* Return the RSL physical address of GSERN(a)_LANE(b)_TRAIN_8_BCFG.
+ * Valid only on CN9XXX parts with a <= 7 and b <= 4; any other combination
+ * reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_8_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_8_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003230ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_8_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) bdk_gsernx_lanex_train_8_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) "GSERNX_LANEX_TRAIN_8_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_8_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_train_9_bcfg
+ *
+ * GSER Lane Training Base Configuration Register 9
+ * This register controls settings for lane training.
+ */
+union bdk_gsernx_lanex_train_9_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_train_9_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_59_63 : 5;
+ uint64_t pcie_dir_fom_en : 1; /**< [ 58: 58](R/W) Enable PCIe Gen3 and Gen4 equalization direction change minimum FOM for termination.
+ During PCIe Gen3 and Gen4 equalization using the direction change method
+ the GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_THRS] field sets the minimum threshold
+ for the raw 12-bit FOM value that when exceeded will terminate direction change
+ equalization.
+ [PCIE_DIR_FOM_EN] must be set to 1 to allow the direction change state machine
+ to terminate equalization when the measured raw FOM has exceeded the value in the
+ GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_THRS] field.
+ For diagnostic use only. */
+ uint64_t pcie_dir_fom_thrs : 12; /**< [ 57: 46](R/W) PCIe Gen3 and Gen4 equalization direction change FOM threshold for termination.
+ During PCIe Gen3 and Gen4 equalization using the direction change method
+ [PCIE_DIR_FOM_THRS] sets the minimum threshold for the raw 12-bit FOM
+ value that when exceeded will terminate direction change equalization.
+ The GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_EN] field must be set to 1 to
+ allow the direction change state machine to terminate equalization when the
+ raw FOM has exceeded the value in [PCIE_DIR_FOM_THRS].
+ For diagnostic use only. */
+ uint64_t pcie_dir_ecnt_div_val : 4; /**< [ 45: 42](R/W) Error counter divider override value. See table below.
+ Divider is active when the [PCIE_DIR_ECNT_DIV_EN] is set.
+ Used when direction change equalization is enabled.
+ For diagnostic use only.
+
+ 0x0 = No divider.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128.
+ 0x8 = Divide by 256.
+ 0x9 = Divide by 512.
+ 0xA = Divide by 1024.
+ 0xB = Divide by 2048.
+ 0xC = Divide by 4096.
+ 0xD = Divide by 8192.
+ 0xE = Divide by 16384.
+ 0xF = Divide by 32768. */
+ uint64_t pcie_dir_ecnt_div_en : 1; /**< [ 41: 41](R/W) Error counter divider override enable.
+ Used when direction change equalization is enabled.
+ For diagnostic use only. */
+ uint64_t pcie_dir_eye_cnt_en : 1; /**< [ 40: 40](R/W) Eye cycle count enable. When set the number of eye monitor
+ cycles to sample and count during the PCIe Gen3/Gen4 training
+ figure of merit (FOM) calculation
+ is controlled by GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_EYE_CNT_VAL].
+ Used when direction change equalization is enabled.
+ For diagnostic use only. */
+ uint64_t pcie_dir_eye_cnt_val : 40; /**< [ 39: 0](R/W) PCIe eye count value in direction change mode. Sets the number of eye monitor cycles to
+ sample/count during the PCIe training figure of merit (FOM) calculation when
+ GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_EYE_CNT_EN]=1.
+ See GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_VAL]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pcie_dir_eye_cnt_val : 40; /**< [ 39: 0](R/W) PCIe eye count value in direction change mode. Sets the number of eye monitor cycles to
+ sample/count during the PCIe training figure of merit (FOM) calculation when
+ GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_EYE_CNT_EN]=1.
+ See GSERN()_LANE()_TRAIN_8_BCFG[PCIE_EYE_CNT_VAL]. */
+ uint64_t pcie_dir_eye_cnt_en : 1; /**< [ 40: 40](R/W) Eye cycle count enable. When set the number of eye monitor
+ cycles to sample and count during the PCIe Gen3/Gen4 training
+ figure of merit (FOM) calculation
+ is controlled by GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_EYE_CNT_VAL].
+ Used when direction change equalization is enabled.
+ For diagnostic use only. */
+ uint64_t pcie_dir_ecnt_div_en : 1; /**< [ 41: 41](R/W) Error counter divider override enable.
+ Used when direction change equalization is enabled.
+ For diagnostic use only. */
+ uint64_t pcie_dir_ecnt_div_val : 4; /**< [ 45: 42](R/W) Error counter divider override value. See table below.
+ Divider is active when the [PCIE_DIR_ECNT_DIV_EN] is set.
+ Used when direction change equalization is enabled.
+ For diagnostic use only.
+
+ 0x0 = No divider.
+ 0x1 = Divide by 2.
+ 0x2 = Divide by 4.
+ 0x3 = Divide by 8.
+ 0x4 = Divide by 16.
+ 0x5 = Divide by 32.
+ 0x6 = Divide by 64.
+ 0x7 = Divide by 128.
+ 0x8 = Divide by 256.
+ 0x9 = Divide by 512.
+ 0xA = Divide by 1024.
+ 0xB = Divide by 2048.
+ 0xC = Divide by 4096.
+ 0xD = Divide by 8192.
+ 0xE = Divide by 16384.
+ 0xF = Divide by 32768. */
+ uint64_t pcie_dir_fom_thrs : 12; /**< [ 57: 46](R/W) PCIe Gen3 and Gen4 equalization direction change FOM threshold for termination.
+ During PCIe Gen3 and Gen4 equalization using the direction change method
+ [PCIE_DIR_FOM_THRS] sets the minimum threshold for the raw 12-bit FOM
+ value that when exceeded will terminate direction change equalization.
+ The GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_EN] field must be set to 1 to
+ allow the direction change state machine to terminate equalization when the
+ raw FOM has exceeded the value in [PCIE_DIR_FOM_THRS].
+ For diagnostic use only. */
+ uint64_t pcie_dir_fom_en : 1; /**< [ 58: 58](R/W) Enable PCIe Gen3 and Gen4 equalization direction change minimum FOM for termination.
+ During PCIe Gen3 and Gen4 equalization using the direction change method
+ the GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_THRS] field sets the minimum threshold
+ for the raw 12-bit FOM value that when exceeded will terminate direction change
+ equalization.
+ [PCIE_DIR_FOM_EN] must be set to 1 to allow the direction change state machine
+ to terminate equalization when the measured raw FOM has exceeded the value in the
+ GSERN()_LANE()_TRAIN_9_BCFG[PCIE_DIR_FOM_THRS] field.
+ For diagnostic use only. */
+ uint64_t reserved_59_63 : 5;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_train_9_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_train_9_bcfg bdk_gsernx_lanex_train_9_bcfg_t;
+
+/* Return the RSL physical address of GSERN(a)_LANE(b)_TRAIN_9_BCFG.
+ * Valid only on CN9XXX parts with a <= 7 and b <= 4; any other combination
+ * reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_9_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TRAIN_9_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090003240ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TRAIN_9_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) bdk_gsernx_lanex_train_9_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) "GSERNX_LANEX_TRAIN_9_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TRAIN_9_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_1_bcfg
+ *
+ * GSER Lane TX Base Configuration Register 1
+ * lane transmitter configuration Register 1
+ */
+union bdk_gsernx_lanex_tx_1_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_tx_1_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_57_63 : 7;
+ uint64_t tx_acjtag : 1; /**< [ 56: 56](R/W) TBD */
+ uint64_t tx_dacj : 8; /**< [ 55: 48](R/W) ACJTAG block data bits (some redundant). */
+ uint64_t reserved_41_47 : 7;
+ uint64_t tx_enloop : 1; /**< [ 40: 40](R/W) Set to enable the DDR loopback mux in the custom transmitter to
+ send a copy of transmit data back into the receive path. */
+ uint64_t reserved_33_39 : 7;
+ uint64_t nvlink : 1; /**< [ 32: 32](R/W) Transmitter lower impedance termination control (43 ohm instead of 50 ohm). */
+ uint64_t reserved_26_31 : 6;
+ uint64_t rx_mod4 : 1; /**< [ 25: 25](R/W) Use PCS layer receive data path clock ratio of 16:1 or 32:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer receive clock ratio of 20:1 or 40:1.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[RX_MOD4]
+ together set the width of the parallel receive data path (pipe) in the
+ custom receiver. GSERN()_LANE()_TX_1_BCFG[RX_POST4] and
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] together control the clock ratio of the
+ serializer in the custom receiver.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t rx_post4 : 1; /**< [ 24: 24](R/W) Use PCS layer receive data path clock ratio of 32:1 or 40:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer receive clock ratio of 16:1 or 20:1. (The
+ function is similar to [DIV20] but for the receiver instead of the
+ transmitter.)
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[RX_MOD4]
+ together set the width of the parallel receive data path (pipe) in the
+ custom receiver. GSERN()_LANE()_TX_1_BCFG[RX_POST4] and
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] together control the clock ratio of the
+ serializer in the custom receiver.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[DIV20]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t mod4 : 1; /**< [ 17: 17](R/W) Use PCS layer transmit data path clock ratio of 16:1 or 32:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer transmit clock ratio of 20:1 or 40:1.
+
+ Should be programed as desired before sequencing the transmitter reset
+ state machine.
+
+ GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4] together set
+ the width of the parallel transmit data path (pipe) in the custom
+ transmitter. GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ together control the clock ratio of the serializer in the custom
+ transmitter.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t div20 : 1; /**< [ 16: 16](R/W) Use PCS layer transmit data path clock ratio of 32:1 or 40:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer transmit clock ratio of 16:1 or 20:1.
+
+ Should be programed as desired before sequencing the transmitter reset
+ state machine.
+
+ GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4] together set
+ the width of the parallel transmit data path (pipe) in the custom
+ transmitter. GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ together control the clock ratio of the serializer in the custom
+ transmitter.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[DIV20]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t tx_enfast : 1; /**< [ 8: 8](R/W) Enable fast slew on the TX preamp output. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t tx_encm : 1; /**< [ 0: 0](R/W) Enable common mode correction in the transmitter. */
+#else /* Word 0 - Little Endian */
+ uint64_t tx_encm : 1; /**< [ 0: 0](R/W) Enable common mode correction in the transmitter. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t tx_enfast : 1; /**< [ 8: 8](R/W) Enable fast slew on the TX preamp output. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t div20 : 1; /**< [ 16: 16](R/W) Use PCS layer transmit data path clock ratio of 32:1 or 40:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer transmit clock ratio of 16:1 or 20:1.
+
+ Should be programed as desired before sequencing the transmitter reset
+ state machine.
+
+ GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4] together set
+ the width of the parallel transmit data path (pipe) in the custom
+ transmitter. GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ together control the clock ratio of the serializer in the custom
+ transmitter.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[DIV20]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t mod4 : 1; /**< [ 17: 17](R/W) Use PCS layer transmit data path clock ratio of 16:1 or 32:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer transmit clock ratio of 20:1 or 40:1.
+
+ Should be programed as desired before sequencing the transmitter reset
+ state machine.
+
+ GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4] together set
+ the width of the parallel transmit data path (pipe) in the custom
+ transmitter. GSERN()_LANE()_TX_1_BCFG[DIV20] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ together control the clock ratio of the serializer in the custom
+ transmitter.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t reserved_18_23 : 6;
+ uint64_t rx_post4 : 1; /**< [ 24: 24](R/W) Use PCS layer receive data path clock ratio of 32:1 or 40:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer receive clock ratio of 16:1 or 20:1. (The
+ function is similar to [DIV20] but for the receiver instead of the
+ transmitter.)
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[RX_MOD4]
+ together set the width of the parallel receive data path (pipe) in the
+ custom receiver. GSERN()_LANE()_TX_1_BCFG[RX_POST4] and
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] together control the clock ratio of the
+ serializer in the custom receiver.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[DIV20]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t rx_mod4 : 1; /**< [ 25: 25](R/W) Use PCS layer receive data path clock ratio of 16:1 or 32:1
+ (serdes-data-rate:PCS-layer-clock-frequency) when set to 1. When set
+ to 0, use PCS layer receive clock ratio of 20:1 or 40:1.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_POST4] and GSERN()_LANE()_TX_1_BCFG[RX_MOD4]
+ together set the width of the parallel receive data path (pipe) in the
+ custom receiver. GSERN()_LANE()_TX_1_BCFG[RX_POST4] and
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] together control the clock ratio of the
+ serializer in the custom receiver.
+
+ GSERN()_LANE()_TX_1_BCFG[RX_MOD4] and GSERN()_LANE()_TX_1_BCFG[MOD4]
+ would normally be set to the same value to use the transmitter and
+ receiver at the same PCS clock ratio. */
+ uint64_t reserved_26_31 : 6;
+ uint64_t nvlink : 1; /**< [ 32: 32](R/W) Transmitter lower impedance termination control (43 ohm instead of 50 ohm). */
+ uint64_t reserved_33_39 : 7;
+ uint64_t tx_enloop : 1; /**< [ 40: 40](R/W) Set to enable the DDR loopback mux in the custom transmitter to
+ send a copy of transmit data back into the receive path. */
+ uint64_t reserved_41_47 : 7;
+ uint64_t tx_dacj : 8; /**< [ 55: 48](R/W) ACJTAG block data bits (some redundant). */
+ uint64_t tx_acjtag : 1; /**< [ 56: 56](R/W) TBD */
+ uint64_t reserved_57_63 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_1_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_1_bcfg bdk_gsernx_lanex_tx_1_bcfg_t;
+
+/* Return the RSL physical address of GSERN(a)_LANE(b)_TX_1_BCFG.
+ * Valid only on CN9XXX parts with a <= 7 and b <= 4; any other combination
+ * reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_TX_1_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_1_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000b40ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TX_1_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) bdk_gsernx_lanex_tx_1_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) "GSERNX_LANEX_TX_1_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_1_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_bsts
+ *
+ * GSER Lane TX Base Status Register
+ * lane transmitter status
+ */
+union bdk_gsernx_lanex_tx_bsts
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_tx_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_3_63 : 61;
+ uint64_t rxdetn : 1; /**< [ 2: 2](RO/H) Transmitter block detection of receiver termination presence,
+ low-side. Asserted indicates termination presence was
+ detected. Valid only if [RXDETCOMPLETE] is set. */
+ uint64_t rxdetp : 1; /**< [ 1: 1](RO/H) Transmitter block detection of receiver termination presence,
+ high-side. Asserted indicates termination presence was
+ detected. Valid only if [RXDETCOMPLETE] is set. */
+ uint64_t rxdetcomplete : 1; /**< [ 0: 0](RO/H) Receiver presence detection engine has completed. */
+#else /* Word 0 - Little Endian */
+ uint64_t rxdetcomplete : 1; /**< [ 0: 0](RO/H) Receiver presence detection engine has completed. */
+ uint64_t rxdetp : 1; /**< [ 1: 1](RO/H) Transmitter block detection of receiver termination presence,
+ high-side. Asserted indicates termination presence was
+ detected. Valid only if [RXDETCOMPLETE] is set. */
+ uint64_t rxdetn : 1; /**< [ 2: 2](RO/H) Transmitter block detection of receiver termination presence,
+ low-side. Asserted indicates termination presence was
+ detected. Valid only if [RXDETCOMPLETE] is set. */
+ uint64_t reserved_3_63 : 61;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_bsts bdk_gsernx_lanex_tx_bsts_t;
+
+/* Return the RSL physical address of GSERN(a)_LANE(b)_TX_BSTS.
+ * Valid only on CN9XXX parts with a <= 7 and b <= 4; any other combination
+ * reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_TX_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000b60ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TX_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_BSTS(a,b) bdk_gsernx_lanex_tx_bsts_t
+#define bustype_BDK_GSERNX_LANEX_TX_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_BSTS(a,b) "GSERNX_LANEX_TX_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_TX_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_drv2_bcfg
+ *
+ * GSER Lane TX Drive Override Base Configuration Register 2
+ * Upper limits on the allowed preemphasis and postemphasis values before translating to the
+ * raw transmitter control settings.
+ */
+union bdk_gsernx_lanex_tx_drv2_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_tx_drv2_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t cpost_limit : 5; /**< [ 12: 8](R/W) Upper limit for the postemphasis value. The valid range is 0x0 to 0x10. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t cpre_limit : 5; /**< [ 4: 0](R/W) Upper limit for the preemphasis value. The valid range is 0x0 to 0x10. */
+#else /* Word 0 - Little Endian */
+ uint64_t cpre_limit : 5; /**< [ 4: 0](R/W) Upper limit for the preemphasis value. The valid range is 0x0 to 0x10. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t cpost_limit : 5; /**< [ 12: 8](R/W) Upper limit for the postemphasis value. The valid range is 0x0 to 0x10. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_drv2_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_drv2_bcfg bdk_gsernx_lanex_tx_drv2_bcfg_t;
+
+/* Return the RSL physical address of GSERN(a)_LANE(b)_TX_DRV2_BCFG.
+ * Valid only on CN9XXX parts with a <= 7 and b <= 4; any other combination
+ * reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV2_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV2_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000b20ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TX_DRV2_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) bdk_gsernx_lanex_tx_drv2_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) "GSERNX_LANEX_TX_DRV2_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_DRV2_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_drv_bcfg
+ *
+ * GSER Lane TX Drive Override Base Configuration Register
+ * Lane transmitter drive override values and enables configuration
+ * Register. Default values are chosen to provide the "idle" configuration
+ * when the lane reset state machine completes. The transmitter "idle"
+ * configuration drives the output to mid-rail with 2 pull-up and 2
+ * pull-down legs active.
+ *
+ * These value fields in this register are in effect when the
+ * corresponding enable fields ([EN_TX_DRV], [EN_TX_CSPD], and
+ * GSERN()_LANE()_TX_DRV_BCFG[EN_TX_BS]) are set.
+ */
+union bdk_gsernx_lanex_tx_drv_bcfg
+{
+ uint64_t u; /* Raw 64-bit register value. */
+ struct bdk_gsernx_lanex_tx_drv_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tx_cspd : 1; /**< [ 63: 63](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+ uint64_t reserved_62 : 1;
+ uint64_t tx_bs : 6; /**< [ 61: 56](R/W) TX bias/swing selection. This setting only takes effect if [EN_TX_BS]
+ is asserted and [TX_CSPD] is deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t en_tx_cspd : 1; /**< [ 50: 50](R/W) Enables use of [TX_CSPD] as an override to
+ set the current source power down control of the transmitter. */
+ uint64_t en_tx_bs : 1; /**< [ 49: 49](R/W) Enables use of [TX_BS] as an override to
+ set the bias/swing control of the transmitter. */
+ uint64_t en_tx_drv : 1; /**< [ 48: 48](R/W) Enables use of the transmit drive strength fields in this register as overrides
+ to explicitly set the base transmitter controls. (All fields except [TX_BS] and
+ [TX_CSPD], which have separate override enables.) For diagnostic use only. */
+ uint64_t reserved_42_47 : 6;
+ uint64_t muxpost : 2; /**< [ 41: 40](R/W) Postcursor mux controls. */
+ uint64_t cpostb : 3; /**< [ 39: 37](R/W) Post cursor block 1 coefficient. */
+ uint64_t cposta : 3; /**< [ 36: 34](R/W) Post cursor block 0 coefficient. */
+ uint64_t enpost : 2; /**< [ 33: 32](R/W) Postcursor block enables. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t muxmain : 4; /**< [ 26: 23](R/W) Main mux controls (some redundant). */
+ uint64_t cmaind : 3; /**< [ 22: 20](R/W) Main block 3 coefficient. */
+ uint64_t enmain : 4; /**< [ 19: 16](R/W) Main block enables. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t muxpre : 2; /**< [ 9: 8](R/W) Precursor mux controls. */
+ uint64_t cpreb : 3; /**< [ 7: 5](R/W) Precursor Block 1 coefficient. */
+ uint64_t cprea : 3; /**< [ 4: 2](R/W) Precursor Block 0 coefficient. */
+ uint64_t enpre : 2; /**< [ 1: 0](R/W) Precursor block enables. */
+#else /* Word 0 - Little Endian */
+ uint64_t enpre : 2; /**< [ 1: 0](R/W) Precursor block enables. */
+ uint64_t cprea : 3; /**< [ 4: 2](R/W) Precursor Block 0 coefficient. */
+ uint64_t cpreb : 3; /**< [ 7: 5](R/W) Precursor Block 1 coefficient. */
+ uint64_t muxpre : 2; /**< [ 9: 8](R/W) Precursor mux controls. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t enmain : 4; /**< [ 19: 16](R/W) Main block enables. */
+ uint64_t cmaind : 3; /**< [ 22: 20](R/W) Main block 3 coefficient. */
+ uint64_t muxmain : 4; /**< [ 26: 23](R/W) Main mux controls (some redundant). */
+ uint64_t reserved_27_31 : 5;
+ uint64_t enpost : 2; /**< [ 33: 32](R/W) Postcursor block enables. */
+ uint64_t cposta : 3; /**< [ 36: 34](R/W) Post cursor block 0 coefficient. */
+ uint64_t cpostb : 3; /**< [ 39: 37](R/W) Post cursor block 1 coefficient. */
+ uint64_t muxpost : 2; /**< [ 41: 40](R/W) Postcursor mux controls. */
+ uint64_t reserved_42_47 : 6;
+ uint64_t en_tx_drv : 1; /**< [ 48: 48](R/W) Enables use of the transmit drive strength fields in this register as overrides
+ to explicitly set the base transmitter controls. (All fields except [TX_BS] and
+ [TX_CSPD], which have separate override enables.) For diagnostic use only. */
+ uint64_t en_tx_bs : 1; /**< [ 49: 49](R/W) Enables use of [TX_BS] as an override to
+ set the bias/swing control of the transmitter. */
+ uint64_t en_tx_cspd : 1; /**< [ 50: 50](R/W) Enables use of [TX_CSPD] as an override to
+ set the current source power down control of the transmitter. */
+ uint64_t reserved_51_55 : 5;
+ uint64_t tx_bs : 6; /**< [ 61: 56](R/W) TX bias/swing selection. This setting only takes effect if [EN_TX_BS]
+ is asserted and [TX_CSPD] is deasserted; with [TX_CSPD] asserted the
+ bias/swing control setting seen in the analog bias generator is zero.
+
+ Typical override values would be:
+ 42 = Nominal 1.0V p-p transmit amplitude.
+ 52 = Nominal 1.2V p-p transmit amplitude.
+
+ The maximum usable value without transmitted waveform distortion depends
+ primarily on voltage, secondarily on process corner and temperature, but is at
+ least 52. There is no minimum setting based on transmitter distortion, only
+ that set by the receiver. */
+ uint64_t reserved_62 : 1;
+ uint64_t tx_cspd : 1; /**< [ 63: 63](R/W) Power-down control for a second TX bias/swing leg with the same
+ weight as TX_BS[3]. Normally this field is left deasserted to
+ provide a minimum transmit amplitude. Asserting [TX_CSPD] will turn
+ off all legs of the bias/swing generator for lower standby power. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_drv_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_drv_bcfg bdk_gsernx_lanex_tx_drv_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000b10ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TX_DRV_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) bdk_gsernx_lanex_tx_drv_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) "GSERNX_LANEX_TX_DRV_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_DRV_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_drv_bsts
+ *
+ * GSER Lane TX Drive Base Status Register
+ * Lane transmitter drive setup status, i.e., settings which the
+ * transmitter is actually using. During a transmitter receiver presence
+ * detection sequence the fields of this register not reliable, i.e.,
+ * following a write of GSERN()_LANE()_TX_RXD_BCFG[TRIGGER] to one this register is not
+ * reliable until after GSERN()_LANE()_TX_BSTS[RXDETCOMPLETE] reads as one.
+ */
+union bdk_gsernx_lanex_tx_drv_bsts
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_tx_drv_bsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tx_cspd : 1; /**< [ 63: 63](RO/H) TX current source power down (cspd) setting in use, a second
+ bias/swing leg with the same weight as TX_BS[3], but with opposite
+ polarity for the control signal. */
+ uint64_t reserved_62 : 1;
+ uint64_t tx_bs : 6; /**< [ 61: 56](RO/H) TX bias/swing selection in use. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t tx_invalid : 1; /**< [ 51: 51](RO/H) Invalid status generated by the gser_lane_pnr_txdrv_remap module
+ indicating an invalid combination of (cpre, cpost, cmain, bit-stuff)
+ was requested. */
+ uint64_t reserved_42_50 : 9;
+ uint64_t muxpost : 2; /**< [ 41: 40](RO/H) Postcursor mux controls in use. */
+ uint64_t cpostb : 3; /**< [ 39: 37](RO/H) Post cursor block 1 coefficient in use. */
+ uint64_t cposta : 3; /**< [ 36: 34](RO/H) Post cursor block 0 coefficient in use. */
+ uint64_t enpost : 2; /**< [ 33: 32](RO/H) Postcursor block enables in use. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t muxmain : 4; /**< [ 26: 23](RO/H) Main mux controls (some redundant) in use. */
+ uint64_t cmaind : 3; /**< [ 22: 20](RO/H) Main block 3 coefficient in use. */
+ uint64_t enmain : 4; /**< [ 19: 16](RO/H) Main block enables in use. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t muxpre : 2; /**< [ 9: 8](RO/H) Precursor mux controls in use. */
+ uint64_t cpreb : 3; /**< [ 7: 5](RO/H) Precursor Block 1 coefficient in use. */
+ uint64_t cprea : 3; /**< [ 4: 2](RO/H) Precursor Block 0 coefficient in use. */
+ uint64_t enpre : 2; /**< [ 1: 0](RO/H) Precursor block enables in use. */
+#else /* Word 0 - Little Endian */
+ uint64_t enpre : 2; /**< [ 1: 0](RO/H) Precursor block enables in use. */
+ uint64_t cprea : 3; /**< [ 4: 2](RO/H) Precursor Block 0 coefficient in use. */
+ uint64_t cpreb : 3; /**< [ 7: 5](RO/H) Precursor Block 1 coefficient in use. */
+ uint64_t muxpre : 2; /**< [ 9: 8](RO/H) Precursor mux controls in use. */
+ uint64_t reserved_10_15 : 6;
+ uint64_t enmain : 4; /**< [ 19: 16](RO/H) Main block enables in use. */
+ uint64_t cmaind : 3; /**< [ 22: 20](RO/H) Main block 3 coefficient in use. */
+ uint64_t muxmain : 4; /**< [ 26: 23](RO/H) Main mux controls (some redundant) in use. */
+ uint64_t reserved_27_31 : 5;
+ uint64_t enpost : 2; /**< [ 33: 32](RO/H) Postcursor block enables in use. */
+ uint64_t cposta : 3; /**< [ 36: 34](RO/H) Post cursor block 0 coefficient in use. */
+ uint64_t cpostb : 3; /**< [ 39: 37](RO/H) Post cursor block 1 coefficient in use. */
+ uint64_t muxpost : 2; /**< [ 41: 40](RO/H) Postcursor mux controls in use. */
+ uint64_t reserved_42_50 : 9;
+ uint64_t tx_invalid : 1; /**< [ 51: 51](RO/H) Invalid status generated by the gser_lane_pnr_txdrv_remap module
+ indicating an invalid combination of (cpre, cpost, cmain, bit-stuff)
+ was requested. */
+ uint64_t reserved_52_55 : 4;
+ uint64_t tx_bs : 6; /**< [ 61: 56](RO/H) TX bias/swing selection in use. */
+ uint64_t reserved_62 : 1;
+ uint64_t tx_cspd : 1; /**< [ 63: 63](RO/H) TX current source power down (cspd) setting in use, a second
+ bias/swing leg with the same weight as TX_BS[3], but with opposite
+ polarity for the control signal. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_drv_bsts_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_drv_bsts bdk_gsernx_lanex_tx_drv_bsts_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV_BSTS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_DRV_BSTS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000b30ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TX_DRV_BSTS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) bdk_gsernx_lanex_tx_drv_bsts_t
+#define bustype_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) "GSERNX_LANEX_TX_DRV_BSTS"
+#define device_bar_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_DRV_BSTS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_tx_rxd_bcfg
+ *
+ * GSER Lane TX Receive Presence Detector Base Configuration Register
+ * The lane transmitter receiver presence detector controls are in this
+ * register. When the transmitter's receiver presence detection sequencer
+ * is triggered (by asserting [TRIGGER]), the transmitter needs to
+ * be in a weak idle state, i.e., all fields of GSERN()_LANE()_TX_DRV_BSTS
+ * should reflect the reset default values of the same fields in
+ * GSERN()_LANE()_TX_DRV_BCFG.
+ */
+union bdk_gsernx_lanex_tx_rxd_bcfg
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_tx_rxd_bcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_34_63 : 30;
+ uint64_t ovrride_det_en : 1; /**< [ 33: 33](R/W) Enable use of the [OVRRIDE_DET] value for the result of PCIe transmitter
+ receiver presense detection instead of the normal measured result.
+
+ Internal:
+ When asserted, this control will also suppress the normal pull-down and release
+ of the transmit signals that takes place during receiver presence detaction. */
+ uint64_t ovrride_det : 1; /**< [ 32: 32](R/W) When enabled by [OVRRIDE_DET_EN], the PCIe transmitter receiver presence
+ detector will use this value instead of that measured by the functional
+ circuit. This provides a mechanism to force recognition of a known number of
+ lanes in the link independent of the normal receiver presence detection
+ procedure. */
+ uint64_t reserved_30_31 : 2;
+ uint64_t release_wait : 6; /**< [ 29: 24](R/W) Wait time after asserting rxd_samp and rxd_samn to capture the
+ result before releasing tx_rxd, rxd_samp, and rxd_samn,
+ expressed as a count of txdivclk cycles minus one, e.g., set to 0
+ to get 1 cycles. Typically set for 8 ns, or a count of 1 cycle when
+ using for PCIe gen1 (125 MHz txdivclk). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t sample_wait : 6; /**< [ 21: 16](R/W) Wait time after asserting tx_rxd before asserting rxd_samp and
+ rxd_samn to sample the result, expressed as a count of lane PLL
+ reference clock cycles minus 1, e.g., set to 1 to get 2 cycles.
+ Typically set for 16 ns, or a count of 2 cycles for PCIe gen1
+ (125 MHz txdivclk). */
+ uint64_t reserved_12_15 : 4;
+ uint64_t tx_disable : 1; /**< [ 11: 11](R/W) Disable all transmitter eqdrv blocks during the receiver-present
+ detection sequence. When asserted, this temporarily overrides the
+ enmain, empre, and enpost settings in
+ GSERN()_LANE()_TX_DRV_BCFG, tri-stating the transmitter
+ during the sequence instead of leaving it in weak idle. */
+ uint64_t samn_en : 1; /**< [ 10: 10](R/W) Enable sampling of the transmitter's receiver termination presence
+ detector on the padn output. */
+ uint64_t samp_en : 1; /**< [ 9: 9](R/W) Enable sampling of the transmitter's receiver termination presence
+ detector on the padp output. */
+ uint64_t rxd_en : 1; /**< [ 8: 8](R/W) Enable assertion of the RXD pulldown on the (common) termination
+ point for differential pair prior to sampling the pad voltages. Set
+ to one for the normal detection sequence to work correctly. Setting
+ to zero is a verification hook to allow sampling the pad values
+ without first pulling the pads low. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t trigger : 1; /**< [ 0: 0](R/W/H) Enable the sequencer which exercises the transmitter's receiver
+ termination presence detection. An asserting edge will start the
+ sequencer. This field self-clears when the sequence has completed. */
+#else /* Word 0 - Little Endian */
+ uint64_t trigger : 1; /**< [ 0: 0](R/W/H) Enable the sequencer which exercises the transmitter's receiver
+ termination presence detection. An asserting edge will start the
+ sequencer. This field self-clears when the sequence has completed. */
+ uint64_t reserved_1_7 : 7;
+ uint64_t rxd_en : 1; /**< [ 8: 8](R/W) Enable assertion of the RXD pulldown on the (common) termination
+ point for differential pair prior to sampling the pad voltages. Set
+ to one for the normal detection sequence to work correctly. Setting
+ to zero is a verification hook to allow sampling the pad values
+ without first pulling the pads low. */
+ uint64_t samp_en : 1; /**< [ 9: 9](R/W) Enable sampling of the transmitter's receiver termination presence
+ detector on the padp output. */
+ uint64_t samn_en : 1; /**< [ 10: 10](R/W) Enable sampling of the transmitter's receiver termination presence
+ detector on the padn output. */
+ uint64_t tx_disable : 1; /**< [ 11: 11](R/W) Disable all transmitter eqdrv blocks during the receiver-present
+ detection sequence. When asserted, this temporarily overrides the
+ enmain, empre, and enpost settings in
+ GSERN()_LANE()_TX_DRV_BCFG, tri-stating the transmitter
+ during the sequence instead of leaving it in weak idle. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t sample_wait : 6; /**< [ 21: 16](R/W) Wait time after asserting tx_rxd before asserting rxd_samp and
+ rxd_samn to sample the result, expressed as a count of lane PLL
+ reference clock cycles minus 1, e.g., set to 1 to get 2 cycles.
+ Typically set for 16 ns, or a count of 2 cycles for PCIe gen1
+ (125 MHz txdivclk). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t release_wait : 6; /**< [ 29: 24](R/W) Wait time after asserting rxd_samp and rxd_samn to capture the
+ result before releasing tx_rxd, rxd_samp, and rxd_samn,
+ expressed as a count of txdivclk cycles minus one, e.g., set to 0
+ to get 1 cycles. Typically set for 8 ns, or a count of 1 cycle when
+ using for PCIe gen1 (125 MHz txdivclk). */
+ uint64_t reserved_30_31 : 2;
+ uint64_t ovrride_det : 1; /**< [ 32: 32](R/W) When enabled by [OVRRIDE_DET_EN], the PCIe transmitter receiver presence
+ detector will use this value instead of that measured by the functional
+ circuit. This provides a mechanism to force recognition of a known number of
+ lanes in the link independent of the normal receiver presence detection
+ procedure. */
+ uint64_t ovrride_det_en : 1; /**< [ 33: 33](R/W) Enable use of the [OVRRIDE_DET] value for the result of PCIe transmitter
+ receiver presense detection instead of the normal measured result.
+
+ Internal:
+ When asserted, this control will also suppress the normal pull-down and release
+ of the transmit signals that takes place during receiver presence detaction. */
+ uint64_t reserved_34_63 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_tx_rxd_bcfg_s cn; */
+};
+typedef union bdk_gsernx_lanex_tx_rxd_bcfg bdk_gsernx_lanex_tx_rxd_bcfg_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TX_RXD_BCFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TX_RXD_BCFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e090000b50ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TX_RXD_BCFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) bdk_gsernx_lanex_tx_rxd_bcfg_t
+#define bustype_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) "GSERNX_LANEX_TX_RXD_BCFG"
+#define device_bar_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TX_RXD_BCFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) gsern#_lane#_txdivclk_ctr
+ *
+ * GSER Lane TX Div Clock Cycle Counter Register
+ * A free-running counter of lane txdivclk cycles to enable rough confirmation of
+ * SerDes transmit data rate. Read the counter; wait some time, e.g., 100ms; read the
+ * counter; calculate frequency based on the difference in values during the known wait
+ * time and the programmed data path width.
+ */
+union bdk_gsernx_lanex_txdivclk_ctr
+{
+ uint64_t u;
+ struct bdk_gsernx_lanex_txdivclk_ctr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Running count of txdivclk cycles. */
+#else /* Word 0 - Little Endian */
+ uint64_t count : 64; /**< [ 63: 0](R/W/H) Running count of txdivclk cycles. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_gsernx_lanex_txdivclk_ctr_s cn; */
+};
+typedef union bdk_gsernx_lanex_txdivclk_ctr bdk_gsernx_lanex_txdivclk_ctr_t;
+
+static inline uint64_t BDK_GSERNX_LANEX_TXDIVCLK_CTR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_GSERNX_LANEX_TXDIVCLK_CTR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=7) && (b<=4)))
+ return 0x87e0900030b0ll + 0x1000000ll * ((a) & 0x7) + 0x10000ll * ((b) & 0x7);
+ __bdk_csr_fatal("GSERNX_LANEX_TXDIVCLK_CTR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) bdk_gsernx_lanex_txdivclk_ctr_t
+#define bustype_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) "GSERNX_LANEX_TXDIVCLK_CTR"
+#define device_bar_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) (a)
+#define arguments_BDK_GSERNX_LANEX_TXDIVCLK_CTR(a,b) (a),(b),-1,-1
+
+#endif /* __BDK_CSRS_GSERN_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-iobn.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-iobn.h
new file mode 100644
index 0000000000..afa4c7d9f6
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-iobn.h
@@ -0,0 +1,7054 @@
+#ifndef __BDK_CSRS_IOBN_H__
+#define __BDK_CSRS_IOBN_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium IOBN.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration iobn_bar_e
+ *
+ * IOBN Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_IOBN_BAR_E_IOBNX_PF_BAR0_CN8(a) (0x87e0f0000000ll + 0x1000000ll * (a))
+#define BDK_IOBN_BAR_E_IOBNX_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_IOBN_BAR_E_IOBNX_PF_BAR0_CN9(a) (0x87e0f0000000ll + 0x1000000ll * (a))
+#define BDK_IOBN_BAR_E_IOBNX_PF_BAR0_CN9_SIZE 0x100000ull
+#define BDK_IOBN_BAR_E_IOBNX_PF_BAR4(a) (0x87e0f0f00000ll + 0x1000000ll * (a))
+#define BDK_IOBN_BAR_E_IOBNX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration iobn_int_vec_e
+ *
+ * IOBN MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_IOBN_INT_VEC_E_INTS (0)
+#define BDK_IOBN_INT_VEC_E_INTS1 (1)
+
+/**
+ * Enumeration iobn_ncbi_ro_mod_e
+ *
+ * IOBN NCBI Relax Order Modification Enumeration
+ * Enumerates the controls for when CR's are allowed to pass PRs, see
+ * IOBN()_ARBID()_CTL[CRPPR_ENA].
+ */
+#define BDK_IOBN_NCBI_RO_MOD_E_BUS_CTL (0)
+#define BDK_IOBN_NCBI_RO_MOD_E_OFF (2)
+#define BDK_IOBN_NCBI_RO_MOD_E_ON (3)
+#define BDK_IOBN_NCBI_RO_MOD_E_RSVD (1)
+
+/**
+ * Enumeration iobn_psb_acc_e
+ *
+ * IOBN Power Serial Bus Accumulator Enumeration
+ * Enumerates the IOB accumulators for IOB slaves, which correspond to index {b} of
+ * PSBS_SYS()_ACCUM().
+ */
+#define BDK_IOBN_PSB_ACC_E_NCBI_RD_CMD_ACTIVE (0)
+#define BDK_IOBN_PSB_ACC_E_NCBI_WR_CMD_ACTIVE (1)
+#define BDK_IOBN_PSB_ACC_E_NCBO_CMD_ACTIVE (2)
+
+/**
+ * Enumeration iobn_psb_event_e
+ *
+ * IOBN Power Serial Bus Event Enumeration
+ * Enumerates the event numbers for IOB slaves, which correspond to index {b} of
+ * PSBS_SYS()_EVENT()_CFG.
+ */
+#define BDK_IOBN_PSB_EVENT_E_NCBI_CMD_ACTIVE_BUS0 (8)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_CMD_ACTIVE_BUS1 (9)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_CMD_ACTIVE_BUS2 (0xa)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_CMD_ACTIVE_BUS_RSV0 (0xb)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_DATA_ACTIVE_BUS0 (0xc)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_DATA_ACTIVE_BUS1 (0xd)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_DATA_ACTIVE_BUS2 (0xe)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_DATA_ACTIVE_BUS_RSV0 (0xf)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_RD_CMD_ACTIVE_BUS0 (0)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_RD_CMD_ACTIVE_BUS1 (1)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_RD_CMD_ACTIVE_BUS2 (2)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_RD_CMD_ACTIVE_BUS_RSV0 (3)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_WR_CMD_ACTIVE_BUS0 (4)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_WR_CMD_ACTIVE_BUS1 (5)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_WR_CMD_ACTIVE_BUS2 (6)
+#define BDK_IOBN_PSB_EVENT_E_NCBI_WR_CMD_ACTIVE_BUS_RSV0 (7)
+#define BDK_IOBN_PSB_EVENT_E_NCBO_CMD_ACTIVE_BUS0 (0x10)
+#define BDK_IOBN_PSB_EVENT_E_NCBO_CMD_ACTIVE_BUS1 (0x11)
+#define BDK_IOBN_PSB_EVENT_E_NCBO_CMD_ACTIVE_BUS2 (0x12)
+#define BDK_IOBN_PSB_EVENT_E_NCBO_CMD_ACTIVE_BUS_RSV0 (0x13)
+#define BDK_IOBN_PSB_EVENT_E_NCBO_DATA_ACTIVE_BUS0 (0x14)
+#define BDK_IOBN_PSB_EVENT_E_NCBO_DATA_ACTIVE_BUS1 (0x15)
+#define BDK_IOBN_PSB_EVENT_E_NCBO_DATA_ACTIVE_BUS2 (0x16)
+#define BDK_IOBN_PSB_EVENT_E_NCBO_DATA_ACTIVE_BUS_RSV0 (0x17)
+
+/**
+ * Register (RSL) iobn#_arbid#_ctl
+ *
+ * IOBN NCB Constant Registers
+ * This register set properties for each of the flat ARBIDs.
+ */
+union bdk_iobnx_arbidx_ctl
+{
+ uint64_t u;
+ struct bdk_iobnx_arbidx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_10_63 : 54;
+ uint64_t fast_ord : 1; /**< [ 9: 9](R/W) When set IOB will send commits for PR's to IOW as soon as PR is sent to mesh,
+ rather than waiting for ordering. This bit should only be set for non-PEM
+ devices */
+ uint64_t sow_dis : 1; /**< [ 8: 8](R/W) Disables the PCIe store widget for memory store performance. Does not affect
+ observable ordering. No impact on IO stores. For diagnostic use only.
+ 0 = Performance optimization on. Issue prefetches on stores to improve
+ store-store ordering.
+ 1 = Performance optimization off. No prefetches.
+
+ Internal:
+ The SOW is only available on the NCB2/256b devices which include PEMs, CPT,
+ DPI. The expectation is that CPT and DPI use the RelaxOrder bit so they will
+ only use the widget when the VA address CAM detects and promotes two
+ transactions to the same memory cacheline. */
+ uint64_t crppr_ena : 2; /**< [ 7: 6](R/W) Controls when a CR is allowed to pass a PR for NCBO and NCBI. Enumerated by IOBN_NCBI_RO_MOD_E. */
+ uint64_t prefetch_dis : 1; /**< [ 5: 5](R/W) Disables mesh prefetches. For diagnostic use only.
+ 0 = Store-store ordered transactions will issue prefetches before the second
+ store to improve performance.
+ 1 = No prefetches. */
+ uint64_t pr_iova_dis : 1; /**< [ 4: 4](R/W) PR queue IOVA comparison disable. For diagnostic use only.
+ 0 = PR will not pass a younger PR with the same IOVA.
+ 1 = PR may pass a younger PR with the same IOVA, if the relaxed ordering request
+ and [RO_DIS] bit allow it. */
+ uint64_t ro_dis : 1; /**< [ 3: 3](R/W) Disable relaxed ordering. For diagnostic use only.
+ 0 = Relaxed ordering is performed if the NCB device requests it.
+ 1 = IOB ignores the relaxed ordering request bit and treats all requests as
+ strictly ordered. */
+ uint64_t st_ld_ord : 1; /**< [ 2: 2](R/W) If enabled, NP queue loaded in order it arrives from NCBI. [ST_ST_ORD] should also be
+ set when this bit is set. NP queue will not pass PS queue.
+
+ Internal:
+ FIXME check meaning */
+ uint64_t st_st_ord : 1; /**< [ 1: 1](R/W) If enabled, PS queue used (ignore RO bit). Placed in order store arrives.
+ Internal:
+ FIXME check meaning */
+ uint64_t ld_ld_ord : 1; /**< [ 0: 0](R/W) Load-load ordering. For diagnostic use only.
+ 0 = NPR may pass NPR under some cases. The ordering is based on SMMU completion
+ ordering.
+ 1 = NPR never passes NPR; the NPR ordering is based strictly on NCB arrival order.
+ This may harm performance. */
+#else /* Word 0 - Little Endian */
+ uint64_t ld_ld_ord : 1; /**< [ 0: 0](R/W) Load-load ordering. For diagnostic use only.
+ 0 = NPR may pass NPR under some cases. The ordering is based on SMMU completion
+ ordering.
+ 1 = NPR never passes NPR; the NPR ordering is based strictly on NCB arrival order.
+ This may harm performance. */
+ uint64_t st_st_ord : 1; /**< [ 1: 1](R/W) If enabled, PS queue used (ignore RO bit). Placed in order store arrives.
+ Internal:
+ FIXME check meaning */
+ uint64_t st_ld_ord : 1; /**< [ 2: 2](R/W) If enabled, NP queue loaded in order it arrives from NCBI. [ST_ST_ORD] should also be
+ set when this bit is set. NP queue will not pass PS queue.
+
+ Internal:
+ FIXME check meaning */
+ uint64_t ro_dis : 1; /**< [ 3: 3](R/W) Disable relaxed ordering. For diagnostic use only.
+ 0 = Relaxed ordering is performed if the NCB device requests it.
+ 1 = IOB ignores the relaxed ordering request bit and treats all requests as
+ strictly ordered. */
+ uint64_t pr_iova_dis : 1; /**< [ 4: 4](R/W) PR queue IOVA comparison disable. For diagnostic use only.
+ 0 = PR will not pass a younger PR with the same IOVA.
+ 1 = PR may pass a younger PR with the same IOVA, if the relaxed ordering request
+ and [RO_DIS] bit allow it. */
+ uint64_t prefetch_dis : 1; /**< [ 5: 5](R/W) Disables mesh prefetches. For diagnostic use only.
+ 0 = Store-store ordered transactions will issue prefetches before the second
+ store to improve performance.
+ 1 = No prefetches. */
+ uint64_t crppr_ena : 2; /**< [ 7: 6](R/W) Controls when a CR is allowed to pass a PR for NCBO and NCBI. Enumerated by IOBN_NCBI_RO_MOD_E. */
+ uint64_t sow_dis : 1; /**< [ 8: 8](R/W) Disables the PCIe store widget for memory store performance. Does not affect
+ observable ordering. No impact on IO stores. For diagnostic use only.
+ 0 = Performance optimization on. Issue prefetches on stores to improve
+ store-store ordering.
+ 1 = Performance optimization off. No prefetches.
+
+ Internal:
+ The SOW is only available on the NCB2/256b devices which include PEMs, CPT,
+ DPI. The expectation is that CPT and DPI use the RelaxOrder bit so they will
+ only use the widget when the VA address CAM detects and promotes two
+ transactions to the same memory cacheline. */
+ uint64_t fast_ord : 1; /**< [ 9: 9](R/W) When set IOB will send commits for PR's to IOW as soon as PR is sent to mesh,
+ rather than waiting for ordering. This bit should only be set for non-PEM
+ devices */
+ uint64_t reserved_10_63 : 54;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_arbidx_ctl_s cn; */
+};
+typedef union bdk_iobnx_arbidx_ctl bdk_iobnx_arbidx_ctl_t;
+
+static inline uint64_t BDK_IOBNX_ARBIDX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_ARBIDX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=19)))
+ return 0x87e0f0002100ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1f);
+ __bdk_csr_fatal("IOBNX_ARBIDX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_ARBIDX_CTL(a,b) bdk_iobnx_arbidx_ctl_t
+#define bustype_BDK_IOBNX_ARBIDX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_ARBIDX_CTL(a,b) "IOBNX_ARBIDX_CTL"
+#define device_bar_BDK_IOBNX_ARBIDX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_ARBIDX_CTL(a,b) (a)
+#define arguments_BDK_IOBNX_ARBIDX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_bistr_reg
+ *
+ * IOBN BIST Status Register
+ * This register contains the result of the BIST run on the IOB rclk memories.
+ */
+union bdk_iobnx_bistr_reg
+{
+ uint64_t u;
+ struct bdk_iobnx_bistr_reg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_22_63 : 42;
+ uint64_t status : 22; /**< [ 21: 0](RO/H) Memory BIST status.
+ Internal:
+ \<18\> = gmr_ixofifo_bstatus_rclk.
+ \<17\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<16\> = sli_req_2_ffifo_bstatus_rclk.
+ \<15\> = sli_preq_1_ffifo_bstatus_rclk.
+ \<14\> = sli_req_1_ffifo_bstatus_rclk.
+ \<13\> = sli_preq_0_ffifo_bstatus_rclk.
+ \<12\> = sli_req_0_ffifo_bstatus_rclk.
+ \<11\> = iop_ffifo_bstatus_rclk.
+ \<10\> = ixo_icc_fifo0_bstatus_rclk.
+ \<9\> = ixo_icc_fifo1_bstatus_rclk.
+ \<8\> = ixo_ics_mem_bstatus_rclk.
+ \<7\> = iob_mem_data_xmd0_bstatus_rclk.
+ \<6\> = iob_mem_data_xmd1_bstatus_rclk.
+ \<5\> = ics_cmd_fifo_bstatus_rclk.
+ \<4\> = ixo_xmd_mem0_bstatus_rclk.
+ \<3\> = ixo_xmd_mem1_bstatus_rclk.
+ \<2\> = iobn_iorn_ffifo0_bstatus_rclk.
+ \<1\> = iobn_iorn_ffifo1_bstatus_rclk.
+ \<0\> = ixo_smmu_mem0_bstatus_rclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 22; /**< [ 21: 0](RO/H) Memory BIST status.
+ Internal:
+ \<18\> = gmr_ixofifo_bstatus_rclk.
+ \<17\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<16\> = sli_req_2_ffifo_bstatus_rclk.
+ \<15\> = sli_preq_1_ffifo_bstatus_rclk.
+ \<14\> = sli_req_1_ffifo_bstatus_rclk.
+ \<13\> = sli_preq_0_ffifo_bstatus_rclk.
+ \<12\> = sli_req_0_ffifo_bstatus_rclk.
+ \<11\> = iop_ffifo_bstatus_rclk.
+ \<10\> = ixo_icc_fifo0_bstatus_rclk.
+ \<9\> = ixo_icc_fifo1_bstatus_rclk.
+ \<8\> = ixo_ics_mem_bstatus_rclk.
+ \<7\> = iob_mem_data_xmd0_bstatus_rclk.
+ \<6\> = iob_mem_data_xmd1_bstatus_rclk.
+ \<5\> = ics_cmd_fifo_bstatus_rclk.
+ \<4\> = ixo_xmd_mem0_bstatus_rclk.
+ \<3\> = ixo_xmd_mem1_bstatus_rclk.
+ \<2\> = iobn_iorn_ffifo0_bstatus_rclk.
+ \<1\> = iobn_iorn_ffifo1_bstatus_rclk.
+ \<0\> = ixo_smmu_mem0_bstatus_rclk. */
+ uint64_t reserved_22_63 : 42;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_iobnx_bistr_reg_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t status : 19; /**< [ 18: 0](RO/H) Memory BIST status.
+ Internal:
+ \<18\> = gmr_ixofifo_bstatus_rclk.
+ \<17\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<16\> = sli_req_2_ffifo_bstatus_rclk.
+ \<15\> = sli_preq_1_ffifo_bstatus_rclk.
+ \<14\> = sli_req_1_ffifo_bstatus_rclk.
+ \<13\> = sli_preq_0_ffifo_bstatus_rclk.
+ \<12\> = sli_req_0_ffifo_bstatus_rclk.
+ \<11\> = iop_ffifo_bstatus_rclk.
+ \<10\> = ixo_icc_fifo0_bstatus_rclk.
+ \<9\> = ixo_icc_fifo1_bstatus_rclk.
+ \<8\> = ixo_ics_mem_bstatus_rclk.
+ \<7\> = iob_mem_data_xmd0_bstatus_rclk.
+ \<6\> = 0 unused.
+ \<5\> = ics_cmd_fifo_bstatus_rclk.
+ \<4\> = ixo_xmd_mem0_bstatus_rclk.
+ \<3\> = ixo_xmd_mem1_bstatus_rclk.
+ \<2\> = iobn_iorn_ffifo0_bstatus_rclk.
+ \<1\> = iobn_iorn_ffifo1_bstatus_rclk.
+ \<0\> = ixo_smmu_mem0_bstatus_rclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 19; /**< [ 18: 0](RO/H) Memory BIST status.
+ Internal:
+ \<18\> = gmr_ixofifo_bstatus_rclk.
+ \<17\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<16\> = sli_req_2_ffifo_bstatus_rclk.
+ \<15\> = sli_preq_1_ffifo_bstatus_rclk.
+ \<14\> = sli_req_1_ffifo_bstatus_rclk.
+ \<13\> = sli_preq_0_ffifo_bstatus_rclk.
+ \<12\> = sli_req_0_ffifo_bstatus_rclk.
+ \<11\> = iop_ffifo_bstatus_rclk.
+ \<10\> = ixo_icc_fifo0_bstatus_rclk.
+ \<9\> = ixo_icc_fifo1_bstatus_rclk.
+ \<8\> = ixo_ics_mem_bstatus_rclk.
+ \<7\> = iob_mem_data_xmd0_bstatus_rclk.
+ \<6\> = 0 unused.
+ \<5\> = ics_cmd_fifo_bstatus_rclk.
+ \<4\> = ixo_xmd_mem0_bstatus_rclk.
+ \<3\> = ixo_xmd_mem1_bstatus_rclk.
+ \<2\> = iobn_iorn_ffifo0_bstatus_rclk.
+ \<1\> = iobn_iorn_ffifo1_bstatus_rclk.
+ \<0\> = ixo_smmu_mem0_bstatus_rclk. */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_iobnx_bistr_reg_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t status : 19; /**< [ 18: 0](RO/H) Memory BIST status.
+ Internal:
+ \<18\> = gmr_ixofifo_bstatus_rclk.
+ \<17\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<16\> = sli_req_2_ffifo_bstatus_rclk.
+ \<15\> = sli_preq_1_ffifo_bstatus_rclk.
+ \<14\> = sli_req_1_ffifo_bstatus_rclk.
+ \<13\> = sli_preq_0_ffifo_bstatus_rclk.
+ \<12\> = sli_req_0_ffifo_bstatus_rclk.
+ \<11\> = iop_ffifo_bstatus_rclk.
+ \<10\> = ixo_icc_fifo0_bstatus_rclk.
+ \<9\> = ixo_icc_fifo1_bstatus_rclk.
+ \<8\> = ixo_ics_mem_bstatus_rclk.
+ \<7\> = iob_mem_data_xmd0_bstatus_rclk.
+ \<6\> = iob_mem_data_xmd1_bstatus_rclk.
+ \<5\> = ics_cmd_fifo_bstatus_rclk.
+ \<4\> = ixo_xmd_mem0_bstatus_rclk.
+ \<3\> = ixo_xmd_mem1_bstatus_rclk.
+ \<2\> = iobn_iorn_ffifo0_bstatus_rclk.
+ \<1\> = iobn_iorn_ffifo1_bstatus_rclk.
+ \<0\> = ixo_smmu_mem0_bstatus_rclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 19; /**< [ 18: 0](RO/H) Memory BIST status.
+ Internal:
+ \<18\> = gmr_ixofifo_bstatus_rclk.
+ \<17\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<16\> = sli_req_2_ffifo_bstatus_rclk.
+ \<15\> = sli_preq_1_ffifo_bstatus_rclk.
+ \<14\> = sli_req_1_ffifo_bstatus_rclk.
+ \<13\> = sli_preq_0_ffifo_bstatus_rclk.
+ \<12\> = sli_req_0_ffifo_bstatus_rclk.
+ \<11\> = iop_ffifo_bstatus_rclk.
+ \<10\> = ixo_icc_fifo0_bstatus_rclk.
+ \<9\> = ixo_icc_fifo1_bstatus_rclk.
+ \<8\> = ixo_ics_mem_bstatus_rclk.
+ \<7\> = iob_mem_data_xmd0_bstatus_rclk.
+ \<6\> = iob_mem_data_xmd1_bstatus_rclk.
+ \<5\> = ics_cmd_fifo_bstatus_rclk.
+ \<4\> = ixo_xmd_mem0_bstatus_rclk.
+ \<3\> = ixo_xmd_mem1_bstatus_rclk.
+ \<2\> = iobn_iorn_ffifo0_bstatus_rclk.
+ \<1\> = iobn_iorn_ffifo1_bstatus_rclk.
+ \<0\> = ixo_smmu_mem0_bstatus_rclk. */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_iobnx_bistr_reg_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_22_63 : 42;
+ uint64_t status : 22; /**< [ 21: 0](RO/H) Memory BIST status.
+ Internal:
+ \<21\> = gmr_sli_ixofifo_bstatus_rclk.
+ \<20\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<19\> = sli_req_2_ffifo_bstatus_rclk.
+ \<18\> = gmr_ixofifo_bstatus_rclk.
+ \<17\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<16\> = sli_req_2_ffifo_bstatus_rclk.
+ \<15\> = sli_preq_1_ffifo_bstatus_rclk.
+ \<14\> = sli_req_1_ffifo_bstatus_rclk.
+ \<13\> = sli_preq_0_ffifo_bstatus_rclk.
+ \<12\> = sli_req_0_ffifo_bstatus_rclk.
+ \<11\> = iop_ffifo_bstatus_rclk.
+ \<10\> = ixo_icc_fifo0_bstatus_rclk.
+ \<9\> = ixo_icc_fifo1_bstatus_rclk.
+ \<8\> = ixo_ics_mem_bstatus_rclk.
+ \<7\> = iob_mem_data_xmd0_bstatus_rclk.
+ \<6\> = iob_mem_data_xmd1_bstatus_rclk.
+ \<5\> = ics_cmd_fifo_bstatus_rclk.
+ \<4\> = ixo_xmd_mem0_bstatus_rclk.
+ \<3\> = ixo_xmd_mem1_bstatus_rclk.
+ \<2\> = iobn_iorn_ffifo0_bstatus_rclk.
+ \<1\> = iobn_iorn_ffifo1_bstatus_rclk.
+ \<0\> = ixo_smmu_mem0_bstatus_rclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t status : 22; /**< [ 21: 0](RO/H) Memory BIST status.
+ Internal:
+ \<21\> = gmr_sli_ixofifo_bstatus_rclk.
+ \<20\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<19\> = sli_req_2_ffifo_bstatus_rclk.
+ \<18\> = gmr_ixofifo_bstatus_rclk.
+ \<17\> = sli_preq_2_ffifo_bstatus_rclk.
+ \<16\> = sli_req_2_ffifo_bstatus_rclk.
+ \<15\> = sli_preq_1_ffifo_bstatus_rclk.
+ \<14\> = sli_req_1_ffifo_bstatus_rclk.
+ \<13\> = sli_preq_0_ffifo_bstatus_rclk.
+ \<12\> = sli_req_0_ffifo_bstatus_rclk.
+ \<11\> = iop_ffifo_bstatus_rclk.
+ \<10\> = ixo_icc_fifo0_bstatus_rclk.
+ \<9\> = ixo_icc_fifo1_bstatus_rclk.
+ \<8\> = ixo_ics_mem_bstatus_rclk.
+ \<7\> = iob_mem_data_xmd0_bstatus_rclk.
+ \<6\> = iob_mem_data_xmd1_bstatus_rclk.
+ \<5\> = ics_cmd_fifo_bstatus_rclk.
+ \<4\> = ixo_xmd_mem0_bstatus_rclk.
+ \<3\> = ixo_xmd_mem1_bstatus_rclk.
+ \<2\> = iobn_iorn_ffifo0_bstatus_rclk.
+ \<1\> = iobn_iorn_ffifo1_bstatus_rclk.
+ \<0\> = ixo_smmu_mem0_bstatus_rclk. */
+ uint64_t reserved_22_63 : 42;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_iobnx_bistr_reg bdk_iobnx_bistr_reg_t;
+
+static inline uint64_t BDK_IOBNX_BISTR_REG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_BISTR_REG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f0005080ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f0005080ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f0005080ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_BISTR_REG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_BISTR_REG(a) bdk_iobnx_bistr_reg_t
+#define bustype_BDK_IOBNX_BISTR_REG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_BISTR_REG(a) "IOBNX_BISTR_REG"
+#define device_bar_BDK_IOBNX_BISTR_REG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_BISTR_REG(a) (a)
+#define arguments_BDK_IOBNX_BISTR_REG(a) (a),-1,-1,-1
+
/**
 * Register (RSL) iobn#_bists_reg
 *
 * IOBN BIST Status Register
 * This register contains the result of the BIST run on the IOB sclk memories.
 */
union bdk_iobnx_bists_reg
{
    uint64_t u;
    struct bdk_iobnx_bists_reg_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_11_63 : 53;
        uint64_t status : 11; /**< [ 10: 0](RO/H) Memory BIST status.
                                   Internal:
                                   \<10\> = irp0_flid_mem_status.
                                   \<9\> = irp1_flid_mem_status.
                                   \<8\> = icc0_xmc_fifo_ecc_bstatus.
                                   \<7\> = icc1_xmc_fifo_ecc_bstatus.
                                   \<6\> = icc_xmc_fifo_ecc_bstatus.
                                   \<5\> = rsd_mem0_bstatus.
                                   \<4\> = rsd_mem1_bstatus.
                                   \<3\> = iop_breq_fifo0_bstatus.
                                   \<2\> = iop_breq_fifo1_bstatus.
                                   \<1\> = iop_breq_fifo2_bstatus.
                                   \<0\> = iop_breq_fifo3_bstatus. */
#else /* Word 0 - Little Endian */
        uint64_t status : 11; /**< [ 10: 0](RO/H) Memory BIST status.
                                   Internal:
                                   \<10\> = irp0_flid_mem_status.
                                   \<9\> = irp1_flid_mem_status.
                                   \<8\> = icc0_xmc_fifo_ecc_bstatus.
                                   \<7\> = icc1_xmc_fifo_ecc_bstatus.
                                   \<6\> = icc_xmc_fifo_ecc_bstatus.
                                   \<5\> = rsd_mem0_bstatus.
                                   \<4\> = rsd_mem1_bstatus.
                                   \<3\> = iop_breq_fifo0_bstatus.
                                   \<2\> = iop_breq_fifo1_bstatus.
                                   \<1\> = iop_breq_fifo2_bstatus.
                                   \<0\> = iop_breq_fifo3_bstatus. */
        uint64_t reserved_11_63 : 53;
#endif /* Word 0 - End */
    } s;
    /* CN81XX: same field positions, but several memories are absent and the
       corresponding status bits read as zero. */
    struct bdk_iobnx_bists_reg_cn81xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_11_63 : 53;
        uint64_t status : 11; /**< [ 10: 0](RO/H) Memory BIST status.
                                   Internal:
                                   \<10\> = irp0_flid_mem_status.
                                   \<9\> = 0 unused.
                                   \<8\> = icc0_xmc_fifo_ecc_bstatus.
                                   \<7\> = 0 unused.
                                   \<6\> = icc_xmc_fifo_ecc_bstatus.
                                   \<5\> = rsd_mem0_bstatus.
                                   \<4\> = 0 unused.
                                   \<3\> = iop_breq_fifo0_bstatus.
                                   \<2\> = 0 unused.
                                   \<1\> = iop_breq_fifo2_bstatus.
                                   \<0\> = iop_breq_fifo3_bstatus. */
#else /* Word 0 - Little Endian */
        uint64_t status : 11; /**< [ 10: 0](RO/H) Memory BIST status.
                                   Internal:
                                   \<10\> = irp0_flid_mem_status.
                                   \<9\> = 0 unused.
                                   \<8\> = icc0_xmc_fifo_ecc_bstatus.
                                   \<7\> = 0 unused.
                                   \<6\> = icc_xmc_fifo_ecc_bstatus.
                                   \<5\> = rsd_mem0_bstatus.
                                   \<4\> = 0 unused.
                                   \<3\> = iop_breq_fifo0_bstatus.
                                   \<2\> = 0 unused.
                                   \<1\> = iop_breq_fifo2_bstatus.
                                   \<0\> = iop_breq_fifo3_bstatus. */
        uint64_t reserved_11_63 : 53;
#endif /* Word 0 - End */
    } cn81xx;
    /* struct bdk_iobnx_bists_reg_s cn88xx; */
    /* struct bdk_iobnx_bists_reg_s cn83xx; */
};
typedef union bdk_iobnx_bists_reg bdk_iobnx_bists_reg_t;
+
+static inline uint64_t BDK_IOBNX_BISTS_REG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_BISTS_REG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f0005000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f0005000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f0005000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_BISTS_REG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_BISTS_REG(a) bdk_iobnx_bists_reg_t
+#define bustype_BDK_IOBNX_BISTS_REG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_BISTS_REG(a) "IOBNX_BISTS_REG"
+#define device_bar_BDK_IOBNX_BISTS_REG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_BISTS_REG(a) (a)
+#define arguments_BDK_IOBNX_BISTS_REG(a) (a),-1,-1,-1
+
/**
 * Register (RSL) iobn#_bp_test#
 *
 * INTERNAL: IOBN Backpressure Test Registers
 *
 * For diagnostic use only: lets random backpressure be injected at internal
 * arbitration points ([ENABLE]), with a per-point duty cycle ([BP_CFG]) and a
 * configurable LFSR update rate ([LFSR_FREQ]).
 */
union bdk_iobnx_bp_testx
{
    uint64_t u;
    struct bdk_iobnx_bp_testx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t enable : 8; /**< [ 63: 56](R/W) Enable test mode. For diagnostic use only.
                                  Internal:
                                  Once a bit is set, random backpressure is generated
                                  at the corresponding point to allow for more frequent backpressure.

                                  IOBN()_BP_TEST(0) - INRF: Defined by iobn_defs::inrf_bp_test_t
                                  \<63\> = TBD.
                                  \<62\> = TBD.
                                  \<61\> = TBD.
                                  \<60\> = TBD.
                                  \<59\> = TBD.
                                  \<58\> = TBD.
                                  \<57\> = TBD.
                                  \<56\> = TBD.

                                  IOBN()_BP_TEST(1) - INRM: Defined by iobn_defs::inrm_bp_test_t
                                  \<63\> = Stall CMT processing for outbound LBK transactions
                                  \<62\> = Stall CMT processing for outbound MSH transactions
                                  \<61\> = omp_vcc_ret.
                                  \<60\> = imi_dat_fif - Backpressure VCC return counters(OMP)
                                  \<59\> = TBD.
                                  \<58\> = TBD.
                                  \<57\> = TBD.
                                  \<56\> = TBD.

                                  IOBN()_BP_TEST(2) - INRF: Defined by iobn_defs::inrf_bp_test_t
                                  \<63\> = TBD.
                                  \<62\> = TBD.
                                  \<61\> = TBD.
                                  \<60\> = TBD.
                                  \<59\> = TBD.
                                  \<58\> = TBD.
                                  \<57\> = TBD.
                                  \<56\> = TBD.

                                  IOBN()_BP_TEST(3) - INRF: Defined by iobn_defs::inrm_bp_test_t
                                  \<63\> = VCC - Victim DAT.
                                  \<62\> = VCC - Victim REQ (CMD).
                                  \<61\> = VCC - DAT (REQ/REQH).
                                  \<60\> = VCC - CMD (REQ/RQH).
                                  \<59\> = SLC - VCC.
                                  \<58\> = SLC - ACK.
                                  \<57\> = SLC - DAT.
                                  \<56\> = SLC - CMD. */
        uint64_t reserved_32_55 : 24;
        uint64_t bp_cfg : 16; /**< [ 31: 16](R/W) Backpressure weight. For diagnostic use only.
                                  Internal:
                                  There are 2 backpressure configuration bits per enable, with the two bits
                                  defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
                                  0x3=25% of the time.
                                  \<31:30\> = Config 7.
                                  \<29:28\> = Config 6.
                                  \<27:26\> = Config 5.
                                  \<25:24\> = Config 4.
                                  \<23:22\> = Config 3.
                                  \<21:20\> = Config 2.
                                  \<19:18\> = Config 1.
                                  \<17:16\> = Config 0. */
        uint64_t reserved_12_15 : 4;
        uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
#else /* Word 0 - Little Endian */
        uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
        uint64_t reserved_12_15 : 4;
        uint64_t bp_cfg : 16; /**< [ 31: 16](R/W) Backpressure weight. For diagnostic use only.
                                  Internal:
                                  There are 2 backpressure configuration bits per enable, with the two bits
                                  defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
                                  0x3=25% of the time.
                                  \<31:30\> = Config 7.
                                  \<29:28\> = Config 6.
                                  \<27:26\> = Config 5.
                                  \<25:24\> = Config 4.
                                  \<23:22\> = Config 3.
                                  \<21:20\> = Config 2.
                                  \<19:18\> = Config 1.
                                  \<17:16\> = Config 0. */
        uint64_t reserved_32_55 : 24;
        uint64_t enable : 8; /**< [ 63: 56](R/W) Enable test mode. For diagnostic use only.
                                  Internal:
                                  Once a bit is set, random backpressure is generated
                                  at the corresponding point to allow for more frequent backpressure.

                                  IOBN()_BP_TEST(0) - INRF: Defined by iobn_defs::inrf_bp_test_t
                                  \<63\> = TBD.
                                  \<62\> = TBD.
                                  \<61\> = TBD.
                                  \<60\> = TBD.
                                  \<59\> = TBD.
                                  \<58\> = TBD.
                                  \<57\> = TBD.
                                  \<56\> = TBD.

                                  IOBN()_BP_TEST(1) - INRM: Defined by iobn_defs::inrm_bp_test_t
                                  \<63\> = Stall CMT processing for outbound LBK transactions
                                  \<62\> = Stall CMT processing for outbound MSH transactions
                                  \<61\> = omp_vcc_ret.
                                  \<60\> = imi_dat_fif - Backpressure VCC return counters(OMP)
                                  \<59\> = TBD.
                                  \<58\> = TBD.
                                  \<57\> = TBD.
                                  \<56\> = TBD.

                                  IOBN()_BP_TEST(2) - INRF: Defined by iobn_defs::inrf_bp_test_t
                                  \<63\> = TBD.
                                  \<62\> = TBD.
                                  \<61\> = TBD.
                                  \<60\> = TBD.
                                  \<59\> = TBD.
                                  \<58\> = TBD.
                                  \<57\> = TBD.
                                  \<56\> = TBD.

                                  IOBN()_BP_TEST(3) - INRF: Defined by iobn_defs::inrm_bp_test_t
                                  \<63\> = VCC - Victim DAT.
                                  \<62\> = VCC - Victim REQ (CMD).
                                  \<61\> = VCC - DAT (REQ/REQH).
                                  \<60\> = VCC - CMD (REQ/RQH).
                                  \<59\> = SLC - VCC.
                                  \<58\> = SLC - ACK.
                                  \<57\> = SLC - DAT.
                                  \<56\> = SLC - CMD. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_iobnx_bp_testx_s cn; */
};
typedef union bdk_iobnx_bp_testx bdk_iobnx_bp_testx_t;
+
+static inline uint64_t BDK_IOBNX_BP_TESTX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_BP_TESTX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=3)))
+ return 0x87e0f0003800ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
+ __bdk_csr_fatal("IOBNX_BP_TESTX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_BP_TESTX(a,b) bdk_iobnx_bp_testx_t
+#define bustype_BDK_IOBNX_BP_TESTX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_BP_TESTX(a,b) "IOBNX_BP_TESTX"
+#define device_bar_BDK_IOBNX_BP_TESTX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_BP_TESTX(a,b) (a)
+#define arguments_BDK_IOBNX_BP_TESTX(a,b) (a),(b),-1,-1
+
/**
 * Register (RSL) iobn#_cfg0
 *
 * IOBN General Configuration 0 Register
 * All fields are diagnostic-only overrides that force internal conditional
 * clocks on.
 */
union bdk_iobnx_cfg0
{
    uint64_t u;
    struct bdk_iobnx_cfg0_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_5_63 : 59;
        uint64_t force_gibm_ncbi_clk_en : 1; /**< [ 4: 4](R/W) Force on GIBM NCBI clocks. For diagnostic use only. */
        uint64_t force_immx_sclk_cond_clk_en : 1;/**< [ 3: 3](R/W) Force on IMMX clocks. For diagnostic use only. */
        uint64_t force_inrm_sclk_cond_clk_en : 1;/**< [ 2: 2](R/W) Force on INRM clocks. For diagnostic use only. */
        uint64_t force_inrf_sclk_cond_clk_en : 1;/**< [ 1: 1](R/W) Force on INRF clocks. For diagnostic use only. */
        uint64_t force_ins_sclk_cond_clk_en : 1;/**< [ 0: 0](R/W) Force on INS clocks. For diagnostic use only. */
#else /* Word 0 - Little Endian */
        uint64_t force_ins_sclk_cond_clk_en : 1;/**< [ 0: 0](R/W) Force on INS clocks. For diagnostic use only. */
        uint64_t force_inrf_sclk_cond_clk_en : 1;/**< [ 1: 1](R/W) Force on INRF clocks. For diagnostic use only. */
        uint64_t force_inrm_sclk_cond_clk_en : 1;/**< [ 2: 2](R/W) Force on INRM clocks. For diagnostic use only. */
        uint64_t force_immx_sclk_cond_clk_en : 1;/**< [ 3: 3](R/W) Force on IMMX clocks. For diagnostic use only. */
        uint64_t force_gibm_ncbi_clk_en : 1; /**< [ 4: 4](R/W) Force on GIBM NCBI clocks. For diagnostic use only. */
        uint64_t reserved_5_63 : 59;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_iobnx_cfg0_s cn; */
};
typedef union bdk_iobnx_cfg0 bdk_iobnx_cfg0_t;
+
+static inline uint64_t BDK_IOBNX_CFG0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_CFG0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0002000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_CFG0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_CFG0(a) bdk_iobnx_cfg0_t
+#define bustype_BDK_IOBNX_CFG0(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_CFG0(a) "IOBNX_CFG0"
+#define device_bar_BDK_IOBNX_CFG0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_CFG0(a) (a)
+#define arguments_BDK_IOBNX_CFG0(a) (a),-1,-1,-1
+
/**
 * Register (RSL) iobn#_cfg1
 *
 * IOBN General Configuration 1 Register
 * All fields are diagnostic-only overrides that force internal rclk
 * conditional clocks on; they must be zero during normal operation.
 */
union bdk_iobnx_cfg1
{
    uint64_t u;
    struct bdk_iobnx_cfg1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_3_63 : 61;
        uint64_t force_immx_rclk_cond_clk_en : 1;/**< [ 2: 2](R/W) For debug only. Must be zero during normal operation.
                                                      Internal:
                                                      Force the conditional clock active. */
        uint64_t force_inrm_rclk_cond_clk_en : 1;/**< [ 1: 1](R/W) For debug only. Must be zero during normal operation.
                                                      Internal:
                                                      Force the conditional clock active. */
        uint64_t force_inrf_rclk_cond_clk_en : 1;/**< [ 0: 0](R/W) For debug only. Must be zero during normal operation.
                                                      Internal:
                                                      Force the conditional clock active. */
#else /* Word 0 - Little Endian */
        uint64_t force_inrf_rclk_cond_clk_en : 1;/**< [ 0: 0](R/W) For debug only. Must be zero during normal operation.
                                                      Internal:
                                                      Force the conditional clock active. */
        uint64_t force_inrm_rclk_cond_clk_en : 1;/**< [ 1: 1](R/W) For debug only. Must be zero during normal operation.
                                                      Internal:
                                                      Force the conditional clock active. */
        uint64_t force_immx_rclk_cond_clk_en : 1;/**< [ 2: 2](R/W) For debug only. Must be zero during normal operation.
                                                      Internal:
                                                      Force the conditional clock active. */
        uint64_t reserved_3_63 : 61;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_iobnx_cfg1_s cn; */
};
typedef union bdk_iobnx_cfg1 bdk_iobnx_cfg1_t;
+
+static inline uint64_t BDK_IOBNX_CFG1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_CFG1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0002010ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_CFG1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_CFG1(a) bdk_iobnx_cfg1_t
+#define bustype_BDK_IOBNX_CFG1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_CFG1(a) "IOBNX_CFG1"
+#define device_bar_BDK_IOBNX_CFG1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_CFG1(a) (a)
+#define arguments_BDK_IOBNX_CFG1(a) (a),-1,-1,-1
+
/**
 * Register (RSL) iobn#_chip_cur_pwr
 *
 * INTERNAL: IOBN Chip Current Power Register
 *
 * For diagnostic use only.
 * This register contains the current power setting.
 * Only index zero (IOB(0)) is used.
 * The value reported here is bounded by the limits configured in
 * IOBN()_CHIP_GLB_PWR_THROTTLE (PWR_MIN/PWR_MAX/PWR_SETTING).
 */
union bdk_iobnx_chip_cur_pwr
{
    uint64_t u;
    struct bdk_iobnx_chip_cur_pwr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_8_63 : 56;
        uint64_t current_power_setting : 8; /**< [ 7: 0](RO/H) Global throttling value currently being used. Throttling can force units (CPU cores, in
                                                 particular) idle for a portion of time, which will reduce power consumption. When
                                                 [CURRENT_POWER_SETTING] is equal to zero, the unit is idle most of the time and consumes
                                                 minimum power. When [CURRENT_POWER_SETTING] is equal to 0xFF, units are never idled to
                                                 reduce power. The hardware generally uses a [CURRENT_POWER_SETTING] value that is as large
                                                 as possible (in order to maximize performance) subject to the following constraints (in
                                                 priority order):
                                                 * PWR_MIN \<= [CURRENT_POWER_SETTING] \<= PWR_MAX.
                                                 * Power limits from the PWR_SETTING feedback control system.

                                                 In the case of the CPU cores, [CURRENT_POWER_SETTING] effectively limits the CP0
                                                 PowThrottle[POWLIM] value: effective POWLIM = MINIMUM([CURRENT_POWER_SETTING],
                                                 PowThrottle[POWLIM]) */
#else /* Word 0 - Little Endian */
        uint64_t current_power_setting : 8; /**< [ 7: 0](RO/H) Global throttling value currently being used. Throttling can force units (CPU cores, in
                                                 particular) idle for a portion of time, which will reduce power consumption. When
                                                 [CURRENT_POWER_SETTING] is equal to zero, the unit is idle most of the time and consumes
                                                 minimum power. When [CURRENT_POWER_SETTING] is equal to 0xFF, units are never idled to
                                                 reduce power. The hardware generally uses a [CURRENT_POWER_SETTING] value that is as large
                                                 as possible (in order to maximize performance) subject to the following constraints (in
                                                 priority order):
                                                 * PWR_MIN \<= [CURRENT_POWER_SETTING] \<= PWR_MAX.
                                                 * Power limits from the PWR_SETTING feedback control system.

                                                 In the case of the CPU cores, [CURRENT_POWER_SETTING] effectively limits the CP0
                                                 PowThrottle[POWLIM] value: effective POWLIM = MINIMUM([CURRENT_POWER_SETTING],
                                                 PowThrottle[POWLIM]) */
        uint64_t reserved_8_63 : 56;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_iobnx_chip_cur_pwr_s cn; */
};
typedef union bdk_iobnx_chip_cur_pwr bdk_iobnx_chip_cur_pwr_t;
+
+static inline uint64_t BDK_IOBNX_CHIP_CUR_PWR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_CHIP_CUR_PWR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f000a110ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f000a110ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f000a110ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_CHIP_CUR_PWR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_CHIP_CUR_PWR(a) bdk_iobnx_chip_cur_pwr_t
+#define bustype_BDK_IOBNX_CHIP_CUR_PWR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_CHIP_CUR_PWR(a) "IOBNX_CHIP_CUR_PWR"
+#define device_bar_BDK_IOBNX_CHIP_CUR_PWR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_CHIP_CUR_PWR(a) (a)
+#define arguments_BDK_IOBNX_CHIP_CUR_PWR(a) (a),-1,-1,-1
+
/**
 * Register (RSL) iobn#_chip_glb_pwr_throttle
 *
 * INTERNAL: IOBN Chip Global Power Throttle Register
 *
 * For diagnostic use only.
 * This register controls the min/max power settings.
 * Only index zero (IOB(0)) is used.
 * The throttle value the hardware actually selects under these limits can be
 * read back from IOBN()_CHIP_CUR_PWR.
 */
union bdk_iobnx_chip_glb_pwr_throttle
{
    uint64_t u;
    struct bdk_iobnx_chip_glb_pwr_throttle_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_34_63 : 30;
        uint64_t pwr_bw : 2; /**< [ 33: 32](R/W) Configures the reaction time of the closed-loop feedback control system for the
                                  AVG_CHIP_POWER power approximation. Higher numbers decrease bandwidth, reducing response
                                  time, which could lead to greater tracking error, but reduce ringing. */
        uint64_t pwr_max : 8; /**< [ 31: 24](R/W) Reserved. */
        uint64_t pwr_min : 8; /**< [ 23: 16](R/W) Reserved. */
        uint64_t pwr_setting : 16; /**< [ 15: 0](R/W) A power limiter for the chip. A limiter of the power consumption of the
                                        chip. This power limiting is implemented by a closed-loop feedback control
                                        system for the AVG_CHIP_POWER power approximation. The direct output of the
                                        [PWR_SETTING] feedback control system is the CURRENT_POWER_SETTING value. The
                                        power consumed by the chip (estimated currently by the AVG_CHIP_POWER value) is
                                        an indirect output of the PWR_SETTING feedback control system. [PWR_SETTING] is
                                        not used by the hardware when [PWR_MIN] equals [PWR_MAX]. [PWR_MIN] and
                                        [PWR_MAX] threshold requirements always supersede [PWR_SETTING] limits. (For
                                        maximum [PWR_SETTING] feedback control freedom, set [PWR_MIN]=0 and
                                        [PWR_MAX]=0xff.)

                                        [PWR_SETTING] equal to 0 forces the chip to consume near minimum
                                        power. Increasing [PWR_SETTING] value from 0 to 0xFFFF increases the power that
                                        the chip is allowed to consume linearly (roughly) from minimum to maximum. */
#else /* Word 0 - Little Endian */
        uint64_t pwr_setting : 16; /**< [ 15: 0](R/W) A power limiter for the chip. A limiter of the power consumption of the
                                        chip. This power limiting is implemented by a closed-loop feedback control
                                        system for the AVG_CHIP_POWER power approximation. The direct output of the
                                        [PWR_SETTING] feedback control system is the CURRENT_POWER_SETTING value. The
                                        power consumed by the chip (estimated currently by the AVG_CHIP_POWER value) is
                                        an indirect output of the PWR_SETTING feedback control system. [PWR_SETTING] is
                                        not used by the hardware when [PWR_MIN] equals [PWR_MAX]. [PWR_MIN] and
                                        [PWR_MAX] threshold requirements always supersede [PWR_SETTING] limits. (For
                                        maximum [PWR_SETTING] feedback control freedom, set [PWR_MIN]=0 and
                                        [PWR_MAX]=0xff.)

                                        [PWR_SETTING] equal to 0 forces the chip to consume near minimum
                                        power. Increasing [PWR_SETTING] value from 0 to 0xFFFF increases the power that
                                        the chip is allowed to consume linearly (roughly) from minimum to maximum. */
        uint64_t pwr_min : 8; /**< [ 23: 16](R/W) Reserved. */
        uint64_t pwr_max : 8; /**< [ 31: 24](R/W) Reserved. */
        uint64_t pwr_bw : 2; /**< [ 33: 32](R/W) Configures the reaction time of the closed-loop feedback control system for the
                                  AVG_CHIP_POWER power approximation. Higher numbers decrease bandwidth, reducing response
                                  time, which could lead to greater tracking error, but reduce ringing. */
        uint64_t reserved_34_63 : 30;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_iobnx_chip_glb_pwr_throttle_s cn; */
};
typedef union bdk_iobnx_chip_glb_pwr_throttle bdk_iobnx_chip_glb_pwr_throttle_t;
+
+static inline uint64_t BDK_IOBNX_CHIP_GLB_PWR_THROTTLE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_CHIP_GLB_PWR_THROTTLE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f000a100ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f000a100ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f000a100ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_CHIP_GLB_PWR_THROTTLE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_CHIP_GLB_PWR_THROTTLE(a) bdk_iobnx_chip_glb_pwr_throttle_t
+#define bustype_BDK_IOBNX_CHIP_GLB_PWR_THROTTLE(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_CHIP_GLB_PWR_THROTTLE(a) "IOBNX_CHIP_GLB_PWR_THROTTLE"
+#define device_bar_BDK_IOBNX_CHIP_GLB_PWR_THROTTLE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_CHIP_GLB_PWR_THROTTLE(a) (a)
+#define arguments_BDK_IOBNX_CHIP_GLB_PWR_THROTTLE(a) (a),-1,-1,-1
+
/**
 * Register (RSL) iobn#_chip_pwr_out
 *
 * IOBN Chip Power Out Register
 * This register contains power numbers from the various partitions on the chip.
 * Only index zero (IOB(0)) is used.
 * [AVG_CHIP_POWER] is the input to the PWR_SETTING feedback loop configured in
 * IOBN()_CHIP_GLB_PWR_THROTTLE.
 */
union bdk_iobnx_chip_pwr_out
{
    uint64_t u;
    struct bdk_iobnx_chip_pwr_out_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t cpu_pwr : 16; /**< [ 63: 48](RO/H) An estimate of the current CPU core complex power consumption, including a sum
                                    of all processor's AP_CVM_POWER_EL1[AVERAGE_POWER]. The CPU core complex
                                    includes the caches and DRAM controller(s), as well as all CPU cores. Linearly
                                    larger values indicate linearly higher power consumption. */
        uint64_t chip_power : 16; /**< [ 47: 32](RO/H) An estimate of the current total power consumption by the chip. Linearly larger values
                                       indicate linearly higher power consumption. [CHIP_POWER] is the sum of [CPU_PWR] and
                                       [COPROC_POWER]. */
        uint64_t coproc_power : 16; /**< [ 31: 16](RO/H) An estimate of the current coprocessor power consumption. Linearly larger values indicate
                                         linearly higher power consumption. This estimate is energy per core clock, and will
                                         generally decrease as the ratio of core to coprocessor clock speed increases. */
        uint64_t avg_chip_power : 16; /**< [ 15: 0](RO/H) Average chip power.
                                           An average of [CHIP_POWER], calculated using an IIR filter with an average
                                           weight of 16K core clocks, in mA/GHz.

                                           Accurate power numbers should be calculated using a platform-specific method which
                                           e.g. reads the current consumption of the VRM.

                                           Otherwise an approximation of this chip's power is calculated with:

                                           _ power = chip_const + core_const * cores_powered_on + [AVG_CHIP_POWER] * voltage

                                           Where:

                                           _ power is in mW.

                                           _ chip_const is in mW and represents the I/O power and chip excluding core_const.
                                           This may vary as I/O and coprocessor loads vary, therefore only
                                           platform methods can be used for accurate estimates.

                                           _ core_const is a per-core constant leakage from the HRM power application note, and is in
                                           mA.

                                           _ cores_powered_on is a population count of all bits set in RST_PP_POWER.

                                           _ voltage is determined by the platform, perhaps by reading a VRM setting. */
#else /* Word 0 - Little Endian */
        uint64_t avg_chip_power : 16; /**< [ 15: 0](RO/H) Average chip power.
                                           An average of [CHIP_POWER], calculated using an IIR filter with an average
                                           weight of 16K core clocks, in mA/GHz.

                                           Accurate power numbers should be calculated using a platform-specific method which
                                           e.g. reads the current consumption of the VRM.

                                           Otherwise an approximation of this chip's power is calculated with:

                                           _ power = chip_const + core_const * cores_powered_on + [AVG_CHIP_POWER] * voltage

                                           Where:

                                           _ power is in mW.

                                           _ chip_const is in mW and represents the I/O power and chip excluding core_const.
                                           This may vary as I/O and coprocessor loads vary, therefore only
                                           platform methods can be used for accurate estimates.

                                           _ core_const is a per-core constant leakage from the HRM power application note, and is in
                                           mA.

                                           _ cores_powered_on is a population count of all bits set in RST_PP_POWER.

                                           _ voltage is determined by the platform, perhaps by reading a VRM setting. */
        uint64_t coproc_power : 16; /**< [ 31: 16](RO/H) An estimate of the current coprocessor power consumption. Linearly larger values indicate
                                         linearly higher power consumption. This estimate is energy per core clock, and will
                                         generally decrease as the ratio of core to coprocessor clock speed increases. */
        uint64_t chip_power : 16; /**< [ 47: 32](RO/H) An estimate of the current total power consumption by the chip. Linearly larger values
                                       indicate linearly higher power consumption. [CHIP_POWER] is the sum of [CPU_PWR] and
                                       [COPROC_POWER]. */
        uint64_t cpu_pwr : 16; /**< [ 63: 48](RO/H) An estimate of the current CPU core complex power consumption, including a sum
                                    of all processor's AP_CVM_POWER_EL1[AVERAGE_POWER]. The CPU core complex
                                    includes the caches and DRAM controller(s), as well as all CPU cores. Linearly
                                    larger values indicate linearly higher power consumption. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_iobnx_chip_pwr_out_s cn; */
};
typedef union bdk_iobnx_chip_pwr_out bdk_iobnx_chip_pwr_out_t;
+
+static inline uint64_t BDK_IOBNX_CHIP_PWR_OUT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_CHIP_PWR_OUT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f000a108ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f000a108ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f000a108ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_CHIP_PWR_OUT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_CHIP_PWR_OUT(a) bdk_iobnx_chip_pwr_out_t
+#define bustype_BDK_IOBNX_CHIP_PWR_OUT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_CHIP_PWR_OUT(a) "IOBNX_CHIP_PWR_OUT"
+#define device_bar_BDK_IOBNX_CHIP_PWR_OUT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_CHIP_PWR_OUT(a) (a)
+#define arguments_BDK_IOBNX_CHIP_PWR_OUT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_cond_clk_cap#
+ *
+ * INTERNAL: IOBN Conditional Clock Capacitance Register
+ *
+ * This register is for diagnostic use only.
+ * Internal:
+ * Each index corresponds to a different net as follows:
+ * 0 = bgx0___bgx___bgx_clk___csclk_drv.
+ * 1 = bgx0___bgx___bgx_clk___ssclk_drv.
+ * 2 = bgx0___bgx___bgx_clk___gsclk_drv.
+ * 3 = bgx1___bgx___bgx_clk___csclk_drv.
+ * 4 = bgx1___bgx___bgx_clk___ssclk_drv.
+ * 5 = bgx1___bgx___bgx_clk___gsclk_drv.
+ * 6 = bgx2___bgx___bgx_clk___csclk_drv.
+ * 7 = bgx2___bgx___bgx_clk___ssclk_drv.
+ * 8 = bgx2___bgx___bgx_clk___gsclk_drv.
+ * 9 = bgx3___bgx___bgx_clk___csclk_drv.
+ * 10 = bgx3___bgx___bgx_clk___ssclk_drv.
+ * 11 = bgx3___bgx___bgx_clk___gsclk_drv.
+ * 12 = dpi___dpi___csclk_drv.
+ * 13 = fpa___fpa___gbl___csclk_drv.
+ * 14 = lbk___lbk___lbk_core_p0x0___csclk_drv.
+ * 15 = lbk___lbk___lbk_core_p0x1___csclk_drv.
+ * 16 = lbk___lbk___lbk_core_p1x0___csclk_drv.
+ * 17 = lbk___lbk___lbk_core_p1x1___csclk_drv.
+ * 18 = mio___mio___uaa0___u_csclk_drv.
+ * 19 = mio___mio___uaa1___u_csclk_drv.
+ * 20 = mio___mio___uaa2___u_csclk_drv.
+ * 21 = mio___mio___uaa3___u_csclk_drv.
+ * 22 = nic___nic___nic_l___nic_l1___nic_clk___csclk_drv.
+ * 23 = nic___nic___nic_l___nic_l2___nic_clk___csclk_drv.
+ * 24 = nic___nic___nic_u___nic_u1___nic_clk___csclk_drv.
+ * 25 = pem0___pem___pem_clks___csclk_drv.
+ * 26 = pem0___pem___pem_clks___sync_pwr_thr_pclk.
+ * 27 = pem1___pem___pem_clks___csclk_drv.
+ * 28 = pem1___pem___pem_clks___sync_pwr_thr_pclk.
+ * 29 = pem2___pem___pem_clks___csclk_drv.
+ * 30 = pem2___pem___pem_clks___sync_pwr_thr_pclk.
+ * 31 = pem3___pem___pem_clks___csclk_drv.
+ * 32 = pem3___pem___pem_clks___sync_pwr_thr_pclk.
+ * 33 = pki___pki___pdp___pfe___csclk_drv.
+ * 34 = pki___pki___pdp___pbe___csclk_drv.
+ * 35 = pki___pki___pix___ipec0___csclk_drv.
+ * 36 = pki___pki___pix___ipec1___csclk_drv.
+ * 37 = pki___pki___pix___mech___csclk_drv.
+ * 38 = roc_ocla___roc_ocla___core___clks___csclk_drv.
+ * 39 = rst___rst___mio_clk_ctl___csclk_drv.
+ * 40 = sata0___sata___u_csclk_drv.
+ * 41 = sata0___sata___u_csclk_drv.
+ * 42 = sata0___sata___u_csclk_drv.
+ * 43 = sata0___sata___u_csclk_drv.
+ * 44 = sata0___sata___u_csclk_drv.
+ * 45 = sata0___sata___u_csclk_drv.
+ * 46 = smmu___smmu___wcsr___gbl___crclk_drv.
+ * 47 = smmu___smmu___wcsr___gbl___u_c2rclk_drv.
+ * 48 = smmu___smmu___wcsr___gbl___u_c2rclk_drv_n.
+ * 49 = smmu___smmu___xl___ctl___crclk_drv.
+ * 50 = sso___sso___sso_pnr___sso_aw___clk___csclk_drv.
+ * 51 = sso___sso___sso_pnr___sso_gw___clk___csclk_drv.
+ * 52 = sso___sso___sso_pnr___sso_ws___clk___csclk_drv.
+ * 53 = usbdrd0___usbdrd_i___u_csclk_drv.
+ * 54 = usbdrd0___usbdrd_i___u_csclk_drv.
+ * 55 = zipc0___zipc___zipc_clk___zip_hash_csclk_drv.
+ * 56 = zipc0___zipc___zipc_clk___zip_history_csclk_drv.
+ * 57 = zipc0___zipc___zipc_clk___zip_state_csclk_drv.
+ * 58 = zipc0___zipc___zipc_clk___zip_sha_csclk_drv.
+ * 59 = zipc1___zipc___zipc_clk___zip_hash_csclk_drv.
+ * 60 = zipc1___zipc___zipc_clk___zip_history_csclk_drv.
+ * 61 = zipc1___zipc___zipc_clk___zip_state_csclk_drv.
+ * 62 = zipc1___zipc___zipc_clk___zip_sha_csclk_drv.
+ * 63 = zipc2___zipc___zipc_clk___zip_hash_csclk_drv.
+ * 64 = zipc2___zipc___zipc_clk___zip_history_csclk_drv.
+ * 65 = zipc2___zipc___zipc_clk___zip_state_csclk_drv.
+ * 66 = zipc2___zipc___zipc_clk___zip_sha_csclk_drv.
+ * 67 = zipd3___zipd___zipd_clk___zip_history_csclk_drv.
+ * 68 = zipd3___zipd___zipd_clk___zip_state_csclk_drv.
+ * 69 = zipd3___zipd___zipd_clk___zip_sha_csclk_drv.
+ * 70 = zipd4___zipd___zipd_clk___zip_history_csclk_drv.
+ * 71 = zipd4___zipd___zipd_clk___zip_state_csclk_drv.
+ * 72 = zipd4___zipd___zipd_clk___zip_sha_csclk_drv.
+ * 73 = zipd5___zipd___zipd_clk___zip_history_csclk_drv.
+ * 74 = zipd5___zipd___zipd_clk___zip_state_csclk_drv.
+ * 75 = zipd5___zipd___zipd_clk___zip_sha_csclk_drv.
+ *
+ * NOTE(review): entries 40-45 all read "sata0" and entries 53-54 both
+ * read "usbdrd0" in the generated description; presumably these were
+ * meant to enumerate sata0..sata5 and usbdrd0..usbdrd1 -- confirm
+ * against the hardware reference manual before relying on this list.
+ */
+union bdk_iobnx_cond_clk_capx
+{
+ uint64_t u;
+ struct bdk_iobnx_cond_clk_capx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t cap : 16; /**< [ 15: 0](R/W) Conditional clock capacitance for drivers. (cap value * 0.9/128.)
+ For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t cap : 16; /**< [ 15: 0](R/W) Conditional clock capacitance for drivers. (cap value * 0.9/128.)
+ For diagnostic use only. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_cond_clk_capx_s cn; */
+};
+typedef union bdk_iobnx_cond_clk_capx bdk_iobnx_cond_clk_capx_t;
+
+static inline uint64_t BDK_IOBNX_COND_CLK_CAPX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_COND_CLK_CAPX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=75)))
+ return 0x87e0f000f000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x7f);
+ __bdk_csr_fatal("IOBNX_COND_CLK_CAPX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_COND_CLK_CAPX(a,b) bdk_iobnx_cond_clk_capx_t
+#define bustype_BDK_IOBNX_COND_CLK_CAPX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_COND_CLK_CAPX(a,b) "IOBNX_COND_CLK_CAPX"
+#define device_bar_BDK_IOBNX_COND_CLK_CAPX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_COND_CLK_CAPX(a,b) (a)
+#define arguments_BDK_IOBNX_COND_CLK_CAPX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_const
+ *
+ * IOBN Constant Registers
+ * This register returns discovery information.
+ */
+union bdk_iobnx_const
+{
+ uint64_t u;
+ struct bdk_iobnx_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t arbs : 8; /**< [ 23: 16](RO) Maximum number of grants on any NCB bus attached to this IOB. */
+ uint64_t ncbs : 8; /**< [ 15: 8](RO) Number of physical NCB buses attached to this IOB. */
+ uint64_t iobs : 8; /**< [ 7: 0](RO) Number of IOBs. */
+#else /* Word 0 - Little Endian */
+ uint64_t iobs : 8; /**< [ 7: 0](RO) Number of IOBs. */
+ uint64_t ncbs : 8; /**< [ 15: 8](RO) Number of physical NCB buses attached to this IOB. */
+ uint64_t arbs : 8; /**< [ 23: 16](RO) Maximum number of grants on any NCB bus attached to this IOB. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_const_s cn; */
+};
+typedef union bdk_iobnx_const bdk_iobnx_const_t;
+
+static inline uint64_t BDK_IOBNX_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0000000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_CONST(a) bdk_iobnx_const_t
+#define bustype_BDK_IOBNX_CONST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_CONST(a) "IOBNX_CONST"
+#define device_bar_BDK_IOBNX_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_CONST(a) (a)
+#define arguments_BDK_IOBNX_CONST(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_core_bist_status
+ *
+ * IOBN Cores BIST Status Register
+ * This register contains the result of the BIST run on the cores.
+ */
+union bdk_iobnx_core_bist_status
+{
+ uint64_t u;
+ struct bdk_iobnx_core_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t core_bstat : 24; /**< [ 23: 0](RO/H) BIST status of the cores. IOBN0 contains the BIST status for the even numbered cores and
+ IOBN1 contains the BIST status for the odd numbered cores.
+
+ \<pre\>
+ BIT IOBN0 IOBN0 MASK BIT IOBN1 IOBN1 MASK BIT
+ [0] Core 0 \<0\> Core 1 \<1\>
+ [1] Core 2 \<2\> Core 3 \<3\>
+ ...
+ [23] Core 46 \<46\> Core 47 \<47\>
+ \</pre\>
+
+ Software must logically AND CORE_BSTAT bits with appropriate bits from RST_PP_AVAILABLE
+ before using them. The "IOBN0 MASK BIT" column in the table above shows the
+ RST_PP_AVAILABLE bits to use with IOBN0. The "IOBN1 MASK BIT" column in the
+ table above shows the RST_PP_AVAILABLE bits to use with IOBN1. */
+#else /* Word 0 - Little Endian */
+ uint64_t core_bstat : 24; /**< [ 23: 0](RO/H) BIST status of the cores. IOBN0 contains the BIST status for the even numbered cores and
+ IOBN1 contains the BIST status for the odd numbered cores.
+
+ \<pre\>
+ BIT IOBN0 IOBN0 MASK BIT IOBN1 IOBN1 MASK BIT
+ [0] Core 0 \<0\> Core 1 \<1\>
+ [1] Core 2 \<2\> Core 3 \<3\>
+ ...
+ [23] Core 46 \<46\> Core 47 \<47\>
+ \</pre\>
+
+ Software must logically AND CORE_BSTAT bits with appropriate bits from RST_PP_AVAILABLE
+ before using them. The "IOBN0 MASK BIT" column in the table above shows the
+ RST_PP_AVAILABLE bits to use with IOBN0. The "IOBN1 MASK BIT" column in the
+ table above shows the RST_PP_AVAILABLE bits to use with IOBN1. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_iobnx_core_bist_status_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t core_bstat : 4; /**< [ 3: 0](RO/H) BIST status of the cores. IOBN0 contains the BIST status for the even numbered cores and
+ IOBN1 contains the BIST status for the odd numbered cores.
+
+ \<pre\>
+ BIT IOBN
+ [0] Core 0
+ [1] Core 1
+ [2] Core 2
+ [3] Core 3
+ \</pre\>
+
+ Software must bit-wise logical AND CORE_BSTAT with RST_PP_AVAILABLE before using it. */
+#else /* Word 0 - Little Endian */
+ uint64_t core_bstat : 4; /**< [ 3: 0](RO/H) BIST status of the cores. IOBN0 contains the BIST status for the even numbered cores and
+ IOBN1 contains the BIST status for the odd numbered cores.
+
+ \<pre\>
+ BIT IOBN
+ [0] Core 0
+ [1] Core 1
+ [2] Core 2
+ [3] Core 3
+ \</pre\>
+
+ Software must bit-wise logical AND CORE_BSTAT with RST_PP_AVAILABLE before using it. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_iobnx_core_bist_status_s cn88xx; */
+ struct bdk_iobnx_core_bist_status_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t core_bstat : 24; /**< [ 23: 0](RO/H) BIST status of the cores. IOBN0 contains the BIST status for all cores. IOBN1
+ always returns 0x0.
+
+ Software must bit-wise logical AND CORE_BSTAT with RST_PP_AVAILABLE before using
+ it. */
+#else /* Word 0 - Little Endian */
+ uint64_t core_bstat : 24; /**< [ 23: 0](RO/H) BIST status of the cores. IOBN0 contains the BIST status for all cores. IOBN1
+ always returns 0x0.
+
+ Software must bit-wise logical AND CORE_BSTAT with RST_PP_AVAILABLE before using
+ it. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_iobnx_core_bist_status bdk_iobnx_core_bist_status_t;
+
+static inline uint64_t BDK_IOBNX_CORE_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_CORE_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f0005008ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f0005008ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f0005008ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_CORE_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_CORE_BIST_STATUS(a) bdk_iobnx_core_bist_status_t
+#define bustype_BDK_IOBNX_CORE_BIST_STATUS(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_CORE_BIST_STATUS(a) "IOBNX_CORE_BIST_STATUS"
+#define device_bar_BDK_IOBNX_CORE_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_CORE_BIST_STATUS(a) (a)
+#define arguments_BDK_IOBNX_CORE_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_dis_ncbi_io
+ *
+ * IOBN Disable NCBI IO Register
+ * IOBN control.
+ */
+union bdk_iobnx_dis_ncbi_io
+{
+ uint64_t u;
+ struct bdk_iobnx_dis_ncbi_io_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t sli_key_mem : 1; /**< [ 5: 5](R/W) SLI KEY memory isolation.
+ 0 = SLI initiated requests are allowed.
+ 1 = SLI initiated read and write requests are allowed to
+ KEY_MEM(0..2047) (e.g. 16KB, not all of KEY_MEM) only.
+ SLI requests to any other address (non-KEY_MEM(0..2047))
+ will be redirected to ECAM0_NOP_ZF. */
+ uint64_t sli_off : 1; /**< [ 4: 4](R/W) SLI isolation.
+ Resets to one in trusted mode, else zero.
+ 0 = Operation of NCBI transactions are not impacted.
+ 1 = NCBI transactions initiating at the SLI are disabled and
+ turn into accesses to ECAM0_NOP_ZF. When set, this bit
+ overrides [SLI_KEY_MEM]. */
+ uint64_t tlb_sync_dis : 1; /**< [ 3: 3](R/W) When set the IOBN will return SYNC-RDY to the SMMU without waiting for
+ outstanding request to receive responses. */
+ uint64_t oci_key_only : 1; /**< [ 2: 2](RO) Restrict CCPI-sourced I/O write requests.
+
+ 0 = CCPI-sourced I/O read and write requests are allowed to any device through
+ IOB, including allowing read/writes to all of KEY_MEM().
+
+ 1 = CCPI-sourced I/O write requests allowed to KEY_MEM(0..2047) (e.g. 16KB, not
+ all of KEY_MEM) only. CCPI-sourced writes to __any__ other address
+ (non-KEY_MEM(0..2047)), or any CCPI-source read will be redirected to
+ ECAM0_NOP_ZF (for non-ECAM) or ECAM0_NOP_ONNF (for-ECAM).
+
+ This setting does not affect local-node originated traffic.
+
+ In pass 1, read-only. */
+ uint64_t all_gic : 1; /**< [ 1: 1](R/W) All-to-GIC. For diagnostic use only.
+ Internal:
+ 0 = Normal operation. NCBI traffic to GIC interrupt delivery registers will be ordered
+ with other interrupt delivery traffic and over the RIB bus. NCBI traffic to normal non-
+ interrupt-delivery GIC registers will go via RSL.
+ 1 = All NCBI traffic to the GIC DID will be assumed to be interrupt delivery traffic.
+ This will break NCBI write transactions to non-interrupt-delivery GIC registers, but may
+ work around bugs whereby interrupt-delivery CSRs are mis-categorized inside IOB. */
+ uint64_t ncbi_off : 1; /**< [ 0: 0](R/W) When set NCBI translation to I/O space (with exception of GIC traffic) will be disabled.
+ Disabled traffic will turn into access to ECAM0_NOP_ZF. */
+#else /* Word 0 - Little Endian */
+ uint64_t ncbi_off : 1; /**< [ 0: 0](R/W) When set NCBI translation to I/O space (with exception of GIC traffic) will be disabled.
+ Disabled traffic will turn into access to ECAM0_NOP_ZF. */
+ uint64_t all_gic : 1; /**< [ 1: 1](R/W) All-to-GIC. For diagnostic use only.
+ Internal:
+ 0 = Normal operation. NCBI traffic to GIC interrupt delivery registers will be ordered
+ with other interrupt delivery traffic and over the RIB bus. NCBI traffic to normal non-
+ interrupt-delivery GIC registers will go via RSL.
+ 1 = All NCBI traffic to the GIC DID will be assumed to be interrupt delivery traffic.
+ This will break NCBI write transactions to non-interrupt-delivery GIC registers, but may
+ work around bugs whereby interrupt-delivery CSRs are mis-categorized inside IOB. */
+ uint64_t oci_key_only : 1; /**< [ 2: 2](RO) Restrict CCPI-sourced I/O write requests.
+
+ 0 = CCPI-sourced I/O read and write requests are allowed to any device through
+ IOB, including allowing read/writes to all of KEY_MEM().
+
+ 1 = CCPI-sourced I/O write requests allowed to KEY_MEM(0..2047) (e.g. 16KB, not
+ all of KEY_MEM) only. CCPI-sourced writes to __any__ other address
+ (non-KEY_MEM(0..2047)), or any CCPI-source read will be redirected to
+ ECAM0_NOP_ZF (for non-ECAM) or ECAM0_NOP_ONNF (for-ECAM).
+
+ This setting does not affect local-node originated traffic.
+
+ In pass 1, read-only. */
+ uint64_t tlb_sync_dis : 1; /**< [ 3: 3](R/W) When set the IOBN will return SYNC-RDY to the SMMU without waiting for
+ outstanding request to receive responses. */
+ uint64_t sli_off : 1; /**< [ 4: 4](R/W) SLI isolation.
+ Resets to one in trusted mode, else zero.
+ 0 = Operation of NCBI transactions are not impacted.
+ 1 = NCBI transactions initiating at the SLI are disabled and
+ turn into accesses to ECAM0_NOP_ZF. When set, this bit
+ overrides [SLI_KEY_MEM]. */
+ uint64_t sli_key_mem : 1; /**< [ 5: 5](R/W) SLI KEY memory isolation.
+ 0 = SLI initiated requests are allowed.
+ 1 = SLI initiated read and write requests are allowed to
+ KEY_MEM(0..2047) (e.g. 16KB, not all of KEY_MEM) only.
+ SLI requests to any other address (non-KEY_MEM(0..2047))
+ will be redirected to ECAM0_NOP_ZF. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_iobnx_dis_ncbi_io_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t tlb_sync_dis : 1; /**< [ 3: 3](R/W) When set the IOBN will return SYNC-RDY to the SMMU without waiting for
+ outstanding request to receive responses. */
+ uint64_t oci_key_only : 1; /**< [ 2: 2](RO) Restrict CCPI-sourced I/O write requests.
+
+ 0 = CCPI-sourced I/O read and write requests are allowed to any device through
+ IOB, including allowing read/writes to all of KEY_MEM().
+
+ 1 = CCPI-sourced I/O write requests allowed to KEY_MEM(0..2047) (e.g. 16KB, not
+ all of KEY_MEM) only. CCPI-sourced writes to __any__ other address
+ (non-KEY_MEM(0..2047)), or any CCPI-source read will be redirected to
+ ECAM0_NOP_ZF (for non-ECAM) or ECAM0_NOP_ONNF (for-ECAM).
+
+ This setting does not affect local-node originated traffic.
+
+ In pass 1, read-only. */
+ uint64_t all_gic : 1; /**< [ 1: 1](R/W) All-to-GIC. For diagnostic use only.
+ Internal:
+ 0 = Normal operation. NCBI traffic to GIC interrupt delivery registers will be ordered
+ with other interrupt delivery traffic and over the RIB bus. NCBI traffic to normal non-
+ interrupt-delivery GIC registers will go via RSL.
+ 1 = All NCBI traffic to the GIC DID will be assumed to be interrupt delivery traffic.
+ This will break NCBI write transactions to non-interrupt-delivery GIC registers, but may
+ work around bugs whereby interrupt-delivery CSRs are mis-categorized inside IOB. */
+ uint64_t ncbi_off : 1; /**< [ 0: 0](R/W) When set NCBI translation to I/O space (with exception of GIC traffic) will be disabled.
+ Disabled traffic will turn into access to ECAM0_NOP_ZF. */
+#else /* Word 0 - Little Endian */
+ uint64_t ncbi_off : 1; /**< [ 0: 0](R/W) When set NCBI translation to I/O space (with exception of GIC traffic) will be disabled.
+ Disabled traffic will turn into access to ECAM0_NOP_ZF. */
+ uint64_t all_gic : 1; /**< [ 1: 1](R/W) All-to-GIC. For diagnostic use only.
+ Internal:
+ 0 = Normal operation. NCBI traffic to GIC interrupt delivery registers will be ordered
+ with other interrupt delivery traffic and over the RIB bus. NCBI traffic to normal non-
+ interrupt-delivery GIC registers will go via RSL.
+ 1 = All NCBI traffic to the GIC DID will be assumed to be interrupt delivery traffic.
+ This will break NCBI write transactions to non-interrupt-delivery GIC registers, but may
+ work around bugs whereby interrupt-delivery CSRs are mis-categorized inside IOB. */
+ uint64_t oci_key_only : 1; /**< [ 2: 2](RO) Restrict CCPI-sourced I/O write requests.
+
+ 0 = CCPI-sourced I/O read and write requests are allowed to any device through
+ IOB, including allowing read/writes to all of KEY_MEM().
+
+ 1 = CCPI-sourced I/O write requests allowed to KEY_MEM(0..2047) (e.g. 16KB, not
+ all of KEY_MEM) only. CCPI-sourced writes to __any__ other address
+ (non-KEY_MEM(0..2047)), or any CCPI-source read will be redirected to
+ ECAM0_NOP_ZF (for non-ECAM) or ECAM0_NOP_ONNF (for-ECAM).
+
+ This setting does not affect local-node originated traffic.
+
+ In pass 1, read-only. */
+ uint64_t tlb_sync_dis : 1; /**< [ 3: 3](R/W) When set the IOBN will return SYNC-RDY to the SMMU without waiting for
+ outstanding request to receive responses. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_iobnx_dis_ncbi_io_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t tlb_sync_dis : 1; /**< [ 3: 3](R/W) When set the IOBN will return SYNC-RDY to the SMMU without waiting for
+ outstanding request to receive responses. */
+ uint64_t oci_key_only : 1; /**< [ 2: 2](R/W) Restrict CCPI-sourced I/O write requests.
+
+ 0 = CCPI-sourced I/O read and write requests are allowed to any device through
+ IOB, including allowing read/writes to all of KEY_MEM().
+
+ 1 = CCPI-sourced I/O write requests allowed to KEY_MEM(0..2047) (e.g. 16KB, not
+ all of KEY_MEM) only. CCPI-sourced writes to __any__ other address
+ (non-KEY_MEM(0..2047)), or any CCPI-source read will be redirected to
+ ECAM0_NOP_ZF (for non-ECAM) or ECAM0_NOP_ONNF (for-ECAM).
+
+ This setting does not affect local-node originated traffic.
+
+ In pass 1, read-only. */
+ uint64_t all_gic : 1; /**< [ 1: 1](R/W) All-to-GIC. For diagnostic use only.
+ Internal:
+ 0 = Normal operation. NCBI traffic to GIC interrupt delivery registers will be ordered
+ with other interrupt delivery traffic and over the RIB bus. NCBI traffic to normal non-
+ interrupt-delivery GIC registers will go via RSL.
+ 1 = All NCBI traffic to the GIC DID will be assumed to be interrupt delivery traffic.
+ This will break NCBI write transactions to non-interrupt-delivery GIC registers, but may
+ work around bugs whereby interrupt-delivery CSRs are mis-categorized inside IOB. */
+ uint64_t ncbi_off : 1; /**< [ 0: 0](R/W) When set NCBI translation to I/O space (with exception of GIC traffic) will be disabled.
+ Disabled traffic will turn into access to ECAM0_NOP_ZF. */
+#else /* Word 0 - Little Endian */
+ uint64_t ncbi_off : 1; /**< [ 0: 0](R/W) When set NCBI translation to I/O space (with exception of GIC traffic) will be disabled.
+ Disabled traffic will turn into access to ECAM0_NOP_ZF. */
+ uint64_t all_gic : 1; /**< [ 1: 1](R/W) All-to-GIC. For diagnostic use only.
+ Internal:
+ 0 = Normal operation. NCBI traffic to GIC interrupt delivery registers will be ordered
+ with other interrupt delivery traffic and over the RIB bus. NCBI traffic to normal non-
+ interrupt-delivery GIC registers will go via RSL.
+ 1 = All NCBI traffic to the GIC DID will be assumed to be interrupt delivery traffic.
+ This will break NCBI write transactions to non-interrupt-delivery GIC registers, but may
+ work around bugs whereby interrupt-delivery CSRs are mis-categorized inside IOB. */
+ uint64_t oci_key_only : 1; /**< [ 2: 2](R/W) Restrict CCPI-sourced I/O write requests.
+
+ 0 = CCPI-sourced I/O read and write requests are allowed to any device through
+ IOB, including allowing read/writes to all of KEY_MEM().
+
+ 1 = CCPI-sourced I/O write requests allowed to KEY_MEM(0..2047) (e.g. 16KB, not
+ all of KEY_MEM) only. CCPI-sourced writes to __any__ other address
+ (non-KEY_MEM(0..2047)), or any CCPI-source read will be redirected to
+ ECAM0_NOP_ZF (for non-ECAM) or ECAM0_NOP_ONNF (for-ECAM).
+
+ This setting does not affect local-node originated traffic.
+
+ In pass 1, read-only. */
+ uint64_t tlb_sync_dis : 1; /**< [ 3: 3](R/W) When set the IOBN will return SYNC-RDY to the SMMU without waiting for
+ outstanding request to receive responses. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_iobnx_dis_ncbi_io_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t sli_key_mem : 1; /**< [ 5: 5](R/W) SLI KEY memory isolation.
+ 0 = SLI initiated requests are allowed.
+ 1 = SLI initiated read and write requests are allowed to
+ KEY_MEM(0..2047) (e.g. 16KB, not all of KEY_MEM) only.
+ SLI requests to any other address (non-KEY_MEM(0..2047))
+ will be redirected to ECAM0_NOP_ZF. */
+ uint64_t sli_off : 1; /**< [ 4: 4](R/W) SLI isolation.
+ Resets to one in trusted mode, else zero.
+ 0 = Operation of NCBI transactions are not impacted.
+ 1 = NCBI transactions initiating at the SLI are disabled and
+ turn into accesses to ECAM0_NOP_ZF. When set, this bit
+ overrides [SLI_KEY_MEM]. */
+ uint64_t tlb_sync_dis : 1; /**< [ 3: 3](R/W) When set the IOBN will return SYNC-RDY to the SMMU without waiting for
+ outstanding request to receive responses. */
+ uint64_t oci_key_only : 1; /**< [ 2: 2](R/W) Restrict CCPI-sourced I/O write requests.
+
+ 0 = CCPI-sourced I/O read and write requests are allowed to any device through
+ IOB, including allowing read/writes to all of KEY_MEM().
+
+ 1 = CCPI-sourced I/O write requests allowed to KEY_MEM(0..2047) (e.g. 16KB, not
+ all of KEY_MEM) only. CCPI-sourced writes to __any__ other address
+ (non-KEY_MEM(0..2047)), or any CCPI-source read will be redirected to
+ ECAM0_NOP_ZF (for non-ECAM) or ECAM0_NOP_ONNF (for-ECAM).
+
+ This setting does not affect local-node originated traffic.
+
+ In pass 1, read-only. */
+ uint64_t all_gic : 1; /**< [ 1: 1](R/W) All-to-GIC. For diagnostic use only.
+ Internal:
+ 0 = Normal operation. NCBI traffic to GIC interrupt delivery registers will be ordered
+ with other interrupt delivery traffic and over the RIB bus. NCBI traffic to normal non-
+ interrupt-delivery GIC registers will go via RSL.
+ 1 = All NCBI traffic to the GIC DID will be assumed to be interrupt delivery traffic.
+ This will break NCBI write transactions to non-interrupt-delivery GIC registers, but may
+ work around bugs whereby interrupt-delivery CSRs are mis-categorized inside IOB. */
+ uint64_t ncbi_off : 1; /**< [ 0: 0](R/W) When set NCBI translation to I/O space (with exception of GIC traffic) will be disabled.
+ Disabled traffic will turn into access to ECAM0_NOP_ZF. */
+#else /* Word 0 - Little Endian */
+ uint64_t ncbi_off : 1; /**< [ 0: 0](R/W) When set NCBI translation to I/O space (with exception of GIC traffic) will be disabled.
+ Disabled traffic will turn into access to ECAM0_NOP_ZF. */
+ uint64_t all_gic : 1; /**< [ 1: 1](R/W) All-to-GIC. For diagnostic use only.
+ Internal:
+ 0 = Normal operation. NCBI traffic to GIC interrupt delivery registers will be ordered
+ with other interrupt delivery traffic and over the RIB bus. NCBI traffic to normal non-
+ interrupt-delivery GIC registers will go via RSL.
+ 1 = All NCBI traffic to the GIC DID will be assumed to be interrupt delivery traffic.
+ This will break NCBI write transactions to non-interrupt-delivery GIC registers, but may
+ work around bugs whereby interrupt-delivery CSRs are mis-categorized inside IOB. */
+ uint64_t oci_key_only : 1; /**< [ 2: 2](R/W) Restrict CCPI-sourced I/O write requests.
+
+ 0 = CCPI-sourced I/O read and write requests are allowed to any device through
+ IOB, including allowing read/writes to all of KEY_MEM().
+
+ 1 = CCPI-sourced I/O write requests allowed to KEY_MEM(0..2047) (e.g. 16KB, not
+ all of KEY_MEM) only. CCPI-sourced writes to __any__ other address
+ (non-KEY_MEM(0..2047)), or any CCPI-source read will be redirected to
+ ECAM0_NOP_ZF (for non-ECAM) or ECAM0_NOP_ONNF (for-ECAM).
+
+ This setting does not affect local-node originated traffic.
+
+ In pass 1, read-only. */
+ uint64_t tlb_sync_dis : 1; /**< [ 3: 3](R/W) When set the IOBN will return SYNC-RDY to the SMMU without waiting for
+ outstanding request to receive responses. */
+ uint64_t sli_off : 1; /**< [ 4: 4](R/W) SLI isolation.
+ Resets to one in trusted mode, else zero.
+ 0 = Operation of NCBI transactions are not impacted.
+ 1 = NCBI transactions initiating at the SLI are disabled and
+ turn into accesses to ECAM0_NOP_ZF. When set, this bit
+ overrides [SLI_KEY_MEM]. */
+ uint64_t sli_key_mem : 1; /**< [ 5: 5](R/W) SLI KEY memory isolation.
+ 0 = SLI initiated requests are allowed.
+ 1 = SLI initiated read and write requests are allowed to
+ KEY_MEM(0..2047) (e.g. 16KB, not all of KEY_MEM) only.
+ SLI requests to any other address (non-KEY_MEM(0..2047))
+ will be redirected to ECAM0_NOP_ZF. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } cn83xx;
+ /* struct bdk_iobnx_dis_ncbi_io_cn81xx cn88xxp2; */
+};
+typedef union bdk_iobnx_dis_ncbi_io bdk_iobnx_dis_ncbi_io_t;
+
+static inline uint64_t BDK_IOBNX_DIS_NCBI_IO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_DIS_NCBI_IO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f0003000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f0003000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f0003000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_DIS_NCBI_IO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_DIS_NCBI_IO(a) bdk_iobnx_dis_ncbi_io_t
+#define bustype_BDK_IOBNX_DIS_NCBI_IO(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_DIS_NCBI_IO(a) "IOBNX_DIS_NCBI_IO"
+#define device_bar_BDK_IOBNX_DIS_NCBI_IO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_DIS_NCBI_IO(a) (a)
+#define arguments_BDK_IOBNX_DIS_NCBI_IO(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_dll
+ *
+ * INTERNAL: IOBN Core-Clock DLL Status Register
+ *
+ * Status of the CCU core-clock DLL. For diagnostic use only.
+ */
+union bdk_iobnx_dll
+{
+ uint64_t u;
+ struct bdk_iobnx_dll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t dbg_window : 3; /**< [ 34: 32](R/W/H) Defines a debug window, during which the DLL settings and the phase detector
+ outputs will be monitored. The min and the max DLL setting during that window is
+ going to be reported as well as any illegal phase detector outputs. Every write
+ to the [DBG_WINDOW] resets [ILLEGAL_PD_REVERSED], [ILLEGAL_PD_LATE],
+ [ILLEGAL_PD_EARLY], [MAX_DLL_SETTING] and [MIN_DLL_SETTING]. The debug window
+ will correspond to the following number of rclk cycles based on the [DBG_WINDOW]
+ value.
+ 0x0 = Indefinetly.
+ 0x1 = 2 ^ 8 core clock cycles.
+ 0x2 = 2 ^ 12 core clock cycles.
+ 0x3 = 2 ^ 16 core clock cycles.
+ 0x4 = 2 ^ 20 core clock cycles.
+ 0x5 = 2 ^ 24 core clock cycles.
+ 0x6 = 2 ^ 28 core clock cycles.
+ 0x7 = 2 ^ 32 core clock cycles. */
+ uint64_t dbg_window_done : 1; /**< [ 31: 31](RO/H) Indicates if the debug window set by [DBG_WINDOW] is completed. */
+ uint64_t illegal_pd_reversed : 1; /**< [ 30: 30](RO/H) clk_fast_rgt and clk_fast_lft outputs of the phase detector had concurrently an
+ illegal reading during the last debug window set by [DBG_WINDOW]. */
+ uint64_t illegal_pd_late : 1; /**< [ 29: 29](RO/H) clk_fast_rgt output of the phase detector had an illegal reading (1) during the
+ last debug window set by [DBG_WINDOW]. */
+ uint64_t illegal_pd_early : 1; /**< [ 28: 28](RO/H) clk_fast_lft output of the phase detector had an illegal reading (0) during the
+ last debug window set by [DBG_WINDOW]. */
+ uint64_t reserved_27 : 1;
+ uint64_t max_dll_setting : 7; /**< [ 26: 20](RO/H) Max reported DLL setting during the last debug window set by [DBG_WINDOW]. */
+ uint64_t reserved_19 : 1;
+ uint64_t min_dll_setting : 7; /**< [ 18: 12](RO/H) Min reported DLL setting during the last debug window set by [DBG_WINDOW]. */
+ uint64_t pd_out : 3; /**< [ 11: 9](RO/H) Synchronized output from CCU phase detector:
+ \<11\> = clk_fast_mid.
+ \<10\> = clk_fast_lft.
+ \<9\> = clk_fast_rgt. */
+ uint64_t dll_lock : 1; /**< [ 8: 8](RO/H) The dll_lock signal from ROC core-clock DLL, from the positive edge of refclk. */
+ uint64_t reserved_7 : 1;
+ uint64_t dll_setting : 7; /**< [ 6: 0](RO/H) The ROC core-clock DLL setting, from the negative edge of refclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t dll_setting : 7; /**< [ 6: 0](RO/H) The ROC core-clock DLL setting, from the negative edge of refclk. */
+ uint64_t reserved_7 : 1;
+ uint64_t dll_lock : 1; /**< [ 8: 8](RO/H) The dll_lock signal from ROC core-clock DLL, from the positive edge of refclk. */
+ uint64_t pd_out : 3; /**< [ 11: 9](RO/H) Synchronized output from CCU phase detector:
+ \<11\> = clk_fast_mid.
+ \<10\> = clk_fast_lft.
+ \<9\> = clk_fast_rgt. */
+ uint64_t min_dll_setting : 7; /**< [ 18: 12](RO/H) Min reported DLL setting during the last debug window set by [DBG_WINDOW]. */
+ uint64_t reserved_19 : 1;
+ uint64_t max_dll_setting : 7; /**< [ 26: 20](RO/H) Max reported DLL setting during the last debug window set by [DBG_WINDOW]. */
+ uint64_t reserved_27 : 1;
+ uint64_t illegal_pd_early : 1; /**< [ 28: 28](RO/H) clk_fast_lft output of the phase detector had an illegal reading (0) during the
+ last debug window set by [DBG_WINDOW]. */
+ uint64_t illegal_pd_late : 1; /**< [ 29: 29](RO/H) clk_fast_rgt output of the phase detector had an illegal reading (1) during the
+ last debug window set by [DBG_WINDOW]. */
+ uint64_t illegal_pd_reversed : 1; /**< [ 30: 30](RO/H) clk_fast_rgt and clk_fast_lft outputs of the phase detector had concurrently an
+ illegal reading during the last debug window set by [DBG_WINDOW]. */
+ uint64_t dbg_window_done : 1; /**< [ 31: 31](RO/H) Indicates if the debug window set by [DBG_WINDOW] is completed. */
+ uint64_t dbg_window : 3; /**< [ 34: 32](R/W/H) Defines a debug window, during which the DLL settings and the phase detector
+ outputs will be monitored. The min and the max DLL setting during that window is
+ going to be reported as well as any illegal phase detector outputs. Every write
+ to the [DBG_WINDOW] resets [ILLEGAL_PD_REVERSED], [ILLEGAL_PD_LATE],
+ [ILLEGAL_PD_EARLY], [MAX_DLL_SETTING] and [MIN_DLL_SETTING]. The debug window
+ will correspond to the following number of rclk cycles based on the [DBG_WINDOW]
+ value.
+ 0x0 = Indefinetly.
+ 0x1 = 2 ^ 8 core clock cycles.
+ 0x2 = 2 ^ 12 core clock cycles.
+ 0x3 = 2 ^ 16 core clock cycles.
+ 0x4 = 2 ^ 20 core clock cycles.
+ 0x5 = 2 ^ 24 core clock cycles.
+ 0x6 = 2 ^ 28 core clock cycles.
+ 0x7 = 2 ^ 32 core clock cycles. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_dll_s cn; */
+};
+typedef union bdk_iobnx_dll bdk_iobnx_dll_t;
+
+static inline uint64_t BDK_IOBNX_DLL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_DLL(unsigned long a)
+{
+    /* IOBN()_DLL exists only on CN9XXX; valid IOB indices {a} are 0..1. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1))
+        __bdk_csr_fatal("IOBNX_DLL", 1, a, 0, 0, 0);
+    return 0x87e0f0003040ll + 0x1000000ll * (a & 0x1);
+}
+
+#define typedef_BDK_IOBNX_DLL(a) bdk_iobnx_dll_t
+#define bustype_BDK_IOBNX_DLL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_DLL(a) "IOBNX_DLL"
+#define device_bar_BDK_IOBNX_DLL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_DLL(a) (a)
+#define arguments_BDK_IOBNX_DLL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_dom#_bus#_streams
+ *
+ * IOBN Domain Bus Permit Registers
+ * This register sets the permissions for a NCBI transaction (which are DMA
+ * transactions or MSI-X writes), for requests for NCB device virtual-functions
+ * and bridges.
+ *
+ * Index {b} corresponds to the stream's domain (stream_id\<21:16\>).
+ *
+ * Index {c} corresponds to the stream's bus number (stream_id\<15:8\>).
+ *
+ * For each combination of index {b} and {c}, each index {a} (the IOB number) must be
+ * programmed to the same value.
+ *
+ * Streams which hit index {c}=0x0 are also affected by IOBN()_DOM()_DEV()_STREAMS.
+ * Streams which hit index {b}=PCC_DEV_CON_E::MRML\<21:16\>,
+ * {c}=PCC_DEV_CON_E::MRML\<15:8\> are also affected by IOBN()_RSL()_STREAMS.
+ * Both of those alternative registers provide better granularity, so those indices
+ * into this register should be left permissive (value of 0x0).
+ */
+union bdk_iobnx_domx_busx_streams
+{
+ /* [u] is the raw 64-bit register value; [s] overlays the named bitfields on it. */
+ uint64_t u;
+ struct bdk_iobnx_domx_busx_streams_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t strm_nsec : 1; /**< [ 1: 1](SR/W) Stream nonsecure.
+
+ 0 = The device's stream ID is marked secure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use secure world
+ lookup. The SMMU may, if properly configured, generate an outgoing physical
+ address that is secure.
+
+ 1 = The device's stream ID is marked nonsecure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use nonsecure world
+ lookup. The SMMU outgoing physical address will be nonsecure.
+
+ [STRM_NSEC] is ignored if the device is making a physical request (as these
+ transactions bypass the SMMU translation process).
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t phys_nsec : 1; /**< [ 0: 0](SR/W) Physical nonsecure.
+ 0 = When the device makes a physical request, IOB will use the device's
+ requested secure bit to determine if the request to DRAM/LLC is secure or not.
+ 1 = When the device makes a physical request, IOB will squash the
+ device's secure request and issue the request to DRAM/LLC as nonsecure.
+
+ Ignored if a device makes a non-physical request. (As non-physical requests
+ cause the SMMU to generate the SMMU-outgoing secure bit based on the SMMU
+ translation process, including [STRM_NSEC].)
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+#else /* Word 0 - Little Endian */
+ uint64_t phys_nsec : 1; /**< [ 0: 0](SR/W) Physical nonsecure.
+ 0 = When the device makes a physical request, IOB will use the device's
+ requested secure bit to determine if the request to DRAM/LLC is secure or not.
+ 1 = When the device makes a physical request, IOB will squash the
+ device's secure request and issue the request to DRAM/LLC as nonsecure.
+
+ Ignored if a device makes a non-physical request. (As non-physical requests
+ cause the SMMU to generate the SMMU-outgoing secure bit based on the SMMU
+ translation process, including [STRM_NSEC].)
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t strm_nsec : 1; /**< [ 1: 1](SR/W) Stream nonsecure.
+
+ 0 = The device's stream ID is marked secure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use secure world
+ lookup. The SMMU may, if properly configured, generate an outgoing physical
+ address that is secure.
+
+ 1 = The device's stream ID is marked nonsecure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use nonsecure world
+ lookup. The SMMU outgoing physical address will be nonsecure.
+
+ [STRM_NSEC] is ignored if the device is making a physical request (as these
+ transactions bypass the SMMU translation process).
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_domx_busx_streams_s cn; */
+};
+typedef union bdk_iobnx_domx_busx_streams bdk_iobnx_domx_busx_streams_t;
+
+static inline uint64_t BDK_IOBNX_DOMX_BUSX_STREAMS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_DOMX_BUSX_STREAMS(unsigned long a, unsigned long b, unsigned long c)
+{
+    /* CN9XXX only: IOB {a} 0..1, domain {b} 0..6, bus {c} 0..255. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1) || (b > 6) || (c > 255))
+        __bdk_csr_fatal("IOBNX_DOMX_BUSX_STREAMS", 3, a, b, c, 0);
+    return 0x87e0f0040000ll + 0x1000000ll * (a & 0x1) + 0x800ll * (b & 0x7) + 8ll * (c & 0xff);
+}
+
+#define typedef_BDK_IOBNX_DOMX_BUSX_STREAMS(a,b,c) bdk_iobnx_domx_busx_streams_t
+#define bustype_BDK_IOBNX_DOMX_BUSX_STREAMS(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_DOMX_BUSX_STREAMS(a,b,c) "IOBNX_DOMX_BUSX_STREAMS"
+#define device_bar_BDK_IOBNX_DOMX_BUSX_STREAMS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_DOMX_BUSX_STREAMS(a,b,c) (a)
+#define arguments_BDK_IOBNX_DOMX_BUSX_STREAMS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) iobn#_dom#_dev#_streams
+ *
+ * IOBN Device Bus Permit Registers
+ * This register sets the permissions for a NCBI transaction (which are DMA
+ * transactions or MSI-X writes), for requests for NCB device physical functions,
+ * i.e. those where:
+ *
+ * _ stream_id\<15:8\> = 0x0.
+ *
+ * Index {b} corresponds to the stream's domain number (stream_id\<21:16\>).
+ *
+ * Index {c} corresponds to the non-ARI ECAM device number (stream_id\<7:3\>).
+ *
+ * For each combination of index {b} and {c}, each index {a} (the IOB number) must be
+ * programmed to the same value.
+ */
+union bdk_iobnx_domx_devx_streams
+{
+ /* [u] is the raw 64-bit register value; [s] overlays the named bitfields on it. */
+ uint64_t u;
+ struct bdk_iobnx_domx_devx_streams_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t strm_nsec : 1; /**< [ 1: 1](SR/W) Stream nonsecure.
+
+ 0 = The device's stream ID is marked secure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use secure world
+ lookup. The SMMU may, if properly configured, generate an outgoing physical
+ address that is secure.
+
+ 1 = The device's stream ID is marked nonsecure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use nonsecure world
+ lookup. The SMMU outgoing physical address will be nonsecure.
+
+ [STRM_NSEC] is ignored if the device is making a physical request (as these
+ transactions bypass the SMMU translation process).
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t phys_nsec : 1; /**< [ 0: 0](SR/W) Physical nonsecure.
+ 0 = When the device makes a physical request, IOB will use the device's
+ requested secure bit to determine if the request to DRAM/LLC is secure or not.
+ 1 = When the device makes a physical request, IOB will squash the
+ device's secure request and issue the request to DRAM/LLC as nonsecure.
+
+ Ignored if a device makes a non-physical request. (As non-physical requests
+ cause the SMMU to generate the SMMU-outgoing secure bit based on the SMMU
+ translation process, including [STRM_NSEC].)
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+#else /* Word 0 - Little Endian */
+ uint64_t phys_nsec : 1; /**< [ 0: 0](SR/W) Physical nonsecure.
+ 0 = When the device makes a physical request, IOB will use the device's
+ requested secure bit to determine if the request to DRAM/LLC is secure or not.
+ 1 = When the device makes a physical request, IOB will squash the
+ device's secure request and issue the request to DRAM/LLC as nonsecure.
+
+ Ignored if a device makes a non-physical request. (As non-physical requests
+ cause the SMMU to generate the SMMU-outgoing secure bit based on the SMMU
+ translation process, including [STRM_NSEC].)
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t strm_nsec : 1; /**< [ 1: 1](SR/W) Stream nonsecure.
+
+ 0 = The device's stream ID is marked secure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use secure world
+ lookup. The SMMU may, if properly configured, generate an outgoing physical
+ address that is secure.
+
+ 1 = The device's stream ID is marked nonsecure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use nonsecure world
+ lookup. The SMMU outgoing physical address will be nonsecure.
+
+ [STRM_NSEC] is ignored if the device is making a physical request (as these
+ transactions bypass the SMMU translation process).
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_domx_devx_streams_s cn; */
+};
+typedef union bdk_iobnx_domx_devx_streams bdk_iobnx_domx_devx_streams_t;
+
+static inline uint64_t BDK_IOBNX_DOMX_DEVX_STREAMS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_DOMX_DEVX_STREAMS(unsigned long a, unsigned long b, unsigned long c)
+{
+    /* CN9XXX only: IOB {a} 0..1, domain {b} 0..6, device {c} 0..31. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1) || (b > 6) || (c > 31))
+        __bdk_csr_fatal("IOBNX_DOMX_DEVX_STREAMS", 3, a, b, c, 0);
+    return 0x87e0f0010000ll + 0x1000000ll * (a & 0x1) + 0x100ll * (b & 0x7) + 8ll * (c & 0x1f);
+}
+
+#define typedef_BDK_IOBNX_DOMX_DEVX_STREAMS(a,b,c) bdk_iobnx_domx_devx_streams_t
+#define bustype_BDK_IOBNX_DOMX_DEVX_STREAMS(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_DOMX_DEVX_STREAMS(a,b,c) "IOBNX_DOMX_DEVX_STREAMS"
+#define device_bar_BDK_IOBNX_DOMX_DEVX_STREAMS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_DOMX_DEVX_STREAMS(a,b,c) (a)
+#define arguments_BDK_IOBNX_DOMX_DEVX_STREAMS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) iobn#_gbl_dll
+ *
+ * INTERNAL: IOBN Global Core-Clock DLL Status Register
+ *
+ * Status of the global core-clock DLL.
+ */
+union bdk_iobnx_gbl_dll
+{
+ /* [u] is the raw 64-bit register value; [s] overlays the named bitfields on it. */
+ uint64_t u;
+ struct bdk_iobnx_gbl_dll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t pdr_rclk_refclk : 1; /**< [ 19: 19](RO/H) Synchronized pdr_rclk_refclk from global core-clock DLL cmb0 phase detectors. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 18: 18](RO/H) Synchronized pdl_rclk_refclk from global core-clock DLL cmb0 phase detectors. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 17: 17](RO/H) Synchronized pd_pos_rclk_refclk from global core-clock DLL cmb0 phase detectors. */
+ uint64_t dll_fsm_state_a : 3; /**< [ 16: 14](RO/H) State for the global core-clock DLL, from the positive edge of refclk.
+ 0x0 = TMD_IDLE.
+ 0x1 = TMD_STATE1.
+ 0x2 = TMD_STATE2.
+ 0x3 = TMD_STATE3.
+ 0x4 = TMD_STATE4.
+ 0x5 = TMD_LOCKED. */
+ uint64_t dll_lock : 1; /**< [ 13: 13](RO/H) The dll_lock signal from global core-clock DLL, from the positive edge of refclk. */
+ uint64_t dll_clk_invert_out : 1; /**< [ 12: 12](RO/H) The clk_invert setting from the global core-clock DLL, from the negative edge of refclk. */
+ uint64_t dll_setting : 12; /**< [ 11: 0](RO/H) The global core-clock DLL setting, from the negative edge of refclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t dll_setting : 12; /**< [ 11: 0](RO/H) The global core-clock DLL setting, from the negative edge of refclk. */
+ uint64_t dll_clk_invert_out : 1; /**< [ 12: 12](RO/H) The clk_invert setting from the global core-clock DLL, from the negative edge of refclk. */
+ uint64_t dll_lock : 1; /**< [ 13: 13](RO/H) The dll_lock signal from global core-clock DLL, from the positive edge of refclk. */
+ uint64_t dll_fsm_state_a : 3; /**< [ 16: 14](RO/H) State for the global core-clock DLL, from the positive edge of refclk.
+ 0x0 = TMD_IDLE.
+ 0x1 = TMD_STATE1.
+ 0x2 = TMD_STATE2.
+ 0x3 = TMD_STATE3.
+ 0x4 = TMD_STATE4.
+ 0x5 = TMD_LOCKED. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 17: 17](RO/H) Synchronized pd_pos_rclk_refclk from global core-clock DLL cmb0 phase detectors. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 18: 18](RO/H) Synchronized pdl_rclk_refclk from global core-clock DLL cmb0 phase detectors. */
+ uint64_t pdr_rclk_refclk : 1; /**< [ 19: 19](RO/H) Synchronized pdr_rclk_refclk from global core-clock DLL cmb0 phase detectors. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_gbl_dll_s cn; */
+};
+typedef union bdk_iobnx_gbl_dll bdk_iobnx_gbl_dll_t;
+
+static inline uint64_t BDK_IOBNX_GBL_DLL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_GBL_DLL(unsigned long a)
+{
+    /* CN81XX has a single IOB (a must be 0); CN83XX and CN88XX each have two.
+       The base address is the same on all supported parts. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a == 0))
+        return 0x87e0f000a000ll;
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a <= 1))
+        return 0x87e0f000a000ll + 0x1000000ll * (a & 0x1);
+    __bdk_csr_fatal("IOBNX_GBL_DLL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_GBL_DLL(a) bdk_iobnx_gbl_dll_t
+#define bustype_BDK_IOBNX_GBL_DLL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_GBL_DLL(a) "IOBNX_GBL_DLL"
+#define device_bar_BDK_IOBNX_GBL_DLL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_GBL_DLL(a) (a)
+#define arguments_BDK_IOBNX_GBL_DLL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_int1
+ *
+ * IOBN Interrupt Summary Register
+ * This register contains the different interrupt-summary bits of the IOBN.
+ */
+union bdk_iobnx_int1
+{
+ /* [u] is the raw 64-bit register value; [s] overlays the named bitfields on it. */
+ uint64_t u;
+ struct bdk_iobnx_int1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t gibm_f : 1; /**< [ 0: 0](R/W1C/H) GIBM on NCB0 received a STDN with fault. */
+#else /* Word 0 - Little Endian */
+ uint64_t gibm_f : 1; /**< [ 0: 0](R/W1C/H) GIBM on NCB0 received a STDN with fault. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_int1_s cn; */
+};
+typedef union bdk_iobnx_int1 bdk_iobnx_int1_t;
+
+static inline uint64_t BDK_IOBNX_INT1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_INT1(unsigned long a)
+{
+    /* IOBN()_INT1 exists only on CN9XXX; valid IOB indices {a} are 0..1. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1))
+        __bdk_csr_fatal("IOBNX_INT1", 1, a, 0, 0, 0);
+    return 0x87e0f000a000ll + 0x1000000ll * (a & 0x1);
+}
+
+#define typedef_BDK_IOBNX_INT1(a) bdk_iobnx_int1_t
+#define bustype_BDK_IOBNX_INT1(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_INT1(a) "IOBNX_INT1"
+#define device_bar_BDK_IOBNX_INT1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_INT1(a) (a)
+#define arguments_BDK_IOBNX_INT1(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_int1_ena_w1c
+ *
+ * IOBN Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_iobnx_int1_ena_w1c
+{
+ /* [u] is the raw 64-bit register value; [s] overlays the named bitfields on it. */
+ uint64_t u;
+ struct bdk_iobnx_int1_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t gibm_f : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT1[GIBM_F]. */
+#else /* Word 0 - Little Endian */
+ uint64_t gibm_f : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT1[GIBM_F]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_int1_ena_w1c_s cn; */
+};
+typedef union bdk_iobnx_int1_ena_w1c bdk_iobnx_int1_ena_w1c_t;
+
+static inline uint64_t BDK_IOBNX_INT1_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_INT1_ENA_W1C(unsigned long a)
+{
+    /* IOBN()_INT1_ENA_W1C exists only on CN9XXX; valid IOB indices {a} are 0..1. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1))
+        __bdk_csr_fatal("IOBNX_INT1_ENA_W1C", 1, a, 0, 0, 0);
+    return 0x87e0f000a010ll + 0x1000000ll * (a & 0x1);
+}
+
+#define typedef_BDK_IOBNX_INT1_ENA_W1C(a) bdk_iobnx_int1_ena_w1c_t
+#define bustype_BDK_IOBNX_INT1_ENA_W1C(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_INT1_ENA_W1C(a) "IOBNX_INT1_ENA_W1C"
+#define device_bar_BDK_IOBNX_INT1_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_INT1_ENA_W1C(a) (a)
+#define arguments_BDK_IOBNX_INT1_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_int1_ena_w1s
+ *
+ * IOBN Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_iobnx_int1_ena_w1s
+{
+ /* [u] is the raw 64-bit register value; [s] overlays the named bitfields on it. */
+ uint64_t u;
+ struct bdk_iobnx_int1_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t gibm_f : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT1[GIBM_F]. */
+#else /* Word 0 - Little Endian */
+ uint64_t gibm_f : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT1[GIBM_F]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_int1_ena_w1s_s cn; */
+};
+typedef union bdk_iobnx_int1_ena_w1s bdk_iobnx_int1_ena_w1s_t;
+
+static inline uint64_t BDK_IOBNX_INT1_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_INT1_ENA_W1S(unsigned long a)
+{
+    /* IOBN()_INT1_ENA_W1S exists only on CN9XXX; valid IOB indices {a} are 0..1. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1))
+        __bdk_csr_fatal("IOBNX_INT1_ENA_W1S", 1, a, 0, 0, 0);
+    return 0x87e0f000a018ll + 0x1000000ll * (a & 0x1);
+}
+
+#define typedef_BDK_IOBNX_INT1_ENA_W1S(a) bdk_iobnx_int1_ena_w1s_t
+#define bustype_BDK_IOBNX_INT1_ENA_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_INT1_ENA_W1S(a) "IOBNX_INT1_ENA_W1S"
+#define device_bar_BDK_IOBNX_INT1_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_INT1_ENA_W1S(a) (a)
+#define arguments_BDK_IOBNX_INT1_ENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_int1_w1s
+ *
+ * IOBN Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_iobnx_int1_w1s
+{
+ /* [u] is the raw 64-bit register value; [s] overlays the named bitfields on it. */
+ uint64_t u;
+ struct bdk_iobnx_int1_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t gibm_f : 1; /**< [ 0: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT1[GIBM_F]. */
+#else /* Word 0 - Little Endian */
+ uint64_t gibm_f : 1; /**< [ 0: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT1[GIBM_F]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_int1_w1s_s cn; */
+};
+typedef union bdk_iobnx_int1_w1s bdk_iobnx_int1_w1s_t;
+
+static inline uint64_t BDK_IOBNX_INT1_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_INT1_W1S(unsigned long a)
+{
+    /* IOBN()_INT1_W1S exists only on CN9XXX; valid IOB indices {a} are 0..1. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1))
+        __bdk_csr_fatal("IOBNX_INT1_W1S", 1, a, 0, 0, 0);
+    return 0x87e0f000a008ll + 0x1000000ll * (a & 0x1);
+}
+
+#define typedef_BDK_IOBNX_INT1_W1S(a) bdk_iobnx_int1_w1s_t
+#define bustype_BDK_IOBNX_INT1_W1S(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_INT1_W1S(a) "IOBNX_INT1_W1S"
+#define device_bar_BDK_IOBNX_INT1_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_INT1_W1S(a) (a)
+#define arguments_BDK_IOBNX_INT1_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_int_ena_w1c
+ *
+ * IOBN Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_iobnx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_iobnx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
+ uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_DBE].
+ Internal:
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_31 : 32;
+ uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_DBE].
+ Internal:
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_iobnx_int_ena_w1c_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ied0_dbe : 28; /**< [ 59: 32](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_DBE].
+ Internal:
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t ied0_sbe : 28; /**< [ 27: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_SBE].
+ Internal:
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t ied0_sbe : 28; /**< [ 27: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_SBE].
+ Internal:
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t ied0_dbe : 28; /**< [ 59: 32](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_DBE].
+ Internal:
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_iobnx_int_ena_w1c_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ncbo_ncb2_psn : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[NCBO_NCB2_PSN]. */
+ uint64_t ncbo_ncb1_psn : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[NCBO_NCB1_PSN]. */
+ uint64_t ncbo_ncb0_psn : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[NCBO_NCB0_PSN]. */
+ uint64_t ncbo_to : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[NCBO_TO]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ncbo_to : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[NCBO_TO]. */
+ uint64_t ncbo_ncb0_psn : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[NCBO_NCB0_PSN]. */
+ uint64_t ncbo_ncb1_psn : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[NCBO_NCB1_PSN]. */
+ uint64_t ncbo_ncb2_psn : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[NCBO_NCB2_PSN]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_iobnx_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) Reads or clears enable for IOBN(0)_INT_SUM[PEM_SIE]. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1C/H) Reads or clears enable for IOBN(0)_INT_SUM[IED0_DBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1C/H) Reads or clears enable for IOBN(0)_INT_SUM[IED0_SBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1C/H) Reads or clears enable for IOBN(0)_INT_SUM[IED0_SBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1C/H) Reads or clears enable for IOBN(0)_INT_SUM[IED0_DBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) Reads or clears enable for IOBN(0)_INT_SUM[PEM_SIE]. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_iobnx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
+ uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_DBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ sli_preq_3_dbe_sclk,
+ sli_req_3_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_31 : 1;
+ uint64_t ied0_sbe : 31; /**< [ 30: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_SBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ sli_preq_3_sbe_sclk,
+ sli_req_3_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t ied0_sbe : 31; /**< [ 30: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_SBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ sli_preq_3_sbe_sclk,
+ sli_req_3_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+ uint64_t reserved_31 : 1;
+ uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_DBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ sli_preq_3_dbe_sclk,
+ sli_req_3_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_iobnx_int_ena_w1c_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_DBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_SBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_SBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[IED0_DBE].
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) Reads or clears enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
/* Convenience typedef for the iobn#_int_ena_w1c register union defined above. */
typedef union bdk_iobnx_int_ena_w1c bdk_iobnx_int_ena_w1c_t;

/*
 * Return the physical CSR address of IOBN(a)_INT_ENA_W1C for the chip model
 * detected at runtime.
 *
 * The valid range of the IOBN instance index 'a' depends on the model:
 * CN81XX has a single IOBN (a == 0); CN83XX, CN88XX and CN9XXX have two
 * (a <= 1).  The '(a) & mask' term clamps the index into the per-model
 * instance stride of 0x1000000 bytes.  An out-of-range index (or an
 * unrecognized model) is reported through __bdk_csr_fatal().
 *
 * NOTE(review): there is no return statement after __bdk_csr_fatal(); this
 * is only well-defined if that function is declared noreturn -- confirm
 * against its declaration in the BDK CSR support header.
 */
static inline uint64_t BDK_IOBNX_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_IOBNX_INT_ENA_W1C(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
        return 0x87e0f0008000ll + 0x1000000ll * ((a) & 0x0);
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
        return 0x87e0f0008000ll + 0x1000000ll * ((a) & 0x1);
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
        return 0x87e0f0008000ll + 0x1000000ll * ((a) & 0x1);
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
        return 0x87e0f0008000ll + 0x1000000ll * ((a) & 0x1);
    __bdk_csr_fatal("IOBNX_INT_ENA_W1C", 1, a, 0, 0, 0);
}

/* Metadata accessors used by the generic BDK_CSR_* access macros. */
#define typedef_BDK_IOBNX_INT_ENA_W1C(a) bdk_iobnx_int_ena_w1c_t
#define bustype_BDK_IOBNX_INT_ENA_W1C(a) BDK_CSR_TYPE_RSL
#define basename_BDK_IOBNX_INT_ENA_W1C(a) "IOBNX_INT_ENA_W1C"
#define device_bar_BDK_IOBNX_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_IOBNX_INT_ENA_W1C(a) (a)
#define arguments_BDK_IOBNX_INT_ENA_W1C(a) (a),-1,-1,-1
+
/**
 * Register (RSL) iobn#_int_ena_w1s
 *
 * IOBN Interrupt Enable Set Register
 * This register sets interrupt enable bits.
 *
 * NOTE(review): machine-generated CSR description; only comments have been
 * edited.  The generated header enumerates every internal ECC/FIFO signal
 * behind the IED0_SBE/IED0_DBE bits; those long duplicated lists are
 * summarized below -- consult the vendor BDK header or the chip hardware
 * reference manual for the full per-bit signal enumeration.
 */
union bdk_iobnx_int_ena_w1s
{
    uint64_t u;  /* Whole-register (64-bit) view. */
    /* Generic layout common to the chip-specific variants below. */
    struct bdk_iobnx_int_ena_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pem_sie : 1;        /**< [ 63: 63](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
        uint64_t ied0_dbe : 31;      /**< [ 62: 32](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_DBE].
                                          Internal: one enable per internal-memory double-bit-error
                                          source (gmr/icc/sli/ixo/iop/rsd/ics/iobn/irp FIFOs and
                                          memories); see vendor header for the full list. */
        uint64_t reserved_0_31 : 32;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0_31 : 32;
        uint64_t ied0_dbe : 31;      /**< [ 62: 32](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_DBE].
                                          Internal: double-bit-error enables; see big-endian copy above. */
        uint64_t pem_sie : 1;        /**< [ 63: 63](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
#endif /* Word 0 - End */
    } s;
    /* CN88XX pass 1.x: 28 SBE/DBE enables, no PEM_SIE bit. */
    struct bdk_iobnx_int_ena_w1s_cn88xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_60_63 : 4;
        uint64_t ied0_dbe : 28;      /**< [ 59: 32](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_DBE].
                                          Internal: per-memory double-bit-error enables; see vendor
                                          header for the full signal list. */
        uint64_t reserved_28_31 : 4;
        uint64_t ied0_sbe : 28;      /**< [ 27: 0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_SBE].
                                          Internal: per-memory single-bit-error enables; see vendor
                                          header for the full signal list. */
#else /* Word 0 - Little Endian */
        uint64_t ied0_sbe : 28;      /**< [ 27: 0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_SBE].
                                          Internal: single-bit-error enables; see big-endian copy above. */
        uint64_t reserved_28_31 : 4;
        uint64_t ied0_dbe : 28;      /**< [ 59: 32](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_DBE].
                                          Internal: double-bit-error enables; see big-endian copy above. */
        uint64_t reserved_60_63 : 4;
#endif /* Word 0 - End */
    } cn88xxp1;
    /* CN9XXX: completely different layout -- NCBO poison/timeout enables only. */
    struct bdk_iobnx_int_ena_w1s_cn9
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_4_63 : 60;
        uint64_t ncbo_ncb2_psn : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[NCBO_NCB2_PSN]. */
        uint64_t ncbo_ncb1_psn : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[NCBO_NCB1_PSN]. */
        uint64_t ncbo_ncb0_psn : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[NCBO_NCB0_PSN]. */
        uint64_t ncbo_to : 1;        /**< [  0:  0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[NCBO_TO]. */
#else /* Word 0 - Little Endian */
        uint64_t ncbo_to : 1;        /**< [  0:  0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[NCBO_TO]. */
        uint64_t ncbo_ncb0_psn : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[NCBO_NCB0_PSN]. */
        uint64_t ncbo_ncb1_psn : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[NCBO_NCB1_PSN]. */
        uint64_t ncbo_ncb2_psn : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[NCBO_NCB2_PSN]. */
        uint64_t reserved_4_63 : 60;
#endif /* Word 0 - End */
    } cn9;
    /* CN81XX: single IOBN instance; 29 SBE/DBE enables plus PEM_SIE. */
    struct bdk_iobnx_int_ena_w1s_cn81xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pem_sie : 1;        /**< [ 63: 63](R/W1S/H) Reads or sets enable for IOBN(0)_INT_SUM[PEM_SIE]. */
        uint64_t reserved_61_62 : 2;
        uint64_t ied0_dbe : 29;      /**< [ 60: 32](R/W1S/H) Reads or sets enable for IOBN(0)_INT_SUM[IED0_DBE].
                                          Internal: per-memory double-bit-error enables (includes
                                          iob_mem_data_xmd); see vendor header for the full list. */
        uint64_t reserved_29_31 : 3;
        uint64_t ied0_sbe : 29;      /**< [ 28: 0](R/W1S/H) Reads or sets enable for IOBN(0)_INT_SUM[IED0_SBE].
                                          Internal: per-memory single-bit-error enables (includes
                                          iob_mem_data_xmd); see vendor header for the full list. */
#else /* Word 0 - Little Endian */
        uint64_t ied0_sbe : 29;      /**< [ 28: 0](R/W1S/H) Reads or sets enable for IOBN(0)_INT_SUM[IED0_SBE].
                                          Internal: single-bit-error enables; see big-endian copy above. */
        uint64_t reserved_29_31 : 3;
        uint64_t ied0_dbe : 29;      /**< [ 60: 32](R/W1S/H) Reads or sets enable for IOBN(0)_INT_SUM[IED0_DBE].
                                          Internal: double-bit-error enables; see big-endian copy above. */
        uint64_t reserved_61_62 : 2;
        uint64_t pem_sie : 1;        /**< [ 63: 63](R/W1S/H) Reads or sets enable for IOBN(0)_INT_SUM[PEM_SIE]. */
#endif /* Word 0 - End */
    } cn81xx;
    /* CN83XX: 31 SBE/DBE enables (adds sli_*_3 sources) plus PEM_SIE. */
    struct bdk_iobnx_int_ena_w1s_cn83xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pem_sie : 1;        /**< [ 63: 63](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
        uint64_t ied0_dbe : 31;      /**< [ 62: 32](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_DBE].
                                          Internal: per-memory double-bit-error enables (includes
                                          iob_mem_data_xmd and sli port 3); see vendor header. */
        uint64_t reserved_31 : 1;
        uint64_t ied0_sbe : 31;      /**< [ 30: 0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_SBE].
                                          Internal: per-memory single-bit-error enables (includes
                                          iob_mem_data_xmd and sli port 3); see vendor header. */
#else /* Word 0 - Little Endian */
        uint64_t ied0_sbe : 31;      /**< [ 30: 0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_SBE].
                                          Internal: single-bit-error enables; see big-endian copy above. */
        uint64_t reserved_31 : 1;
        uint64_t ied0_dbe : 31;      /**< [ 62: 32](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_DBE].
                                          Internal: double-bit-error enables; see big-endian copy above. */
        uint64_t pem_sie : 1;        /**< [ 63: 63](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
#endif /* Word 0 - End */
    } cn83xx;
    /* CN88XX pass 2.x: as pass 1.x but 29 enables and PEM_SIE added. */
    struct bdk_iobnx_int_ena_w1s_cn88xxp2
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pem_sie : 1;        /**< [ 63: 63](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
        uint64_t reserved_61_62 : 2;
        uint64_t ied0_dbe : 29;      /**< [ 60: 32](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_DBE].
                                          Internal: per-memory double-bit-error enables (includes
                                          iob_mem_data_xmd); see vendor header for the full list. */
        uint64_t reserved_29_31 : 3;
        uint64_t ied0_sbe : 29;      /**< [ 28: 0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_SBE].
                                          Internal: per-memory single-bit-error enables (includes
                                          iob_mem_data_xmd); see vendor header for the full list. */
#else /* Word 0 - Little Endian */
        uint64_t ied0_sbe : 29;      /**< [ 28: 0](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_SBE].
                                          Internal: single-bit-error enables; see big-endian copy above. */
        uint64_t reserved_29_31 : 3;
        uint64_t ied0_dbe : 29;      /**< [ 60: 32](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[IED0_DBE].
                                          Internal: double-bit-error enables; see big-endian copy above. */
        uint64_t reserved_61_62 : 2;
        uint64_t pem_sie : 1;        /**< [ 63: 63](R/W1S/H) Reads or sets enable for IOBN(0..1)_INT_SUM[PEM_SIE]. */
#endif /* Word 0 - End */
    } cn88xxp2;
};
/* Convenience typedef for the iobn#_int_ena_w1s register union. */
typedef union bdk_iobnx_int_ena_w1s bdk_iobnx_int_ena_w1s_t;
+
/*
 * Return the physical CSR address of IOBN(a)_INT_ENA_W1S for the chip model
 * detected at runtime.
 *
 * Valid instance range: CN81XX a == 0; CN83XX/CN88XX/CN9XXX a <= 1.  The
 * '(a) & mask' term clamps the index into the 0x1000000-byte per-instance
 * stride.  Out-of-range indices (or unknown models) go to __bdk_csr_fatal().
 *
 * NOTE(review): no return after __bdk_csr_fatal(); assumes that function is
 * declared noreturn -- confirm against the BDK CSR support header.
 */
static inline uint64_t BDK_IOBNX_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_IOBNX_INT_ENA_W1S(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
        return 0x87e0f0009000ll + 0x1000000ll * ((a) & 0x0);
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
        return 0x87e0f0009000ll + 0x1000000ll * ((a) & 0x1);
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
        return 0x87e0f0009000ll + 0x1000000ll * ((a) & 0x1);
    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
        return 0x87e0f0009000ll + 0x1000000ll * ((a) & 0x1);
    __bdk_csr_fatal("IOBNX_INT_ENA_W1S", 1, a, 0, 0, 0);
}

/* Metadata accessors used by the generic BDK_CSR_* access macros. */
#define typedef_BDK_IOBNX_INT_ENA_W1S(a) bdk_iobnx_int_ena_w1s_t
#define bustype_BDK_IOBNX_INT_ENA_W1S(a) BDK_CSR_TYPE_RSL
#define basename_BDK_IOBNX_INT_ENA_W1S(a) "IOBNX_INT_ENA_W1S"
#define device_bar_BDK_IOBNX_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_IOBNX_INT_ENA_W1S(a) (a)
#define arguments_BDK_IOBNX_INT_ENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_int_sum
+ *
+ * IOBN Interrupt Summary Register
+ * This register contains the different interrupt-summary bits of the IOBN.
+ */
+union bdk_iobnx_int_sum
+{
+ uint64_t u;
+ struct bdk_iobnx_int_sum_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) PEM sent in an invalid stream ID, the transaction was returned with fault. Advisory
+ notification only. */
+ uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_31 : 32;
+ uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) PEM sent in an invalid stream ID, the transaction was returned with fault. Advisory
+ notification only. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_iobnx_int_sum_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ied0_dbe : 28; /**< [ 59: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t ied0_sbe : 28; /**< [ 27: 0](R/W1C/H) IED0 single-bit error. When set, an IED0 single-bit error has occurred.
+ Internal:
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t ied0_sbe : 28; /**< [ 27: 0](R/W1C/H) IED0 single-bit error. When set, an IED0 single-bit error has occurred.
+ Internal:
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+ uint64_t reserved_28_31 : 4;
+ uint64_t ied0_dbe : 28; /**< [ 59: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_iobnx_int_sum_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t ncbo_ncb2_psn : 1; /**< [ 3: 3](R/W1C/H) NCBO bus2 store data with poison. IOBN()_NCBO()_PSN_STATUS saves the first error information. */
+ uint64_t ncbo_ncb1_psn : 1; /**< [ 2: 2](R/W1C/H) NCB1 bus0 store data with poison. IOBN()_NCBO()_PSN_STATUS saves the first error information. */
+ uint64_t ncbo_ncb0_psn : 1; /**< [ 1: 1](R/W1C/H) NCBO bus0 store data with poison. IOBN()_NCBO()_PSN_STATUS saves the first error information. */
+ uint64_t ncbo_to : 1; /**< [ 0: 0](R/W1C/H) NPR to a NCB-DEVICE has timed out. See IOBN()_NCBO_TO[SUB_TIME]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ncbo_to : 1; /**< [ 0: 0](R/W1C/H) NPR to a NCB-DEVICE has timed out. See IOBN()_NCBO_TO[SUB_TIME]. */
+ uint64_t ncbo_ncb0_psn : 1; /**< [ 1: 1](R/W1C/H) NCBO bus0 store data with poison. IOBN()_NCBO()_PSN_STATUS saves the first error information. */
+ uint64_t ncbo_ncb1_psn : 1; /**< [ 2: 2](R/W1C/H) NCB1 bus0 store data with poison. IOBN()_NCBO()_PSN_STATUS saves the first error information. */
+ uint64_t ncbo_ncb2_psn : 1; /**< [ 3: 3](R/W1C/H) NCBO bus2 store data with poison. IOBN()_NCBO()_PSN_STATUS saves the first error information. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_iobnx_int_sum_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) PEM sent in an invalid stream ID, the transaction was returned with fault. Advisory
+ notification only. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1C/H) IED0 single-bit error. When set, an IED0 single-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1C/H) IED0 single-bit error. When set, an IED0 single-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) PEM sent in an invalid stream ID, the transaction was returned with fault. Advisory
+ notification only. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_iobnx_int_sum_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) PEM sent in an invalid stream ID, the transaction was returned with fault. Advisory
+ notification only. */
+ uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ sli_preq_3_dbe_sclk,
+ sli_req_3_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_31 : 1;
+ uint64_t ied0_sbe : 31; /**< [ 30: 0](R/W1C/H) IED0 single-bit error. When set, an IED0 single-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ sli_preq_3_sbe_sclk,
+ sli_req_3_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t ied0_sbe : 31; /**< [ 30: 0](R/W1C/H) IED0 single-bit error. When set, an IED0 single-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ sli_preq_3_sbe_sclk,
+ sli_req_3_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+ uint64_t reserved_31 : 1;
+ uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ sli_preq_3_dbe_sclk,
+ sli_req_3_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) PEM sent in an invalid stream ID, the transaction was returned with fault. Advisory
+ notification only. */
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_iobnx_int_sum_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) PEM sent in an invalid stream ID. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1C/H) IED0 single-bit error. When set, an IED0 single-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1C/H) IED0 single-bit error. When set, an IED0 single-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_sbe_sclk,
+ icc0_xmc_fif_sbe,
+ icc1_xmc_fif_sbe,
+ icc_xmc_fifo_ecc_sbe,
+ sli_preq_0_sbe_sclk,
+ sli_req_0_sbe_sclk,
+ sli_preq_1_sbe_sclk,
+ sli_req_1_sbe_sclk,
+ sli_preq_2_sbe_sclk,
+ sli_req_2_sbe_sclk,
+ ixo_smmu_mem0_sbe_sclk,
+ iop_breq_fifo0_sbe,
+ iop_breq_fifo1_sbe ,
+ iop_breq_fifo2_sbe,
+ iop_breq_fifo3_sbe ,
+ iop_ffifo_sbe_sclk,
+ rsd_mem0_sbe,
+ rsd_mem1_sbe,
+ ics_cmd_fifo_sbe_sclk,
+ ixo_xmd_mem1_sbe_sclk,
+ ixo_xmd_mem0_sbe_sclk,
+ iobn_iorn_ffifo0__sbe_sclk,
+ iobn_iorn_ffifo1__sbe_sclk,
+ irp1_flid_mem_sbe,
+ irp0_flid_mem_sbe,
+ ixo_icc_fifo0_sbe_in_sclk,
+ ixo_icc_fifo1_sbe_in_sclk,
+ ixo_ics_mem_sbe_in_sclk. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1C/H) IED0 double-bit error. When set, an IED0 double-bit error has occurred.
+ Internal:
+ iob_mem_data_xmd_sbe_sclk,
+ gmr_ixofifo_dbe_sclk,
+ icc0_xmc_fif_dbe,
+ icc1_xmc_fif_dbe,
+ icc_xmc_fifo_ecc_dbe,
+ sli_preq_0_dbe_sclk,
+ sli_req_0_dbe_sclk,
+ sli_preq_1_dbe_sclk,
+ sli_req_1_dbe_sclk,
+ sli_preq_2_dbe_sclk,
+ sli_req_2_dbe_sclk,
+ ixo_smmu_mem0_dbe_sclk,
+ iop_breq_fifo0_dbe,
+ iop_breq_fifo1_dbe ,
+ iop_breq_fifo2_dbe,
+ iop_breq_fifo3_dbe ,
+ iop_ffifo_dbe_sclk,
+ rsd_mem0_dbe,
+ rsd_mem1_dbe,
+ ics_cmd_fifo_dbe_sclk,
+ ixo_xmd_mem1_dbe_sclk,
+ ixo_xmd_mem0_dbe_sclk,
+ iobn_iorn_ffifo0__dbe_sclk,
+ iobn_iorn_ffifo1__dbe_sclk,
+ irp1_flid_mem_dbe,
+ irp0_flid_mem_dbe,
+ ixo_icc_fifo0_dbe_in_sclk,
+ ixo_icc_fifo1_dbe_in_sclk,
+ ixo_ics_mem_dbe_in_sclk. */
+ uint64_t reserved_61_62 : 2;
+ uint64_t pem_sie : 1; /**< [ 63: 63](R/W1C/H) PEM sent in an invalid stream ID. */
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_iobnx_int_sum bdk_iobnx_int_sum_t;
+
+static inline uint64_t BDK_IOBNX_INT_SUM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_INT_SUM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f0006000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f0006000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f0006000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0006000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_INT_SUM", 1, a, 0, 0, 0);
+}
+
/* Per-register helper macros consumed by the BDK CSR access framework:
   C type, bus type, printable name, BAR, bus number and argument list
   for IOBN(a)_INT_SUM. */
#define typedef_BDK_IOBNX_INT_SUM(a) bdk_iobnx_int_sum_t
#define bustype_BDK_IOBNX_INT_SUM(a) BDK_CSR_TYPE_RSL
#define basename_BDK_IOBNX_INT_SUM(a) "IOBNX_INT_SUM"
#define device_bar_BDK_IOBNX_INT_SUM(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_IOBNX_INT_SUM(a) (a)
#define arguments_BDK_IOBNX_INT_SUM(a) (a),-1,-1,-1
+
/**
 * Register (RSL) iobn#_int_sum_w1s
 *
 * IOBN Interrupt Set Register
 * This register sets interrupt bits.
 *
 * NOTE(review): write-1-to-set companion of IOBN(x)_INT_SUM; the "s"
 * member is the generic superset layout, and the cn* members are the
 * per-chip-model layouts (field widths and reserved ranges differ per
 * model/pass).  This is auto-generated hardware description: field
 * order, widths and names must not be altered.
 */
union bdk_iobnx_int_sum_w1s
{
    uint64_t u;  /* whole-register view */
    struct bdk_iobnx_int_sum_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pem_sie : 1; /**< [ 63: 63](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[PEM_SIE]. */
        uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_DBE].
            Internal:
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t reserved_0_31 : 32;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0_31 : 32;
        uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_DBE].
            Internal:
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t pem_sie : 1; /**< [ 63: 63](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[PEM_SIE]. */
#endif /* Word 0 - End */
    } s;
    /* CN88XX pass 1: 28-bit SBE/DBE fields, no [PEM_SIE]. */
    struct bdk_iobnx_int_sum_w1s_cn88xxp1
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_60_63 : 4;
        uint64_t ied0_dbe : 28; /**< [ 59: 32](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_DBE].
            Internal:
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t reserved_28_31 : 4;
        uint64_t ied0_sbe : 28; /**< [ 27: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_SBE].
            Internal:
            gmr_ixofifo_sbe_sclk,
            icc0_xmc_fif_sbe,
            icc1_xmc_fif_sbe,
            icc_xmc_fifo_ecc_sbe,
            sli_preq_0_sbe_sclk,
            sli_req_0_sbe_sclk,
            sli_preq_1_sbe_sclk,
            sli_req_1_sbe_sclk,
            sli_preq_2_sbe_sclk,
            sli_req_2_sbe_sclk,
            ixo_smmu_mem0_sbe_sclk,
            iop_breq_fifo0_sbe,
            iop_breq_fifo1_sbe ,
            iop_breq_fifo2_sbe,
            iop_breq_fifo3_sbe ,
            iop_ffifo_sbe_sclk,
            rsd_mem0_sbe,
            rsd_mem1_sbe,
            ics_cmd_fifo_sbe_sclk,
            ixo_xmd_mem1_sbe_sclk,
            ixo_xmd_mem0_sbe_sclk,
            iobn_iorn_ffifo0__sbe_sclk,
            iobn_iorn_ffifo1__sbe_sclk,
            irp1_flid_mem_sbe,
            irp0_flid_mem_sbe,
            ixo_icc_fifo0_sbe_in_sclk,
            ixo_icc_fifo1_sbe_in_sclk,
            ixo_ics_mem_sbe_in_sclk. */
#else /* Word 0 - Little Endian */
        uint64_t ied0_sbe : 28; /**< [ 27: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_SBE].
            Internal:
            gmr_ixofifo_sbe_sclk,
            icc0_xmc_fif_sbe,
            icc1_xmc_fif_sbe,
            icc_xmc_fifo_ecc_sbe,
            sli_preq_0_sbe_sclk,
            sli_req_0_sbe_sclk,
            sli_preq_1_sbe_sclk,
            sli_req_1_sbe_sclk,
            sli_preq_2_sbe_sclk,
            sli_req_2_sbe_sclk,
            ixo_smmu_mem0_sbe_sclk,
            iop_breq_fifo0_sbe,
            iop_breq_fifo1_sbe ,
            iop_breq_fifo2_sbe,
            iop_breq_fifo3_sbe ,
            iop_ffifo_sbe_sclk,
            rsd_mem0_sbe,
            rsd_mem1_sbe,
            ics_cmd_fifo_sbe_sclk,
            ixo_xmd_mem1_sbe_sclk,
            ixo_xmd_mem0_sbe_sclk,
            iobn_iorn_ffifo0__sbe_sclk,
            iobn_iorn_ffifo1__sbe_sclk,
            irp1_flid_mem_sbe,
            irp0_flid_mem_sbe,
            ixo_icc_fifo0_sbe_in_sclk,
            ixo_icc_fifo1_sbe_in_sclk,
            ixo_ics_mem_sbe_in_sclk. */
        uint64_t reserved_28_31 : 4;
        uint64_t ied0_dbe : 28; /**< [ 59: 32](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_DBE].
            Internal:
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t reserved_60_63 : 4;
#endif /* Word 0 - End */
    } cn88xxp1;
    /* CN9XXX: completely different layout — NCB poison/timeout bits only. */
    struct bdk_iobnx_int_sum_w1s_cn9
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_4_63 : 60;
        uint64_t ncbo_ncb2_psn : 1; /**< [ 3: 3](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[NCBO_NCB2_PSN]. */
        uint64_t ncbo_ncb1_psn : 1; /**< [ 2: 2](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[NCBO_NCB1_PSN]. */
        uint64_t ncbo_ncb0_psn : 1; /**< [ 1: 1](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[NCBO_NCB0_PSN]. */
        uint64_t ncbo_to : 1; /**< [ 0: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[NCBO_TO]. */
#else /* Word 0 - Little Endian */
        uint64_t ncbo_to : 1; /**< [ 0: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[NCBO_TO]. */
        uint64_t ncbo_ncb0_psn : 1; /**< [ 1: 1](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[NCBO_NCB0_PSN]. */
        uint64_t ncbo_ncb1_psn : 1; /**< [ 2: 2](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[NCBO_NCB1_PSN]. */
        uint64_t ncbo_ncb2_psn : 1; /**< [ 3: 3](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[NCBO_NCB2_PSN]. */
        uint64_t reserved_4_63 : 60;
#endif /* Word 0 - End */
    } cn9;
    /* CN81XX: single IOBN, 29-bit SBE/DBE fields. */
    struct bdk_iobnx_int_sum_w1s_cn81xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pem_sie : 1; /**< [ 63: 63](R/W1S/H) Reads or sets IOBN(0)_INT_SUM[PEM_SIE]. */
        uint64_t reserved_61_62 : 2;
        uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1S/H) Reads or sets IOBN(0)_INT_SUM[IED0_DBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t reserved_29_31 : 3;
        uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1S/H) Reads or sets IOBN(0)_INT_SUM[IED0_SBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_sbe_sclk,
            icc0_xmc_fif_sbe,
            icc1_xmc_fif_sbe,
            icc_xmc_fifo_ecc_sbe,
            sli_preq_0_sbe_sclk,
            sli_req_0_sbe_sclk,
            sli_preq_1_sbe_sclk,
            sli_req_1_sbe_sclk,
            sli_preq_2_sbe_sclk,
            sli_req_2_sbe_sclk,
            ixo_smmu_mem0_sbe_sclk,
            iop_breq_fifo0_sbe,
            iop_breq_fifo1_sbe ,
            iop_breq_fifo2_sbe,
            iop_breq_fifo3_sbe ,
            iop_ffifo_sbe_sclk,
            rsd_mem0_sbe,
            rsd_mem1_sbe,
            ics_cmd_fifo_sbe_sclk,
            ixo_xmd_mem1_sbe_sclk,
            ixo_xmd_mem0_sbe_sclk,
            iobn_iorn_ffifo0__sbe_sclk,
            iobn_iorn_ffifo1__sbe_sclk,
            irp1_flid_mem_sbe,
            irp0_flid_mem_sbe,
            ixo_icc_fifo0_sbe_in_sclk,
            ixo_icc_fifo1_sbe_in_sclk,
            ixo_ics_mem_sbe_in_sclk. */
#else /* Word 0 - Little Endian */
        uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1S/H) Reads or sets IOBN(0)_INT_SUM[IED0_SBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_sbe_sclk,
            icc0_xmc_fif_sbe,
            icc1_xmc_fif_sbe,
            icc_xmc_fifo_ecc_sbe,
            sli_preq_0_sbe_sclk,
            sli_req_0_sbe_sclk,
            sli_preq_1_sbe_sclk,
            sli_req_1_sbe_sclk,
            sli_preq_2_sbe_sclk,
            sli_req_2_sbe_sclk,
            ixo_smmu_mem0_sbe_sclk,
            iop_breq_fifo0_sbe,
            iop_breq_fifo1_sbe ,
            iop_breq_fifo2_sbe,
            iop_breq_fifo3_sbe ,
            iop_ffifo_sbe_sclk,
            rsd_mem0_sbe,
            rsd_mem1_sbe,
            ics_cmd_fifo_sbe_sclk,
            ixo_xmd_mem1_sbe_sclk,
            ixo_xmd_mem0_sbe_sclk,
            iobn_iorn_ffifo0__sbe_sclk,
            iobn_iorn_ffifo1__sbe_sclk,
            irp1_flid_mem_sbe,
            irp0_flid_mem_sbe,
            ixo_icc_fifo0_sbe_in_sclk,
            ixo_icc_fifo1_sbe_in_sclk,
            ixo_ics_mem_sbe_in_sclk. */
        uint64_t reserved_29_31 : 3;
        uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1S/H) Reads or sets IOBN(0)_INT_SUM[IED0_DBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t reserved_61_62 : 2;
        uint64_t pem_sie : 1; /**< [ 63: 63](R/W1S/H) Reads or sets IOBN(0)_INT_SUM[PEM_SIE]. */
#endif /* Word 0 - End */
    } cn81xx;
    /* CN83XX: 31-bit SBE/DBE fields (adds SLI port 3 sources). */
    struct bdk_iobnx_int_sum_w1s_cn83xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pem_sie : 1; /**< [ 63: 63](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[PEM_SIE]. */
        uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_DBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            sli_preq_3_dbe_sclk,
            sli_req_3_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t reserved_31 : 1;
        uint64_t ied0_sbe : 31; /**< [ 30: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_SBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_sbe_sclk,
            icc0_xmc_fif_sbe,
            icc1_xmc_fif_sbe,
            icc_xmc_fifo_ecc_sbe,
            sli_preq_0_sbe_sclk,
            sli_req_0_sbe_sclk,
            sli_preq_1_sbe_sclk,
            sli_req_1_sbe_sclk,
            sli_preq_2_sbe_sclk,
            sli_req_2_sbe_sclk,
            sli_preq_3_sbe_sclk,
            sli_req_3_sbe_sclk,
            ixo_smmu_mem0_sbe_sclk,
            iop_breq_fifo0_sbe,
            iop_breq_fifo1_sbe ,
            iop_breq_fifo2_sbe,
            iop_breq_fifo3_sbe ,
            iop_ffifo_sbe_sclk,
            rsd_mem0_sbe,
            rsd_mem1_sbe,
            ics_cmd_fifo_sbe_sclk,
            ixo_xmd_mem1_sbe_sclk,
            ixo_xmd_mem0_sbe_sclk,
            iobn_iorn_ffifo0__sbe_sclk,
            iobn_iorn_ffifo1__sbe_sclk,
            irp1_flid_mem_sbe,
            irp0_flid_mem_sbe,
            ixo_icc_fifo0_sbe_in_sclk,
            ixo_icc_fifo1_sbe_in_sclk,
            ixo_ics_mem_sbe_in_sclk. */
#else /* Word 0 - Little Endian */
        uint64_t ied0_sbe : 31; /**< [ 30: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_SBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_sbe_sclk,
            icc0_xmc_fif_sbe,
            icc1_xmc_fif_sbe,
            icc_xmc_fifo_ecc_sbe,
            sli_preq_0_sbe_sclk,
            sli_req_0_sbe_sclk,
            sli_preq_1_sbe_sclk,
            sli_req_1_sbe_sclk,
            sli_preq_2_sbe_sclk,
            sli_req_2_sbe_sclk,
            sli_preq_3_sbe_sclk,
            sli_req_3_sbe_sclk,
            ixo_smmu_mem0_sbe_sclk,
            iop_breq_fifo0_sbe,
            iop_breq_fifo1_sbe ,
            iop_breq_fifo2_sbe,
            iop_breq_fifo3_sbe ,
            iop_ffifo_sbe_sclk,
            rsd_mem0_sbe,
            rsd_mem1_sbe,
            ics_cmd_fifo_sbe_sclk,
            ixo_xmd_mem1_sbe_sclk,
            ixo_xmd_mem0_sbe_sclk,
            iobn_iorn_ffifo0__sbe_sclk,
            iobn_iorn_ffifo1__sbe_sclk,
            irp1_flid_mem_sbe,
            irp0_flid_mem_sbe,
            ixo_icc_fifo0_sbe_in_sclk,
            ixo_icc_fifo1_sbe_in_sclk,
            ixo_ics_mem_sbe_in_sclk. */
        uint64_t reserved_31 : 1;
        uint64_t ied0_dbe : 31; /**< [ 62: 32](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_DBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            sli_preq_3_dbe_sclk,
            sli_req_3_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t pem_sie : 1; /**< [ 63: 63](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[PEM_SIE]. */
#endif /* Word 0 - End */
    } cn83xx;
    /* CN88XX pass 2: adds [PEM_SIE] and the iob_mem_data source (29-bit fields). */
    struct bdk_iobnx_int_sum_w1s_cn88xxp2
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t pem_sie : 1; /**< [ 63: 63](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[PEM_SIE]. */
        uint64_t reserved_61_62 : 2;
        uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_DBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t reserved_29_31 : 3;
        uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_SBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_sbe_sclk,
            icc0_xmc_fif_sbe,
            icc1_xmc_fif_sbe,
            icc_xmc_fifo_ecc_sbe,
            sli_preq_0_sbe_sclk,
            sli_req_0_sbe_sclk,
            sli_preq_1_sbe_sclk,
            sli_req_1_sbe_sclk,
            sli_preq_2_sbe_sclk,
            sli_req_2_sbe_sclk,
            ixo_smmu_mem0_sbe_sclk,
            iop_breq_fifo0_sbe,
            iop_breq_fifo1_sbe ,
            iop_breq_fifo2_sbe,
            iop_breq_fifo3_sbe ,
            iop_ffifo_sbe_sclk,
            rsd_mem0_sbe,
            rsd_mem1_sbe,
            ics_cmd_fifo_sbe_sclk,
            ixo_xmd_mem1_sbe_sclk,
            ixo_xmd_mem0_sbe_sclk,
            iobn_iorn_ffifo0__sbe_sclk,
            iobn_iorn_ffifo1__sbe_sclk,
            irp1_flid_mem_sbe,
            irp0_flid_mem_sbe,
            ixo_icc_fifo0_sbe_in_sclk,
            ixo_icc_fifo1_sbe_in_sclk,
            ixo_ics_mem_sbe_in_sclk. */
#else /* Word 0 - Little Endian */
        uint64_t ied0_sbe : 29; /**< [ 28: 0](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_SBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_sbe_sclk,
            icc0_xmc_fif_sbe,
            icc1_xmc_fif_sbe,
            icc_xmc_fifo_ecc_sbe,
            sli_preq_0_sbe_sclk,
            sli_req_0_sbe_sclk,
            sli_preq_1_sbe_sclk,
            sli_req_1_sbe_sclk,
            sli_preq_2_sbe_sclk,
            sli_req_2_sbe_sclk,
            ixo_smmu_mem0_sbe_sclk,
            iop_breq_fifo0_sbe,
            iop_breq_fifo1_sbe ,
            iop_breq_fifo2_sbe,
            iop_breq_fifo3_sbe ,
            iop_ffifo_sbe_sclk,
            rsd_mem0_sbe,
            rsd_mem1_sbe,
            ics_cmd_fifo_sbe_sclk,
            ixo_xmd_mem1_sbe_sclk,
            ixo_xmd_mem0_sbe_sclk,
            iobn_iorn_ffifo0__sbe_sclk,
            iobn_iorn_ffifo1__sbe_sclk,
            irp1_flid_mem_sbe,
            irp0_flid_mem_sbe,
            ixo_icc_fifo0_sbe_in_sclk,
            ixo_icc_fifo1_sbe_in_sclk,
            ixo_ics_mem_sbe_in_sclk. */
        uint64_t reserved_29_31 : 3;
        uint64_t ied0_dbe : 29; /**< [ 60: 32](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[IED0_DBE].
            Internal:
            iob_mem_data_xmd_sbe_sclk,
            gmr_ixofifo_dbe_sclk,
            icc0_xmc_fif_dbe,
            icc1_xmc_fif_dbe,
            icc_xmc_fifo_ecc_dbe,
            sli_preq_0_dbe_sclk,
            sli_req_0_dbe_sclk,
            sli_preq_1_dbe_sclk,
            sli_req_1_dbe_sclk,
            sli_preq_2_dbe_sclk,
            sli_req_2_dbe_sclk,
            ixo_smmu_mem0_dbe_sclk,
            iop_breq_fifo0_dbe,
            iop_breq_fifo1_dbe ,
            iop_breq_fifo2_dbe,
            iop_breq_fifo3_dbe ,
            iop_ffifo_dbe_sclk,
            rsd_mem0_dbe,
            rsd_mem1_dbe,
            ics_cmd_fifo_dbe_sclk,
            ixo_xmd_mem1_dbe_sclk,
            ixo_xmd_mem0_dbe_sclk,
            iobn_iorn_ffifo0__dbe_sclk,
            iobn_iorn_ffifo1__dbe_sclk,
            irp1_flid_mem_dbe,
            irp0_flid_mem_dbe,
            ixo_icc_fifo0_dbe_in_sclk,
            ixo_icc_fifo1_dbe_in_sclk,
            ixo_ics_mem_dbe_in_sclk. */
        uint64_t reserved_61_62 : 2;
        uint64_t pem_sie : 1; /**< [ 63: 63](R/W1S/H) Reads or sets IOBN(0..1)_INT_SUM[PEM_SIE]. */
#endif /* Word 0 - End */
    } cn88xxp2;
};
typedef union bdk_iobnx_int_sum_w1s bdk_iobnx_int_sum_w1s_t;
+
+static inline uint64_t BDK_IOBNX_INT_SUM_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_INT_SUM_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f0007000ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f0007000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f0007000ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0007000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_INT_SUM_W1S", 1, a, 0, 0, 0);
+}
+
/* Per-register helper macros consumed by the BDK CSR access framework:
   C type, bus type, printable name, BAR, bus number and argument list
   for IOBN(a)_INT_SUM_W1S. */
#define typedef_BDK_IOBNX_INT_SUM_W1S(a) bdk_iobnx_int_sum_w1s_t
#define bustype_BDK_IOBNX_INT_SUM_W1S(a) BDK_CSR_TYPE_RSL
#define basename_BDK_IOBNX_INT_SUM_W1S(a) "IOBNX_INT_SUM_W1S"
#define device_bar_BDK_IOBNX_INT_SUM_W1S(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_IOBNX_INT_SUM_W1S(a) (a)
#define arguments_BDK_IOBNX_INT_SUM_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_mctlr_reg
+ *
+ * IOBN Memory SControl Register
+ * Contains the sclk memory control for memories.
+ *
+ * Per-model layouts below: CN81XX (aliased by CN88XX) implements a 16-bit
+ * [DIS] and 28-bit [FLIP]; CN83XX widens both fields by one
+ * sli_req_3/sli_preq_3 entry.
+ */
+union bdk_iobnx_mctlr_reg
+{
+    uint64_t u;
+    struct bdk_iobnx_mctlr_reg_s
+    {
+        /* NOTE(review): this generic layout uses the widest (cn83xx) field
+           sizes ([DIS]=17 bits, [FLIP]=30 bits), but the Internal: lists
+           below appear copied from cn81xx (\<47:32\>, \<27:0\>, no sli_*_3
+           entries) -- confirm against the HRM. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63 : 15;
+        uint64_t dis : 17; /**< [ 48: 32](R/W) Memory ECC disable.
+                                Internal:
+                                \<47:32\> = iobn_gmr_ixofifo_csr_cor_dis,
+                                sli_req_2_cor_dis and sli_preq_2_cor_dis,
+                                sli_req_1_cor_dis and sli_preq_1_cor_dis,
+                                sli_req_0_cor_dis and sli_preq_0_cor_dis,
+                                iob__iob_xmd_csr_cor_dis_rclk,
+                                ixo_smmu_mem0_csr_cor_dis,
+                                ixo_smmu_mem1_csr_cor_dis,
+                                ixo_ics_mem_csr_cor_dis,
+                                ixo_icc_fifo0_csr_cor_dis,
+                                ixo_icc_fifo1_csr_cor_dis,
+                                ixo_xmd_mem0_csr_cor_dis,
+                                ixo_xmd_mem1_csr_cor_dis,
+                                iobn_iorn_ffifo0__csr_cor_dis,
+                                iobn_iorn_ffifo1__csr_cor_dis,
+                                iop_ffifo_csr_cor_dis,
+                                ics_cmd_fifo_csr_cor_dis */
+        uint64_t reserved_30_31 : 2;
+        uint64_t flip : 30; /**< [ 29: 0](R/W) Memory ECC flip.
+                                Internal:
+                                \<27:0\> = iobn_gmr_ixofifo_csr_flip_synd,
+                                sli_req_2_flip_synd and sli_preq_2_flip_synd,
+                                sli_req_1_flip_synd and sli_preq_1_flip_synd,
+                                sli_req_0_flip_synd and sli_preq_0_flip_synd,
+                                iobn_rsd_mem0_csr_flip_synd_rclk,
+                                iobn_rsd_mem1_csr_flip_synd_rclk,
+                                ixo_smmu_mem0_csr_flip_synd,
+                                ixo_smmu_mem1_csr_flip_synd,
+                                ixo_ics_mem_csr_flip_synd,
+                                iop_ffifo_csr_flip_synd,
+                                iop_breq_fifo0_csr_flip_synd,
+                                iop_breq_fifo1_csr_flip_synd,
+                                iop_breq_fifo2_csr_flip_synd,
+                                iop_breq_fifo3_csr_flip_synd */
+#else /* Word 0 - Little Endian */
+        uint64_t flip : 30; /**< [ 29: 0](R/W) Memory ECC flip.
+                                Internal:
+                                \<27:0\> = iobn_gmr_ixofifo_csr_flip_synd,
+                                sli_req_2_flip_synd and sli_preq_2_flip_synd,
+                                sli_req_1_flip_synd and sli_preq_1_flip_synd,
+                                sli_req_0_flip_synd and sli_preq_0_flip_synd,
+                                iobn_rsd_mem0_csr_flip_synd_rclk,
+                                iobn_rsd_mem1_csr_flip_synd_rclk,
+                                ixo_smmu_mem0_csr_flip_synd,
+                                ixo_smmu_mem1_csr_flip_synd,
+                                ixo_ics_mem_csr_flip_synd,
+                                iop_ffifo_csr_flip_synd,
+                                iop_breq_fifo0_csr_flip_synd,
+                                iop_breq_fifo1_csr_flip_synd,
+                                iop_breq_fifo2_csr_flip_synd,
+                                iop_breq_fifo3_csr_flip_synd */
+        uint64_t reserved_30_31 : 2;
+        uint64_t dis : 17; /**< [ 48: 32](R/W) Memory ECC disable.
+                                Internal:
+                                \<47:32\> = iobn_gmr_ixofifo_csr_cor_dis,
+                                sli_req_2_cor_dis and sli_preq_2_cor_dis,
+                                sli_req_1_cor_dis and sli_preq_1_cor_dis,
+                                sli_req_0_cor_dis and sli_preq_0_cor_dis,
+                                iob__iob_xmd_csr_cor_dis_rclk,
+                                ixo_smmu_mem0_csr_cor_dis,
+                                ixo_smmu_mem1_csr_cor_dis,
+                                ixo_ics_mem_csr_cor_dis,
+                                ixo_icc_fifo0_csr_cor_dis,
+                                ixo_icc_fifo1_csr_cor_dis,
+                                ixo_xmd_mem0_csr_cor_dis,
+                                ixo_xmd_mem1_csr_cor_dis,
+                                iobn_iorn_ffifo0__csr_cor_dis,
+                                iobn_iorn_ffifo1__csr_cor_dis,
+                                iop_ffifo_csr_cor_dis,
+                                ics_cmd_fifo_csr_cor_dis */
+        uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_iobnx_mctlr_reg_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63 : 16;
+        uint64_t dis : 16; /**< [ 47: 32](R/W) Memory ECC disable.
+                                Internal:
+                                \<47:32\> = iobn_gmr_ixofifo_csr_cor_dis,
+                                sli_req_2_cor_dis and sli_preq_2_cor_dis,
+                                sli_req_1_cor_dis and sli_preq_1_cor_dis,
+                                sli_req_0_cor_dis and sli_preq_0_cor_dis,
+                                iob__iob_xmd_csr_cor_dis_rclk,
+                                ixo_smmu_mem0_csr_cor_dis,
+                                ixo_smmu_mem1_csr_cor_dis,
+                                ixo_ics_mem_csr_cor_dis,
+                                ixo_icc_fifo0_csr_cor_dis,
+                                ixo_icc_fifo1_csr_cor_dis,
+                                ixo_xmd_mem0_csr_cor_dis,
+                                ixo_xmd_mem1_csr_cor_dis,
+                                iobn_iorn_ffifo0__csr_cor_dis,
+                                iobn_iorn_ffifo1__csr_cor_dis,
+                                iop_ffifo_csr_cor_dis,
+                                ics_cmd_fifo_csr_cor_dis */
+        uint64_t reserved_28_31 : 4;
+        uint64_t flip : 28; /**< [ 27: 0](R/W) Memory ECC flip.
+                                Internal:
+                                \<27:0\> = iobn_gmr_ixofifo_csr_flip_synd,
+                                sli_req_2_flip_synd and sli_preq_2_flip_synd,
+                                sli_req_1_flip_synd and sli_preq_1_flip_synd,
+                                sli_req_0_flip_synd and sli_preq_0_flip_synd,
+                                iobn_rsd_mem0_csr_flip_synd_rclk,
+                                iobn_rsd_mem1_csr_flip_synd_rclk,
+                                ixo_smmu_mem0_csr_flip_synd,
+                                ixo_smmu_mem1_csr_flip_synd,
+                                ixo_ics_mem_csr_flip_synd,
+                                iop_ffifo_csr_flip_synd,
+                                iop_breq_fifo0_csr_flip_synd,
+                                iop_breq_fifo1_csr_flip_synd,
+                                iop_breq_fifo2_csr_flip_synd,
+                                iop_breq_fifo3_csr_flip_synd */
+#else /* Word 0 - Little Endian */
+        uint64_t flip : 28; /**< [ 27: 0](R/W) Memory ECC flip.
+                                Internal:
+                                \<27:0\> = iobn_gmr_ixofifo_csr_flip_synd,
+                                sli_req_2_flip_synd and sli_preq_2_flip_synd,
+                                sli_req_1_flip_synd and sli_preq_1_flip_synd,
+                                sli_req_0_flip_synd and sli_preq_0_flip_synd,
+                                iobn_rsd_mem0_csr_flip_synd_rclk,
+                                iobn_rsd_mem1_csr_flip_synd_rclk,
+                                ixo_smmu_mem0_csr_flip_synd,
+                                ixo_smmu_mem1_csr_flip_synd,
+                                ixo_ics_mem_csr_flip_synd,
+                                iop_ffifo_csr_flip_synd,
+                                iop_breq_fifo0_csr_flip_synd,
+                                iop_breq_fifo1_csr_flip_synd,
+                                iop_breq_fifo2_csr_flip_synd,
+                                iop_breq_fifo3_csr_flip_synd */
+        uint64_t reserved_28_31 : 4;
+        uint64_t dis : 16; /**< [ 47: 32](R/W) Memory ECC disable.
+                                Internal:
+                                \<47:32\> = iobn_gmr_ixofifo_csr_cor_dis,
+                                sli_req_2_cor_dis and sli_preq_2_cor_dis,
+                                sli_req_1_cor_dis and sli_preq_1_cor_dis,
+                                sli_req_0_cor_dis and sli_preq_0_cor_dis,
+                                iob__iob_xmd_csr_cor_dis_rclk,
+                                ixo_smmu_mem0_csr_cor_dis,
+                                ixo_smmu_mem1_csr_cor_dis,
+                                ixo_ics_mem_csr_cor_dis,
+                                ixo_icc_fifo0_csr_cor_dis,
+                                ixo_icc_fifo1_csr_cor_dis,
+                                ixo_xmd_mem0_csr_cor_dis,
+                                ixo_xmd_mem1_csr_cor_dis,
+                                iobn_iorn_ffifo0__csr_cor_dis,
+                                iobn_iorn_ffifo1__csr_cor_dis,
+                                iop_ffifo_csr_cor_dis,
+                                ics_cmd_fifo_csr_cor_dis */
+        uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_iobnx_mctlr_reg_cn81xx cn88xx; */
+    struct bdk_iobnx_mctlr_reg_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63 : 15;
+        uint64_t dis : 17; /**< [ 48: 32](R/W) Memory ECC disable.
+                                Internal:
+                                \<48:32\> = iobn_gmr_ixofifo_csr_cor_dis,
+                                sli_req_3_cor_dis and sli_preq_3_cor_dis,
+                                sli_req_2_cor_dis and sli_preq_2_cor_dis,
+                                sli_req_1_cor_dis and sli_preq_1_cor_dis,
+                                sli_req_0_cor_dis and sli_preq_0_cor_dis,
+                                iob__iob_xmd_csr_cor_dis_rclk,
+                                ixo_smmu_mem0_csr_cor_dis,
+                                ixo_smmu_mem1_csr_cor_dis,
+                                ixo_ics_mem_csr_cor_dis,
+                                ixo_icc_fifo0_csr_cor_dis,
+                                ixo_icc_fifo1_csr_cor_dis,
+                                ixo_xmd_mem0_csr_cor_dis,
+                                ixo_xmd_mem1_csr_cor_dis,
+                                iobn_iorn_ffifo0__csr_cor_dis,
+                                iobn_iorn_ffifo1__csr_cor_dis,
+                                iop_ffifo_csr_cor_dis,
+                                ics_cmd_fifo_csr_cor_dis */
+        uint64_t reserved_30_31 : 2;
+        uint64_t flip : 30; /**< [ 29: 0](R/W) Memory ECC flip.
+                                Internal:
+                                \<29:0\> = iobn_gmr_ixofifo_csr_flip_synd,
+                                sli_req_3_flip_synd and sli_preq_3_flip_synd,
+                                sli_req_2_flip_synd and sli_preq_2_flip_synd,
+                                sli_req_1_flip_synd and sli_preq_1_flip_synd,
+                                sli_req_0_flip_synd and sli_preq_0_flip_synd,
+                                iobn_rsd_mem0_csr_flip_synd_rclk,
+                                iobn_rsd_mem1_csr_flip_synd_rclk,
+                                ixo_smmu_mem0_csr_flip_synd,
+                                ixo_smmu_mem1_csr_flip_synd,
+                                ixo_ics_mem_csr_flip_synd,
+                                iop_ffifo_csr_flip_synd,
+                                iop_breq_fifo0_csr_flip_synd,
+                                iop_breq_fifo1_csr_flip_synd,
+                                iop_breq_fifo2_csr_flip_synd,
+                                iop_breq_fifo3_csr_flip_synd */
+#else /* Word 0 - Little Endian */
+        uint64_t flip : 30; /**< [ 29: 0](R/W) Memory ECC flip.
+                                Internal:
+                                \<29:0\> = iobn_gmr_ixofifo_csr_flip_synd,
+                                sli_req_3_flip_synd and sli_preq_3_flip_synd,
+                                sli_req_2_flip_synd and sli_preq_2_flip_synd,
+                                sli_req_1_flip_synd and sli_preq_1_flip_synd,
+                                sli_req_0_flip_synd and sli_preq_0_flip_synd,
+                                iobn_rsd_mem0_csr_flip_synd_rclk,
+                                iobn_rsd_mem1_csr_flip_synd_rclk,
+                                ixo_smmu_mem0_csr_flip_synd,
+                                ixo_smmu_mem1_csr_flip_synd,
+                                ixo_ics_mem_csr_flip_synd,
+                                iop_ffifo_csr_flip_synd,
+                                iop_breq_fifo0_csr_flip_synd,
+                                iop_breq_fifo1_csr_flip_synd,
+                                iop_breq_fifo2_csr_flip_synd,
+                                iop_breq_fifo3_csr_flip_synd */
+        uint64_t reserved_30_31 : 2;
+        uint64_t dis : 17; /**< [ 48: 32](R/W) Memory ECC disable.
+                                Internal:
+                                \<48:32\> = iobn_gmr_ixofifo_csr_cor_dis,
+                                sli_req_3_cor_dis and sli_preq_3_cor_dis,
+                                sli_req_2_cor_dis and sli_preq_2_cor_dis,
+                                sli_req_1_cor_dis and sli_preq_1_cor_dis,
+                                sli_req_0_cor_dis and sli_preq_0_cor_dis,
+                                iob__iob_xmd_csr_cor_dis_rclk,
+                                ixo_smmu_mem0_csr_cor_dis,
+                                ixo_smmu_mem1_csr_cor_dis,
+                                ixo_ics_mem_csr_cor_dis,
+                                ixo_icc_fifo0_csr_cor_dis,
+                                ixo_icc_fifo1_csr_cor_dis,
+                                ixo_xmd_mem0_csr_cor_dis,
+                                ixo_xmd_mem1_csr_cor_dis,
+                                iobn_iorn_ffifo0__csr_cor_dis,
+                                iobn_iorn_ffifo1__csr_cor_dis,
+                                iop_ffifo_csr_cor_dis,
+                                ics_cmd_fifo_csr_cor_dis */
+        uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_iobnx_mctlr_reg bdk_iobnx_mctlr_reg_t;
+
+static inline uint64_t BDK_IOBNX_MCTLR_REG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_MCTLR_REG(unsigned long a)
+{
+    /* CSR base address; IOBN instances are 0x1000000 bytes apart. */
+    const uint64_t base = 0x87e0f0005108ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return base; /* CN81XX: single IOBN */
+    /* CN83XX and CN88XX both expose two IOBNs at the same offsets. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1))
+        return base + 0x1000000ll * (a & 0x1);
+    __bdk_csr_fatal("IOBNX_MCTLR_REG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_MCTLR_REG(a) bdk_iobnx_mctlr_reg_t
+#define bustype_BDK_IOBNX_MCTLR_REG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_MCTLR_REG(a) "IOBNX_MCTLR_REG"
+#define device_bar_BDK_IOBNX_MCTLR_REG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_MCTLR_REG(a) (a)
+#define arguments_BDK_IOBNX_MCTLR_REG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_mctls_reg
+ *
+ * IOBN Memory SControl Register
+ * Contains the sclk memory control for memories.
+ *
+ * A single layout is shared by all models (only the generic "s" variant
+ * exists below).
+ */
+union bdk_iobnx_mctls_reg
+{
+    uint64_t u;
+    struct bdk_iobnx_mctls_reg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_43_63 : 21;
+        uint64_t dis : 11; /**< [ 42: 32](R/W) Memory ECC disable.
+                                Internal:
+                                \<42:32\> = iobn_rsd_mem0_csr_cor_dis,
+                                iobn_rsd_mem1_csr_cor_dis,
+                                irp0_flid_mem_csr_cor_dis,
+                                irp1_flid_mem_csr_cor_dis,
+                                iop_breq_fifo0_csr_cor_dis,
+                                iop_breq_fifo1_csr_cor_dis,
+                                iop_breq_fifo2_csr_cor_dis,
+                                iop_breq_fifo3_csr_cor_dis,
+                                icc_xmc_fifo_ecc_csr_cor_dis,
+                                icc0_xmc_fifo_csr_cor_dis,
+                                icc1_xmc_fifo_csr_cor_dis */
+        uint64_t reserved_26_31 : 6;
+        uint64_t flip : 26; /**< [ 25: 0](R/W) Memory ECC flip.
+                                Internal:
+                                \<25:0\> = iob__iob_xmd_csr_flip_synd_sclk,
+                                ixo_icc_fifo0_csr_flip_synd,
+                                ixo_icc_fifo1_csr_flip_synd,
+                                ixo_xmd_mem0_csr_flip_synd,
+                                ixo_xmd_mem1_csr_flip_synd,
+                                irp0_flid_mem_csr_flip_synd,
+                                irp1_flid_mem_csr_flip_synd,
+                                iobn_iorn_ffifo0__csr_flip_synd,
+                                iobn_iorn_ffifo1__csr_flip_synd,
+                                icc_xmc_fifo_ecc_csr_flip_synd,
+                                ics_cmd_fifo_csr_flip_synd,
+                                icc0_xmc_fifo_csr_flip_synd,
+                                icc1_xmc_fifo_csr_flip_synd */
+#else /* Word 0 - Little Endian */
+        uint64_t flip : 26; /**< [ 25: 0](R/W) Memory ECC flip.
+                                Internal:
+                                \<25:0\> = iob__iob_xmd_csr_flip_synd_sclk,
+                                ixo_icc_fifo0_csr_flip_synd,
+                                ixo_icc_fifo1_csr_flip_synd,
+                                ixo_xmd_mem0_csr_flip_synd,
+                                ixo_xmd_mem1_csr_flip_synd,
+                                irp0_flid_mem_csr_flip_synd,
+                                irp1_flid_mem_csr_flip_synd,
+                                iobn_iorn_ffifo0__csr_flip_synd,
+                                iobn_iorn_ffifo1__csr_flip_synd,
+                                icc_xmc_fifo_ecc_csr_flip_synd,
+                                ics_cmd_fifo_csr_flip_synd,
+                                icc0_xmc_fifo_csr_flip_synd,
+                                icc1_xmc_fifo_csr_flip_synd */
+        uint64_t reserved_26_31 : 6;
+        uint64_t dis : 11; /**< [ 42: 32](R/W) Memory ECC disable.
+                                Internal:
+                                \<42:32\> = iobn_rsd_mem0_csr_cor_dis,
+                                iobn_rsd_mem1_csr_cor_dis,
+                                irp0_flid_mem_csr_cor_dis,
+                                irp1_flid_mem_csr_cor_dis,
+                                iop_breq_fifo0_csr_cor_dis,
+                                iop_breq_fifo1_csr_cor_dis,
+                                iop_breq_fifo2_csr_cor_dis,
+                                iop_breq_fifo3_csr_cor_dis,
+                                icc_xmc_fifo_ecc_csr_cor_dis,
+                                icc0_xmc_fifo_csr_cor_dis,
+                                icc1_xmc_fifo_csr_cor_dis */
+        uint64_t reserved_43_63 : 21;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_mctls_reg_s cn; */
+};
+typedef union bdk_iobnx_mctls_reg bdk_iobnx_mctls_reg_t;
+
+static inline uint64_t BDK_IOBNX_MCTLS_REG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_MCTLS_REG(unsigned long a)
+{
+    /* CSR base address; IOBN instances are 0x1000000 bytes apart. */
+    const uint64_t base = 0x87e0f0005100ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return base; /* CN81XX: single IOBN */
+    /* CN83XX and CN88XX both expose two IOBNs at the same offsets. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) && (a<=1))
+        return base + 0x1000000ll * (a & 0x1);
+    __bdk_csr_fatal("IOBNX_MCTLS_REG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_MCTLS_REG(a) bdk_iobnx_mctls_reg_t
+#define bustype_BDK_IOBNX_MCTLS_REG(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_MCTLS_REG(a) "IOBNX_MCTLS_REG"
+#define device_bar_BDK_IOBNX_MCTLS_REG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_MCTLS_REG(a) (a)
+#define arguments_BDK_IOBNX_MCTLS_REG(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_msix_pba#
+ *
+ * IOBN MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the IOBN_INT_VEC_E enumeration.
+ *
+ * Identical 64-bit layout on all models (single generic "s" variant).
+ */
+union bdk_iobnx_msix_pbax
+{
+    uint64_t u;
+    struct bdk_iobnx_msix_pbax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t pend : 64; /**< [ 63: 0](RO) Pending message for the associated IOBN()_MSIX_VEC()_CTL, enumerated by IOBN_INT_VEC_E.
+                                Bits that have no associated IOBN_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+        uint64_t pend : 64; /**< [ 63: 0](RO) Pending message for the associated IOBN()_MSIX_VEC()_CTL, enumerated by IOBN_INT_VEC_E.
+                                Bits that have no associated IOBN_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_msix_pbax_s cn; */
+};
+typedef union bdk_iobnx_msix_pbax bdk_iobnx_msix_pbax_t;
+
+static inline uint64_t BDK_IOBNX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+    /* PBA base in PF_BAR4; all supported models allow only PBA word b==0,
+       so index b never contributes to the address. */
+    const uint64_t base = 0x87e0f0ff0000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+        return base;
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && ((a<=1) && (b==0)))
+        return base + 0x1000000ll * (a & 0x1);
+    __bdk_csr_fatal("IOBNX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_MSIX_PBAX(a,b) bdk_iobnx_msix_pbax_t
+#define bustype_BDK_IOBNX_MSIX_PBAX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_MSIX_PBAX(a,b) "IOBNX_MSIX_PBAX"
+#define device_bar_BDK_IOBNX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_IOBNX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_IOBNX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_msix_vec#_addr
+ *
+ * IOBN MSI-X Vector-Table Address Register
+ * This register is the MSI-X vector table, indexed by the IOBN_INT_VEC_E enumeration.
+ *
+ * CN8XXX parts implement a 47-bit [ADDR] ([48:2], cn8 variant); CN9XXX and
+ * the generic layout implement 51 bits ([52:2]).
+ */
+union bdk_iobnx_msix_vecx_addr
+{
+    uint64_t u;
+    struct bdk_iobnx_msix_vecx_addr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_53_63 : 11;
+        uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1 : 1;
+        uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                 1 = This vector's IOBN_MSIX_VEC()_ADDR, IOBN_MSIX_VEC()_CTL, and corresponding
+                                 bit of IOBN_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                 by the nonsecure world.
+
+                                 If PCCPF_IOBN_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+                                 is set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                 1 = This vector's IOBN_MSIX_VEC()_ADDR, IOBN_MSIX_VEC()_CTL, and corresponding
+                                 bit of IOBN_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                 by the nonsecure world.
+
+                                 If PCCPF_IOBN_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+                                 is set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1 : 1;
+        uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_iobnx_msix_vecx_addr_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63 : 15;
+        uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1 : 1;
+        uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                 1 = This vector's IOBN_MSIX_VEC()_ADDR, IOBN_MSIX_VEC()_CTL, and corresponding
+                                 bit of IOBN_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                 by the nonsecure world.
+
+                                 If PCCPF_IOBN_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+                                 is set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                 1 = This vector's IOBN_MSIX_VEC()_ADDR, IOBN_MSIX_VEC()_CTL, and corresponding
+                                 bit of IOBN_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                 by the nonsecure world.
+
+                                 If PCCPF_IOBN_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+                                 is set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1 : 1;
+        uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+    } cn8;
+    struct bdk_iobnx_msix_vecx_addr_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_53_63 : 11;
+        uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1 : 1;
+        uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                 1 = This vector's IOBN()_MSIX_VEC()_ADDR, IOBN()_MSIX_VEC()_CTL, and
+                                 corresponding bit of IOBN()_MSIX_PBA() are RAZ/WI and does not cause a fault
+                                 when accessed by the nonsecure world.
+
+                                 If PCCPF_IOBN_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+                                 is set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                 1 = This vector's IOBN()_MSIX_VEC()_ADDR, IOBN()_MSIX_VEC()_CTL, and
+                                 corresponding bit of IOBN()_MSIX_PBA() are RAZ/WI and does not cause a fault
+                                 when accessed by the nonsecure world.
+
+                                 If PCCPF_IOBN_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC])
+                                 is set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1 : 1;
+        uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+    } cn9;
+};
+typedef union bdk_iobnx_msix_vecx_addr bdk_iobnx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_IOBNX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+    /* Vector table base in PF_BAR4; 0x10 bytes per vector entry. */
+    const uint64_t base = 0x87e0f0f00000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+        return base; /* CN81XX: single IOBN, single vector */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) &&
+        ((a<=1) && (b==0)))
+        return base + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+        return base + 0x1000000ll * (a & 0x1) + 0x10ll * (b & 0x1);
+    __bdk_csr_fatal("IOBNX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_MSIX_VECX_ADDR(a,b) bdk_iobnx_msix_vecx_addr_t
+#define bustype_BDK_IOBNX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_MSIX_VECX_ADDR(a,b) "IOBNX_MSIX_VECX_ADDR"
+#define device_bar_BDK_IOBNX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_IOBNX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_IOBNX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_msix_vec#_ctl
+ *
+ * IOBN MSI-X Vector-Table Control and Data Register
+ * This register is the MSI-X vector table, indexed by the IOBN_INT_VEC_E enumeration.
+ *
+ * CN8XXX limits [DATA] to 20 bits (cn8 variant); CN9XXX implements the full
+ * 32-bit [DATA] of the generic layout.
+ */
+union bdk_iobnx_msix_vecx_ctl
+{
+    uint64_t u;
+    struct bdk_iobnx_msix_vecx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63 : 31;
+        uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+        uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+        uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+        uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+        uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_iobnx_msix_vecx_ctl_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63 : 31;
+        uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+        uint64_t reserved_20_31 : 12;
+        uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+        uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+        uint64_t reserved_20_31 : 12;
+        uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+        uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } cn8;
+    /* struct bdk_iobnx_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_iobnx_msix_vecx_ctl bdk_iobnx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_IOBNX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+    /* Vector table CTL word sits 8 bytes above the ADDR word; 0x10 per entry. */
+    const uint64_t base = 0x87e0f0f00008ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+        return base; /* CN81XX: single IOBN, single vector */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN88XX)) &&
+        ((a<=1) && (b==0)))
+        return base + 0x1000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+        return base + 0x1000000ll * (a & 0x1) + 0x10ll * (b & 0x1);
+    __bdk_csr_fatal("IOBNX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_MSIX_VECX_CTL(a,b) bdk_iobnx_msix_vecx_ctl_t
+#define bustype_BDK_IOBNX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_MSIX_VECX_CTL(a,b) "IOBNX_MSIX_VECX_CTL"
+#define device_bar_BDK_IOBNX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_IOBNX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_IOBNX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncb#_acc
+ *
+ * IOBN NCB Access Registers
+ * This register sets attributes of NCBDIDs address bits \<43:36\>.
+ */
+union bdk_iobnx_ncbx_acc
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_acc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63 : 63;
+        uint64_t all_cmds : 1; /**< [ 0: 0](R/W) Device supports all commands.
+                                   0 = Only naturally aligned loads and stores that are 64-bit or smaller are
+                                   permitted to the NCB device. This setting is used for non-PEM devices.
+                                   1 = Allow all size accesses, plus atomics and LMTSTs. This setting is used for
+                                   PEM.
+
+                                   Reset value of this field varies for different devices.
+                                   Using non-reset values is for diagnostic use only.
+
+                                   Internal:
+                                   FIXME resets to be added. */
+#else /* Word 0 - Little Endian */
+        uint64_t all_cmds : 1; /**< [ 0: 0](R/W) Device supports all commands.
+                                   0 = Only naturally aligned loads and stores that are 64-bit or smaller are
+                                   permitted to the NCB device. This setting is used for non-PEM devices.
+                                   1 = Allow all size accesses, plus atomics and LMTSTs. This setting is used for
+                                   PEM.
+
+                                   Reset value of this field varies for different devices.
+                                   Using non-reset values is for diagnostic use only.
+
+                                   Internal:
+                                   FIXME resets to be added. */
+        uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_acc_s cn; */
+};
+typedef union bdk_iobnx_ncbx_acc bdk_iobnx_ncbx_acc_t;
+
+static inline uint64_t BDK_IOBNX_NCBX_ACC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_ACC(unsigned long a, unsigned long b)
+{
+    /* CN9XXX only: two IOBNs (0x1000000 apart), 256 NCBDIDs, 8 bytes each. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1) || (b > 255))
+        __bdk_csr_fatal("IOBNX_NCBX_ACC", 2, a, b, 0, 0);
+    return 0x87e0f0080000ll + 0x1000000ll * (a & 0x1) + 8ll * (b & 0xff);
+}
+
+#define typedef_BDK_IOBNX_NCBX_ACC(a,b) bdk_iobnx_ncbx_acc_t
+#define bustype_BDK_IOBNX_NCBX_ACC(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_ACC(a,b) "IOBNX_NCBX_ACC"
+#define device_bar_BDK_IOBNX_NCBX_ACC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_ACC(a,b) (a)
+#define arguments_BDK_IOBNX_NCBX_ACC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncb#_arb#_bp_test
+ *
+ * IOBN Back Pressure Register
+ */
+union bdk_iobnx_ncbx_arbx_bp_test
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_arbx_bp_test_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t bp_cfg : 64; /**< [ 63: 0](R/W) Backpressure weight. For diagnostic use only.
+                                  Internal:
+                                  There are 2 backpressure configuration bits per enable, with the two bits
+                                  defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+                                  0x3=25% of the time.
+                                  \<etc\> up to max number of supported ports per arbiter
+                                  \<7:6\> = Config 3.
+                                  \<5:4\> = Config 2.
+                                  \<3:2\> = Config 1.
+                                  \<1:0\> = Config 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t bp_cfg : 64; /**< [ 63: 0](R/W) Backpressure weight. For diagnostic use only.
+                                  Internal:
+                                  There are 2 backpressure configuration bits per enable, with the two bits
+                                  defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+                                  0x3=25% of the time.
+                                  \<etc\> up to max number of supported ports per arbiter
+                                  \<7:6\> = Config 3.
+                                  \<5:4\> = Config 2.
+                                  \<3:2\> = Config 1.
+                                  \<1:0\> = Config 0. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_arbx_bp_test_s cn; */
+};
+typedef union bdk_iobnx_ncbx_arbx_bp_test bdk_iobnx_ncbx_arbx_bp_test_t;
+
+static inline uint64_t BDK_IOBNX_NCBX_ARBX_BP_TEST(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_ARBX_BP_TEST(unsigned long a, unsigned long b, unsigned long c)
+{
+    /* CN9XXX only: a = IOBN (0-1), b = NCB (0-2), c = arbiter (0-31). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1) || (b > 2) || (c > 31))
+        __bdk_csr_fatal("IOBNX_NCBX_ARBX_BP_TEST", 3, a, b, c, 0);
+    return 0x87e0f00f8000ll + 0x1000000ll * (a & 0x1) + 0x400ll * (b & 0x3) + 8ll * (c & 0x1f);
+}
+
+#define typedef_BDK_IOBNX_NCBX_ARBX_BP_TEST(a,b,c) bdk_iobnx_ncbx_arbx_bp_test_t
+#define bustype_BDK_IOBNX_NCBX_ARBX_BP_TEST(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_ARBX_BP_TEST(a,b,c) "IOBNX_NCBX_ARBX_BP_TEST"
+#define device_bar_BDK_IOBNX_NCBX_ARBX_BP_TEST(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_ARBX_BP_TEST(a,b,c) (a)
+#define arguments_BDK_IOBNX_NCBX_ARBX_BP_TEST(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) iobn#_ncb#_arb#_crds
+ *
+ * IOBN NREQ Priority Register
+ */
+union bdk_iobnx_ncbx_arbx_crds
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_arbx_crds_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_16_63 : 48;
+        uint64_t max : 8; /**< [ 15: 8](R/W) Maximum number of FLIDs available to the requestor.
+                              Decreasing this number will limit the maximum burst performance of this device. */
+        uint64_t min : 8; /**< [ 7: 0](R/W) Minimum number of FLIDs available to the requestor. From the total available
+                              credits this many will be set aside for this NREQID to use.
+                              Increasing this number will ensure this device has dedicated bandwidth over
+                              other devices. Must be 0x1 or larger for GIC. Recommend 0x1 or larger for
+                              all devices that are used. */
+#else /* Word 0 - Little Endian */
+        uint64_t min : 8; /**< [ 7: 0](R/W) Minimum number of FLIDs available to the requestor. From the total available
+                              credits this many will be set aside for this NREQID to use.
+                              Increasing this number will ensure this device has dedicated bandwidth over
+                              other devices. Must be 0x1 or larger for GIC. Recommend 0x1 or larger for
+                              all devices that are used. */
+        uint64_t max : 8; /**< [ 15: 8](R/W) Maximum number of FLIDs available to the requestor.
+                              Decreasing this number will limit the maximum burst performance of this device. */
+        uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_arbx_crds_s cn; */
+};
+typedef union bdk_iobnx_ncbx_arbx_crds bdk_iobnx_ncbx_arbx_crds_t;
+
+static inline uint64_t BDK_IOBNX_NCBX_ARBX_CRDS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_ARBX_CRDS(unsigned long a, unsigned long b, unsigned long c)
+{
+    /* CN9XXX only: a = IOBN (0-1), b = NCB (0-2), c = arbiter (0-31). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1) || (b > 2) || (c > 31))
+        __bdk_csr_fatal("IOBNX_NCBX_ARBX_CRDS", 3, a, b, c, 0);
+    return 0x87e0f00f0000ll + 0x1000000ll * (a & 0x1) + 0x400ll * (b & 0x3) + 8ll * (c & 0x1f);
+}
+
+#define typedef_BDK_IOBNX_NCBX_ARBX_CRDS(a,b,c) bdk_iobnx_ncbx_arbx_crds_t
+#define bustype_BDK_IOBNX_NCBX_ARBX_CRDS(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_ARBX_CRDS(a,b,c) "IOBNX_NCBX_ARBX_CRDS"
+#define device_bar_BDK_IOBNX_NCBX_ARBX_CRDS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_ARBX_CRDS(a,b,c) (a)
+#define arguments_BDK_IOBNX_NCBX_ARBX_CRDS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) iobn#_ncb#_arb#_rw#_lat_pc
+ *
+ * IOBN NCB Latency Performance Counter Registers
+ */
+union bdk_iobnx_ncbx_arbx_rwx_lat_pc
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_arbx_rwx_lat_pc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count : 64; /**< [ 63: 0](R/W/H) Latency performance counter.
+
+                                 _ RW(0) increments every cycle by the number of read transactions that have been
+                                 granted from the given NCB, but have not had their credit returned to the NGNT.
+
+                                 _ RW(1) increments every cycle by the number of write transactions that have been
+                                 granted from the given NCB, but have not had their credit returned to the NGNT.
+
+                                 This counter should be divided by IOBN()_NCB()_ARB()_RW()_REQ_PC to determine each NCB
+                                 bus's average read and write latency. */
+#else /* Word 0 - Little Endian */
+        uint64_t count : 64; /**< [ 63: 0](R/W/H) Latency performance counter.
+
+                                 _ RW(0) increments every cycle by the number of read transactions that have been
+                                 granted from the given NCB, but have not had their credit returned to the NGNT.
+
+                                 _ RW(1) increments every cycle by the number of write transactions that have been
+                                 granted from the given NCB, but have not had their credit returned to the NGNT.
+
+                                 This counter should be divided by IOBN()_NCB()_ARB()_RW()_REQ_PC to determine each NCB
+                                 bus's average read and write latency. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_arbx_rwx_lat_pc_s cn; */
+};
+typedef union bdk_iobnx_ncbx_arbx_rwx_lat_pc bdk_iobnx_ncbx_arbx_rwx_lat_pc_t;
+
+static inline uint64_t BDK_IOBNX_NCBX_ARBX_RWX_LAT_PC(unsigned long a, unsigned long b, unsigned long c, unsigned long d) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_ARBX_RWX_LAT_PC(unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+    /* CN9XXX only: a = IOBN (0-1), b = NCB (0-2), c = arbiter (0-31),
+       d = counter select (0 = read, 1 = write). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1) || (b > 2) || (c > 31) || (d > 1))
+        __bdk_csr_fatal("IOBNX_NCBX_ARBX_RWX_LAT_PC", 4, a, b, c, d);
+    return 0x87e0f00f4000ll + 0x1000000ll * (a & 0x1) + 0x400ll * (b & 0x3) + 0x20ll * (c & 0x1f) + 8ll * (d & 0x1);
+}
+
+#define typedef_BDK_IOBNX_NCBX_ARBX_RWX_LAT_PC(a,b,c,d) bdk_iobnx_ncbx_arbx_rwx_lat_pc_t
+#define bustype_BDK_IOBNX_NCBX_ARBX_RWX_LAT_PC(a,b,c,d) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_ARBX_RWX_LAT_PC(a,b,c,d) "IOBNX_NCBX_ARBX_RWX_LAT_PC"
+#define device_bar_BDK_IOBNX_NCBX_ARBX_RWX_LAT_PC(a,b,c,d) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_ARBX_RWX_LAT_PC(a,b,c,d) (a)
+#define arguments_BDK_IOBNX_NCBX_ARBX_RWX_LAT_PC(a,b,c,d) (a),(b),(c),(d)
+
+/**
+ * Register (RSL) iobn#_ncb#_arb#_rw#_req_pc
+ *
+ * IOBN NCB Request Performance Counter Registers
+ */
+union bdk_iobnx_ncbx_arbx_rwx_req_pc
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_arbx_rwx_req_pc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count                 : 64; /**< [ 63: 0](R/W/H) Request performance counter.
+
+                                                                 _ RW(0) increments on read transaction being granted by NGNT.
+
+                                                                 _ RW(1) increments on write transaction being granted by NGNT. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 64; /**< [ 63: 0](R/W/H) Request performance counter.
+
+                                                                 _ RW(0) increments on read transaction being granted by NGNT.
+
+                                                                 _ RW(1) increments on write transaction being granted by NGNT. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_arbx_rwx_req_pc_s cn; */
+};
+typedef union bdk_iobnx_ncbx_arbx_rwx_req_pc bdk_iobnx_ncbx_arbx_rwx_req_pc_t;
+
+/* Return the CSR physical address for IOB (a), NCB bus (b), arbiter (c), read/write
+   counter (d); __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCBX_ARBX_RWX_REQ_PC(unsigned long a, unsigned long b, unsigned long c, unsigned long d) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_ARBX_RWX_REQ_PC(unsigned long a, unsigned long b, unsigned long c, unsigned long d)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=2) && (c<=31) && (d<=1)))
+        return 0x87e0f00f2000ll + 0x1000000ll * ((a) & 0x1) + 0x400ll * ((b) & 0x3) + 0x20ll * ((c) & 0x1f) + 8ll * ((d) & 0x1);
+    __bdk_csr_fatal("IOBNX_NCBX_ARBX_RWX_REQ_PC", 4, a, b, c, d);
+}
+
+#define typedef_BDK_IOBNX_NCBX_ARBX_RWX_REQ_PC(a,b,c,d) bdk_iobnx_ncbx_arbx_rwx_req_pc_t
+#define bustype_BDK_IOBNX_NCBX_ARBX_RWX_REQ_PC(a,b,c,d) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_ARBX_RWX_REQ_PC(a,b,c,d) "IOBNX_NCBX_ARBX_RWX_REQ_PC"
+#define device_bar_BDK_IOBNX_NCBX_ARBX_RWX_REQ_PC(a,b,c,d) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_ARBX_RWX_REQ_PC(a,b,c,d) (a)
+#define arguments_BDK_IOBNX_NCBX_ARBX_RWX_REQ_PC(a,b,c,d) (a),(b),(c),(d)
+
+/**
+ * Register (RSL) iobn#_ncb#_const
+ *
+ * IOBN NCB Constant Registers
+ * This register returns discovery information indexed by each NCB ID (physical address
+ * bits \<43:36\>). Each index {a} (IOB) returns identical information for a given index {b}.
+ */
+union bdk_iobnx_ncbx_const
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_const_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_24_63        : 40;
+        uint64_t farbid                : 8;  /**< [ 23: 16](RO) Flat ARBID for the given NCB ID. */
+        uint64_t arbid                 : 8;  /**< [ 15:  8](RO) NCB bus ARBID for the given NCB ID. */
+        uint64_t ncb                   : 4;  /**< [  7:  4](RO) Physical bus number for the given NCB ID. */
+        uint64_t iob                   : 3;  /**< [  3:  1](RO) IOB number for the given NCB ID. */
+        uint64_t valid                 : 1;  /**< [  0:  0](RO) Set if this NCB ID is a valid ID. */
+#else /* Word 0 - Little Endian */
+        uint64_t valid                 : 1;  /**< [  0:  0](RO) Set if this NCB ID is a valid ID. */
+        uint64_t iob                   : 3;  /**< [  3:  1](RO) IOB number for the given NCB ID. */
+        uint64_t ncb                   : 4;  /**< [  7:  4](RO) Physical bus number for the given NCB ID. */
+        uint64_t arbid                 : 8;  /**< [ 15:  8](RO) NCB bus ARBID for the given NCB ID. */
+        uint64_t farbid                : 8;  /**< [ 23: 16](RO) Flat ARBID for the given NCB ID. */
+        uint64_t reserved_24_63        : 40;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_const_s cn; */
+};
+typedef union bdk_iobnx_ncbx_const bdk_iobnx_ncbx_const_t;
+
+/* Return the CSR physical address for IOB (a) and NCB ID (b);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCBX_CONST(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_CONST(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=255)))
+        return 0x87e0f0001000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+    __bdk_csr_fatal("IOBNX_NCBX_CONST", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBX_CONST(a,b) bdk_iobnx_ncbx_const_t
+#define bustype_BDK_IOBNX_NCBX_CONST(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_CONST(a,b) "IOBNX_NCBX_CONST"
+#define device_bar_BDK_IOBNX_NCBX_CONST(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_CONST(a,b) (a)
+#define arguments_BDK_IOBNX_NCBX_CONST(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncb#_credits
+ *
+ * IOBN NCB Credits Register
+ * This register controls the number of loads and stores each NCB can have to the L2.
+ */
+union bdk_iobnx_ncbx_credits
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_credits_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_23_63        : 41;
+        uint64_t ncb_wr_buf_crd        : 7;  /**< [ 22: 16](R/W) NCB write buffer credit. Each NCB can have 64 write buffers in flight to the L2; this is
+                                                                 the number by which to decrease the 64. */
+        uint64_t reserved_15           : 1;
+        uint64_t ncb0_wr_crd           : 7;  /**< [ 14:  8](R/W) NCB write credit. Each NCB can have 64 writes in flight to the L2; this is the number by
+                                                                 which to
+                                                                 decrease the 64. */
+        uint64_t reserved_7            : 1;
+        uint64_t ncb0_rd_crd           : 7;  /**< [  6:  0](R/W) NCB read credit. Each NCB can have 64 reads in flight to the L2; this is the number by
+                                                                 which to decrease the 64. */
+#else /* Word 0 - Little Endian */
+        uint64_t ncb0_rd_crd           : 7;  /**< [  6:  0](R/W) NCB read credit. Each NCB can have 64 reads in flight to the L2; this is the number by
+                                                                 which to decrease the 64. */
+        uint64_t reserved_7            : 1;
+        uint64_t ncb0_wr_crd           : 7;  /**< [ 14:  8](R/W) NCB write credit. Each NCB can have 64 writes in flight to the L2; this is the number by
+                                                                 which to
+                                                                 decrease the 64. */
+        uint64_t reserved_15           : 1;
+        uint64_t ncb_wr_buf_crd        : 7;  /**< [ 22: 16](R/W) NCB write buffer credit. Each NCB can have 64 write buffers in flight to the L2; this is
+                                                                 the number by which to decrease the 64. */
+        uint64_t reserved_23_63        : 41;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_credits_s cn; */
+};
+typedef union bdk_iobnx_ncbx_credits bdk_iobnx_ncbx_credits_t;
+
+/* Return the CSR physical address for IOB (a) and NCB bus (b);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCBX_CREDITS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_CREDITS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+        return 0x87e0f0001000ll + 0x1000000ll * ((a) & 0x0) + 0x100ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+        return 0x87e0f0001000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0f0001000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x1);
+    __bdk_csr_fatal("IOBNX_NCBX_CREDITS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBX_CREDITS(a,b) bdk_iobnx_ncbx_credits_t
+#define bustype_BDK_IOBNX_NCBX_CREDITS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_CREDITS(a,b) "IOBNX_NCBX_CREDITS"
+#define device_bar_BDK_IOBNX_NCBX_CREDITS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_CREDITS(a,b) (a)
+#define arguments_BDK_IOBNX_NCBX_CREDITS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncb#_ctl
+ *
+ * IOBN NCB Control Registers
+ */
+union bdk_iobnx_ncbx_ctl
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_10_63        : 54;
+        uint64_t stp                   : 2;  /**< [  9:  8](R/W) When a complete cache block is written a STP will be converted to:
+                                                                 0 = STF.
+                                                                 1 = STY.
+                                                                 2 = STT.
+                                                                 3 = Reserved. */
+        uint64_t reserved_0_7          : 8;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_7          : 8;
+        uint64_t stp                   : 2;  /**< [  9:  8](R/W) When a complete cache block is written a STP will be converted to:
+                                                                 0 = STF.
+                                                                 1 = STY.
+                                                                 2 = STT.
+                                                                 3 = Reserved. */
+        uint64_t reserved_10_63        : 54;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_iobnx_ncbx_ctl_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_10_63        : 54;
+        uint64_t stp                   : 2;  /**< [  9:  8](R/W) When a complete cache block is written a STP will be converted to:
+                                                                 0 = STF.
+                                                                 1 = STY.
+                                                                 2 = STT.
+                                                                 3 = Reserved. */
+        uint64_t reserved_2_7          : 6;
+        uint64_t rstp                  : 2;  /**< [  1:  0](R/W) When a complete cache block is written a RSTP will be converted to:
+                                                                 0 = STY.
+                                                                 1 = STT.
+                                                                 2 = STF.
+                                                                 3 = Reserved. */
+#else /* Word 0 - Little Endian */
+        uint64_t rstp                  : 2;  /**< [  1:  0](R/W) When a complete cache block is written a RSTP will be converted to:
+                                                                 0 = STY.
+                                                                 1 = STT.
+                                                                 2 = STF.
+                                                                 3 = Reserved. */
+        uint64_t reserved_2_7          : 6;
+        uint64_t stp                   : 2;  /**< [  9:  8](R/W) When a complete cache block is written a STP will be converted to:
+                                                                 0 = STF.
+                                                                 1 = STY.
+                                                                 2 = STT.
+                                                                 3 = Reserved. */
+        uint64_t reserved_10_63        : 54;
+#endif /* Word 0 - End */
+    } cn8;
+    struct bdk_iobnx_ncbx_ctl_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_2_63         : 62;
+        uint64_t dis                   : 1;  /**< [  1:  1](R/W/H) Disable the opportunistic low latency mode for all ports. For diagnostic use only. */
+        uint64_t cal                   : 1;  /**< [  0:  0](R/W/H) Calibration active. Write one to field to start calibration. Cleared when
+                                                                 calibration is complete. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+        uint64_t cal                   : 1;  /**< [  0:  0](R/W/H) Calibration active. Write one to field to start calibration. Cleared when
+                                                                 calibration is complete. For diagnostic use only. */
+        uint64_t dis                   : 1;  /**< [  1:  1](R/W/H) Disable the opportunistic low latency mode for all ports. For diagnostic use only. */
+        uint64_t reserved_2_63         : 62;
+#endif /* Word 0 - End */
+    } cn9;
+};
+typedef union bdk_iobnx_ncbx_ctl bdk_iobnx_ncbx_ctl_t;
+
+/* Return the CSR physical address for IOB (a) and NCB bus (b); note CN9XXX uses a
+   different base/stride. __bdk_csr_fatal() is called for unsupported combinations. */
+static inline uint64_t BDK_IOBNX_NCBX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_CTL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+        return 0x87e0f0004000ll + 0x1000000ll * ((a) & 0x0) + 0x100ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+        return 0x87e0f0004000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x87e0f0004000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=2)))
+        return 0x87e0f00f6000ll + 0x1000000ll * ((a) & 0x1) + 0x400ll * ((b) & 0x3);
+    __bdk_csr_fatal("IOBNX_NCBX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBX_CTL(a,b) bdk_iobnx_ncbx_ctl_t
+#define bustype_BDK_IOBNX_NCBX_CTL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_CTL(a,b) "IOBNX_NCBX_CTL"
+#define device_bar_BDK_IOBNX_NCBX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_CTL(a,b) (a)
+#define arguments_BDK_IOBNX_NCBX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncb#_mrml_permit_shadow
+ *
+ * INTERNAL: IOBN NCB Access Registers
+ *
+ * This register provides a way to read back IOB's captured copy of writes made to
+ * MRML's MRML_NCB()_PERMIT. For diagnostic use only.
+ */
+union bdk_iobnx_ncbx_mrml_permit_shadow
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_mrml_permit_shadow_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t kill                  : 1;  /**< [  7:  7](SRO/H) Kill the device. Once written with one, stays
+                                                                 set until warm chip reset. If set, no access
+                                                                 allowed by any initiator. */
+        uint64_t reserved_2_6          : 5;
+        uint64_t nsec_dis              : 1;  /**< [  1:  1](SRO/H) Nonsecure disable. As with [SEC_DIS], but for accesses initiated by non-secure devices */
+        uint64_t sec_dis               : 1;  /**< [  0:  0](SRO/H) Secure disable. */
+#else /* Word 0 - Little Endian */
+        uint64_t sec_dis               : 1;  /**< [  0:  0](SRO/H) Secure disable. */
+        uint64_t nsec_dis              : 1;  /**< [  1:  1](SRO/H) Nonsecure disable. As with [SEC_DIS], but for accesses initiated by non-secure devices */
+        uint64_t reserved_2_6          : 5;
+        uint64_t kill                  : 1;  /**< [  7:  7](SRO/H) Kill the device. Once written with one, stays
+                                                                 set until warm chip reset. If set, no access
+                                                                 allowed by any initiator. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_mrml_permit_shadow_s cn; */
+};
+typedef union bdk_iobnx_ncbx_mrml_permit_shadow bdk_iobnx_ncbx_mrml_permit_shadow_t;
+
+/* Return the CSR physical address for IOB (a) and NCB ID (b);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCBX_MRML_PERMIT_SHADOW(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_MRML_PERMIT_SHADOW(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=255)))
+        return 0x87e0f0090000ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+    __bdk_csr_fatal("IOBNX_NCBX_MRML_PERMIT_SHADOW", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBX_MRML_PERMIT_SHADOW(a,b) bdk_iobnx_ncbx_mrml_permit_shadow_t
+#define bustype_BDK_IOBNX_NCBX_MRML_PERMIT_SHADOW(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_MRML_PERMIT_SHADOW(a,b) "IOBNX_NCBX_MRML_PERMIT_SHADOW"
+#define device_bar_BDK_IOBNX_NCBX_MRML_PERMIT_SHADOW(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_MRML_PERMIT_SHADOW(a,b) (a)
+#define arguments_BDK_IOBNX_NCBX_MRML_PERMIT_SHADOW(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncb#_rw#_lat_pc
+ *
+ * IOBN NCB Latency Performance Counter Registers
+ */
+union bdk_iobnx_ncbx_rwx_lat_pc
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_rwx_lat_pc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count                 : 64; /**< [ 63: 0](R/W/H) Latency performance counter.
+
+                                                                 _ RW(0) increments every cycle by the number of read transactions that have
+                                                                 entered IOB from the given NCB, but have not returned read data to the device.
+
+                                                                 _ RW(1) increments every cycle by the number of write transactions that have
+                                                                 entered IOB from the given NCB, but have not returned write commits to the
+                                                                 device.
+
+                                                                 This counter should be divided by IOBN()_NCB()_RW()_REQ_PC to determine each NCB
+                                                                 bus's average read and write latency. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 64; /**< [ 63: 0](R/W/H) Latency performance counter.
+
+                                                                 _ RW(0) increments every cycle by the number of read transactions that have
+                                                                 entered IOB from the given NCB, but have not returned read data to the device.
+
+                                                                 _ RW(1) increments every cycle by the number of write transactions that have
+                                                                 entered IOB from the given NCB, but have not returned write commits to the
+                                                                 device.
+
+                                                                 This counter should be divided by IOBN()_NCB()_RW()_REQ_PC to determine each NCB
+                                                                 bus's average read and write latency. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_rwx_lat_pc_s cn; */
+};
+typedef union bdk_iobnx_ncbx_rwx_lat_pc bdk_iobnx_ncbx_rwx_lat_pc_t;
+
+/* Return the CSR physical address for IOB (a), NCB bus (b), read/write counter (c);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCBX_RWX_LAT_PC(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_RWX_LAT_PC(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0) && (c<=1)))
+        return 0x87e0f000d000ll + 0x1000000ll * ((a) & 0x0) + 0x100ll * ((b) & 0x0) + 0x10ll * ((c) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1) && (c<=1)))
+        return 0x87e0f000d000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x1) + 0x10ll * ((c) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=2) && (c<=1)))
+        return 0x87e0f000d000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3) + 0x10ll * ((c) & 0x1);
+    __bdk_csr_fatal("IOBNX_NCBX_RWX_LAT_PC", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBX_RWX_LAT_PC(a,b,c) bdk_iobnx_ncbx_rwx_lat_pc_t
+#define bustype_BDK_IOBNX_NCBX_RWX_LAT_PC(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_RWX_LAT_PC(a,b,c) "IOBNX_NCBX_RWX_LAT_PC"
+#define device_bar_BDK_IOBNX_NCBX_RWX_LAT_PC(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_RWX_LAT_PC(a,b,c) (a)
+#define arguments_BDK_IOBNX_NCBX_RWX_LAT_PC(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) iobn#_ncb#_rw#_req_pc
+ *
+ * IOBN NCB Request Performance Counter Registers
+ */
+union bdk_iobnx_ncbx_rwx_req_pc
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_rwx_req_pc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count                 : 64; /**< [ 63: 0](R/W/H) Request performance counter.
+
+                                                                 _ RW(0) increments on read transaction entering IOB on given NCB bus.
+
+                                                                 _ RW(1) increments on write transaction entering IOB on given NCB bus. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 64; /**< [ 63: 0](R/W/H) Request performance counter.
+
+                                                                 _ RW(0) increments on read transaction entering IOB on given NCB bus.
+
+                                                                 _ RW(1) increments on write transaction entering IOB on given NCB bus. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_rwx_req_pc_s cn; */
+};
+typedef union bdk_iobnx_ncbx_rwx_req_pc bdk_iobnx_ncbx_rwx_req_pc_t;
+
+/* Return the CSR physical address for IOB (a), NCB bus (b), read/write counter (c);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCBX_RWX_REQ_PC(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_RWX_REQ_PC(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0) && (c<=1)))
+        return 0x87e0f000c000ll + 0x1000000ll * ((a) & 0x0) + 0x100ll * ((b) & 0x0) + 0x10ll * ((c) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1) && (c<=1)))
+        return 0x87e0f000c000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x1) + 0x10ll * ((c) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=1) && (b<=1) && (c<=1)))
+        return 0x87e0f000c000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x1) + 0x10ll * ((c) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=2) && (c<=1)))
+        return 0x87e0f000c000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3) + 0x10ll * ((c) & 0x1);
+    __bdk_csr_fatal("IOBNX_NCBX_RWX_REQ_PC", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBX_RWX_REQ_PC(a,b,c) bdk_iobnx_ncbx_rwx_req_pc_t
+#define bustype_BDK_IOBNX_NCBX_RWX_REQ_PC(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_RWX_REQ_PC(a,b,c) "IOBNX_NCBX_RWX_REQ_PC"
+#define device_bar_BDK_IOBNX_NCBX_RWX_REQ_PC(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_RWX_REQ_PC(a,b,c) (a)
+#define arguments_BDK_IOBNX_NCBX_RWX_REQ_PC(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) iobn#_ncb#_rw#_smmu_lat_pc
+ *
+ * IOBN NCB SMMU Latency Performance Counter Registers
+ */
+union bdk_iobnx_ncbx_rwx_smmu_lat_pc
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbx_rwx_smmu_lat_pc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count                 : 64; /**< [ 63: 0](R/W/H) SMMU latency performance counter.
+
+                                                                 _ RW(0) increments every cycle by the number of read transactions that have
+                                                                 entered IOB from the given NCB, but have not been address translated by the
+                                                                 SMMU.
+
+                                                                 _ RW(1) increments by the number of write transactions that have entered IOB
+                                                                 from the given NCB, but have not been address translated by the SMMU.
+
+                                                                 This counter should be divided by IOBN()_NCB()_RW()_REQ_PC to determine each NCB
+                                                                 bus's average read and write SMMU plus IOB front-end latency. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 64; /**< [ 63: 0](R/W/H) SMMU latency performance counter.
+
+                                                                 _ RW(0) increments every cycle by the number of read transactions that have
+                                                                 entered IOB from the given NCB, but have not been address translated by the
+                                                                 SMMU.
+
+                                                                 _ RW(1) increments by the number of write transactions that have entered IOB
+                                                                 from the given NCB, but have not been address translated by the SMMU.
+
+                                                                 This counter should be divided by IOBN()_NCB()_RW()_REQ_PC to determine each NCB
+                                                                 bus's average read and write SMMU plus IOB front-end latency. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbx_rwx_smmu_lat_pc_s cn; */
+};
+typedef union bdk_iobnx_ncbx_rwx_smmu_lat_pc bdk_iobnx_ncbx_rwx_smmu_lat_pc_t;
+
+/* Return the CSR physical address for IOB (a), NCB bus (b), read/write counter (c);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCBX_RWX_SMMU_LAT_PC(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBX_RWX_SMMU_LAT_PC(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0) && (c<=1)))
+        return 0x87e0f000e000ll + 0x1000000ll * ((a) & 0x0) + 0x100ll * ((b) & 0x0) + 0x10ll * ((c) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1) && (c<=1)))
+        return 0x87e0f000e000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x1) + 0x10ll * ((c) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=2) && (c<=1)))
+        return 0x87e0f000e000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3) + 0x10ll * ((c) & 0x1);
+    __bdk_csr_fatal("IOBNX_NCBX_RWX_SMMU_LAT_PC", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBX_RWX_SMMU_LAT_PC(a,b,c) bdk_iobnx_ncbx_rwx_smmu_lat_pc_t
+#define bustype_BDK_IOBNX_NCBX_RWX_SMMU_LAT_PC(a,b,c) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBX_RWX_SMMU_LAT_PC(a,b,c) "IOBNX_NCBX_RWX_SMMU_LAT_PC"
+#define device_bar_BDK_IOBNX_NCBX_RWX_SMMU_LAT_PC(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBX_RWX_SMMU_LAT_PC(a,b,c) (a)
+#define arguments_BDK_IOBNX_NCBX_RWX_SMMU_LAT_PC(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (RSL) iobn#_ncb0_hp
+ *
+ * IOBN NCBI0 High Performance Register
+ */
+union bdk_iobnx_ncb0_hp
+{
+    uint64_t u;
+    struct bdk_iobnx_ncb0_hp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lp                    : 1;  /**< [  3:  3](R/W) For IOBN0 the reset value for this is 0x1. For IOBN1 the reset value is 0x0.
+                                                                 When set, NCBI 0 ARB 0 request port 3 will use the low performance path through ARB 0. */
+        uint64_t hp                    : 3;  /**< [  2:  0](R/W) When set, NCBI 0 ARB 0 will use the high performance path through the IOBN.
+                                                                 Software typically must have IOB(0)_NCB0_HP[HP] set, and IOB(1)_NCB0_HP[HP] clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t hp                    : 3;  /**< [  2:  0](R/W) When set, NCBI 0 ARB 0 will use the high performance path through the IOBN.
+                                                                 Software typically must have IOB(0)_NCB0_HP[HP] set, and IOB(1)_NCB0_HP[HP] clear. */
+        uint64_t lp                    : 1;  /**< [  3:  3](R/W) For IOBN0 the reset value for this is 0x1. For IOBN1 the reset value is 0x0.
+                                                                 When set, NCBI 0 ARB 0 request port 3 will use the low performance path through ARB 0. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_iobnx_ncb0_hp_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lp                    : 1;  /**< [  3:  3](R/W) When set, NCBI 0 ARB 0 request port 3 will use the low performance path through ARB 0. */
+        uint64_t hp                    : 3;  /**< [  2:  0](R/W) When set, NCBI 0 ARB 0 for request ports 2..0 will use the high performance path through
+                                                                 the IOBN.
+                                                                 Software typically must have IOB(0)_NCB0_HP[HP] = 0x1. */
+#else /* Word 0 - Little Endian */
+        uint64_t hp                    : 3;  /**< [  2:  0](R/W) When set, NCBI 0 ARB 0 for request ports 2..0 will use the high performance path through
+                                                                 the IOBN.
+                                                                 Software typically must have IOB(0)_NCB0_HP[HP] = 0x1. */
+        uint64_t lp                    : 1;  /**< [  3:  3](R/W) When set, NCBI 0 ARB 0 request port 3 will use the low performance path through ARB 0. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn81xx;
+    struct bdk_iobnx_ncb0_hp_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t hp                    : 1;  /**< [  0:  0](R/W) When set, NCBI 0 ARB 0 will use the high performance path through the IOBN.
+                                                                 Software typically must have IOB(0)_NCB0_HP[HP] set, and IOB(1)_NCB0_HP[HP] clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t hp                    : 1;  /**< [  0:  0](R/W) When set, NCBI 0 ARB 0 will use the high performance path through the IOBN.
+                                                                 Software typically must have IOB(0)_NCB0_HP[HP] set, and IOB(1)_NCB0_HP[HP] clear. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_iobnx_ncb0_hp_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63         : 60;
+        uint64_t lp                    : 1;  /**< [  3:  3](R/W) For IOBN0 the reset value for this is 0x1. For IOBN1 the reset value is 0x0.
+                                                                 When set, NCBI 0 ARB 0 request port 3 will use the low performance path through ARB 0. */
+        uint64_t hp                    : 3;  /**< [  2:  0](R/W) For IOBN0 the reset value for this is 0x7. For IOBN1 the reset value is 0x0.
+                                                                 When set, NCBI 0 ARB 0 for request ports 2..0 will use the high performance path through
+                                                                 the IOBN.
+                                                                 Software typically must have IOB(0)_NCB0_HP[HP] = 0x7, and IOB(1)_NCB0_HP[HP] = 0x0. */
+#else /* Word 0 - Little Endian */
+        uint64_t hp                    : 3;  /**< [  2:  0](R/W) For IOBN0 the reset value for this is 0x7. For IOBN1 the reset value is 0x0.
+                                                                 When set, NCBI 0 ARB 0 for request ports 2..0 will use the high performance path through
+                                                                 the IOBN.
+                                                                 Software typically must have IOB(0)_NCB0_HP[HP] = 0x7, and IOB(1)_NCB0_HP[HP] = 0x0. */
+        uint64_t lp                    : 1;  /**< [  3:  3](R/W) For IOBN0 the reset value for this is 0x1. For IOBN1 the reset value is 0x0.
+                                                                 When set, NCBI 0 ARB 0 request port 3 will use the low performance path through ARB 0. */
+        uint64_t reserved_4_63         : 60;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_iobnx_ncb0_hp bdk_iobnx_ncb0_hp_t;
+
+/* Return the CSR physical address for IOB (a);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCB0_HP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCB0_HP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return 0x87e0f0003008ll + 0x1000000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x87e0f0003008ll + 0x1000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x87e0f0003008ll + 0x1000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("IOBNX_NCB0_HP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCB0_HP(a) bdk_iobnx_ncb0_hp_t
+#define bustype_BDK_IOBNX_NCB0_HP(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCB0_HP(a) "IOBNX_NCB0_HP"
+#define device_bar_BDK_IOBNX_NCB0_HP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCB0_HP(a) (a)
+#define arguments_BDK_IOBNX_NCB0_HP(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_ncb0_sdis#
+ *
+ * IOBN NCB Secure Disable Register
+ */
+union bdk_iobnx_ncb0_sdisx
+{
+    uint64_t u;
+    struct bdk_iobnx_ncb0_sdisx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t did                   : 64; /**< [ 63: 0](SR/W) When set a secure operation is required to access the NCBDID. If a nonsecure
+                                                                 operation occurs it will result in a R/W to ECAM0_NOP_ZF.
+
+                                                                 Index 0 corresponds to DIDs 63:0, index 1 to DIDs 127:64, index 2 to DIDs
+                                                                 191:128, and index 3 to DIDs 255:192.
+
+                                                                 Each IOB instance should be programmed identically, and should also be identical
+                                                                 to MRML_NCB()_SDEV. */
+#else /* Word 0 - Little Endian */
+        uint64_t did                   : 64; /**< [ 63: 0](SR/W) When set a secure operation is required to access the NCBDID. If a nonsecure
+                                                                 operation occurs it will result in a R/W to ECAM0_NOP_ZF.
+
+                                                                 Index 0 corresponds to DIDs 63:0, index 1 to DIDs 127:64, index 2 to DIDs
+                                                                 191:128, and index 3 to DIDs 255:192.
+
+                                                                 Each IOB instance should be programmed identically, and should also be identical
+                                                                 to MRML_NCB()_SDEV. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncb0_sdisx_s cn; */
+};
+typedef union bdk_iobnx_ncb0_sdisx bdk_iobnx_ncb0_sdisx_t;
+
+/* Return the CSR physical address for IOB (a) and 64-DID group (b);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCB0_SDISX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCB0_SDISX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
+        return 0x87e0f0002000ll + 0x1000000ll * ((a) & 0x0) + 0x100ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
+        return 0x87e0f0002000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x87e0f0002000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3);
+    __bdk_csr_fatal("IOBNX_NCB0_SDISX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCB0_SDISX(a,b) bdk_iobnx_ncb0_sdisx_t
+#define bustype_BDK_IOBNX_NCB0_SDISX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCB0_SDISX(a,b) "IOBNX_NCB0_SDISX"
+#define device_bar_BDK_IOBNX_NCB0_SDISX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCB0_SDISX(a,b) (a)
+#define arguments_BDK_IOBNX_NCB0_SDISX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncb0_skill#
+ *
+ * IOBN NCB Secure Kill-Device Registers
+ */
+union bdk_iobnx_ncb0_skillx
+{
+    uint64_t u;
+    struct bdk_iobnx_ncb0_skillx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t skill                 : 64; /**< [ 63: 0](SR/W1S) NCB function kill. If set, any operation to this device will be directed
+                                                                 to ECAM0_NOP_ZF. Write one to set, once set cannot be cleared until soft reset.
+
+                                                                 Index 0 corresponds to DIDs 63:0, index 1 to DIDs 127:64, index 2 to DIDs
+                                                                 191:128, and index 3 to DIDs 255:192.
+
+                                                                 Each IOB instance should be programmed identically, and should also be identical
+                                                                 to MRML_NCB()_SKILL. */
+#else /* Word 0 - Little Endian */
+        uint64_t skill                 : 64; /**< [ 63: 0](SR/W1S) NCB function kill. If set, any operation to this device will be directed
+                                                                 to ECAM0_NOP_ZF. Write one to set, once set cannot be cleared until soft reset.
+
+                                                                 Index 0 corresponds to DIDs 63:0, index 1 to DIDs 127:64, index 2 to DIDs
+                                                                 191:128, and index 3 to DIDs 255:192.
+
+                                                                 Each IOB instance should be programmed identically, and should also be identical
+                                                                 to MRML_NCB()_SKILL. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncb0_skillx_s cn; */
+};
+typedef union bdk_iobnx_ncb0_skillx bdk_iobnx_ncb0_skillx_t;
+
+/* Return the CSR physical address for IOB (a) and 64-DID group (b);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCB0_SKILLX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCB0_SKILLX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
+        return 0x87e0f000b000ll + 0x1000000ll * ((a) & 0x0) + 0x100ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
+        return 0x87e0f000b000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=1) && (b<=3)))
+        return 0x87e0f000b000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3);
+    __bdk_csr_fatal("IOBNX_NCB0_SKILLX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCB0_SKILLX(a,b) bdk_iobnx_ncb0_skillx_t
+#define bustype_BDK_IOBNX_NCB0_SKILLX(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCB0_SKILLX(a,b) "IOBNX_NCB0_SKILLX"
+#define device_bar_BDK_IOBNX_NCB0_SKILLX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCB0_SKILLX(a,b) (a)
+#define arguments_BDK_IOBNX_NCB0_SKILLX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncbo#_psn_status
+ *
+ * IOBN NCBO Poison Status Register
+ */
+union bdk_iobnx_ncbox_psn_status
+{
+    uint64_t u;
+    struct bdk_iobnx_ncbox_psn_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_52_63        : 12;
+        uint64_t address               : 52; /**< [ 51: 0](RO/H) Captured address when poison transaction was sent on NCBO. Valid when
+                                                                 corresponding bit is set in IOBN()_INT_SUM.
+                                                                 When corresponding bit in IOBN()_INT_SUM is cleared allows a new poison error to be latched. */
+#else /* Word 0 - Little Endian */
+        uint64_t address               : 52; /**< [ 51: 0](RO/H) Captured address when poison transaction was sent on NCBO. Valid when
+                                                                 corresponding bit is set in IOBN()_INT_SUM.
+                                                                 When corresponding bit in IOBN()_INT_SUM is cleared allows a new poison error to be latched. */
+        uint64_t reserved_52_63        : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_iobnx_ncbox_psn_status_s cn; */
+};
+typedef union bdk_iobnx_ncbox_psn_status bdk_iobnx_ncbox_psn_status_t;
+
+/* Return the CSR physical address for IOB (a) and NCBO bus (b);
+   __bdk_csr_fatal() is called for unsupported model/index combinations. */
+static inline uint64_t BDK_IOBNX_NCBOX_PSN_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBOX_PSN_STATUS(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=2)))
+        return 0x87e0f0003060ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x3);
+    __bdk_csr_fatal("IOBNX_NCBOX_PSN_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBOX_PSN_STATUS(a,b) bdk_iobnx_ncbox_psn_status_t
+#define bustype_BDK_IOBNX_NCBOX_PSN_STATUS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBOX_PSN_STATUS(a,b) "IOBNX_NCBOX_PSN_STATUS"
+#define device_bar_BDK_IOBNX_NCBOX_PSN_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBOX_PSN_STATUS(a,b) (a)
+#define arguments_BDK_IOBNX_NCBOX_PSN_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_ncbo_ctl
+ *
+ * IOBN NCBO control Registers
+ * This register set controls for NCBO processing.
+ */
+union bdk_iobnx_ncbo_ctl
+{
+ uint64_t u;
+ struct bdk_iobnx_ncbo_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t clken : 3; /**< [ 6: 4](R/W) Force the NCBO clock enable to be always on. For diagnostic use only.
+ Each bit is control for a given ncb bus, where bit 4 is for ncb bus 0 and bit 6 is for ncb bus 2. */
+ uint64_t reserved_3 : 1;
+ uint64_t bypass_ena : 3; /**< [ 2: 0](R/W) When set allows requests to bypass FIFO and go directly to output of NCBO bus, when FIFO is empty.
+ Each bit is control for a given ncb bus, where bit 0 is for ncb bus 0 and bit 2 is for ncb bus 2.
+
+ Internal:
+ FIXME rename bypass_dis and default to on (0x0), and then describe as diag only. */
+#else /* Word 0 - Little Endian */
+ uint64_t bypass_ena : 3; /**< [ 2: 0](R/W) When set allows requests to bypass FIFO and go directly to output of NCBO bus, when FIFO is empty.
+ Each bit is control for a given ncb bus, where bit 0 is for ncb bus 0 and bit 2 is for ncb bus 2.
+
+ Internal:
+ FIXME rename bypass_dis and default to on (0x0), and then describe as diag only. */
+ uint64_t reserved_3 : 1;
+ uint64_t clken : 3; /**< [ 6: 4](R/W) Force the NCBO clock enable to be always on. For diagnostic use only.
+ Each bit is control for a given ncb bus, where bit 4 is for ncb bus 0 and bit 6 is for ncb bus 2. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_ncbo_ctl_s cn; */
+};
+typedef union bdk_iobnx_ncbo_ctl bdk_iobnx_ncbo_ctl_t;
+
+static inline uint64_t BDK_IOBNX_NCBO_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBO_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0002200ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_NCBO_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBO_CTL(a) bdk_iobnx_ncbo_ctl_t
+#define bustype_BDK_IOBNX_NCBO_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBO_CTL(a) "IOBNX_NCBO_CTL"
+#define device_bar_BDK_IOBNX_NCBO_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBO_CTL(a) (a)
+#define arguments_BDK_IOBNX_NCBO_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_ncbo_to
+ *
+ * IOBN NCBO Timeout Counter Registers
+ * This register set the counter value for expected return data on NCBI.
+ */
+union bdk_iobnx_ncbo_to
+{
+ uint64_t u;
+ struct bdk_iobnx_ncbo_to_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t sub_time : 32; /**< [ 31: 0](R/W) Timeout value. When a load operation is sent on NCBO, the timeout counter is
+ started, and if return data on NCBI does not return within between three to four
+ times the value of [SUB_TIME] in coprocessor-clocks, a timeout occurs and
+ IOBN()_INT_SUM[NCBO_TO] is set. 0x0 disables. */
+#else /* Word 0 - Little Endian */
+ uint64_t sub_time : 32; /**< [ 31: 0](R/W) Timeout value. When a load operation is sent on NCBO, the timeout counter is
+ started, and if return data on NCBI does not return within between three to four
+ times the value of [SUB_TIME] in coprocessor-clocks, a timeout occurs and
+ IOBN()_INT_SUM[NCBO_TO] is set. 0x0 disables. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_ncbo_to_s cn; */
+};
+typedef union bdk_iobnx_ncbo_to bdk_iobnx_ncbo_to_t;
+
+static inline uint64_t BDK_IOBNX_NCBO_TO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBO_TO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0000008ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_NCBO_TO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBO_TO(a) bdk_iobnx_ncbo_to_t
+#define bustype_BDK_IOBNX_NCBO_TO(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBO_TO(a) "IOBNX_NCBO_TO"
+#define device_bar_BDK_IOBNX_NCBO_TO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBO_TO(a) (a)
+#define arguments_BDK_IOBNX_NCBO_TO(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_ncbo_to_err
+ *
+ * IOBN NCB Timeout Error Register
+ * This register captures error information for a non-posted request that times out on
+ * NCBO (when IOBN()_INT_SUM[NCBO_TO] is set).
+ */
+union bdk_iobnx_ncbo_to_err
+{
+ uint64_t u;
+ struct bdk_iobnx_ncbo_to_err_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_17_63 : 47;
+ uint64_t cpid : 9; /**< [ 16: 8](RO/H) CPID for NP request that timed out on NCBO. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t arbid : 5; /**< [ 4: 0](RO/H) Flat ARBID for NP request that timed out on NCBO. */
+#else /* Word 0 - Little Endian */
+ uint64_t arbid : 5; /**< [ 4: 0](RO/H) Flat ARBID for NP request that timed out on NCBO. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t cpid : 9; /**< [ 16: 8](RO/H) CPID for NP request that timed out on NCBO. */
+ uint64_t reserved_17_63 : 47;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_ncbo_to_err_s cn; */
+};
+typedef union bdk_iobnx_ncbo_to_err bdk_iobnx_ncbo_to_err_t;
+
+static inline uint64_t BDK_IOBNX_NCBO_TO_ERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_NCBO_TO_ERR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f00a0000ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_NCBO_TO_ERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_NCBO_TO_ERR(a) bdk_iobnx_ncbo_to_err_t
+#define bustype_BDK_IOBNX_NCBO_TO_ERR(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_NCBO_TO_ERR(a) "IOBNX_NCBO_TO_ERR"
+#define device_bar_BDK_IOBNX_NCBO_TO_ERR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_NCBO_TO_ERR(a) (a)
+#define arguments_BDK_IOBNX_NCBO_TO_ERR(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_psn_ctl
+ *
+ * Poison Control Register
+ */
+union bdk_iobnx_psn_ctl
+{
+ uint64_t u;
+ struct bdk_iobnx_psn_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dispsn : 1; /**< [ 0: 0](R/W) Disable poison code creation and detection in the mesh / NCB ECC
+ checkers/generators. */
+#else /* Word 0 - Little Endian */
+ uint64_t dispsn : 1; /**< [ 0: 0](R/W) Disable poison code creation and detection in the mesh / NCB ECC
+ checkers/generators. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_psn_ctl_s cn; */
+};
+typedef union bdk_iobnx_psn_ctl bdk_iobnx_psn_ctl_t;
+
+static inline uint64_t BDK_IOBNX_PSN_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_PSN_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0003050ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_PSN_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_PSN_CTL(a) bdk_iobnx_psn_ctl_t
+#define bustype_BDK_IOBNX_PSN_CTL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_PSN_CTL(a) "IOBNX_PSN_CTL"
+#define device_bar_BDK_IOBNX_PSN_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_PSN_CTL(a) (a)
+#define arguments_BDK_IOBNX_PSN_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_roc_dll
+ *
+ * INTERNAL: IOBN Global Core-Clock DLL Status Register
+ *
+ * Status of the ROC core-clock DLL.
+ */
+union bdk_iobnx_roc_dll
+{
+ uint64_t u;
+ struct bdk_iobnx_roc_dll_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t max_dll_setting : 12; /**< [ 59: 48](RO/H) Max reported DLL setting. */
+ uint64_t min_dll_setting : 12; /**< [ 47: 36](RO/H) Min reported DLL setting. */
+ uint64_t reserved_32_35 : 4;
+ uint64_t pdr_rclk_refclk : 1; /**< [ 31: 31](RO/H) Synchronized pdr_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 30: 30](RO/H) Synchronized pdl_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 29: 29](RO/H) Synchronized pd_pos_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t dll_lock : 1; /**< [ 28: 28](RO/H) The dll_lock signal from ROC core-clock DLL, from the positive edge of refclk. */
+ uint64_t dll_dly_elem_en : 16; /**< [ 27: 12](RO/H) The ROC core-clock delay element enable setting, from the negative edge of refclk. */
+ uint64_t dll_setting : 12; /**< [ 11: 0](RO/H) The ROC core-clock DLL setting, from the negative edge of refclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t dll_setting : 12; /**< [ 11: 0](RO/H) The ROC core-clock DLL setting, from the negative edge of refclk. */
+ uint64_t dll_dly_elem_en : 16; /**< [ 27: 12](RO/H) The ROC core-clock delay element enable setting, from the negative edge of refclk. */
+ uint64_t dll_lock : 1; /**< [ 28: 28](RO/H) The dll_lock signal from ROC core-clock DLL, from the positive edge of refclk. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 29: 29](RO/H) Synchronized pd_pos_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 30: 30](RO/H) Synchronized pdl_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t pdr_rclk_refclk : 1; /**< [ 31: 31](RO/H) Synchronized pdr_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t reserved_32_35 : 4;
+ uint64_t min_dll_setting : 12; /**< [ 47: 36](RO/H) Min reported DLL setting. */
+ uint64_t max_dll_setting : 12; /**< [ 59: 48](RO/H) Max reported DLL setting. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_iobnx_roc_dll_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t pdr_rclk_refclk : 1; /**< [ 31: 31](RO/H) Synchronized pdr_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 30: 30](RO/H) Synchronized pdl_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 29: 29](RO/H) Synchronized pd_pos_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t dll_lock : 1; /**< [ 28: 28](RO/H) The dll_lock signal from ROC core-clock DLL, from the positive edge of refclk. */
+ uint64_t dll_dly_elem_en : 16; /**< [ 27: 12](RO/H) The ROC core-clock delay element enable setting, from the negative edge of refclk. */
+ uint64_t dll_setting : 12; /**< [ 11: 0](RO/H) The ROC core-clock DLL setting, from the negative edge of refclk. */
+#else /* Word 0 - Little Endian */
+ uint64_t dll_setting : 12; /**< [ 11: 0](RO/H) The ROC core-clock DLL setting, from the negative edge of refclk. */
+ uint64_t dll_dly_elem_en : 16; /**< [ 27: 12](RO/H) The ROC core-clock delay element enable setting, from the negative edge of refclk. */
+ uint64_t dll_lock : 1; /**< [ 28: 28](RO/H) The dll_lock signal from ROC core-clock DLL, from the positive edge of refclk. */
+ uint64_t pd_pos_rclk_refclk : 1; /**< [ 29: 29](RO/H) Synchronized pd_pos_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t pdl_rclk_refclk : 1; /**< [ 30: 30](RO/H) Synchronized pdl_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t pdr_rclk_refclk : 1; /**< [ 31: 31](RO/H) Synchronized pdr_rclk_refclk from ROC core-clock DLL cmb0 phase detectors. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_iobnx_roc_dll_s cn81xx; */
+ /* struct bdk_iobnx_roc_dll_s cn83xx; */
+ /* struct bdk_iobnx_roc_dll_s cn88xxp2; */
+};
+typedef union bdk_iobnx_roc_dll bdk_iobnx_roc_dll_t;
+
+static inline uint64_t BDK_IOBNX_ROC_DLL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_ROC_DLL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f000a008ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f000a008ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f000a008ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_ROC_DLL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_ROC_DLL(a) bdk_iobnx_roc_dll_t
+#define bustype_BDK_IOBNX_ROC_DLL(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_ROC_DLL(a) "IOBNX_ROC_DLL"
+#define device_bar_BDK_IOBNX_ROC_DLL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_ROC_DLL(a) (a)
+#define arguments_BDK_IOBNX_ROC_DLL(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_rsl#_streams
+ *
+ * IOBN RSL Stream Permission Registers
+ * This register sets the permissions for a NCBI transaction (which are DMA
+ * transactions or MSI-X writes), for requests from a RSL device, i.e.
+ * those where:
+ *
+ * _ stream_id\<21:8\> = PCC_DEV_CON_E::MRML\<21:8\>
+ *
+ * Index {b} corresponds to the PCC function number for the RSL device
+ * (stream_id\<7:0\>).
+ *
+ * For each given index {b} (the RSL function number), each index {a} (the IOB number)
+ * must be programmed to the same value.
+ */
+union bdk_iobnx_rslx_streams
+{
+ uint64_t u;
+ struct bdk_iobnx_rslx_streams_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t strm_nsec : 1; /**< [ 1: 1](SR/W) Stream nonsecure.
+
+ 0 = The device's stream ID is marked secure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use secure world
+ lookup. The SMMU may, if properly configured, generate an outgoing physical
+ address that is secure.
+
+ 1 = The device's stream ID is marked nonsecure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use nonsecure world
+ lookup. The SMMU outgoing physical address will be nonsecure.
+
+ [STRM_NSEC] is ignored if the device is making a physical request (as these
+ transactions bypass the SMMU translation process).
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t phys_nsec : 1; /**< [ 0: 0](SR/W) Physical nonsecure.
+ 0 = When the device makes a physical request, IOB will use the device's
+ requested secure bit to determine if the request to DRAM/LLC is secure or not.
+ 1 = When the device makes a physical request, IOB will squash the
+ device's secure request and issue the request to DRAM/LLC as nonsecure.
+
+ Ignored if a device makes a non-physical request. (As non-physical requests
+ cause the SMMU to generate the SMMU-outgoing secure bit based on the SMMU
+ translation process, including [STRM_NSEC].)
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+#else /* Word 0 - Little Endian */
+ uint64_t phys_nsec : 1; /**< [ 0: 0](SR/W) Physical nonsecure.
+ 0 = When the device makes a physical request, IOB will use the device's
+ requested secure bit to determine if the request to DRAM/LLC is secure or not.
+ 1 = When the device makes a physical request, IOB will squash the
+ device's secure request and issue the request to DRAM/LLC as nonsecure.
+
+ Ignored if a device makes a non-physical request. (As non-physical requests
+ cause the SMMU to generate the SMMU-outgoing secure bit based on the SMMU
+ translation process, including [STRM_NSEC].)
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t strm_nsec : 1; /**< [ 1: 1](SR/W) Stream nonsecure.
+
+ 0 = The device's stream ID is marked secure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use secure world
+ lookup. The SMMU may, if properly configured, generate an outgoing physical
+ address that is secure.
+
+ 1 = The device's stream ID is marked nonsecure headed into the SMMU. If the
+ device is making a non-physical request, the SMMU will use nonsecure world
+ lookup. The SMMU outgoing physical address will be nonsecure.
+
+ [STRM_NSEC] is ignored if the device is making a physical request (as these
+ transactions bypass the SMMU translation process).
+
+ Typically firmware sets [PHYS_NSEC] and [STRM_NSEC] the same. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_rslx_streams_s cn; */
+};
+typedef union bdk_iobnx_rslx_streams bdk_iobnx_rslx_streams_t;
+
+static inline uint64_t BDK_IOBNX_RSLX_STREAMS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_RSLX_STREAMS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=255)))
+ return 0x87e0f0002800ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0xff);
+ __bdk_csr_fatal("IOBNX_RSLX_STREAMS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_RSLX_STREAMS(a,b) bdk_iobnx_rslx_streams_t
+#define bustype_BDK_IOBNX_RSLX_STREAMS(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_RSLX_STREAMS(a,b) "IOBNX_RSLX_STREAMS"
+#define device_bar_BDK_IOBNX_RSLX_STREAMS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_RSLX_STREAMS(a,b) (a)
+#define arguments_BDK_IOBNX_RSLX_STREAMS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_rvu_block#_const
+ *
+ * IOBN RVU BLOCK Constant Registers
+ * This register returns discovery information for each RVU BLOCK, where RVU BLOCK is
+ * enumerated by RVU_BLOCK_ADDR_E, in rvu.csr
+ * Each IOB returns identical information.
+ */
+union bdk_iobnx_rvu_blockx_const
+{
+ uint64_t u;
+ struct bdk_iobnx_rvu_blockx_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_24_63 : 40;
+ uint64_t farbid : 8; /**< [ 23: 16](RO) Flat ARBID for the given NCB ID. */
+ uint64_t arbid : 8; /**< [ 15: 8](RO) ARBID for the given RVU BLOCK. */
+ uint64_t ncb : 4; /**< [ 7: 4](RO) Physical bus number for the given RVU BLOCK. */
+ uint64_t iob : 3; /**< [ 3: 1](RO) IOB number for the given RVU BLOCK. */
+ uint64_t valid : 1; /**< [ 0: 0](RO) Set if this RVU BLOCK is a valid ID. */
+#else /* Word 0 - Little Endian */
+ uint64_t valid : 1; /**< [ 0: 0](RO) Set if this RVU BLOCK is a valid ID. */
+ uint64_t iob : 3; /**< [ 3: 1](RO) IOB number for the given RVU BLOCK. */
+ uint64_t ncb : 4; /**< [ 7: 4](RO) Physical bus number for the given RVU BLOCK. */
+ uint64_t arbid : 8; /**< [ 15: 8](RO) ARBID for the given RVU BLOCK. */
+ uint64_t farbid : 8; /**< [ 23: 16](RO) Flat ARBID for the given NCB ID. */
+ uint64_t reserved_24_63 : 40;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_rvu_blockx_const_s cn; */
+};
+typedef union bdk_iobnx_rvu_blockx_const bdk_iobnx_rvu_blockx_const_t;
+
+static inline uint64_t BDK_IOBNX_RVU_BLOCKX_CONST(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_RVU_BLOCKX_CONST(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=31)))
+ return 0x87e0f0001800ll + 0x1000000ll * ((a) & 0x1) + 8ll * ((b) & 0x1f);
+ __bdk_csr_fatal("IOBNX_RVU_BLOCKX_CONST", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_RVU_BLOCKX_CONST(a,b) bdk_iobnx_rvu_blockx_const_t
+#define bustype_BDK_IOBNX_RVU_BLOCKX_CONST(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_RVU_BLOCKX_CONST(a,b) "IOBNX_RVU_BLOCKX_CONST"
+#define device_bar_BDK_IOBNX_RVU_BLOCKX_CONST(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_RVU_BLOCKX_CONST(a,b) (a)
+#define arguments_BDK_IOBNX_RVU_BLOCKX_CONST(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_scratch
+ *
+ * INTERNAL: IOBN Scratch Register
+ */
+union bdk_iobnx_scratch
+{
+ uint64_t u;
+ struct bdk_iobnx_scratch_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Test register for CSR access. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) Test register for CSR access. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_scratch_s cn; */
+};
+typedef union bdk_iobnx_scratch bdk_iobnx_scratch_t;
+
+static inline uint64_t BDK_IOBNX_SCRATCH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_SCRATCH(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f0003020ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f0003020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f0003020ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e0f0003020ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_SCRATCH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_SCRATCH(a) bdk_iobnx_scratch_t
+#define bustype_BDK_IOBNX_SCRATCH(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_SCRATCH(a) "IOBNX_SCRATCH"
+#define device_bar_BDK_IOBNX_SCRATCH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_SCRATCH(a) (a)
+#define arguments_BDK_IOBNX_SCRATCH(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) iobn#_slitag#_control
+ *
+ * IOBN Control Register
+ * This register contains various control bits for IOBN functionality.
+ */
+union bdk_iobnx_slitagx_control
+{
+ uint64_t u;
+ struct bdk_iobnx_slitagx_control_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t bits_dis : 1; /**< [ 8: 8](RAZ) Reserved. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ld_ld_ord : 1; /**< [ 3: 3](R/W) Enforce load-following-load ordering for SLI operations. A load operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_st_ord : 1; /**< [ 2: 2](R/W) Enforce load-following-store ordering for SLI operations. A load operation must
+ wait for all previous store operations' STDNs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t st_ld_ord : 1; /**< [ 1: 1](R/W) Enforce store-following-load ordering for SLI operations. A store operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t st_st_ord : 1; /**< [ 0: 0](R/W) Enforce store-following-store ordering for SLI operations. A store operation must
+ wait for all previous store operations' STDNs before issuing. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_st_ord : 1; /**< [ 0: 0](R/W) Enforce store-following-store ordering for SLI operations. A store operation must
+ wait for all previous store operations' STDNs before issuing. */
+ uint64_t st_ld_ord : 1; /**< [ 1: 1](R/W) Enforce store-following-load ordering for SLI operations. A store operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_st_ord : 1; /**< [ 2: 2](R/W) Enforce load-following-store ordering for SLI operations. A load operation must
+ wait for all previous store operations' STDNs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_ld_ord : 1; /**< [ 3: 3](R/W) Enforce load-following-load ordering for SLI operations. A load operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t bits_dis : 1; /**< [ 8: 8](RAZ) Reserved. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_iobnx_slitagx_control_s cn88xxp1; */
+ struct bdk_iobnx_slitagx_control_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t bits_dis : 1; /**< [ 8: 8](R/W) When set, disables stream validity checking. For diagnostic use only. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ld_ld_ord : 1; /**< [ 3: 3](R/W) Enforce load-following-load ordering for SLI operations. A load operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_st_ord : 1; /**< [ 2: 2](R/W) Enforce load-following-store ordering for SLI operations. A load operation must
+ wait for all previous store operations' STDNs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t st_ld_ord : 1; /**< [ 1: 1](R/W) Enforce store-following-load ordering for SLI operations. A store operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t st_st_ord : 1; /**< [ 0: 0](R/W) Enforce store-following-store ordering for SLI operations. A store operation must
+ wait for all previous store operations' STDNs before issuing. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_st_ord : 1; /**< [ 0: 0](R/W) Enforce store-following-store ordering for SLI operations. A store operation must
+ wait for all previous store operations' STDNs before issuing. */
+ uint64_t st_ld_ord : 1; /**< [ 1: 1](R/W) Enforce store-following-load ordering for SLI operations. A store operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_st_ord : 1; /**< [ 2: 2](R/W) Enforce load-following-store ordering for SLI operations. A load operation must
+ wait for all previous store operations' STDNs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_ld_ord : 1; /**< [ 3: 3](R/W) Enforce load-following-load ordering for SLI operations. A load operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t bits_dis : 1; /**< [ 8: 8](R/W) When set, disables stream validity checking. For diagnostic use only. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_iobnx_slitagx_control_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t bits_dis : 1; /**< [ 8: 8](SR/W) Bitstream disable.
+ 0 = Check inbound stream IDs from a PEM are between the secondary and
+ subordinate bus numbers corresponding to that PEM (used when PEM is in host
+ mode), or from the stream ID PCC_DEV_CON_E::PCIERC() (used when PEM is in
+ endpoint mode). This prevents SR-IOV security issues.
+ 1 = Do not check inbound stream IDs. See PEM()_CTL_STREAM[EPSBBASE]. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t ld_ld_ord : 1; /**< [ 3: 3](R/W) Enforce load-following-load ordering for SLI operations. A load operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_st_ord : 1; /**< [ 2: 2](R/W) Enforce load-following-store ordering for SLI operations. A load operation must
+ wait for all previous store operations' STDNs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t st_ld_ord : 1; /**< [ 1: 1](R/W) Enforce store-following-load ordering for SLI operations. A store operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t st_st_ord : 1; /**< [ 0: 0](R/W) Enforce store-following-store ordering for SLI operations. A store operation must
+ wait for all previous store operations' STDNs before issuing. */
+#else /* Word 0 - Little Endian */
+ uint64_t st_st_ord : 1; /**< [ 0: 0](R/W) Enforce store-following-store ordering for SLI operations. A store operation must
+ wait for all previous store operations' STDNs before issuing. */
+ uint64_t st_ld_ord : 1; /**< [ 1: 1](R/W) Enforce store-following-load ordering for SLI operations. A store operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_st_ord : 1; /**< [ 2: 2](R/W) Enforce load-following-store ordering for SLI operations. A load operation must
+ wait for all previous store operations' STDNs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t ld_ld_ord : 1; /**< [ 3: 3](R/W) Enforce load-following-load ordering for SLI operations. A load operation must
+ wait for all previous load operations' FILLs before issuing.
+
+ Atomic transactions (which for PCI are non-posted so not part of normal store
+ ordering) are also considered loads for the purpose of this bit. */
+ uint64_t reserved_4_7 : 4;
+ uint64_t bits_dis : 1; /**< [ 8: 8](SR/W) Bitstream disable.
+ 0 = Check inbound stream IDs from a PEM are between the secondary and
+ subordinate bus numbers corresponding to that PEM (used when PEM is in host
+ mode), or from the stream ID PCC_DEV_CON_E::PCIERC() (used when PEM is in
+ endpoint mode). This prevents SR-IOV security issues.
+ 1 = Do not check inbound stream IDs. See PEM()_CTL_STREAM[EPSBBASE]. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } cn83xx;
+ /* struct bdk_iobnx_slitagx_control_cn81xx cn88xxp2; */
+};
+typedef union bdk_iobnx_slitagx_control bdk_iobnx_slitagx_control_t;
+
+static inline uint64_t BDK_IOBNX_SLITAGX_CONTROL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_SLITAGX_CONTROL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=2)))
+ return 0x87e0f0000000ll + 0x1000000ll * ((a) & 0x0) + 0x100ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
+ return 0x87e0f0000000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=2)))
+ return 0x87e0f0000000ll + 0x1000000ll * ((a) & 0x1) + 0x100ll * ((b) & 0x3);
+ __bdk_csr_fatal("IOBNX_SLITAGX_CONTROL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_SLITAGX_CONTROL(a,b) bdk_iobnx_slitagx_control_t
+#define bustype_BDK_IOBNX_SLITAGX_CONTROL(a,b) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_SLITAGX_CONTROL(a,b) "IOBNX_SLITAGX_CONTROL"
+#define device_bar_BDK_IOBNX_SLITAGX_CONTROL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_SLITAGX_CONTROL(a,b) (a)
+#define arguments_BDK_IOBNX_SLITAGX_CONTROL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RSL) iobn#_test
+ *
+ * INTERNAL: IOBN Test Register
+ */
+union bdk_iobnx_test
+{
+ uint64_t u;
+ struct bdk_iobnx_test_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t gibarb_testmode : 1; /**< [ 0: 0](R/W) When set, the IOBN GIB arbiters will only grant one requestor at a time. */
+#else /* Word 0 - Little Endian */
+ uint64_t gibarb_testmode : 1; /**< [ 0: 0](R/W) When set, the IOBN GIB arbiters will only grant one requestor at a time. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_iobnx_test_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t gibarb_testmode : 1; /**< [ 0: 0](RO) When set, the IOBN GIB arbiters will only grant one requestor at a time. */
+#else /* Word 0 - Little Endian */
+ uint64_t gibarb_testmode : 1; /**< [ 0: 0](RO) When set, the IOBN GIB arbiters will only grant one requestor at a time. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_iobnx_test_s cn88xx; */
+ /* struct bdk_iobnx_test_cn81xx cn83xx; */
+};
+typedef union bdk_iobnx_test bdk_iobnx_test_t;
+
+static inline uint64_t BDK_IOBNX_TEST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_IOBNX_TEST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x87e0f0003010ll + 0x1000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x87e0f0003010ll + 0x1000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x87e0f0003010ll + 0x1000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("IOBNX_TEST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_IOBNX_TEST(a) bdk_iobnx_test_t
+#define bustype_BDK_IOBNX_TEST(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_IOBNX_TEST(a) "IOBNX_TEST"
+#define device_bar_BDK_IOBNX_TEST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_IOBNX_TEST(a) (a)
+#define arguments_BDK_IOBNX_TEST(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_IOBN_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h
index b7e01a32db..2660d2255c 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-lmc.h
@@ -41,6 +41,12 @@
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+/* FIXME(dhendrix) added to make compiler happy. However this introduces a
+ * circular dependency and the typdef'd bdk_lmcx_modereg_params2_t makes
+ * forward declaration impossible. */
+//#include <libdram/libdram-config.h>
+#include <bdk-minimal.h>
+#include <libbdk-arch/bdk-model.h>
/**
* @file
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h
index 387a3937dd..22b31a0c18 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-mio_fus.h
@@ -41,6 +41,7 @@
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <bdk-minimal.h> /* FIXME: added by dhendrix */
/**
* @file
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccbr.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccbr.h
new file mode 100644
index 0000000000..98a2cfd8fd
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccbr.h
@@ -0,0 +1,1000 @@
+#ifndef __BDK_CSRS_PCCBR_H__
+#define __BDK_CSRS_PCCBR_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium PCCBR.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Register (PCCBR) pccbr_xxx_acs_cap_ctl
+ *
+ * PCC PF ACS Capability and Control Register
+ * This register is the header of the eight-byte PCI access control services
+ * capability structure.
+ *
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccbr_xxx_acs_cap_ctl
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_acs_cap_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t dte : 1; /**< [ 22: 22](R/W) ACS direct translated P2P enable. Value ignored by hardware. */
+ uint32_t ece : 1; /**< [ 21: 21](RO) ACS P2P egress control enable. Always clear. */
+ uint32_t ufe : 1; /**< [ 20: 20](R/W) ACS upstream forwarding enable. Value ignored by hardware. */
+ uint32_t cre : 1; /**< [ 19: 19](R/W) ACS completion redirect enable. Value ignored by hardware. */
+ uint32_t rre : 1; /**< [ 18: 18](R/W) ACS P2P request redirect enable. Value ignored by hardware. */
+ uint32_t tbe : 1; /**< [ 17: 17](R/W) ACS transaction blocking enable. Value ignored by hardware. */
+ uint32_t sve : 1; /**< [ 16: 16](R/W) ACS source validation enable. Value ignored by hardware. */
+ uint32_t ecvs : 8; /**< [ 15: 8](RO) Egress control vector size. Always zero. */
+ uint32_t reserved_7 : 1;
+ uint32_t dt : 1; /**< [ 6: 6](RO) ACS direct translated P2P. Always set. */
+ uint32_t ec : 1; /**< [ 5: 5](RO) ACS P2P egress control. Always clear. */
+ uint32_t uf : 1; /**< [ 4: 4](RO) ACS upstream forwarding. Always set. */
+ uint32_t cr : 1; /**< [ 3: 3](RO) ACS completion redirect. Always set. */
+ uint32_t rr : 1; /**< [ 2: 2](RO) ACS P2P request redirect. Always set. */
+ uint32_t tb : 1; /**< [ 1: 1](RO) ACS transaction blocking. Always set. */
+ uint32_t sv : 1; /**< [ 0: 0](RO) ACS source validation. Always set. */
+#else /* Word 0 - Little Endian */
+ uint32_t sv : 1; /**< [ 0: 0](RO) ACS source validation. Always set. */
+ uint32_t tb : 1; /**< [ 1: 1](RO) ACS transaction blocking. Always set. */
+ uint32_t rr : 1; /**< [ 2: 2](RO) ACS P2P request redirect. Always set. */
+ uint32_t cr : 1; /**< [ 3: 3](RO) ACS completion redirect. Always set. */
+ uint32_t uf : 1; /**< [ 4: 4](RO) ACS upstream forwarding. Always set. */
+ uint32_t ec : 1; /**< [ 5: 5](RO) ACS P2P egress control. Always clear. */
+ uint32_t dt : 1; /**< [ 6: 6](RO) ACS direct translated P2P. Always set. */
+ uint32_t reserved_7 : 1;
+ uint32_t ecvs : 8; /**< [ 15: 8](RO) Egress control vector size. Always zero. */
+ uint32_t sve : 1; /**< [ 16: 16](R/W) ACS source validation enable. Value ignored by hardware. */
+ uint32_t tbe : 1; /**< [ 17: 17](R/W) ACS transaction blocking enable. Value ignored by hardware. */
+ uint32_t rre : 1; /**< [ 18: 18](R/W) ACS P2P request redirect enable. Value ignored by hardware. */
+ uint32_t cre : 1; /**< [ 19: 19](R/W) ACS completion redirect enable. Value ignored by hardware. */
+ uint32_t ufe : 1; /**< [ 20: 20](R/W) ACS upstream forwarding enable. Value ignored by hardware. */
+ uint32_t ece : 1; /**< [ 21: 21](RO) ACS P2P egress control enable. Always clear. */
+ uint32_t dte : 1; /**< [ 22: 22](R/W) ACS direct translated P2P enable. Value ignored by hardware. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_acs_cap_ctl_s cn; */
+};
+typedef union bdk_pccbr_xxx_acs_cap_ctl bdk_pccbr_xxx_acs_cap_ctl_t;
+
+#define BDK_PCCBR_XXX_ACS_CAP_CTL BDK_PCCBR_XXX_ACS_CAP_CTL_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_ACS_CAP_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_ACS_CAP_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x144;
+ __bdk_csr_fatal("PCCBR_XXX_ACS_CAP_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_ACS_CAP_CTL bdk_pccbr_xxx_acs_cap_ctl_t
+#define bustype_BDK_PCCBR_XXX_ACS_CAP_CTL BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_ACS_CAP_CTL "PCCBR_XXX_ACS_CAP_CTL"
+#define busnum_BDK_PCCBR_XXX_ACS_CAP_CTL 0
+#define arguments_BDK_PCCBR_XXX_ACS_CAP_CTL -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_acs_cap_hdr
+ *
+ * PCC PF ACS Capability Header Register
+ * This register is the header of the eight-byte PCI ACS capability structure.
+ */
+union bdk_pccbr_xxx_acs_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_acs_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. None. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t acsid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates ACS capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t acsid : 16; /**< [ 15: 0](RO) PCIE extended capability. Indicates ACS capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. None. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_acs_cap_hdr_s cn; */
+};
+typedef union bdk_pccbr_xxx_acs_cap_hdr bdk_pccbr_xxx_acs_cap_hdr_t;
+
+#define BDK_PCCBR_XXX_ACS_CAP_HDR BDK_PCCBR_XXX_ACS_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_ACS_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_ACS_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x140;
+ __bdk_csr_fatal("PCCBR_XXX_ACS_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_ACS_CAP_HDR bdk_pccbr_xxx_acs_cap_hdr_t
+#define bustype_BDK_PCCBR_XXX_ACS_CAP_HDR BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_ACS_CAP_HDR "PCCBR_XXX_ACS_CAP_HDR"
+#define busnum_BDK_PCCBR_XXX_ACS_CAP_HDR 0
+#define arguments_BDK_PCCBR_XXX_ACS_CAP_HDR -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_ari_cap_hdr
+ *
+ * PCC Bridge ARI Capability Header Register
+ * This register is the header of the 8-byte PCI ARI capability structure.
+ */
+union bdk_pccbr_xxx_ari_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_ari_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. Points to PCCBR_XXX_VSEC_CAP_HDR. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t ariid : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t ariid : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. Points to PCCBR_XXX_VSEC_CAP_HDR. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_ari_cap_hdr_s cn; */
+};
+typedef union bdk_pccbr_xxx_ari_cap_hdr bdk_pccbr_xxx_ari_cap_hdr_t;
+
+#define BDK_PCCBR_XXX_ARI_CAP_HDR BDK_PCCBR_XXX_ARI_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_ARI_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_ARI_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x100;
+ __bdk_csr_fatal("PCCBR_XXX_ARI_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_ARI_CAP_HDR bdk_pccbr_xxx_ari_cap_hdr_t
+#define bustype_BDK_PCCBR_XXX_ARI_CAP_HDR BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_ARI_CAP_HDR "PCCBR_XXX_ARI_CAP_HDR"
+#define busnum_BDK_PCCBR_XXX_ARI_CAP_HDR 0
+#define arguments_BDK_PCCBR_XXX_ARI_CAP_HDR -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_bus
+ *
+ * PCC Bridge Bus Register
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccbr_xxx_bus
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_bus_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t slt : 8; /**< [ 31: 24](RO) Secondary latency timer. Not applicable to PCI Express, hardwired to 0x0. */
+ uint32_t subbnum : 8; /**< [ 23: 16](R/W) Subordinate bus number. Resets to PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM].
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ If PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM] != 0, this field is read-write only for software;
+ hardware has a fixed topology below this bridge and will always act as if this field is
+ programmed to the value in PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM].
+
+ If PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM] = 0, which is true only for PCCBR_PCIEP, this field
+ operates as specified by PCIe to direct which configuration transactions are presented to
+ downstream busses. */
+ uint32_t sbnum : 8; /**< [ 15: 8](R/W) Secondary bus number. Resets to PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM].
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ If PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM] != 0, this field is read-write only for software;
+ hardware has a fixed topology below this bridge and will always act as if this field is
+ programmed to the value in PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM].
+
+ If PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM] = 0, which is true only for PCCBR_PCIEP, this field
+ operates as specified by PCIe to direct which configuration transactions are presented to
+ downstream busses. */
+ uint32_t pbnum : 8; /**< [ 7: 0](R/W) Primary bus number.
+ This field is read-write only for software;
+ hardware has a fixed topology where all PCCBR's are always off primary bus number
+ zero, and does not use this register for configuration decoding. */
+#else /* Word 0 - Little Endian */
+ uint32_t pbnum : 8; /**< [ 7: 0](R/W) Primary bus number.
+ This field is read-write only for software;
+ hardware has a fixed topology where all PCCBR's are always off primary bus number
+ zero, and does not use this register for configuration decoding. */
+ uint32_t sbnum : 8; /**< [ 15: 8](R/W) Secondary bus number. Resets to PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM].
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ If PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM] != 0, this field is read-write only for software;
+ hardware has a fixed topology below this bridge and will always act as if this field is
+ programmed to the value in PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM].
+
+ If PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM] = 0, which is true only for PCCBR_PCIEP, this field
+ operates as specified by PCIe to direct which configuration transactions are presented to
+ downstream busses. */
+ uint32_t subbnum : 8; /**< [ 23: 16](R/W) Subordinate bus number. Resets to PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM].
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ If PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM] != 0, this field is read-write only for software;
+ hardware has a fixed topology below this bridge and will always act as if this field is
+ programmed to the value in PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM].
+
+ If PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM] = 0, which is true only for PCCBR_PCIEP, this field
+ operates as specified by PCIe to direct which configuration transactions are presented to
+ downstream busses. */
+ uint32_t slt : 8; /**< [ 31: 24](RO) Secondary latency timer. Not applicable to PCI Express, hardwired to 0x0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_bus_s cn; */
+};
+typedef union bdk_pccbr_xxx_bus bdk_pccbr_xxx_bus_t;
+
+#define BDK_PCCBR_XXX_BUS BDK_PCCBR_XXX_BUS_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_BUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_BUS_FUNC(void)
+{
+ return 0x18;
+}
+
+#define typedef_BDK_PCCBR_XXX_BUS bdk_pccbr_xxx_bus_t
+#define bustype_BDK_PCCBR_XXX_BUS BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_BUS "PCCBR_XXX_BUS"
+#define busnum_BDK_PCCBR_XXX_BUS 0
+#define arguments_BDK_PCCBR_XXX_BUS -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_cap_ptr
+ *
+ * PCC Bridge Capability Pointer Register
+ */
+union bdk_pccbr_xxx_cap_ptr
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_cap_ptr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cp : 8; /**< [ 7: 0](RO) First capability pointer. Points to PCCBR_XXX_E_CAP_HDR. */
+#else /* Word 0 - Little Endian */
+ uint32_t cp : 8; /**< [ 7: 0](RO) First capability pointer. Points to PCCBR_XXX_E_CAP_HDR. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_cap_ptr_s cn; */
+};
+typedef union bdk_pccbr_xxx_cap_ptr bdk_pccbr_xxx_cap_ptr_t;
+
+#define BDK_PCCBR_XXX_CAP_PTR BDK_PCCBR_XXX_CAP_PTR_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_CAP_PTR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_CAP_PTR_FUNC(void)
+{
+ return 0x34;
+}
+
+#define typedef_BDK_PCCBR_XXX_CAP_PTR bdk_pccbr_xxx_cap_ptr_t
+#define bustype_BDK_PCCBR_XXX_CAP_PTR BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_CAP_PTR "PCCBR_XXX_CAP_PTR"
+#define busnum_BDK_PCCBR_XXX_CAP_PTR 0
+#define arguments_BDK_PCCBR_XXX_CAP_PTR -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_clsize
+ *
+ * PCC Bridge Cache Line Size Register
+ */
+union bdk_pccbr_xxx_clsize
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_clsize_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bist : 8; /**< [ 31: 24](RO) BIST. */
+ uint32_t mfd : 1; /**< [ 23: 23](RO) Multi function device. */
+ uint32_t chf : 7; /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1 for type 1, bridge. */
+ uint32_t lt : 8; /**< [ 15: 8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+ uint32_t cls : 8; /**< [ 7: 0](RO) Cache line size. Not implemented. */
+#else /* Word 0 - Little Endian */
+ uint32_t cls : 8; /**< [ 7: 0](RO) Cache line size. Not implemented. */
+ uint32_t lt : 8; /**< [ 15: 8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+ uint32_t chf : 7; /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1 for type 1, bridge. */
+ uint32_t mfd : 1; /**< [ 23: 23](RO) Multi function device. */
+ uint32_t bist : 8; /**< [ 31: 24](RO) BIST. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_clsize_s cn; */
+};
+typedef union bdk_pccbr_xxx_clsize bdk_pccbr_xxx_clsize_t;
+
+#define BDK_PCCBR_XXX_CLSIZE BDK_PCCBR_XXX_CLSIZE_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_CLSIZE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_CLSIZE_FUNC(void)
+{
+ return 0xc;
+}
+
+#define typedef_BDK_PCCBR_XXX_CLSIZE bdk_pccbr_xxx_clsize_t
+#define bustype_BDK_PCCBR_XXX_CLSIZE BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_CLSIZE "PCCBR_XXX_CLSIZE"
+#define busnum_BDK_PCCBR_XXX_CLSIZE 0
+#define arguments_BDK_PCCBR_XXX_CLSIZE -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_cmd
+ *
+ * PCC Bridge Command/Status Register
+ */
+union bdk_pccbr_xxx_cmd
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_cmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_21_31 : 11;
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. */
+ uint32_t reserved_3_19 : 17;
+ uint32_t me : 1; /**< [ 2: 2](RO) Master enable.
+ Internal:
+ For simplicity always one; we do not disable NCB transactions. */
+ uint32_t msae : 1; /**< [ 1: 1](RO) Memory space access enable.
+ Internal:
+ NCB/RSL always decoded; have hardcoded BARs. */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t msae : 1; /**< [ 1: 1](RO) Memory space access enable.
+ Internal:
+ NCB/RSL always decoded; have hardcoded BARs. */
+ uint32_t me : 1; /**< [ 2: 2](RO) Master enable.
+ Internal:
+ For simplicity always one; we do not disable NCB transactions. */
+ uint32_t reserved_3_19 : 17;
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. */
+ uint32_t reserved_21_31 : 11;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_cmd_s cn; */
+};
+typedef union bdk_pccbr_xxx_cmd bdk_pccbr_xxx_cmd_t;
+
+#define BDK_PCCBR_XXX_CMD BDK_PCCBR_XXX_CMD_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_CMD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_CMD_FUNC(void)
+{
+ return 4;
+}
+
+#define typedef_BDK_PCCBR_XXX_CMD bdk_pccbr_xxx_cmd_t
+#define bustype_BDK_PCCBR_XXX_CMD BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_CMD "PCCBR_XXX_CMD"
+#define busnum_BDK_PCCBR_XXX_CMD 0
+#define arguments_BDK_PCCBR_XXX_CMD -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_e_cap2
+ *
+ * PCC Bridge PCI Express Capabilities 2 Register
+ */
+union bdk_pccbr_xxx_e_cap2
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_e_cap2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_7_31 : 25;
+ uint32_t atomfwd : 1; /**< [ 6: 6](RO) Atomic operation forwarding. The bridge does forwarding. */
+ uint32_t arifwd : 1; /**< [ 5: 5](RO) ARI forwarding. The bridge does forwarding. */
+ uint32_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_4 : 5;
+ uint32_t arifwd : 1; /**< [ 5: 5](RO) ARI forwarding. The bridge does forwarding. */
+ uint32_t atomfwd : 1; /**< [ 6: 6](RO) Atomic operation forwarding. The bridge does forwarding. */
+ uint32_t reserved_7_31 : 25;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_e_cap2_s cn9; */
+ /* struct bdk_pccbr_xxx_e_cap2_s cn81xx; */
+ struct bdk_pccbr_xxx_e_cap2_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_6_31 : 26;
+ uint32_t arifwd : 1; /**< [ 5: 5](RO) ARI forwarding. The bridge does forwarding. */
+ uint32_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_4 : 5;
+ uint32_t arifwd : 1; /**< [ 5: 5](RO) ARI forwarding. The bridge does forwarding. */
+ uint32_t reserved_6_31 : 26;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_pccbr_xxx_e_cap2_s cn83xx; */
+};
+typedef union bdk_pccbr_xxx_e_cap2 bdk_pccbr_xxx_e_cap2_t;
+
+#define BDK_PCCBR_XXX_E_CAP2 BDK_PCCBR_XXX_E_CAP2_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_E_CAP2_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_E_CAP2_FUNC(void)
+{
+ return 0x94;
+}
+
+#define typedef_BDK_PCCBR_XXX_E_CAP2 bdk_pccbr_xxx_e_cap2_t
+#define bustype_BDK_PCCBR_XXX_E_CAP2 BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_E_CAP2 "PCCBR_XXX_E_CAP2"
+#define busnum_BDK_PCCBR_XXX_E_CAP2 0
+#define arguments_BDK_PCCBR_XXX_E_CAP2 -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_e_cap_hdr
+ *
+ * PCC Bridge PCI Express Capabilities Register
+ * This register is the header of the 64-byte PCIe capability header.
+ */
+union bdk_pccbr_xxx_e_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_e_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t porttype : 4; /**< [ 23: 20](RO) Indicates a root port of a PCIe root complex. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. No additional PCI capabilities. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. No additional PCI capabilities. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t porttype : 4; /**< [ 23: 20](RO) Indicates a root port of a PCIe root complex. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_e_cap_hdr_s cn88xxp1; */
+ struct bdk_pccbr_xxx_e_cap_hdr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t porttype : 4; /**< [ 23: 20](RO) Indicates a root port of a PCIe root complex. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. Points to PCCBR_XXX_EA_CAP_HDR. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCIe capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. Points to PCCBR_XXX_EA_CAP_HDR. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCIe capability version. */
+ uint32_t porttype : 4; /**< [ 23: 20](RO) Indicates a root port of a PCIe root complex. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn9;
+ /* struct bdk_pccbr_xxx_e_cap_hdr_cn9 cn81xx; */
+ /* struct bdk_pccbr_xxx_e_cap_hdr_cn9 cn83xx; */
+ /* struct bdk_pccbr_xxx_e_cap_hdr_cn9 cn88xxp2; */
+};
+typedef union bdk_pccbr_xxx_e_cap_hdr bdk_pccbr_xxx_e_cap_hdr_t;
+
+#define BDK_PCCBR_XXX_E_CAP_HDR BDK_PCCBR_XXX_E_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_E_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_E_CAP_HDR_FUNC(void)
+{
+ return 0x70;
+}
+
+#define typedef_BDK_PCCBR_XXX_E_CAP_HDR bdk_pccbr_xxx_e_cap_hdr_t
+#define bustype_BDK_PCCBR_XXX_E_CAP_HDR BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_E_CAP_HDR "PCCBR_XXX_E_CAP_HDR"
+#define busnum_BDK_PCCBR_XXX_E_CAP_HDR 0
+#define arguments_BDK_PCCBR_XXX_E_CAP_HDR -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_e_dev_cap
+ *
+ * PCC Bridge PCI Express Device Capabilities Register
+ */
+union bdk_pccbr_xxx_e_dev_cap
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_e_dev_cap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t rber : 1; /**< [ 15: 15](RO) Role-based error reporting. Required to be set by PCIe 3.1. */
+ uint32_t reserved_0_14 : 15;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_14 : 15;
+ uint32_t rber : 1; /**< [ 15: 15](RO) Role-based error reporting. Required to be set by PCIe 3.1. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_e_dev_cap_s cn; */
+};
+typedef union bdk_pccbr_xxx_e_dev_cap bdk_pccbr_xxx_e_dev_cap_t;
+
+#define BDK_PCCBR_XXX_E_DEV_CAP BDK_PCCBR_XXX_E_DEV_CAP_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_E_DEV_CAP_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_E_DEV_CAP_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x74;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x74;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x74;
+ __bdk_csr_fatal("PCCBR_XXX_E_DEV_CAP", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_E_DEV_CAP bdk_pccbr_xxx_e_dev_cap_t
+#define bustype_BDK_PCCBR_XXX_E_DEV_CAP BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_E_DEV_CAP "PCCBR_XXX_E_DEV_CAP"
+#define busnum_BDK_PCCBR_XXX_E_DEV_CAP 0
+#define arguments_BDK_PCCBR_XXX_E_DEV_CAP -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_ea_br
+ *
+ * PCC Bridge PCI Enhanced Allocation Bridge Register
+ */
+union bdk_pccbr_xxx_ea_br
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_ea_br_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t fixed_subbnum : 8; /**< [ 15: 8](RO) PCI bus segment to which the subordinate interface is connected.
+ Resets to PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM]. */
+ uint32_t fixed_sbnum : 8; /**< [ 7: 0](RO) PCI bus segment to which the secondary interface is connected.
+ Resets to PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM]. */
+#else /* Word 0 - Little Endian */
+ uint32_t fixed_sbnum : 8; /**< [ 7: 0](RO) PCI bus segment to which the secondary interface is connected.
+ Resets to PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM]. */
+ uint32_t fixed_subbnum : 8; /**< [ 15: 8](RO) PCI bus segment to which the subordinate interface is connected.
+ Resets to PCCBR_XXX_VSEC_CTL[STATIC_SUBBNUM]. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_ea_br_s cn; */
+};
+typedef union bdk_pccbr_xxx_ea_br bdk_pccbr_xxx_ea_br_t;
+
+#define BDK_PCCBR_XXX_EA_BR BDK_PCCBR_XXX_EA_BR_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_EA_BR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_EA_BR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0xb4;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0xb4;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0xb4;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0xb4;
+ __bdk_csr_fatal("PCCBR_XXX_EA_BR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_EA_BR bdk_pccbr_xxx_ea_br_t
+#define bustype_BDK_PCCBR_XXX_EA_BR BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_EA_BR "PCCBR_XXX_EA_BR"
+#define busnum_BDK_PCCBR_XXX_EA_BR 0
+#define arguments_BDK_PCCBR_XXX_EA_BR -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_ea_cap_hdr
+ *
+ * PCC Bridge PCI Enhanced Allocation Capabilities Register
+ * This register is the header of the 8-byte PCI enhanced allocation capability
+ * structure for type 1 bridges.
+ */
+union bdk_pccbr_xxx_ea_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_ea_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t num_entries : 6; /**< [ 21: 16](RO) Number of enhanced entries. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. No additional PCI capabilities. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) Enhanced allocation capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) Enhanced allocation capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO) Next capability pointer. No additional PCI capabilities. */
+ uint32_t num_entries : 6; /**< [ 21: 16](RO) Number of enhanced entries. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_ea_cap_hdr_s cn; */
+};
+typedef union bdk_pccbr_xxx_ea_cap_hdr bdk_pccbr_xxx_ea_cap_hdr_t;
+
+#define BDK_PCCBR_XXX_EA_CAP_HDR BDK_PCCBR_XXX_EA_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_EA_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_EA_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0xb0;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0xb0;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X))
+ return 0xb0;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0xb0;
+ __bdk_csr_fatal("PCCBR_XXX_EA_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_EA_CAP_HDR bdk_pccbr_xxx_ea_cap_hdr_t
+#define bustype_BDK_PCCBR_XXX_EA_CAP_HDR BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_EA_CAP_HDR "PCCBR_XXX_EA_CAP_HDR"
+#define busnum_BDK_PCCBR_XXX_EA_CAP_HDR 0
+#define arguments_BDK_PCCBR_XXX_EA_CAP_HDR -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_id
+ *
+ * PCC Bridge Vendor and Device ID Register
+ * This register is the header of the 64-byte PCI type 1 configuration structure.
+ */
+union bdk_pccbr_xxx_id
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_id_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t devid : 16; /**< [ 31: 16](RO) Device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> is PCC_DEV_IDL_E::PCCBR. */
+ uint32_t vendid : 16; /**< [ 15: 0](RO) Cavium's vendor ID. Enumerated by PCC_VENDOR_E::CAVIUM. */
+#else /* Word 0 - Little Endian */
+ uint32_t vendid : 16; /**< [ 15: 0](RO) Cavium's vendor ID. Enumerated by PCC_VENDOR_E::CAVIUM. */
+ uint32_t devid : 16; /**< [ 31: 16](RO) Device ID. \<15:8\> is PCC_PROD_E::GEN. \<7:0\> is PCC_DEV_IDL_E::PCCBR. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_id_s cn; */
+};
+typedef union bdk_pccbr_xxx_id bdk_pccbr_xxx_id_t;
+
+#define BDK_PCCBR_XXX_ID BDK_PCCBR_XXX_ID_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_ID_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_ID_FUNC(void)
+{
+ return 0;
+}
+
+#define typedef_BDK_PCCBR_XXX_ID bdk_pccbr_xxx_id_t
+#define bustype_BDK_PCCBR_XXX_ID BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_ID "PCCBR_XXX_ID"
+#define busnum_BDK_PCCBR_XXX_ID 0
+#define arguments_BDK_PCCBR_XXX_ID -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_rev
+ *
+ * PCC Bridge Class Code/Revision ID Register
+ */
+union bdk_pccbr_xxx_rev
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_rev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bcc : 8; /**< [ 31: 24](RO) Base class code. */
+ uint32_t sc : 8; /**< [ 23: 16](RO) Subclass code. */
+ uint32_t pi : 8; /**< [ 15: 8](RO) Programming interface. */
+ uint32_t rid : 8; /**< [ 7: 0](RO/H) Revision ID. Read only version of PCCBR_XXX_VSEC_SCTL[RID]. */
+#else /* Word 0 - Little Endian */
+ uint32_t rid : 8; /**< [ 7: 0](RO/H) Revision ID. Read only version of PCCBR_XXX_VSEC_SCTL[RID]. */
+ uint32_t pi : 8; /**< [ 15: 8](RO) Programming interface. */
+ uint32_t sc : 8; /**< [ 23: 16](RO) Subclass code. */
+ uint32_t bcc : 8; /**< [ 31: 24](RO) Base class code. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_rev_s cn8; */
+ struct bdk_pccbr_xxx_rev_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bcc : 8; /**< [ 31: 24](RO) Base class code. */
+ uint32_t sc : 8; /**< [ 23: 16](RO) Subclass code. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/H) Programming interface. Read only version of PCCBR_XXX_VSEC_SCTL[PI]. */
+ uint32_t rid : 8; /**< [ 7: 0](RO/H) Revision ID. Read only version of PCCBR_XXX_VSEC_SCTL[RID]. */
+#else /* Word 0 - Little Endian */
+ uint32_t rid : 8; /**< [ 7: 0](RO/H) Revision ID. Read only version of PCCBR_XXX_VSEC_SCTL[RID]. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/H) Programming interface. Read only version of PCCBR_XXX_VSEC_SCTL[PI]. */
+ uint32_t sc : 8; /**< [ 23: 16](RO) Subclass code. */
+ uint32_t bcc : 8; /**< [ 31: 24](RO) Base class code. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccbr_xxx_rev bdk_pccbr_xxx_rev_t;
+
+#define BDK_PCCBR_XXX_REV BDK_PCCBR_XXX_REV_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_REV_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_REV_FUNC(void)
+{
+ return 8;
+}
+
+#define typedef_BDK_PCCBR_XXX_REV bdk_pccbr_xxx_rev_t
+#define bustype_BDK_PCCBR_XXX_REV BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_REV "PCCBR_XXX_REV"
+#define busnum_BDK_PCCBR_XXX_REV 0
+#define arguments_BDK_PCCBR_XXX_REV -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_vsec_cap_hdr
+ *
+ * PCC Bridge Vendor-Specific Capability Header Register
+ * This register is the header of the 16-byte {ProductLine} family bridge capability
+ * structure.
+ */
+union bdk_pccbr_xxx_vsec_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_vsec_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. None. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t rbareid : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t rbareid : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. None. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_vsec_cap_hdr_s cn8; */
+ struct bdk_pccbr_xxx_vsec_cap_hdr_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. Points to PCCBR_XXX_ACS_CAP_HDR. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t rbareid : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t rbareid : 16; /**< [ 15: 0](RO) PCIE extended capability. */
+ uint32_t cv : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t nco : 12; /**< [ 31: 20](RO) Next capability offset. Points to PCCBR_XXX_ACS_CAP_HDR. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccbr_xxx_vsec_cap_hdr bdk_pccbr_xxx_vsec_cap_hdr_t;
+
+#define BDK_PCCBR_XXX_VSEC_CAP_HDR BDK_PCCBR_XXX_VSEC_CAP_HDR_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_VSEC_CAP_HDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_VSEC_CAP_HDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x100;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x100;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x108;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x100;
+ __bdk_csr_fatal("PCCBR_XXX_VSEC_CAP_HDR", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_VSEC_CAP_HDR bdk_pccbr_xxx_vsec_cap_hdr_t
+#define bustype_BDK_PCCBR_XXX_VSEC_CAP_HDR BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_VSEC_CAP_HDR "PCCBR_XXX_VSEC_CAP_HDR"
+#define busnum_BDK_PCCBR_XXX_VSEC_CAP_HDR 0
+#define arguments_BDK_PCCBR_XXX_VSEC_CAP_HDR -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_vsec_ctl
+ *
+ * PCC Bridge Vendor-Specific Control Register
+ */
+union bdk_pccbr_xxx_vsec_ctl
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_vsec_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t static_subbnum : 8; /**< [ 15: 8](RO) Static bus number. If nonzero, downstream block expects
+ PCCBR_XXX_BUS[SUBBNUM] and PCCBR_XXX_BUS[SBNUM] to match this value. */
+ uint32_t inst_num : 8; /**< [ 7: 0](RO) Instance number. For blocks with multiple instances, indicates which instance number,
+ otherwise 0x0; may be used to form Linux device numbers. For example for UART(1) is 0x1. */
+#else /* Word 0 - Little Endian */
+ uint32_t inst_num : 8; /**< [ 7: 0](RO) Instance number. For blocks with multiple instances, indicates which instance number,
+ otherwise 0x0; may be used to form Linux device numbers. For example for UART(1) is 0x1. */
+ uint32_t static_subbnum : 8; /**< [ 15: 8](RO) Static bus number. If nonzero, downstream block expects
+ PCCBR_XXX_BUS[SUBBNUM] and PCCBR_XXX_BUS[SBNUM] to match this value. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_vsec_ctl_s cn; */
+};
+typedef union bdk_pccbr_xxx_vsec_ctl bdk_pccbr_xxx_vsec_ctl_t;
+
+#define BDK_PCCBR_XXX_VSEC_CTL BDK_PCCBR_XXX_VSEC_CTL_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_VSEC_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_VSEC_CTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x108;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x108;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x110;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x108;
+ __bdk_csr_fatal("PCCBR_XXX_VSEC_CTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_VSEC_CTL bdk_pccbr_xxx_vsec_ctl_t
+#define bustype_BDK_PCCBR_XXX_VSEC_CTL BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_VSEC_CTL "PCCBR_XXX_VSEC_CTL"
+#define busnum_BDK_PCCBR_XXX_VSEC_CTL 0
+#define arguments_BDK_PCCBR_XXX_VSEC_CTL -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_vsec_id
+ *
+ * PCC Bridge Vendor-Specific Identification Register
+ */
+union bdk_pccbr_xxx_vsec_id
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_vsec_id_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t len : 12; /**< [ 31: 20](RO) Number of bytes in the entire VSEC structure including PCCBR_XXX_VSEC_CAP_HDR.
+ Internal:
+ Matches PCCPF_XXX_VSEC_ID[LEN], so extra bytes allocated and unused at the end
+ of the structure. */
+ uint32_t rev : 4; /**< [ 19: 16](RO) Vendor-specific revision. */
+ uint32_t id : 16; /**< [ 15: 0](RO) Vendor-specific ID. Indicates the {ProductLine} family bridge VSEC ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t id : 16; /**< [ 15: 0](RO) Vendor-specific ID. Indicates the {ProductLine} family bridge VSEC ID. */
+ uint32_t rev : 4; /**< [ 19: 16](RO) Vendor-specific revision. */
+ uint32_t len : 12; /**< [ 31: 20](RO) Number of bytes in the entire VSEC structure including PCCBR_XXX_VSEC_CAP_HDR.
+ Internal:
+ Matches PCCPF_XXX_VSEC_ID[LEN], so extra bytes allocated and unused at the end
+ of the structure. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pccbr_xxx_vsec_id_s cn8; */
+ struct bdk_pccbr_xxx_vsec_id_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t len : 12; /**< [ 31: 20](RO) Number of bytes in the entire VSEC structure including PCCBR_XXX_VSEC_CAP_HDR.
+ Internal:
+ Matches PCCPF_XXX_VSEC_ID[LEN], so extra bytes allocated and unused at the end
+ of the structure. */
+ uint32_t rev : 4; /**< [ 19: 16](RO) Vendor-specific revision. */
+ uint32_t id : 16; /**< [ 15: 0](RO) Vendor-specific ID. Indicates the {ProductLine} family bridge VSEC ID.
+ Enumerated by PCC_VSECID_E. */
+#else /* Word 0 - Little Endian */
+ uint32_t id : 16; /**< [ 15: 0](RO) Vendor-specific ID. Indicates the {ProductLine} family bridge VSEC ID.
+ Enumerated by PCC_VSECID_E. */
+ uint32_t rev : 4; /**< [ 19: 16](RO) Vendor-specific revision. */
+ uint32_t len : 12; /**< [ 31: 20](RO) Number of bytes in the entire VSEC structure including PCCBR_XXX_VSEC_CAP_HDR.
+ Internal:
+ Matches PCCPF_XXX_VSEC_ID[LEN], so extra bytes allocated and unused at the end
+ of the structure. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_pccbr_xxx_vsec_id bdk_pccbr_xxx_vsec_id_t;
+
+#define BDK_PCCBR_XXX_VSEC_ID BDK_PCCBR_XXX_VSEC_ID_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_VSEC_ID_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_VSEC_ID_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x104;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x104;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x10c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x104;
+ __bdk_csr_fatal("PCCBR_XXX_VSEC_ID", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_VSEC_ID bdk_pccbr_xxx_vsec_id_t
+#define bustype_BDK_PCCBR_XXX_VSEC_ID BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_VSEC_ID "PCCBR_XXX_VSEC_ID"
+#define busnum_BDK_PCCBR_XXX_VSEC_ID 0
+#define arguments_BDK_PCCBR_XXX_VSEC_ID -1,-1,-1,-1
+
+/**
+ * Register (PCCBR) pccbr_xxx_vsec_sctl
+ *
+ * PCC Bridge Vendor-Specific Secure Control Register
+ * This register is reset on a chip domain reset.
+ */
+union bdk_pccbr_xxx_vsec_sctl
+{
+ uint32_t u;
+ struct bdk_pccbr_xxx_vsec_sctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pi : 8; /**< [ 31: 24](SR/W) Programming interface. R/W version of the value to be presented in
+ PCCBR_XXX_REV[PI]. Reset value 0x1 indicates transparent bridge
+ (subtractive decode). */
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCBR_XXX_REV[RID]. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCBR_XXX_REV[RID]. */
+ uint32_t pi : 8; /**< [ 31: 24](SR/W) Programming interface. R/W version of the value to be presented in
+ PCCBR_XXX_REV[PI]. Reset value 0x1 indicates transparent bridge
+ (subtractive decode). */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pccbr_xxx_vsec_sctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCBR_XXX_REV[RID]. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t rid : 8; /**< [ 23: 16](SR/W) Revision ID. R/W version of the value to be presented in PCCBR_XXX_REV[RID]. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_pccbr_xxx_vsec_sctl_s cn9; */
+};
+typedef union bdk_pccbr_xxx_vsec_sctl bdk_pccbr_xxx_vsec_sctl_t;
+
+#define BDK_PCCBR_XXX_VSEC_SCTL BDK_PCCBR_XXX_VSEC_SCTL_FUNC()
+static inline uint64_t BDK_PCCBR_XXX_VSEC_SCTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCCBR_XXX_VSEC_SCTL_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x10c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x10c;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX))
+ return 0x114;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x10c;
+ __bdk_csr_fatal("PCCBR_XXX_VSEC_SCTL", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_PCCBR_XXX_VSEC_SCTL bdk_pccbr_xxx_vsec_sctl_t
+#define bustype_BDK_PCCBR_XXX_VSEC_SCTL BDK_CSR_TYPE_PCCBR
+#define basename_BDK_PCCBR_XXX_VSEC_SCTL "PCCBR_XXX_VSEC_SCTL"
+#define busnum_BDK_PCCBR_XXX_VSEC_SCTL 0
+#define arguments_BDK_PCCBR_XXX_VSEC_SCTL -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_PCCBR_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h
index 0b3e20ca4b..e915de8d01 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pccpf.h
@@ -41,6 +41,7 @@
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <bdk.h> /* FIXME(dhendrix): added to satisfy compiler... */
/**
* @file
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pcierc.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pcierc.h
new file mode 100644
index 0000000000..07ef88b4fe
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-pcierc.h
@@ -0,0 +1,29162 @@
+#ifndef __BDK_CSRS_PCIERC_H__
+#define __BDK_CSRS_PCIERC_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium PCIERC.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ack_freq
+ *
+ * PCIe RC Ack Frequency Register
+ */
+union bdk_pciercx_ack_freq
+{
+ uint32_t u;
+ struct bdk_pciercx_ack_freq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t easpml1 : 1; /**< [ 30: 30](R/W/H) Enter ASPM L1 without receive in L0s. Allow core to enter ASPM L1 even when link partner
+ did not go to L0s (receive is not in L0s). When not set, core goes to ASPM L1 only after
+ idle period, during which both receive and transmit are in L0s. */
+ uint32_t l1el : 3; /**< [ 29: 27](R/W) L1 entrance latency. Values correspond to:
+ 0x0 = 1 ms.
+ 0x1 = 2 ms.
+ 0x2 = 4 ms.
+ 0x3 = 8 ms.
+ 0x4 = 16 ms.
+ 0x5 = 32 ms.
+ 0x6 or 0x7 = 64 ms. */
+ uint32_t l0el : 3; /**< [ 26: 24](R/W) L0s entrance latency. Values correspond to:
+ 0x0 = 1 ms.
+ 0x1 = 2 ms.
+ 0x2 = 3 ms.
+ 0x3 = 4 ms.
+ 0x4 = 5 ms.
+ 0x5 = 6 ms.
+ 0x6 or 0x7 = 7 ms. */
+ uint32_t n_fts_cc : 8; /**< [ 23: 16](RO) The number of fast training sequence (FTS) ordered sets to be transmitted when
+ transitioning from L0s to L0. The maximum number of FTS ordered sets that a component can
+ request is 255.
+ A value of zero is not supported; a value of zero can cause the LTSSM to go into the
+ recovery state when exiting from L0s. */
+ uint32_t n_fts : 8; /**< [ 15: 8](R/W) The number of fast training sequence (FTS) ordered sets to be transmitted when
+ transitioning from L0s to L0. The maximum number of FTS ordered sets that a component can
+ request is 255.
+ A value of zero is not supported; a value of zero can cause the LTSSM to go into the
+ recovery state when exiting from L0s. */
+ uint32_t ack_freq : 8; /**< [ 7: 0](R/W) ACK frequency. The number of pending ACKs specified here (up to 255) before sending an ACK. */
+#else /* Word 0 - Little Endian */
+ uint32_t ack_freq : 8; /**< [ 7: 0](R/W) ACK frequency. The number of pending ACKs specified here (up to 255) before sending an ACK. */
+ uint32_t n_fts : 8; /**< [ 15: 8](R/W) The number of fast training sequence (FTS) ordered sets to be transmitted when
+ transitioning from L0s to L0. The maximum number of FTS ordered sets that a component can
+ request is 255.
+ A value of zero is not supported; a value of zero can cause the LTSSM to go into the
+ recovery state when exiting from L0s. */
+ uint32_t n_fts_cc : 8; /**< [ 23: 16](RO) The number of fast training sequence (FTS) ordered sets to be transmitted when
+ transitioning from L0s to L0. The maximum number of FTS ordered sets that a component can
+ request is 255.
+ A value of zero is not supported; a value of zero can cause the LTSSM to go into the
+ recovery state when exiting from L0s. */
+ uint32_t l0el : 3; /**< [ 26: 24](R/W) L0s entrance latency. Values correspond to:
+ 0x0 = 1 ms.
+ 0x1 = 2 ms.
+ 0x2 = 3 ms.
+ 0x3 = 4 ms.
+ 0x4 = 5 ms.
+ 0x5 = 6 ms.
+ 0x6 or 0x7 = 7 ms. */
+ uint32_t l1el : 3; /**< [ 29: 27](R/W) L1 entrance latency. Values correspond to:
+ 0x0 = 1 ms.
+ 0x1 = 2 ms.
+ 0x2 = 4 ms.
+ 0x3 = 8 ms.
+ 0x4 = 16 ms.
+ 0x5 = 32 ms.
+ 0x6 or 0x7 = 64 ms. */
+ uint32_t easpml1 : 1; /**< [ 30: 30](R/W/H) Enter ASPM L1 without receive in L0s. Allow core to enter ASPM L1 even when link partner
+ did not go to L0s (receive is not in L0s). When not set, core goes to ASPM L1 only after
+ idle period, during which both receive and transmit are in L0s. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ack_freq_s cn; */
+};
+typedef union bdk_pciercx_ack_freq bdk_pciercx_ack_freq_t;
+
+static inline uint64_t BDK_PCIERCX_ACK_FREQ(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ACK_FREQ(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x70cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_ACK_FREQ", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_ACK_FREQ(a) bdk_pciercx_ack_freq_t
+#define bustype_BDK_PCIERCX_ACK_FREQ(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ACK_FREQ(a) "PCIERCX_ACK_FREQ"
+#define busnum_BDK_PCIERCX_ACK_FREQ(a) (a)
+#define arguments_BDK_PCIERCX_ACK_FREQ(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ack_timer
+ *
+ * PCIe RC Ack Latency Timer/Replay Timer Register
+ */
+union bdk_pciercx_ack_timer
+{
+ uint32_t u;
+ struct bdk_pciercx_ack_timer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rtl : 16; /**< [ 31: 16](R/W/H) Replay time limit. The replay timer expires when it reaches this limit. The PCI Express
+ bus initiates a replay upon reception of a NAK or when the replay timer expires. This
+ value is set correctly by the hardware out of reset or when the negotiated link width or
+ payload size changes. If the user changes this value
+ they should refer to the PCIe specification for the correct value. */
+ uint32_t rtltl : 16; /**< [ 15: 0](R/W/H) Round trip latency time limit. The ACK/NAK latency timer expires when it reaches this
+ limit. This value is set correctly by the hardware out of reset or when the negotiated
+ link width or payload size changes. If the user changes this value
+ they should refer to the PCIe specification for the correct value. */
+#else /* Word 0 - Little Endian */
+ uint32_t rtltl : 16; /**< [ 15: 0](R/W/H) Round trip latency time limit. The ACK/NAK latency timer expires when it reaches this
+ limit. This value is set correctly by the hardware out of reset or when the negotiated
+ link width or payload size changes. If the user changes this value
+ they should refer to the PCIe specification for the correct value. */
+ uint32_t rtl : 16; /**< [ 31: 16](R/W/H) Replay time limit. The replay timer expires when it reaches this limit. The PCI Express
+ bus initiates a replay upon reception of a NAK or when the replay timer expires. This
+ value is set correctly by the hardware out of reset or when the negotiated link width or
+ payload size changes. If the user changes this value
+ they should refer to the PCIe specification for the correct value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ack_timer_s cn; */
+};
+typedef union bdk_pciercx_ack_timer bdk_pciercx_ack_timer_t;
+
+static inline uint64_t BDK_PCIERCX_ACK_TIMER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ACK_TIMER(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x700ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_ACK_TIMER", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_ACK_TIMER(a) bdk_pciercx_ack_timer_t
+#define bustype_BDK_PCIERCX_ACK_TIMER(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ACK_TIMER(a) "PCIERCX_ACK_TIMER"
+#define busnum_BDK_PCIERCX_ACK_TIMER(a) (a)
+#define arguments_BDK_PCIERCX_ACK_TIMER(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_acs_cap_ctl
+ *
+ * PCIe RC ACS Capability and Control Register
+ */
+union bdk_pciercx_acs_cap_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_acs_cap_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t dte : 1; /**< [ 22: 22](R/W) ACS direct translated P2P enable. */
+ uint32_t ece : 1; /**< [ 21: 21](R/W) ACS P2P egress control enable. */
+ uint32_t ufe : 1; /**< [ 20: 20](R/W) ACS upstream forwarding enable. */
+ uint32_t cre : 1; /**< [ 19: 19](R/W) ACS P2P completion redirect enable. */
+ uint32_t rre : 1; /**< [ 18: 18](R/W) ACS P2P request redirect enable. */
+ uint32_t tbe : 1; /**< [ 17: 17](R/W) ACS translation blocking enable. */
+ uint32_t sve : 1; /**< [ 16: 16](R/W) ACS source validation enable. */
+ uint32_t ecvs : 8; /**< [ 15: 8](RO/WRSL) Egress control vector size.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t dt : 1; /**< [ 6: 6](RO/WRSL) ACS direct translated P2P.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t ec : 1; /**< [ 5: 5](RO/WRSL) ACS P2P egress control.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t uf : 1; /**< [ 4: 4](RO/WRSL) ACS upstream forwarding.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cr : 1; /**< [ 3: 3](RO/WRSL) ACS P2P completion redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t rr : 1; /**< [ 2: 2](RO/WRSL) ACS P2P request redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t tb : 1; /**< [ 1: 1](RO/WRSL) ACS translation blocking.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t sv : 1; /**< [ 0: 0](RO/WRSL) ACS source validation.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sv : 1; /**< [ 0: 0](RO/WRSL) ACS source validation.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t tb : 1; /**< [ 1: 1](RO/WRSL) ACS translation blocking.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t rr : 1; /**< [ 2: 2](RO/WRSL) ACS P2P request redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cr : 1; /**< [ 3: 3](RO/WRSL) ACS P2P completion redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t uf : 1; /**< [ 4: 4](RO/WRSL) ACS upstream forwarding.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t ec : 1; /**< [ 5: 5](RO/WRSL) ACS P2P egress control.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t dt : 1; /**< [ 6: 6](RO/WRSL) ACS direct translated P2P.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t ecvs : 8; /**< [ 15: 8](RO/WRSL) Egress control vector size.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t sve : 1; /**< [ 16: 16](R/W) ACS source validation enable. */
+ uint32_t tbe : 1; /**< [ 17: 17](R/W) ACS translation blocking enable. */
+ uint32_t rre : 1; /**< [ 18: 18](R/W) ACS P2P request redirect enable. */
+ uint32_t cre : 1; /**< [ 19: 19](R/W) ACS P2P completion redirect enable. */
+ uint32_t ufe : 1; /**< [ 20: 20](R/W) ACS upstream forwarding enable. */
+ uint32_t ece : 1; /**< [ 21: 21](R/W) ACS P2P egress control enable. */
+ uint32_t dte : 1; /**< [ 22: 22](R/W) ACS direct translated P2P enable. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_acs_cap_ctl_s cn; */
+};
+typedef union bdk_pciercx_acs_cap_ctl bdk_pciercx_acs_cap_ctl_t;
+
+static inline uint64_t BDK_PCIERCX_ACS_CAP_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ACS_CAP_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x2f0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_ACS_CAP_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_ACS_CAP_CTL(a) bdk_pciercx_acs_cap_ctl_t
+#define bustype_BDK_PCIERCX_ACS_CAP_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ACS_CAP_CTL(a) "PCIERCX_ACS_CAP_CTL"
+#define busnum_BDK_PCIERCX_ACS_CAP_CTL(a) (a)
+#define arguments_BDK_PCIERCX_ACS_CAP_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_acs_cap_hdr
+ *
+ * PCIe RC PCI Express ACS Extended Capability Header Register
+ */
+union bdk_pciercx_acs_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_acs_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_acs_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_acs_cap_hdr bdk_pciercx_acs_cap_hdr_t;
+
+static inline uint64_t BDK_PCIERCX_ACS_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ACS_CAP_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x2ecll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_ACS_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_ACS_CAP_HDR(a) bdk_pciercx_acs_cap_hdr_t
+#define bustype_BDK_PCIERCX_ACS_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ACS_CAP_HDR(a) "PCIERCX_ACS_CAP_HDR"
+#define busnum_BDK_PCIERCX_ACS_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_ACS_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_acs_egr_ctl_vec
+ *
+ * PCIe RC Egress Control Vector Register
+ */
+union bdk_pciercx_acs_egr_ctl_vec
+{
+ uint32_t u;
+ struct bdk_pciercx_acs_egr_ctl_vec_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t unused : 29; /**< [ 31: 3](R/W/H) Reserved. */
+ uint32_t ecv : 3; /**< [ 2: 0](R/W/H) Egress control vector. */
+#else /* Word 0 - Little Endian */
+ uint32_t ecv : 3; /**< [ 2: 0](R/W/H) Egress control vector. */
+ uint32_t unused : 29; /**< [ 31: 3](R/W/H) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_acs_egr_ctl_vec_s cn; */
+};
+typedef union bdk_pciercx_acs_egr_ctl_vec bdk_pciercx_acs_egr_ctl_vec_t;
+
+static inline uint64_t BDK_PCIERCX_ACS_EGR_CTL_VEC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ACS_EGR_CTL_VEC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x2f4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_ACS_EGR_CTL_VEC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_ACS_EGR_CTL_VEC(a) bdk_pciercx_acs_egr_ctl_vec_t
+#define bustype_BDK_PCIERCX_ACS_EGR_CTL_VEC(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ACS_EGR_CTL_VEC(a) "PCIERCX_ACS_EGR_CTL_VEC"
+#define busnum_BDK_PCIERCX_ACS_EGR_CTL_VEC(a) (a)
+#define arguments_BDK_PCIERCX_ACS_EGR_CTL_VEC(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_adv_err_cap_cntrl
+ *
+ * PCIe RC Advanced Capabilities and Control Register
+ */
+union bdk_pciercx_adv_err_cap_cntrl
+{
+ uint32_t u;
+ struct bdk_pciercx_adv_err_cap_cntrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t tlp_plp : 1; /**< [ 11: 11](RO) TLP prefix log present. */
+ uint32_t mult_hdr_en : 1; /**< [ 10: 10](RO) Multiple header recording enable (not supported). */
+ uint32_t mult_hdr_cap : 1; /**< [ 9: 9](RO) Multiple header recording capability (not supported). */
+ uint32_t ce : 1; /**< [ 8: 8](R/W) ECRC check enable. */
+ uint32_t cc : 1; /**< [ 7: 7](RO) ECRC check capable. */
+ uint32_t ge : 1; /**< [ 6: 6](R/W) ECRC generation enable. */
+ uint32_t gc : 1; /**< [ 5: 5](RO) ECRC generation capability. */
+ uint32_t fep : 5; /**< [ 4: 0](RO) First error pointer. */
+#else /* Word 0 - Little Endian */
+ uint32_t fep : 5; /**< [ 4: 0](RO) First error pointer. */
+ uint32_t gc : 1; /**< [ 5: 5](RO) ECRC generation capability. */
+ uint32_t ge : 1; /**< [ 6: 6](R/W) ECRC generation enable. */
+ uint32_t cc : 1; /**< [ 7: 7](RO) ECRC check capable. */
+ uint32_t ce : 1; /**< [ 8: 8](R/W) ECRC check enable. */
+ uint32_t mult_hdr_cap : 1; /**< [ 9: 9](RO) Multiple header recording capability (not supported). */
+ uint32_t mult_hdr_en : 1; /**< [ 10: 10](RO) Multiple header recording enable (not supported). */
+ uint32_t tlp_plp : 1; /**< [ 11: 11](RO) TLP prefix log present. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_adv_err_cap_cntrl_s cn; */
+};
+typedef union bdk_pciercx_adv_err_cap_cntrl bdk_pciercx_adv_err_cap_cntrl_t;
+
+static inline uint64_t BDK_PCIERCX_ADV_ERR_CAP_CNTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ADV_ERR_CAP_CNTRL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x118ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_ADV_ERR_CAP_CNTRL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_ADV_ERR_CAP_CNTRL(a) bdk_pciercx_adv_err_cap_cntrl_t
+#define bustype_BDK_PCIERCX_ADV_ERR_CAP_CNTRL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ADV_ERR_CAP_CNTRL(a) "PCIERCX_ADV_ERR_CAP_CNTRL"
+#define busnum_BDK_PCIERCX_ADV_ERR_CAP_CNTRL(a) (a)
+#define arguments_BDK_PCIERCX_ADV_ERR_CAP_CNTRL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_aux_clk_freq
+ *
+ * PCIe RC Auxiliary Clock Frequency Control Register
+ */
+union bdk_pciercx_aux_clk_freq
+{
+ uint32_t u;
+ struct bdk_pciercx_aux_clk_freq_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_10_31 : 22;
+ uint32_t upc_supp : 10; /**< [ 9: 0](R/W) The aux_clk frequency in MHz. This value is used to provide a 1 us reference for
+ counting time during low-power states with aux_clk when the PHY has removed the
+ pipe_clk. */
+#else /* Word 0 - Little Endian */
+ uint32_t upc_supp : 10; /**< [ 9: 0](R/W) The aux_clk frequency in MHz. This value is used to provide a 1 us reference for
+ counting time during low-power states with aux_clk when the PHY has removed the
+ pipe_clk. */
+ uint32_t reserved_10_31 : 22;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_aux_clk_freq_s cn; */
+};
+typedef union bdk_pciercx_aux_clk_freq bdk_pciercx_aux_clk_freq_t;
+
+static inline uint64_t BDK_PCIERCX_AUX_CLK_FREQ(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_AUX_CLK_FREQ(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xb40ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_AUX_CLK_FREQ", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_AUX_CLK_FREQ(a) bdk_pciercx_aux_clk_freq_t
+#define bustype_BDK_PCIERCX_AUX_CLK_FREQ(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_AUX_CLK_FREQ(a) "PCIERCX_AUX_CLK_FREQ"
+#define busnum_BDK_PCIERCX_AUX_CLK_FREQ(a) (a)
+#define arguments_BDK_PCIERCX_AUX_CLK_FREQ(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_bar0l
+ *
+ * PCIe RC Base Address 0 Low Register
+ */
+union bdk_pciercx_bar0l
+{
+ uint32_t u;
+ struct bdk_pciercx_bar0l_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_bar0l_s cn; */
+};
+typedef union bdk_pciercx_bar0l bdk_pciercx_bar0l_t;
+
+static inline uint64_t BDK_PCIERCX_BAR0L(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_BAR0L(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x10ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_BAR0L", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_BAR0L(a) bdk_pciercx_bar0l_t
+#define bustype_BDK_PCIERCX_BAR0L(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_BAR0L(a) "PCIERCX_BAR0L"
+#define busnum_BDK_PCIERCX_BAR0L(a) (a)
+#define arguments_BDK_PCIERCX_BAR0L(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_bar0u
+ *
+ * PCIe RC Base Address 0 High Register
+ */
+union bdk_pciercx_bar0u
+{
+ uint32_t u;
+ struct bdk_pciercx_bar0u_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_bar0u_s cn; */
+};
+typedef union bdk_pciercx_bar0u bdk_pciercx_bar0u_t;
+
+static inline uint64_t BDK_PCIERCX_BAR0U(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_BAR0U(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x14ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_BAR0U", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_BAR0U(a) bdk_pciercx_bar0u_t
+#define bustype_BDK_PCIERCX_BAR0U(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_BAR0U(a) "PCIERCX_BAR0U"
+#define busnum_BDK_PCIERCX_BAR0U(a) (a)
+#define arguments_BDK_PCIERCX_BAR0U(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_bnum
+ *
+ * PCIe RC Bus Number Register
+ */
+union bdk_pciercx_bnum
+{
+ uint32_t u;
+ struct bdk_pciercx_bnum_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t slt : 8; /**< [ 31: 24](RO) Secondary latency timer. Not applicable to PCI Express, hardwired to 0x0. */
+ uint32_t subbnum : 8; /**< [ 23: 16](R/W) Subordinate bus number.
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ Internal:
+ Note IOB/ECAM snoops on writes to this register. */
+ uint32_t sbnum : 8; /**< [ 15: 8](R/W) Secondary bus number.
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ Internal:
+ Note IOB/ECAM snoops on writes to this register. */
+ uint32_t pbnum : 8; /**< [ 7: 0](R/W) Primary bus number. */
+#else /* Word 0 - Little Endian */
+ uint32_t pbnum : 8; /**< [ 7: 0](R/W) Primary bus number. */
+ uint32_t sbnum : 8; /**< [ 15: 8](R/W) Secondary bus number.
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ Internal:
+ Note IOB/ECAM snoops on writes to this register. */
+ uint32_t subbnum : 8; /**< [ 23: 16](R/W) Subordinate bus number.
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ Internal:
+ Note IOB/ECAM snoops on writes to this register. */
+ uint32_t slt : 8; /**< [ 31: 24](RO) Secondary latency timer. Not applicable to PCI Express, hardwired to 0x0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_bnum_s cn; */
+};
+typedef union bdk_pciercx_bnum bdk_pciercx_bnum_t;
+
+static inline uint64_t BDK_PCIERCX_BNUM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_BNUM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x18ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_BNUM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_BNUM(a) bdk_pciercx_bnum_t
+#define bustype_BDK_PCIERCX_BNUM(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_BNUM(a) "PCIERCX_BNUM"
+#define busnum_BDK_PCIERCX_BNUM(a) (a)
+#define arguments_BDK_PCIERCX_BNUM(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_c_rcv_credit
+ *
+ * PCIe RC VC0 Completion Receive Queue Control Register
+ */
+union bdk_pciercx_c_rcv_credit
+{
+ uint32_t u;
+ struct bdk_pciercx_c_rcv_credit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t data_sc : 2; /**< [ 27: 26](RO/WRSL) VC0 scale completion data credits.
+
+ Reset values:
+ _ UPEM: 0x2.
+ _ BPEM: 0x1. */
+ uint32_t hdr_sc : 2; /**< [ 25: 24](RO/WRSL) VC0 scale completion header credits.
+
+ Reset values:
+ _ UPEM: 0x3.
+ _ BPEM: 0x2. */
+ uint32_t queue_mode : 3; /**< [ 23: 21](RO/WRSL) VC0 completion TLP queue mode. The operating mode of the completion receive queue for VC0,
+ used only in the segmented-buffer configuration, writable through
+ PEM()_CFG_WR.
+ Only one bit can be set at a time:
+
+ _ Bit 23 = Bypass.
+
+ _ Bit 22 = Cut-through.
+
+ _ Bit 21 = Store-and-forward.
+
+ The application must not change this field. */
+ uint32_t reserved_20 : 1;
+ uint32_t header_credits : 8; /**< [ 19: 12](RO/WRSL/H) VC0 completion header credits. The number of initial completion header credits for VC0,
+ used for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field.
+
+ Reset values:
+ _ UPEM: 0x28.
+ _ BPEM: 0x50. */
+ uint32_t data_credits : 12; /**< [ 11: 0](RO/WRSL/H) VC0 completion data credits. The number of initial completion data credits for VC0, used
+ for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field.
+
+ Reset values:
+ _ UPEM: 0x300.
+ _ BPEM: 0x600. */
+#else /* Word 0 - Little Endian */
+ uint32_t data_credits : 12; /**< [ 11: 0](RO/WRSL/H) VC0 completion data credits. The number of initial completion data credits for VC0, used
+ for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field.
+
+ Reset values:
+ _ UPEM: 0x300.
+ _ BPEM: 0x600. */
+ uint32_t header_credits : 8; /**< [ 19: 12](RO/WRSL/H) VC0 completion header credits. The number of initial completion header credits for VC0,
+ used for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field.
+
+ Reset values:
+ _ UPEM: 0x28.
+ _ BPEM: 0x50. */
+ uint32_t reserved_20 : 1;
+ uint32_t queue_mode : 3; /**< [ 23: 21](RO/WRSL) VC0 completion TLP queue mode. The operating mode of the completion receive queue for VC0,
+ used only in the segmented-buffer configuration, writable through
+ PEM()_CFG_WR.
+ Only one bit can be set at a time:
+
+ _ Bit 23 = Bypass.
+
+ _ Bit 22 = Cut-through.
+
+ _ Bit 21 = Store-and-forward.
+
+ The application must not change this field. */
+ uint32_t hdr_sc : 2; /**< [ 25: 24](RO/WRSL) VC0 scale completion header credits.
+
+ Reset values:
+ _ UPEM: 0x3.
+ _ BPEM: 0x2. */
+ uint32_t data_sc : 2; /**< [ 27: 26](RO/WRSL) VC0 scale completion data credits.
+
+ Reset values:
+ _ UPEM: 0x2.
+ _ BPEM: 0x1. */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_c_rcv_credit_s cn; */
+};
+typedef union bdk_pciercx_c_rcv_credit bdk_pciercx_c_rcv_credit_t;
+
+static inline uint64_t BDK_PCIERCX_C_RCV_CREDIT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_C_RCV_CREDIT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x750ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_C_RCV_CREDIT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_C_RCV_CREDIT(a) bdk_pciercx_c_rcv_credit_t
+#define bustype_BDK_PCIERCX_C_RCV_CREDIT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_C_RCV_CREDIT(a) "PCIERCX_C_RCV_CREDIT"
+#define busnum_BDK_PCIERCX_C_RCV_CREDIT(a) (a)
+#define arguments_BDK_PCIERCX_C_RCV_CREDIT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_c_xmit_credit
+ *
+ * PCIe RC Transmit Completion FC Credit Status Register
+ */
+union bdk_pciercx_c_xmit_credit
+{
+ uint32_t u;
+ struct bdk_pciercx_c_xmit_credit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_20_31 : 12;
+ uint32_t tchfcc : 8; /**< [ 19: 12](RO/H) Transmit completion header FC credits. The completion header credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t tcdfcc : 12; /**< [ 11: 0](RO/H) Transmit completion data FC credits. The completion data credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t tcdfcc : 12; /**< [ 11: 0](RO/H) Transmit completion data FC credits. The completion data credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t tchfcc : 8; /**< [ 19: 12](RO/H) Transmit completion header FC credits. The completion header credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t reserved_20_31 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_c_xmit_credit_s cn; */
+};
+typedef union bdk_pciercx_c_xmit_credit bdk_pciercx_c_xmit_credit_t;
+
+static inline uint64_t BDK_PCIERCX_C_XMIT_CREDIT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_C_XMIT_CREDIT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x738ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_C_XMIT_CREDIT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_C_XMIT_CREDIT(a) bdk_pciercx_c_xmit_credit_t
+#define bustype_BDK_PCIERCX_C_XMIT_CREDIT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_C_XMIT_CREDIT(a) "PCIERCX_C_XMIT_CREDIT"
+#define busnum_BDK_PCIERCX_C_XMIT_CREDIT(a) (a)
+#define arguments_BDK_PCIERCX_C_XMIT_CREDIT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cap_ptr
+ *
+ * PCIe RC Capability Pointer Register
+ */
+union bdk_pciercx_cap_ptr
+{
+ uint32_t u;
+ struct bdk_pciercx_cap_ptr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cp : 8; /**< [ 7: 0](RO/WRSL) First capability pointer. Points to power management capability structure by default,
+ writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t cp : 8; /**< [ 7: 0](RO/WRSL) First capability pointer. Points to power management capability structure by default,
+ writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cap_ptr_s cn; */
+};
+typedef union bdk_pciercx_cap_ptr bdk_pciercx_cap_ptr_t;
+
+static inline uint64_t BDK_PCIERCX_CAP_PTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CAP_PTR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x34ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CAP_PTR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CAP_PTR(a) bdk_pciercx_cap_ptr_t
+#define bustype_BDK_PCIERCX_CAP_PTR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CAP_PTR(a) "PCIERCX_CAP_PTR"
+#define busnum_BDK_PCIERCX_CAP_PTR(a) (a)
+#define arguments_BDK_PCIERCX_CAP_PTR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg000
+ *
+ * PCIe RC Device ID and Vendor ID Register
+ * This register contains the first 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg000
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg000_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t devid : 16; /**< [ 31: 16](RO/WRSL) Device ID for PCIERC, writable through PEM()_CFG_WR. However, the application must not
+ change this field.
+ _ \<15:8\> resets to PCC_PROD_E::CNXXXX.
+ _ \<7:0\> resets to PCC_DEV_IDL_E::PCIERC. */
+ uint32_t vendid : 16; /**< [ 15: 0](RO/WRSL) Vendor ID, writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t vendid : 16; /**< [ 15: 0](RO/WRSL) Vendor ID, writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t devid : 16; /**< [ 31: 16](RO/WRSL) Device ID for PCIERC, writable through PEM()_CFG_WR. However, the application must not
+ change this field.
+ _ \<15:8\> resets to PCC_PROD_E::CNXXXX.
+ _ \<7:0\> resets to PCC_DEV_IDL_E::PCIERC. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg000_s cn81xx; */
+ struct bdk_pciercx_cfg000_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t devid : 16; /**< [ 31: 16](RO/WRSL) Device ID, writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t vendid : 16; /**< [ 15: 0](RO/WRSL) Vendor ID, writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t vendid : 16; /**< [ 15: 0](RO/WRSL) Vendor ID, writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t devid : 16; /**< [ 31: 16](RO/WRSL) Device ID, writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_pciercx_cfg000_s cn83xx; */
+};
+typedef union bdk_pciercx_cfg000 bdk_pciercx_cfg000_t;
+
+static inline uint64_t BDK_PCIERCX_CFG000(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG000(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000000ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000000ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000000ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG000", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG000(a) bdk_pciercx_cfg000_t
+#define bustype_BDK_PCIERCX_CFG000(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG000(a) "PCIERCX_CFG000"
+#define busnum_BDK_PCIERCX_CFG000(a) (a)
+#define arguments_BDK_PCIERCX_CFG000(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg001
+ *
+ * PCIe RC Command/Status Register
+ * This register contains the second 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg001
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg001_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dpe : 1; /**< [ 31: 31](R/W1C/H) Detected parity error. */
+ uint32_t sse : 1; /**< [ 30: 30](R/W1C/H) Signaled system error. */
+ uint32_t rma : 1; /**< [ 29: 29](R/W1C/H) Received master abort. */
+ uint32_t rta : 1; /**< [ 28: 28](R/W1C/H) Received target abort. */
+ uint32_t sta : 1; /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+ uint32_t devt : 2; /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to 0x0. */
+ uint32_t mdpe : 1; /**< [ 24: 24](R/W1C/H) Master data parity error. */
+ uint32_t fbb : 1; /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22 : 1;
+ uint32_t m66 : 1; /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. Hardwired to 1. */
+ uint32_t i_stat : 1; /**< [ 19: 19](RO) INTx status. */
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_dis : 1; /**< [ 10: 10](R/W) INTx assertion disable. */
+ uint32_t fbbe : 1; /**< [ 9: 9](RO) Fast back-to-back transaction enable. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t see : 1; /**< [ 8: 8](R/W) SERR# enable. */
+ uint32_t ids_wcc : 1; /**< [ 7: 7](RO) IDSEL stepping/wait cycle control. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t per : 1; /**< [ 6: 6](R/W) Parity error response. */
+ uint32_t vps : 1; /**< [ 5: 5](RO) VGA palette snoop. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t mwice : 1; /**< [ 4: 4](RO) Memory write and invalidate. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t scse : 1; /**< [ 3: 3](RO) Special cycle enable. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t me : 1; /**< [ 2: 2](R/W) Bus master enable. */
+ uint32_t msae : 1; /**< [ 1: 1](R/W) Memory space access enable. */
+ uint32_t isae : 1; /**< [ 0: 0](R/W) I/O space access enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t isae : 1; /**< [ 0: 0](R/W) I/O space access enable. */
+ uint32_t msae : 1; /**< [ 1: 1](R/W) Memory space access enable. */
+ uint32_t me : 1; /**< [ 2: 2](R/W) Bus master enable. */
+ uint32_t scse : 1; /**< [ 3: 3](RO) Special cycle enable. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t mwice : 1; /**< [ 4: 4](RO) Memory write and invalidate. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t vps : 1; /**< [ 5: 5](RO) VGA palette snoop. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t per : 1; /**< [ 6: 6](R/W) Parity error response. */
+ uint32_t ids_wcc : 1; /**< [ 7: 7](RO) IDSEL stepping/wait cycle control. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t see : 1; /**< [ 8: 8](R/W) SERR# enable. */
+ uint32_t fbbe : 1; /**< [ 9: 9](RO) Fast back-to-back transaction enable. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t i_dis : 1; /**< [ 10: 10](R/W) INTx assertion disable. */
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_stat : 1; /**< [ 19: 19](RO) INTx status. */
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. Hardwired to 1. */
+ uint32_t m66 : 1; /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22 : 1;
+ uint32_t fbb : 1; /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t mdpe : 1; /**< [ 24: 24](R/W1C/H) Master data parity error. */
+ uint32_t devt : 2; /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to 0x0. */
+ uint32_t sta : 1; /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+ uint32_t rta : 1; /**< [ 28: 28](R/W1C/H) Received target abort. */
+ uint32_t rma : 1; /**< [ 29: 29](R/W1C/H) Received master abort. */
+ uint32_t sse : 1; /**< [ 30: 30](R/W1C/H) Signaled system error. */
+ uint32_t dpe : 1; /**< [ 31: 31](R/W1C/H) Detected parity error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg001_s cn81xx; */
+ /* struct bdk_pciercx_cfg001_s cn88xx; */
+ struct bdk_pciercx_cfg001_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dpe : 1; /**< [ 31: 31](R/W1C/H) Detected parity error. */
+ uint32_t sse : 1; /**< [ 30: 30](R/W1C/H) Signaled system error. */
+ uint32_t rma : 1; /**< [ 29: 29](R/W1C/H) Received master abort. */
+ uint32_t rta : 1; /**< [ 28: 28](R/W1C/H) Received target abort. */
+ uint32_t sta : 1; /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+ uint32_t devt : 2; /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to 0x0. */
+ uint32_t mdpe : 1; /**< [ 24: 24](R/W1C/H) Master data parity error. */
+ uint32_t fbb : 1; /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22 : 1;
+ uint32_t m66 : 1; /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. Hardwired to 1. */
+ uint32_t i_stat : 1; /**< [ 19: 19](RO) INTx status. */
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_dis : 1; /**< [ 10: 10](R/W) INTx assertion disable. */
+ uint32_t fbbe : 1; /**< [ 9: 9](RO) Fast back-to-back transaction enable. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t see : 1; /**< [ 8: 8](R/W) SERR# enable. */
+ uint32_t ids_wcc : 1; /**< [ 7: 7](RO) IDSEL stepping/wait cycle control. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t per : 1; /**< [ 6: 6](R/W) Parity error response. */
+ uint32_t vps : 1; /**< [ 5: 5](RO) VGA palette snoop. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t mwice : 1; /**< [ 4: 4](RO) Memory write and invalidate. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t scse : 1; /**< [ 3: 3](RO) Special cycle enable. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t me : 1; /**< [ 2: 2](R/W) Bus master enable. */
+ uint32_t msae : 1; /**< [ 1: 1](R/W) Memory space access enable. */
+ uint32_t isae : 1; /**< [ 0: 0](R/W) I/O space access enable.
+ There are no I/O BARs supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t isae : 1; /**< [ 0: 0](R/W) I/O space access enable.
+ There are no I/O BARs supported. */
+ uint32_t msae : 1; /**< [ 1: 1](R/W) Memory space access enable. */
+ uint32_t me : 1; /**< [ 2: 2](R/W) Bus master enable. */
+ uint32_t scse : 1; /**< [ 3: 3](RO) Special cycle enable. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t mwice : 1; /**< [ 4: 4](RO) Memory write and invalidate. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t vps : 1; /**< [ 5: 5](RO) VGA palette snoop. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t per : 1; /**< [ 6: 6](R/W) Parity error response. */
+ uint32_t ids_wcc : 1; /**< [ 7: 7](RO) IDSEL stepping/wait cycle control. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t see : 1; /**< [ 8: 8](R/W) SERR# enable. */
+ uint32_t fbbe : 1; /**< [ 9: 9](RO) Fast back-to-back transaction enable. Not applicable for PCI Express. Must be hardwired to 0. */
+ uint32_t i_dis : 1; /**< [ 10: 10](R/W) INTx assertion disable. */
+ uint32_t reserved_11_18 : 8;
+ uint32_t i_stat : 1; /**< [ 19: 19](RO) INTx status. */
+ uint32_t cl : 1; /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. Hardwired to 1. */
+ uint32_t m66 : 1; /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22 : 1;
+ uint32_t fbb : 1; /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t mdpe : 1; /**< [ 24: 24](R/W1C/H) Master data parity error. */
+ uint32_t devt : 2; /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to 0x0. */
+ uint32_t sta : 1; /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+ uint32_t rta : 1; /**< [ 28: 28](R/W1C/H) Received target abort. */
+ uint32_t rma : 1; /**< [ 29: 29](R/W1C/H) Received master abort. */
+ uint32_t sse : 1; /**< [ 30: 30](R/W1C/H) Signaled system error. */
+ uint32_t dpe : 1; /**< [ 31: 31](R/W1C/H) Detected parity error. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg001 bdk_pciercx_cfg001_t;
+
+static inline uint64_t BDK_PCIERCX_CFG001(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG001(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000004ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000004ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000004ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG001", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG001(a) bdk_pciercx_cfg001_t
+#define bustype_BDK_PCIERCX_CFG001(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG001(a) "PCIERCX_CFG001"
+#define busnum_BDK_PCIERCX_CFG001(a) (a)
+#define arguments_BDK_PCIERCX_CFG001(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg002
+ *
+ * PCIe RC Class Code/Revision ID Register
+ * This register contains the third 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg002
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg002_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/WRSL) Base class code, writable through PEM()_CFG_WR. However, the application must not
+ change this field.
+ 0x6 = Bridge. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/WRSL) Subclass code, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ 0x4 = PCI-to-PCI */
+ uint32_t pi : 8; /**< [ 15: 8](RO/WRSL) Programming interface, writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+ 0x0 = No standard interface. */
+ uint32_t rid : 8; /**< [ 7: 0](RO/WRSL) Revision ID, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ See MIO_FUS_DAT2[CHIP_ID] for more information. */
+#else /* Word 0 - Little Endian */
+ uint32_t rid : 8; /**< [ 7: 0](RO/WRSL) Revision ID, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ See MIO_FUS_DAT2[CHIP_ID] for more information. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/WRSL) Programming interface, writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+ 0x0 = No standard interface. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/WRSL) Subclass code, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ 0x4 = PCI-to-PCI */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/WRSL) Base class code, writable through PEM()_CFG_WR. However, the application must not
+ change this field.
+ 0x6 = Bridge. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg002_s cn81xx; */
+ struct bdk_pciercx_cfg002_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/WRSL) Base class code, writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/WRSL) Subclass code, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/WRSL) Programming interface, writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t rid : 8; /**< [ 7: 0](RO/WRSL) Revision ID, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ See MIO_FUS_DAT2[CHIP_ID] for more information. */
+#else /* Word 0 - Little Endian */
+ uint32_t rid : 8; /**< [ 7: 0](RO/WRSL) Revision ID, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ See MIO_FUS_DAT2[CHIP_ID] for more information. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/WRSL) Programming interface, writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/WRSL) Subclass code, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/WRSL) Base class code, writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pciercx_cfg002_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/WRSL) Base class code, writable through PEM()_CFG_WR. However, the application must not
+ change this field.
+ 0x6 = Bridge. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/WRSL) Subclass code, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ 0x4 = PCI-to-PCI */
+ uint32_t pi : 8; /**< [ 15: 8](RO/WRSL) Programming interface, writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+ 0x0 = No standard interface. */
+ uint32_t rid : 8; /**< [ 7: 0](RO/WRSL) Revision ID, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ See MIO_FUS_DAT2[CHIP_ID] for more information.
+ 0x0 = Pass 1.0. */
+#else /* Word 0 - Little Endian */
+ uint32_t rid : 8; /**< [ 7: 0](RO/WRSL) Revision ID, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ See MIO_FUS_DAT2[CHIP_ID] for more information.
+ 0x0 = Pass 1.0. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/WRSL) Programming interface, writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+ 0x0 = No standard interface. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/WRSL) Subclass code, writable through PEM()_CFG_WR. However, the application must not change
+ this field.
+ 0x4 = PCI-to-PCI */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/WRSL) Base class code, writable through PEM()_CFG_WR. However, the application must not
+ change this field.
+ 0x6 = Bridge. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg002 bdk_pciercx_cfg002_t;
+
+static inline uint64_t BDK_PCIERCX_CFG002(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG002(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000008ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000008ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000008ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG002", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG002(a) bdk_pciercx_cfg002_t
+#define bustype_BDK_PCIERCX_CFG002(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG002(a) "PCIERCX_CFG002"
+#define busnum_BDK_PCIERCX_CFG002(a) (a)
+#define arguments_BDK_PCIERCX_CFG002(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg003
+ *
+ * PCIe RC BIST, Header Type, Master Latency Timer, Cache Line Size Register
+ * This register contains the fourth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg003
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg003_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bist : 8; /**< [ 31: 24](RO) The BIST register functions are not supported. All 8 bits of the BIST register are hardwired to 0. */
+ uint32_t mfd : 1; /**< [ 23: 23](RO) Multi function device. However, this is a single function device. Therefore,
+ the application must not write a 1 to this bit. */
+ uint32_t chf : 7; /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1. */
+ uint32_t lt : 8; /**< [ 15: 8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+ uint32_t cls : 8; /**< [ 7: 0](R/W) Cache line size. The cache line size register is R/W for legacy compatibility purposes and
+ is not applicable to PCI Express device functionality. */
+#else /* Word 0 - Little Endian */
+ uint32_t cls : 8; /**< [ 7: 0](R/W) Cache line size. The cache line size register is R/W for legacy compatibility purposes and
+ is not applicable to PCI Express device functionality. */
+ uint32_t lt : 8; /**< [ 15: 8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+ uint32_t chf : 7; /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1. */
+ uint32_t mfd : 1; /**< [ 23: 23](RO) Multi function device. However, this is a single function device. Therefore,
+ the application must not write a 1 to this bit. */
+ uint32_t bist : 8; /**< [ 31: 24](RO) The BIST register functions are not supported. All 8 bits of the BIST register are hardwired to 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg003_s cn81xx; */
+ /* CN88XX-specific view: same bit layout, but MFD is RO/WRSL (writable via PEM()_CFG_WR). */
+ struct bdk_pciercx_cfg003_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bist : 8; /**< [ 31: 24](RO) The BIST register functions are not supported. All 8 bits of the BIST register are hardwired to 0. */
+ uint32_t mfd : 1; /**< [ 23: 23](RO/WRSL) Multi function device. The multi function device bit is writable through PEM()_CFG_WR.
+ However, this is a single function device. Therefore, the application must not write a 1
+ to this bit. */
+ uint32_t chf : 7; /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1. */
+ uint32_t lt : 8; /**< [ 15: 8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+ uint32_t cls : 8; /**< [ 7: 0](R/W) Cache line size. The cache line size register is R/W for legacy compatibility purposes and
+ is not applicable to PCI Express device functionality. */
+#else /* Word 0 - Little Endian */
+ uint32_t cls : 8; /**< [ 7: 0](R/W) Cache line size. The cache line size register is R/W for legacy compatibility purposes and
+ is not applicable to PCI Express device functionality. */
+ uint32_t lt : 8; /**< [ 15: 8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+ uint32_t chf : 7; /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1. */
+ uint32_t mfd : 1; /**< [ 23: 23](RO/WRSL) Multi function device. The multi function device bit is writable through PEM()_CFG_WR.
+ However, this is a single function device. Therefore, the application must not write a 1
+ to this bit. */
+ uint32_t bist : 8; /**< [ 31: 24](RO) The BIST register functions are not supported. All 8 bits of the BIST register are hardwired to 0. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* CN83XX-specific view: same bit layout, MFD is plain RO here. */
+ struct bdk_pciercx_cfg003_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bist : 8; /**< [ 31: 24](RO) The BIST register functions are not supported. All 8 bits of the BIST register are hardwired to 0. */
+ uint32_t mfd : 1; /**< [ 23: 23](RO) Multi function device. */
+ uint32_t chf : 7; /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1. */
+ uint32_t lt : 8; /**< [ 15: 8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+ uint32_t cls : 8; /**< [ 7: 0](R/W) Cache line size. The cache line size register is R/W for legacy compatibility purposes and
+ is not applicable to PCI Express device functionality. */
+#else /* Word 0 - Little Endian */
+ uint32_t cls : 8; /**< [ 7: 0](R/W) Cache line size. The cache line size register is R/W for legacy compatibility purposes and
+ is not applicable to PCI Express device functionality. */
+ uint32_t lt : 8; /**< [ 15: 8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+ uint32_t chf : 7; /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1. */
+ uint32_t mfd : 1; /**< [ 23: 23](RO) Multi function device. */
+ uint32_t bist : 8; /**< [ 31: 24](RO) The BIST register functions are not supported. All 8 bits of the BIST register are hardwired to 0. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg003 bdk_pciercx_cfg003_t;
+
+static inline uint64_t BDK_PCIERCX_CFG003(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG003(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG003: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x2000000000cll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG003", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG003(a) bdk_pciercx_cfg003_t
+#define bustype_BDK_PCIERCX_CFG003(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG003(a) "PCIERCX_CFG003"
+#define busnum_BDK_PCIERCX_CFG003(a) (a)
+#define arguments_BDK_PCIERCX_CFG003(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg004
+ *
+ * PCIe RC Base Address 0 Low Register
+ * This register contains the fifth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg004
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg004_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/H) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/H) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg004_s cn81xx; */
+ /* struct bdk_pciercx_cfg004_s cn88xx; */
+ /* CN83XX-specific view: identical 32-bit reserved word, named reserved_0_31. */
+ struct bdk_pciercx_cfg004_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg004 bdk_pciercx_cfg004_t;
+
+static inline uint64_t BDK_PCIERCX_CFG004(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG004(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG004: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000010ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG004", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG004(a) bdk_pciercx_cfg004_t
+#define bustype_BDK_PCIERCX_CFG004(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG004(a) "PCIERCX_CFG004"
+#define busnum_BDK_PCIERCX_CFG004(a) (a)
+#define arguments_BDK_PCIERCX_CFG004(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg005
+ *
+ * PCIe RC Base Address 0 High Register
+ * This register contains the sixth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg005
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg005_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/H) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/H) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg005_s cn81xx; */
+ /* struct bdk_pciercx_cfg005_s cn88xx; */
+ /* CN83XX-specific view: identical 32-bit reserved word, named reserved_0_31. */
+ struct bdk_pciercx_cfg005_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg005 bdk_pciercx_cfg005_t;
+
+static inline uint64_t BDK_PCIERCX_CFG005(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG005(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG005: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000014ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG005", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG005(a) bdk_pciercx_cfg005_t
+#define bustype_BDK_PCIERCX_CFG005(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG005(a) "PCIERCX_CFG005"
+#define busnum_BDK_PCIERCX_CFG005(a) (a)
+#define arguments_BDK_PCIERCX_CFG005(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg006
+ *
+ * PCIe RC Bus Number Register
+ * This register contains the seventh 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg006
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg006_s /* Layout shared by all models (see cn alias below). */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t slt : 8; /**< [ 31: 24](RO) Secondary latency timer. Not applicable to PCI Express, hardwired to 0x0. */
+ uint32_t subbnum : 8; /**< [ 23: 16](R/W) Subordinate bus number.
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ Internal:
+ Note IOB/ECAM snoops on writes to this register. */
+ uint32_t sbnum : 8; /**< [ 15: 8](R/W) Secondary bus number.
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ Internal:
+ Note IOB/ECAM snoops on writes to this register. */
+ uint32_t pbnum : 8; /**< [ 7: 0](R/W) Primary bus number. */
+#else /* Word 0 - Little Endian */
+ uint32_t pbnum : 8; /**< [ 7: 0](R/W) Primary bus number. */
+ uint32_t sbnum : 8; /**< [ 15: 8](R/W) Secondary bus number.
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ Internal:
+ Note IOB/ECAM snoops on writes to this register. */
+ uint32_t subbnum : 8; /**< [ 23: 16](R/W) Subordinate bus number.
+ If 0x0 no configuration accesses are forwarded to the secondary bus.
+
+ Internal:
+ Note IOB/ECAM snoops on writes to this register. */
+ uint32_t slt : 8; /**< [ 31: 24](RO) Secondary latency timer. Not applicable to PCI Express, hardwired to 0x0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg006_s cn; */
+};
+typedef union bdk_pciercx_cfg006 bdk_pciercx_cfg006_t;
+
+static inline uint64_t BDK_PCIERCX_CFG006(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG006(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG006: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000018ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG006", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG006(a) bdk_pciercx_cfg006_t
+#define bustype_BDK_PCIERCX_CFG006(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG006(a) "PCIERCX_CFG006"
+#define busnum_BDK_PCIERCX_CFG006(a) (a)
+#define arguments_BDK_PCIERCX_CFG006(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg007
+ *
+ * PCIe RC I/O Base and I/O Limit/Secondary Status Register
+ * This register contains the eighth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg007
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg007_s /* Layout shared by all models (see cn alias below). */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dpe : 1; /**< [ 31: 31](R/W1C/H) Detected parity error. */
+ uint32_t sse : 1; /**< [ 30: 30](R/W1C/H) Signaled system error. */
+ uint32_t rma : 1; /**< [ 29: 29](R/W1C/H) Received master abort. */
+ uint32_t rta : 1; /**< [ 28: 28](R/W1C/H) Received target abort. */
+ uint32_t sta : 1; /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+ uint32_t devt : 2; /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t mdpe : 1; /**< [ 24: 24](R/W1C/H) Master data parity error */
+ uint32_t fbb : 1; /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22 : 1;
+ uint32_t m66 : 1; /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_16_20 : 5;
+ uint32_t lio_limi : 4; /**< [ 15: 12](R/W) I/O space limit. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t io32b : 1; /**< [ 8: 8](RO/H) 32-bit I/O space. */
+ uint32_t lio_base : 4; /**< [ 7: 4](R/W) I/O space base. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t io32a : 1; /**< [ 0: 0](RO/WRSL) 32-bit I/O space.
+ 0 = 16-bit I/O addressing.
+ 1 = 32-bit I/O addressing.
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to bit 8 of this register. */
+#else /* Word 0 - Little Endian */
+ uint32_t io32a : 1; /**< [ 0: 0](RO/WRSL) 32-bit I/O space.
+ 0 = 16-bit I/O addressing.
+ 1 = 32-bit I/O addressing.
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to bit 8 of this register. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t lio_base : 4; /**< [ 7: 4](R/W) I/O space base. */
+ uint32_t io32b : 1; /**< [ 8: 8](RO/H) 32-bit I/O space. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t lio_limi : 4; /**< [ 15: 12](R/W) I/O space limit. */
+ uint32_t reserved_16_20 : 5;
+ uint32_t m66 : 1; /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t reserved_22 : 1;
+ uint32_t fbb : 1; /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t mdpe : 1; /**< [ 24: 24](R/W1C/H) Master data parity error */
+ uint32_t devt : 2; /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to 0. */
+ uint32_t sta : 1; /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+ uint32_t rta : 1; /**< [ 28: 28](R/W1C/H) Received target abort. */
+ uint32_t rma : 1; /**< [ 29: 29](R/W1C/H) Received master abort. */
+ uint32_t sse : 1; /**< [ 30: 30](R/W1C/H) Signaled system error. */
+ uint32_t dpe : 1; /**< [ 31: 31](R/W1C/H) Detected parity error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg007_s cn; */
+};
+typedef union bdk_pciercx_cfg007 bdk_pciercx_cfg007_t;
+
+static inline uint64_t BDK_PCIERCX_CFG007(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG007(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG007: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x2000000001cll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG007", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG007(a) bdk_pciercx_cfg007_t
+#define bustype_BDK_PCIERCX_CFG007(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG007(a) "PCIERCX_CFG007"
+#define busnum_BDK_PCIERCX_CFG007(a) (a)
+#define arguments_BDK_PCIERCX_CFG007(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg008
+ *
+ * PCIe RC Memory Base and Memory Limit Register
+ * This register contains the ninth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg008
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg008_s /* Layout shared by all models (see cn alias below). */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ml_addr : 12; /**< [ 31: 20](R/W) Memory limit address. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t mb_addr : 12; /**< [ 15: 4](R/W) Memory base address. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t mb_addr : 12; /**< [ 15: 4](R/W) Memory base address. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t ml_addr : 12; /**< [ 31: 20](R/W) Memory limit address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg008_s cn; */
+};
+typedef union bdk_pciercx_cfg008 bdk_pciercx_cfg008_t;
+
+static inline uint64_t BDK_PCIERCX_CFG008(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG008(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG008: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000020ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG008", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG008(a) bdk_pciercx_cfg008_t
+#define bustype_BDK_PCIERCX_CFG008(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG008(a) "PCIERCX_CFG008"
+#define busnum_BDK_PCIERCX_CFG008(a) (a)
+#define arguments_BDK_PCIERCX_CFG008(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg009
+ *
+ * PCIe RC Prefetchable Memory and Limit Register
+ * This register contains the tenth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg009
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg009_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lmem_limit : 12; /**< [ 31: 20](R/W) Upper 12 bits of 32-bit prefetchable memory end address. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t mem64b : 1; /**< [ 16: 16](RO/H) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing. */
+ uint32_t lmem_base : 12; /**< [ 15: 4](R/W) Upper 12 bits of 32-bit prefetchable memory start address. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t mem64a : 1; /**< [ 0: 0](RO/WRSL) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing.
+
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to bit 16 of this register. */
+#else /* Word 0 - Little Endian */
+ uint32_t mem64a : 1; /**< [ 0: 0](RO/WRSL) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing.
+
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to bit 16 of this register. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t lmem_base : 12; /**< [ 15: 4](R/W) Upper 12 bits of 32-bit prefetchable memory start address. */
+ uint32_t mem64b : 1; /**< [ 16: 16](RO/H) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t lmem_limit : 12; /**< [ 31: 20](R/W) Upper 12 bits of 32-bit prefetchable memory end address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg009_s cn81xx; */
+ /* struct bdk_pciercx_cfg009_s cn88xx; */
+ /* CN83XX-specific view: same bit layout, but MEM64B is plain RO (not RO/H). */
+ struct bdk_pciercx_cfg009_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lmem_limit : 12; /**< [ 31: 20](R/W) Upper 12 bits of 32-bit prefetchable memory end address. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t mem64b : 1; /**< [ 16: 16](RO) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing. */
+ uint32_t lmem_base : 12; /**< [ 15: 4](R/W) Upper 12 bits of 32-bit prefetchable memory start address. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t mem64a : 1; /**< [ 0: 0](RO/WRSL) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing.
+
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to bit 16 of this register. */
+#else /* Word 0 - Little Endian */
+ uint32_t mem64a : 1; /**< [ 0: 0](RO/WRSL) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing.
+
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to bit 16 of this register. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t lmem_base : 12; /**< [ 15: 4](R/W) Upper 12 bits of 32-bit prefetchable memory start address. */
+ uint32_t mem64b : 1; /**< [ 16: 16](RO) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t lmem_limit : 12; /**< [ 31: 20](R/W) Upper 12 bits of 32-bit prefetchable memory end address. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg009 bdk_pciercx_cfg009_t;
+
+static inline uint64_t BDK_PCIERCX_CFG009(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG009(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG009: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000024ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG009", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG009(a) bdk_pciercx_cfg009_t
+#define bustype_BDK_PCIERCX_CFG009(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG009(a) "PCIERCX_CFG009"
+#define busnum_BDK_PCIERCX_CFG009(a) (a)
+#define arguments_BDK_PCIERCX_CFG009(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg010
+ *
+ * PCIe RC Prefetchable Base Upper 32 Bits Register
+ * This register contains the eleventh 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg010
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg010_s /* Layout shared by all models (see cn alias below). */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t umem_base : 32; /**< [ 31: 0](R/W) Upper 32 bits of base address of prefetchable memory space. Used only when 64-bit
+ prefetchable memory addressing is enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t umem_base : 32; /**< [ 31: 0](R/W) Upper 32 bits of base address of prefetchable memory space. Used only when 64-bit
+ prefetchable memory addressing is enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg010_s cn; */
+};
+typedef union bdk_pciercx_cfg010 bdk_pciercx_cfg010_t;
+
+static inline uint64_t BDK_PCIERCX_CFG010(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG010(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG010: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000028ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG010", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG010(a) bdk_pciercx_cfg010_t
+#define bustype_BDK_PCIERCX_CFG010(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG010(a) "PCIERCX_CFG010"
+#define busnum_BDK_PCIERCX_CFG010(a) (a)
+#define arguments_BDK_PCIERCX_CFG010(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg011
+ *
+ * PCIe RC Prefetchable Limit Upper 32 Bits Register
+ * This register contains the twelfth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg011
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg011_s /* Layout shared by all models (see cn alias below). */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t umem_limit : 32; /**< [ 31: 0](R/W) Upper 32 bits of limit address of prefetchable memory space. Used only when 64-bit
+ prefetchable memory addressing is enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t umem_limit : 32; /**< [ 31: 0](R/W) Upper 32 bits of limit address of prefetchable memory space. Used only when 64-bit
+ prefetchable memory addressing is enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg011_s cn; */
+};
+typedef union bdk_pciercx_cfg011 bdk_pciercx_cfg011_t;
+
+static inline uint64_t BDK_PCIERCX_CFG011(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG011(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG011: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x2000000002cll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG011", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG011(a) bdk_pciercx_cfg011_t
+#define bustype_BDK_PCIERCX_CFG011(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG011(a) "PCIERCX_CFG011"
+#define busnum_BDK_PCIERCX_CFG011(a) (a)
+#define arguments_BDK_PCIERCX_CFG011(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg012
+ *
+ * PCIe RC I/O Base and Limit Upper 16 Bits Register
+ * This register contains the thirteenth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg012
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg012_s /* Layout shared by all models (see cn alias below). */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t uio_limit : 16; /**< [ 31: 16](R/W) Upper 16 bits of I/O limit (if 32-bit I/O decoding is supported for devices on the secondary side). */
+ uint32_t uio_base : 16; /**< [ 15: 0](R/W) Upper 16 bits of I/O base (if 32-bit I/O decoding is supported for devices on the secondary side). */
+#else /* Word 0 - Little Endian */
+ uint32_t uio_base : 16; /**< [ 15: 0](R/W) Upper 16 bits of I/O base (if 32-bit I/O decoding is supported for devices on the secondary side). */
+ uint32_t uio_limit : 16; /**< [ 31: 16](R/W) Upper 16 bits of I/O limit (if 32-bit I/O decoding is supported for devices on the secondary side). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg012_s cn; */
+};
+typedef union bdk_pciercx_cfg012 bdk_pciercx_cfg012_t;
+
+static inline uint64_t BDK_PCIERCX_CFG012(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG012(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG012: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000030ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG012", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG012(a) bdk_pciercx_cfg012_t
+#define bustype_BDK_PCIERCX_CFG012(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG012(a) "PCIERCX_CFG012"
+#define busnum_BDK_PCIERCX_CFG012(a) (a)
+#define arguments_BDK_PCIERCX_CFG012(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg013
+ *
+ * PCIe RC Capability Pointer Register
+ * This register contains the fourteenth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg013
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg013_s /* Layout shared by all models (see cn alias below). */
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t cp : 8; /**< [ 7: 0](RO/WRSL) First capability pointer. Points to power management capability structure by default,
+ writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t cp : 8; /**< [ 7: 0](RO/WRSL) First capability pointer. Points to power management capability structure by default,
+ writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg013_s cn; */
+};
+typedef union bdk_pciercx_cfg013 bdk_pciercx_cfg013_t;
+
+static inline uint64_t BDK_PCIERCX_CFG013(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG013(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG013: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000034ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG013", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG013(a) bdk_pciercx_cfg013_t
+#define bustype_BDK_PCIERCX_CFG013(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG013(a) "PCIERCX_CFG013"
+#define busnum_BDK_PCIERCX_CFG013(a) (a)
+#define arguments_BDK_PCIERCX_CFG013(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg014
+ *
+ * PCIe RC Expansion ROM Base Address Register
+ * This register contains the fifteenth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg014
+{
+ uint32_t u; /**< Word-wide view of the whole register. */
+ struct bdk_pciercx_cfg014_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/WRSL/H) Writable, but unused. */
+#else /* Word 0 - Little Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/WRSL/H) Writable, but unused. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg014_s cn81xx; */
+ /* struct bdk_pciercx_cfg014_s cn88xx; */
+ /* CN83XX-specific view: same layout, field is RO/WRSL (no /H) and documented as reserved. */
+ struct bdk_pciercx_cfg014_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/WRSL) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/WRSL) Reserved. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg014 bdk_pciercx_cfg014_t;
+
+static inline uint64_t BDK_PCIERCX_CFG014(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG014(unsigned long a)
+{
+    /* Address of PCIERC(a)_CFG014: fixed offset plus one 4 GB window per RC. */
+    const uint64_t reg_offset = 0x20000000038ll;
+    const uint64_t rc_stride  = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return reg_offset + rc_stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return reg_offset + rc_stride * (a & 0x7);
+    /* NOTE(review): assumes __bdk_csr_fatal() does not return -- confirm in bdk-csr.h. */
+    __bdk_csr_fatal("PCIERCX_CFG014", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG014(a) bdk_pciercx_cfg014_t
+#define bustype_BDK_PCIERCX_CFG014(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG014(a) "PCIERCX_CFG014"
+#define busnum_BDK_PCIERCX_CFG014(a) (a)
+#define arguments_BDK_PCIERCX_CFG014(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg015
+ *
+ * PCIe RC Interrupt Line Register/Interrupt Pin/Bridge Control Register
+ * This register contains the sixteenth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg015
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg015_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31 : 4;
+        uint32_t dtsees : 1; /**< [ 27: 27](RO) Discard timer SERR enable status. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t dts : 1; /**< [ 26: 26](RO) Discard timer status. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t sdt : 1; /**< [ 25: 25](RO) Secondary discard timer. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t pdt : 1; /**< [ 24: 24](RO) Primary discard timer. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t fbbe : 1; /**< [ 23: 23](RO) Fast back-to-back transactions enable. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t sbrst : 1; /**< [ 22: 22](R/W) Secondary bus reset. Hot reset. Causes TS1s with the hot reset bit to be sent to the link
+                                               partner. When set, software should wait 2 ms before clearing. The link partner normally
+                                               responds by sending TS1s with the hot reset bit set, which will cause a link down event.
+                                               Refer to 'PCIe Link-Down Reset in RC Mode' section. */
+        uint32_t mam : 1; /**< [ 21: 21](RO) Master abort mode. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t vga16d : 1; /**< [ 20: 20](RO) VGA 16-bit decode. */
+        uint32_t vgae : 1; /**< [ 19: 19](RO) VGA enable. */
+        uint32_t isae : 1; /**< [ 18: 18](R/W) ISA enable. */
+        uint32_t see : 1; /**< [ 17: 17](R/W) SERR enable. */
+        uint32_t pere : 1; /**< [ 16: 16](R/W) Parity error response enable. */
+        uint32_t inta : 8; /**< [ 15: 8](RO/WRSL) Interrupt pin. Identifies the legacy interrupt message that the device (or device
+                                               function) uses. The interrupt pin register is writable through
+                                               PEM()_CFG_WR. In a single-function configuration, only INTA is used. Therefore, the
+                                               application must not change this field. */
+        uint32_t il : 8; /**< [ 7: 0](R/W) Interrupt line. */
+#else /* Word 0 - Little Endian */
+        uint32_t il : 8; /**< [ 7: 0](R/W) Interrupt line. */
+        uint32_t inta : 8; /**< [ 15: 8](RO/WRSL) Interrupt pin. Identifies the legacy interrupt message that the device (or device
+                                               function) uses. The interrupt pin register is writable through
+                                               PEM()_CFG_WR. In a single-function configuration, only INTA is used. Therefore, the
+                                               application must not change this field. */
+        uint32_t pere : 1; /**< [ 16: 16](R/W) Parity error response enable. */
+        uint32_t see : 1; /**< [ 17: 17](R/W) SERR enable. */
+        uint32_t isae : 1; /**< [ 18: 18](R/W) ISA enable. */
+        uint32_t vgae : 1; /**< [ 19: 19](RO) VGA enable. */
+        uint32_t vga16d : 1; /**< [ 20: 20](RO) VGA 16-bit decode. */
+        uint32_t mam : 1; /**< [ 21: 21](RO) Master abort mode. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t sbrst : 1; /**< [ 22: 22](R/W) Secondary bus reset. Hot reset. Causes TS1s with the hot reset bit to be sent to the link
+                                               partner. When set, software should wait 2 ms before clearing. The link partner normally
+                                               responds by sending TS1s with the hot reset bit set, which will cause a link down event.
+                                               Refer to 'PCIe Link-Down Reset in RC Mode' section. */
+        uint32_t fbbe : 1; /**< [ 23: 23](RO) Fast back-to-back transactions enable. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t pdt : 1; /**< [ 24: 24](RO) Primary discard timer. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t sdt : 1; /**< [ 25: 25](RO) Secondary discard timer. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t dts : 1; /**< [ 26: 26](RO) Discard timer status. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t dtsees : 1; /**< [ 27: 27](RO) Discard timer SERR enable status. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg015_s cn81xx; */
+    /* struct bdk_pciercx_cfg015_s cn88xx; */
+    /* CN83XX variant: [INTA] and [IL] are read-only (no WRSL / R/W access);
+       otherwise the layout matches the generic structure above. */
+    struct bdk_pciercx_cfg015_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31 : 4;
+        uint32_t dtsees : 1; /**< [ 27: 27](RO) Discard timer SERR enable status. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t dts : 1; /**< [ 26: 26](RO) Discard timer status. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t sdt : 1; /**< [ 25: 25](RO) Secondary discard timer. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t pdt : 1; /**< [ 24: 24](RO) Primary discard timer. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t fbbe : 1; /**< [ 23: 23](RO) Fast back-to-back transactions enable. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t sbrst : 1; /**< [ 22: 22](R/W) Secondary bus reset. Hot reset. Causes TS1s with the hot reset bit to be sent to the link
+                                               partner. When set, software should wait 2 ms before clearing. The link partner normally
+                                               responds by sending TS1s with the hot reset bit set, which will cause a link down event.
+                                               Refer to 'PCIe Link-Down Reset in RC Mode' section. */
+        uint32_t mam : 1; /**< [ 21: 21](RO) Master abort mode. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t vga16d : 1; /**< [ 20: 20](RO) VGA 16-bit decode. */
+        uint32_t vgae : 1; /**< [ 19: 19](RO) VGA enable. */
+        uint32_t isae : 1; /**< [ 18: 18](R/W) ISA enable. */
+        uint32_t see : 1; /**< [ 17: 17](R/W) SERR enable. */
+        uint32_t pere : 1; /**< [ 16: 16](R/W) Parity error response enable. */
+        uint32_t inta : 8; /**< [ 15: 8](RO) Interrupt pin (not supported). */
+        uint32_t il : 8; /**< [ 7: 0](RO) Interrupt line. */
+#else /* Word 0 - Little Endian */
+        uint32_t il : 8; /**< [ 7: 0](RO) Interrupt line. */
+        uint32_t inta : 8; /**< [ 15: 8](RO) Interrupt pin (not supported). */
+        uint32_t pere : 1; /**< [ 16: 16](R/W) Parity error response enable. */
+        uint32_t see : 1; /**< [ 17: 17](R/W) SERR enable. */
+        uint32_t isae : 1; /**< [ 18: 18](R/W) ISA enable. */
+        uint32_t vgae : 1; /**< [ 19: 19](RO) VGA enable. */
+        uint32_t vga16d : 1; /**< [ 20: 20](RO) VGA 16-bit decode. */
+        uint32_t mam : 1; /**< [ 21: 21](RO) Master abort mode. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t sbrst : 1; /**< [ 22: 22](R/W) Secondary bus reset. Hot reset. Causes TS1s with the hot reset bit to be sent to the link
+                                               partner. When set, software should wait 2 ms before clearing. The link partner normally
+                                               responds by sending TS1s with the hot reset bit set, which will cause a link down event.
+                                               Refer to 'PCIe Link-Down Reset in RC Mode' section. */
+        uint32_t fbbe : 1; /**< [ 23: 23](RO) Fast back-to-back transactions enable. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t pdt : 1; /**< [ 24: 24](RO) Primary discard timer. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t sdt : 1; /**< [ 25: 25](RO) Secondary discard timer. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t dts : 1; /**< [ 26: 26](RO) Discard timer status. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t dtsees : 1; /**< [ 27: 27](RO) Discard timer SERR enable status. Not applicable to PCI Express, hardwired to 0. */
+        uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg015 bdk_pciercx_cfg015_t;
+
+static inline uint64_t BDK_PCIERCX_CFG015(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG015(unsigned long a)
+{
+    /* CSR address of PCIERC(a)_CFG015: per-port regions are 4 GB apart. */
+    const uint64_t base = 0x2000000003cll;
+    const uint64_t stride = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+        return base + stride * (a & 0x7);
+    /* Out-of-range port index for the running model. */
+    __bdk_csr_fatal("PCIERCX_CFG015", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG015(a) bdk_pciercx_cfg015_t
+#define bustype_BDK_PCIERCX_CFG015(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG015(a) "PCIERCX_CFG015"
+#define busnum_BDK_PCIERCX_CFG015(a) (a)
+#define arguments_BDK_PCIERCX_CFG015(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg016
+ *
+ * PCIe RC Power Management Capability ID Register
+ * This register contains the seventeenth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg016
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg016_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t pmes : 5; /**< [ 31: 27](RO/WRSL/H) PME_Support. A value of 0x0 for any bit indicates that the device (or function) is not
+                                               capable of generating PME messages while in that power state:
+
+                                               _ Bit 11: If set, PME Messages can be generated from D0.
+
+                                               _ Bit 12: If set, PME Messages can be generated from D1.
+
+                                               _ Bit 13: If set, PME Messages can be generated from D2.
+
+                                               _ Bit 14: If set, PME Messages can be generated from D3hot.
+
+                                               _ Bit 15: If set, PME Messages can be generated from D3cold.
+
+                                               The PME_Support field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t d2s : 1; /**< [ 26: 26](RO/WRSL) D2 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t d1s : 1; /**< [ 25: 25](RO/WRSL) D1 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t auxc : 3; /**< [ 24: 22](RO/WRSL) AUX current, writable through PEM()_CFG_WR. However, the application must not change
+                                               this field. */
+        uint32_t dsi : 1; /**< [ 21: 21](RO/WRSL) Device specific initialization (DSI), writable through PEM()_CFG_WR. However, the
+                                               application must not change this field. */
+        uint32_t reserved_20 : 1;
+        uint32_t pme_clock : 1; /**< [ 19: 19](RO) PME clock, hardwired to 0. */
+        uint32_t pmsv : 3; /**< [ 18: 16](RO/WRSL) Power management specification version, writable through
+                                               PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the EA capabilities by default, writable through
+                                               PEM()_CFG_WR. */
+        uint32_t pmcid : 8; /**< [ 7: 0](RO) Power management capability ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t pmcid : 8; /**< [ 7: 0](RO) Power management capability ID. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the EA capabilities by default, writable through
+                                               PEM()_CFG_WR. */
+        uint32_t pmsv : 3; /**< [ 18: 16](RO/WRSL) Power management specification version, writable through
+                                               PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pme_clock : 1; /**< [ 19: 19](RO) PME clock, hardwired to 0. */
+        uint32_t reserved_20 : 1;
+        uint32_t dsi : 1; /**< [ 21: 21](RO/WRSL) Device specific initialization (DSI), writable through PEM()_CFG_WR. However, the
+                                               application must not change this field. */
+        uint32_t auxc : 3; /**< [ 24: 22](RO/WRSL) AUX current, writable through PEM()_CFG_WR. However, the application must not change
+                                               this field. */
+        uint32_t d1s : 1; /**< [ 25: 25](RO/WRSL) D1 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t d2s : 1; /**< [ 26: 26](RO/WRSL) D2 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pmes : 5; /**< [ 31: 27](RO/WRSL/H) PME_Support. A value of 0x0 for any bit indicates that the device (or function) is not
+                                               capable of generating PME messages while in that power state:
+
+                                               _ Bit 11: If set, PME Messages can be generated from D0.
+
+                                               _ Bit 12: If set, PME Messages can be generated from D1.
+
+                                               _ Bit 13: If set, PME Messages can be generated from D2.
+
+                                               _ Bit 14: If set, PME Messages can be generated from D3hot.
+
+                                               _ Bit 15: If set, PME Messages can be generated from D3cold.
+
+                                               The PME_Support field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg016_s cn81xx; */
+    /* CN88XX variant: [NCP] points to the MSI capabilities by default rather
+       than the EA capabilities; the bit layout is otherwise identical. */
+    struct bdk_pciercx_cfg016_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t pmes : 5; /**< [ 31: 27](RO/WRSL/H) PME_Support. A value of 0x0 for any bit indicates that the device (or function) is not
+                                               capable of generating PME messages while in that power state:
+
+                                               _ Bit 11: If set, PME Messages can be generated from D0.
+
+                                               _ Bit 12: If set, PME Messages can be generated from D1.
+
+                                               _ Bit 13: If set, PME Messages can be generated from D2.
+
+                                               _ Bit 14: If set, PME Messages can be generated from D3hot.
+
+                                               _ Bit 15: If set, PME Messages can be generated from D3cold.
+
+                                               The PME_Support field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t d2s : 1; /**< [ 26: 26](RO/WRSL) D2 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t d1s : 1; /**< [ 25: 25](RO/WRSL) D1 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t auxc : 3; /**< [ 24: 22](RO/WRSL) AUX current, writable through PEM()_CFG_WR. However, the application must not change
+                                               this field. */
+        uint32_t dsi : 1; /**< [ 21: 21](RO/WRSL) Device specific initialization (DSI), writable through PEM()_CFG_WR. However, the
+                                               application must not change this field. */
+        uint32_t reserved_20 : 1;
+        uint32_t pme_clock : 1; /**< [ 19: 19](RO) PME clock, hardwired to 0. */
+        uint32_t pmsv : 3; /**< [ 18: 16](RO/WRSL) Power management specification version, writable through
+                                               PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the MSI capabilities by default, writable through
+                                               PEM()_CFG_WR. */
+        uint32_t pmcid : 8; /**< [ 7: 0](RO) Power management capability ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t pmcid : 8; /**< [ 7: 0](RO) Power management capability ID. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the MSI capabilities by default, writable through
+                                               PEM()_CFG_WR. */
+        uint32_t pmsv : 3; /**< [ 18: 16](RO/WRSL) Power management specification version, writable through
+                                               PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pme_clock : 1; /**< [ 19: 19](RO) PME clock, hardwired to 0. */
+        uint32_t reserved_20 : 1;
+        uint32_t dsi : 1; /**< [ 21: 21](RO/WRSL) Device specific initialization (DSI), writable through PEM()_CFG_WR. However, the
+                                               application must not change this field. */
+        uint32_t auxc : 3; /**< [ 24: 22](RO/WRSL) AUX current, writable through PEM()_CFG_WR. However, the application must not change
+                                               this field. */
+        uint32_t d1s : 1; /**< [ 25: 25](RO/WRSL) D1 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t d2s : 1; /**< [ 26: 26](RO/WRSL) D2 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pmes : 5; /**< [ 31: 27](RO/WRSL/H) PME_Support. A value of 0x0 for any bit indicates that the device (or function) is not
+                                               capable of generating PME messages while in that power state:
+
+                                               _ Bit 11: If set, PME Messages can be generated from D0.
+
+                                               _ Bit 12: If set, PME Messages can be generated from D1.
+
+                                               _ Bit 13: If set, PME Messages can be generated from D2.
+
+                                               _ Bit 14: If set, PME Messages can be generated from D3hot.
+
+                                               _ Bit 15: If set, PME Messages can be generated from D3cold.
+
+                                               The PME_Support field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_pciercx_cfg016_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t pmes : 5; /**< [ 31: 27](RO/WRSL/H) PME_Support. A value of 0x0 for any bit indicates that the device (or function) is not
+                                               capable of generating PME messages while in that power state:
+
+                                               _ Bit 11: If set, PME Messages can be generated from D0.
+
+                                               _ Bit 12: If set, PME Messages can be generated from D1.
+
+                                               _ Bit 13: If set, PME Messages can be generated from D2.
+
+                                               _ Bit 14: If set, PME Messages can be generated from D3hot.
+
+                                               _ Bit 15: If set, PME Messages can be generated from D3cold.
+
+                                               The PME_Support field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t d2s : 1; /**< [ 26: 26](RO/WRSL) D2 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t d1s : 1; /**< [ 25: 25](RO/WRSL) D1 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t auxc : 3; /**< [ 24: 22](RO/WRSL) AUX current, writable through PEM()_CFG_WR. However, the application must not change
+                                               this field. */
+        uint32_t dsi : 1; /**< [ 21: 21](RO/WRSL) Device specific initialization (DSI), writable through PEM()_CFG_WR. However, the
+                                               application must not change this field. */
+        uint32_t reserved_20 : 1;
+        uint32_t pme_clock : 1; /**< [ 19: 19](RO) PME clock, hardwired to 0. */
+        uint32_t pmsv : 3; /**< [ 18: 16](RO/WRSL) Power management specification version, writable through
+                                               PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the EA capabilities by default, writable
+                                               through PEM()_CFG_WR. */
+        uint32_t pmcid : 8; /**< [ 7: 0](RO) Power management capability ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t pmcid : 8; /**< [ 7: 0](RO) Power management capability ID. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the EA capabilities by default, writable
+                                               through PEM()_CFG_WR. */
+        uint32_t pmsv : 3; /**< [ 18: 16](RO/WRSL) Power management specification version, writable through
+                                               PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pme_clock : 1; /**< [ 19: 19](RO) PME clock, hardwired to 0. */
+        uint32_t reserved_20 : 1;
+        uint32_t dsi : 1; /**< [ 21: 21](RO/WRSL) Device specific initialization (DSI), writable through PEM()_CFG_WR. However, the
+                                               application must not change this field. */
+        uint32_t auxc : 3; /**< [ 24: 22](RO/WRSL) AUX current, writable through PEM()_CFG_WR. However, the application must not change
+                                               this field. */
+        uint32_t d1s : 1; /**< [ 25: 25](RO/WRSL) D1 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t d2s : 1; /**< [ 26: 26](RO/WRSL) D2 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pmes : 5; /**< [ 31: 27](RO/WRSL/H) PME_Support. A value of 0x0 for any bit indicates that the device (or function) is not
+                                               capable of generating PME messages while in that power state:
+
+                                               _ Bit 11: If set, PME Messages can be generated from D0.
+
+                                               _ Bit 12: If set, PME Messages can be generated from D1.
+
+                                               _ Bit 13: If set, PME Messages can be generated from D2.
+
+                                               _ Bit 14: If set, PME Messages can be generated from D3hot.
+
+                                               _ Bit 15: If set, PME Messages can be generated from D3cold.
+
+                                               The PME_Support field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg016 bdk_pciercx_cfg016_t;
+
+static inline uint64_t BDK_PCIERCX_CFG016(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG016(unsigned long a)
+{
+    /* CSR address of PCIERC(a)_CFG016: per-port regions are 4 GB apart. */
+    const uint64_t base = 0x20000000040ll;
+    const uint64_t stride = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+        return base + stride * (a & 0x7);
+    /* Out-of-range port index for the running model. */
+    __bdk_csr_fatal("PCIERCX_CFG016", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG016(a) bdk_pciercx_cfg016_t
+#define bustype_BDK_PCIERCX_CFG016(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG016(a) "PCIERCX_CFG016"
+#define busnum_BDK_PCIERCX_CFG016(a) (a)
+#define arguments_BDK_PCIERCX_CFG016(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg017
+ *
+ * PCIe RC Power Management Control and Status Register
+ * This register contains the eighteenth 32-bits of PCIe type 1 configuration space.
+ * The layout is common to all chip models (single "cn" variant below).
+ */
+union bdk_pciercx_cfg017
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg017_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t pmdia : 8; /**< [ 31: 24](RO) Data register for additional information (not supported). */
+        uint32_t bpccee : 1; /**< [ 23: 23](RO) Bus power/clock control enable, hardwired to 0. */
+        uint32_t bd3h : 1; /**< [ 22: 22](RO) B2/B3 support, hardwired to 0. */
+        uint32_t reserved_16_21 : 6;
+        uint32_t pmess : 1; /**< [ 15: 15](R/W1C/H) PME status. Indicates whether or not a previously enabled PME event occurred. */
+        uint32_t pmedsia : 2; /**< [ 14: 13](RO) Data scale (not supported). */
+        uint32_t pmds : 4; /**< [ 12: 9](RO) Data select (not supported). */
+        uint32_t pmeens : 1; /**< [ 8: 8](R/W) PME enable. A value of 1 indicates that the device is enabled to generate PME. */
+        uint32_t reserved_4_7 : 4;
+        uint32_t nsr : 1; /**< [ 3: 3](RO/WRSL) No soft reset, writable through PEM()_CFG_WR. However, the application must not change
+                                               this field. */
+        uint32_t reserved_2 : 1;
+        uint32_t ps : 2; /**< [ 1: 0](R/W/H) Power state. Controls the device power state:
+                                               0x0 = D0.
+                                               0x1 = D1.
+                                               0x2 = D2.
+                                               0x3 = D3.
+
+                                               The written value is ignored if the specific state is not supported. */
+#else /* Word 0 - Little Endian */
+        uint32_t ps : 2; /**< [ 1: 0](R/W/H) Power state. Controls the device power state:
+                                               0x0 = D0.
+                                               0x1 = D1.
+                                               0x2 = D2.
+                                               0x3 = D3.
+
+                                               The written value is ignored if the specific state is not supported. */
+        uint32_t reserved_2 : 1;
+        uint32_t nsr : 1; /**< [ 3: 3](RO/WRSL) No soft reset, writable through PEM()_CFG_WR. However, the application must not change
+                                               this field. */
+        uint32_t reserved_4_7 : 4;
+        uint32_t pmeens : 1; /**< [ 8: 8](R/W) PME enable. A value of 1 indicates that the device is enabled to generate PME. */
+        uint32_t pmds : 4; /**< [ 12: 9](RO) Data select (not supported). */
+        uint32_t pmedsia : 2; /**< [ 14: 13](RO) Data scale (not supported). */
+        uint32_t pmess : 1; /**< [ 15: 15](R/W1C/H) PME status. Indicates whether or not a previously enabled PME event occurred. */
+        uint32_t reserved_16_21 : 6;
+        uint32_t bd3h : 1; /**< [ 22: 22](RO) B2/B3 support, hardwired to 0. */
+        uint32_t bpccee : 1; /**< [ 23: 23](RO) Bus power/clock control enable, hardwired to 0. */
+        uint32_t pmdia : 8; /**< [ 31: 24](RO) Data register for additional information (not supported). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg017_s cn; */
+};
+typedef union bdk_pciercx_cfg017 bdk_pciercx_cfg017_t;
+
+static inline uint64_t BDK_PCIERCX_CFG017(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG017(unsigned long a)
+{
+    /* CSR address of PCIERC(a)_CFG017: per-port regions are 4 GB apart. */
+    const uint64_t base = 0x20000000044ll;
+    const uint64_t stride = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+        return base + stride * (a & 0x7);
+    /* Out-of-range port index for the running model. */
+    __bdk_csr_fatal("PCIERCX_CFG017", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG017(a) bdk_pciercx_cfg017_t
+#define bustype_BDK_PCIERCX_CFG017(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG017(a) "PCIERCX_CFG017"
+#define busnum_BDK_PCIERCX_CFG017(a) (a)
+#define arguments_BDK_PCIERCX_CFG017(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg020
+ *
+ * PCIe RC MSI Capability ID/MSI Next Item Pointer/MSI Control Register
+ * This register contains the twenty-first 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg020
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg020_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31 : 16;
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the PCIe capabilities list by default, writable through
+                                               PEM()_CFG_WR. */
+        uint32_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_7 : 8;
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the PCIe capabilities list by default, writable through
+                                               PEM()_CFG_WR. */
+        uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* CN81XX/CN83XX: at this offset the config space holds the first DW of the
+       PCI Enhanced Allocation (EA) capability ([EACID], entry count). */
+    struct bdk_pciercx_cfg020_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ea_rsvd : 10; /**< [ 31: 22](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t num_entries : 6; /**< [ 21: 16](RO/WRSL) Number of entries following the first DW of the capability.
+                                               This field is writable through PEM()_CFG_WR. However, the application must not change this
+                                               field. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the PCIe capabilities list by default, writable through
+                                               PEM()_CFG_WR. */
+        uint32_t eacid : 8; /**< [ 7: 0](RO/WRSL) Enhanced allocation capability ID.
+                                               This field is writable through PEM()_CFG_WR. However, the application must not change this
+                                               field. */
+#else /* Word 0 - Little Endian */
+        uint32_t eacid : 8; /**< [ 7: 0](RO/WRSL) Enhanced allocation capability ID.
+                                               This field is writable through PEM()_CFG_WR. However, the application must not change this
+                                               field. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the PCIe capabilities list by default, writable through
+                                               PEM()_CFG_WR. */
+        uint32_t num_entries : 6; /**< [ 21: 16](RO/WRSL) Number of entries following the first DW of the capability.
+                                               This field is writable through PEM()_CFG_WR. However, the application must not change this
+                                               field. */
+        uint32_t ea_rsvd : 10; /**< [ 31: 22](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* CN88XX: at this offset the config space holds the MSI capability header
+       ([MSICID], MSI control bits) instead of the EA capability. */
+    struct bdk_pciercx_cfg020_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_25_31 : 7;
+        uint32_t pvms : 1; /**< [ 24: 24](RO) Per-vector masking capable. */
+        uint32_t m64 : 1; /**< [ 23: 23](RO/WRSL) 64-bit address capable, writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t mme : 3; /**< [ 22: 20](R/W) Multiple message enabled. Indicates that multiple message mode is enabled by system
+                                               software. The number of messages enabled must be less than or equal to the multiple
+                                               message capable (MMC) value. */
+        uint32_t mmc : 3; /**< [ 19: 17](RO/WRSL) Multiple message capable, writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t msien : 1; /**< [ 16: 16](R/W) MSI enabled. When set, INTx must be disabled. This bit must never be set, as internal-MSI
+                                               is not supported in RC mode. (Note that this has no effect on external MSI, which is
+                                               commonly used in RC mode.) */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to PCI Express capabilities by default, writable through
+                                               PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t msicid : 8; /**< [ 7: 0](RO) MSI capability ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t msicid : 8; /**< [ 7: 0](RO) MSI capability ID. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to PCI Express capabilities by default, writable through
+                                               PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t msien : 1; /**< [ 16: 16](R/W) MSI enabled. When set, INTx must be disabled. This bit must never be set, as internal-MSI
+                                               is not supported in RC mode. (Note that this has no effect on external MSI, which is
+                                               commonly used in RC mode.) */
+        uint32_t mmc : 3; /**< [ 19: 17](RO/WRSL) Multiple message capable, writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t mme : 3; /**< [ 22: 20](R/W) Multiple message enabled. Indicates that multiple message mode is enabled by system
+                                               software. The number of messages enabled must be less than or equal to the multiple
+                                               message capable (MMC) value. */
+        uint32_t m64 : 1; /**< [ 23: 23](RO/WRSL) 64-bit address capable, writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t pvms : 1; /**< [ 24: 24](RO) Per-vector masking capable. */
+        uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_pciercx_cfg020_cn81xx cn83xx; */
+};
+typedef union bdk_pciercx_cfg020 bdk_pciercx_cfg020_t;
+
+static inline uint64_t BDK_PCIERCX_CFG020(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG020(unsigned long a)
+{
+    /* CSR address of PCIERC(a)_CFG020: per-port regions are 4 GB apart. */
+    const uint64_t base = 0x20000000050ll;
+    const uint64_t stride = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+        return base + stride * (a & 0x7);
+    /* Out-of-range port index for the running model. */
+    __bdk_csr_fatal("PCIERCX_CFG020", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG020(a) bdk_pciercx_cfg020_t
+#define bustype_BDK_PCIERCX_CFG020(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG020(a) "PCIERCX_CFG020"
+#define busnum_BDK_PCIERCX_CFG020(a) (a)
+#define arguments_BDK_PCIERCX_CFG020(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg021
+ *
+ * PCIe RC MSI Lower 32 Bits Address Register
+ * This register contains the twenty-second 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg021
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg021_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* CN81XX/CN83XX: EA capability fixed secondary/subordinate bus numbers. */
+    struct bdk_pciercx_cfg021_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ea_rsvd : 16; /**< [ 31: 16](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+        uint32_t fixed_subnum : 8; /**< [ 15: 8](RO/WRSL) Fixed subordinate bus number.
+                                               This field is writable through PEM()_CFG_WR. However, the application must not change this
+                                               field. */
+        uint32_t fixed_secnum : 8; /**< [ 7: 0](RO/WRSL) Fixed secondary bus number.
+                                               This field is writable through PEM()_CFG_WR. However, the application must not change this
+                                               field. */
+#else /* Word 0 - Little Endian */
+        uint32_t fixed_secnum : 8; /**< [ 7: 0](RO/WRSL) Fixed secondary bus number.
+                                               This field is writable through PEM()_CFG_WR. However, the application must not change this
+                                               field. */
+        uint32_t fixed_subnum : 8; /**< [ 15: 8](RO/WRSL) Fixed subordinate bus number.
+                                               This field is writable through PEM()_CFG_WR. However, the application must not change this
+                                               field. */
+        uint32_t ea_rsvd : 16; /**< [ 31: 16](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+                                               not change this field. */
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* CN88XX: lower 32 bits of the MSI address (bits [1:0] reserved). */
+    struct bdk_pciercx_cfg021_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t lmsi : 30; /**< [ 31: 2](R/W) Lower 32-bit address. */
+        uint32_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_1 : 2;
+        uint32_t lmsi : 30; /**< [ 31: 2](R/W) Lower 32-bit address. */
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_pciercx_cfg021_cn81xx cn83xx; */
+};
+typedef union bdk_pciercx_cfg021 bdk_pciercx_cfg021_t;
+
+static inline uint64_t BDK_PCIERCX_CFG021(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG021(unsigned long a)
+{
+    /* CSR address of PCIERC(a)_CFG021: per-port regions are 4 GB apart. */
+    const uint64_t base = 0x20000000054ll;
+    const uint64_t stride = 0x100000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+        return base + stride * (a & 0x7);
+    /* Out-of-range port index for the running model. */
+    __bdk_csr_fatal("PCIERCX_CFG021", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG021(a) bdk_pciercx_cfg021_t
+#define bustype_BDK_PCIERCX_CFG021(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG021(a) "PCIERCX_CFG021"
+#define busnum_BDK_PCIERCX_CFG021(a) (a)
+#define arguments_BDK_PCIERCX_CFG021(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg022
+ *
+ * PCIe RC MSI Upper 32 Bits Address Register
+ * This register contains the twenty-third 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg022
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg022_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* cn81xx/cn83xx view: Enhanced Allocation capability entry header fields. */
+ struct bdk_pciercx_cfg022_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ena : 1; /**< [ 31: 31](RO/WRSL) Enable for this entry. This field is writable through PEM()_CFG_WR. However, the
+ application must
+ not change this field. */
+ uint32_t wr : 1; /**< [ 30: 30](RO/WRSL) Writable. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t ea_rsvd_1 : 6; /**< [ 29: 24](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t sprop : 8; /**< [ 23: 16](RO/WRSL) Secondary properties.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t pprop : 8; /**< [ 15: 8](RO/WRSL) Primary properties.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t bei : 4; /**< [ 7: 4](RO/WRSL) Bar equivalent indicator.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ea_rsvd_0 : 1; /**< [ 3: 3](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t esize : 3; /**< [ 2: 0](RO/WRSL) Entry size - the number of DW following the initial DW in this entry.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+#else /* Word 0 - Little Endian */
+ uint32_t esize : 3; /**< [ 2: 0](RO/WRSL) Entry size - the number of DW following the initial DW in this entry.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ea_rsvd_0 : 1; /**< [ 3: 3](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t bei : 4; /**< [ 7: 4](RO/WRSL) Bar equivalent indicator.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t pprop : 8; /**< [ 15: 8](RO/WRSL) Primary properties.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t sprop : 8; /**< [ 23: 16](RO/WRSL) Secondary properties.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ea_rsvd_1 : 6; /**< [ 29: 24](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t wr : 1; /**< [ 30: 30](RO/WRSL) Writable. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t ena : 1; /**< [ 31: 31](RO/WRSL) Enable for this entry. This field is writable through PEM()_CFG_WR. However, the
+ application must
+ not change this field. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* cn88xx view: MSI upper 32-bit address occupies the whole word. */
+ struct bdk_pciercx_cfg022_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t umsi : 32; /**< [ 31: 0](R/W) Upper 32-bit address. */
+#else /* Word 0 - Little Endian */
+ uint32_t umsi : 32; /**< [ 31: 0](R/W) Upper 32-bit address. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_pciercx_cfg022_cn81xx cn83xx; */
+};
+typedef union bdk_pciercx_cfg022 bdk_pciercx_cfg022_t;
+
+/* Address generator; see BDK_PCIERCX_CFG021 for the indexing scheme.
+   Invalid indices route to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG022(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG022(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000058ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000058ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000058ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG022", 1, a, 0, 0, 0);
+}
+
+/* Glue definitions consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG022(a) bdk_pciercx_cfg022_t
+#define bustype_BDK_PCIERCX_CFG022(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG022(a) "PCIERCX_CFG022"
+#define busnum_BDK_PCIERCX_CFG022(a) (a)
+#define arguments_BDK_PCIERCX_CFG022(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg023
+ *
+ * PCIe RC MSI Data Register
+ * This register contains the twenty-fourth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg023
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg023_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* cn81xx/cn83xx view: Enhanced Allocation entry lower base address. */
+ struct bdk_pciercx_cfg023_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbase : 30; /**< [ 31: 2](RO/WRSL) Lower base. The value is determined by taking the lower 32-bits of PEMRC's BAR4 address
+ (PEMRC()_BAR_E::PEMRC()_PF_BAR4) and right-shifting by two bits. This field is writable
+ through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t size : 1; /**< [ 1: 1](RO/WRSL) Size - 64-bit (1), 32-bit (0). This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t ea_rsvd : 1; /**< [ 0: 0](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t ea_rsvd : 1; /**< [ 0: 0](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t size : 1; /**< [ 1: 1](RO/WRSL) Size - 64-bit (1), 32-bit (0). This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t lbase : 30; /**< [ 31: 2](RO/WRSL) Lower base. The value is determined by taking the lower 32-bits of PEMRC's BAR4 address
+ (PEMRC()_BAR_E::PEMRC()_PF_BAR4) and right-shifting by two bits. This field is writable
+ through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* cn88xx view: 16-bit MSI data pattern in the low half of the word. */
+ struct bdk_pciercx_cfg023_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t msimd : 16; /**< [ 15: 0](R/W) MSI data. Pattern assigned by system software. Bits [4:0] are ORed with MSI_VECTOR to
+ generate 32 MSI messages per function. */
+#else /* Word 0 - Little Endian */
+ uint32_t msimd : 16; /**< [ 15: 0](R/W) MSI data. Pattern assigned by system software. Bits [4:0] are ORed with MSI_VECTOR to
+ generate 32 MSI messages per function. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn88xx;
+ /* struct bdk_pciercx_cfg023_cn81xx cn83xx; */
+};
+typedef union bdk_pciercx_cfg023 bdk_pciercx_cfg023_t;
+
+/* Address generator; see BDK_PCIERCX_CFG021 for the indexing scheme.
+   Invalid indices route to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG023(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG023(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x2000000005cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000005cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x2000000005cll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG023", 1, a, 0, 0, 0);
+}
+
+/* Glue definitions consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG023(a) bdk_pciercx_cfg023_t
+#define bustype_BDK_PCIERCX_CFG023(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG023(a) "PCIERCX_CFG023"
+#define busnum_BDK_PCIERCX_CFG023(a) (a)
+#define arguments_BDK_PCIERCX_CFG023(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg024
+ *
+ * PCIe RC Enhanced Allocation Entry 0 Max Offset Register
+ * This register contains the twenty-fifth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg024
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg024_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t moffs : 30; /**< [ 31: 2](RO/WRSL) Lower base. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+
+ Internal:
+ This is the offset to cover PEMRC BAR4 0xfffff & 0xffffc \>\>2 */
+ uint32_t size : 1; /**< [ 1: 1](RO/WRSL) Size - 64-bit (1), 32-bit (0). This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t ea_rsvd : 1; /**< [ 0: 0](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t ea_rsvd : 1; /**< [ 0: 0](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t size : 1; /**< [ 1: 1](RO/WRSL) Size - 64-bit (1), 32-bit (0). This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t moffs : 30; /**< [ 31: 2](RO/WRSL) Lower base. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+
+ Internal:
+ This is the offset to cover PEMRC BAR4 0xfffff & 0xffffc \>\>2 */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg024_s cn; */
+};
+typedef union bdk_pciercx_cfg024 bdk_pciercx_cfg024_t;
+
+/* Address generator. Note: unlike CFG021-CFG023 there is no CN88XX case
+   here, so on cn88xx every index falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG024(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG024(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000060ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000060ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG024", 1, a, 0, 0, 0);
+}
+
+/* Glue definitions consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG024(a) bdk_pciercx_cfg024_t
+#define bustype_BDK_PCIERCX_CFG024(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG024(a) "PCIERCX_CFG024"
+#define busnum_BDK_PCIERCX_CFG024(a) (a)
+#define arguments_BDK_PCIERCX_CFG024(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg025
+ *
+ * PCIe RC Enhanced Allocation Entry 0 Upper Base Register
+ * This register contains the twenty-sixth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg025
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg025_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubase : 32; /**< [ 31: 0](RO/WRSL) Upper base. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+
+ Internal:
+ This is the upper 32 bits of PEM_BAR_E::PEM()_PF_BAR0 */
+#else /* Word 0 - Little Endian */
+ uint32_t ubase : 32; /**< [ 31: 0](RO/WRSL) Upper base. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+
+ Internal:
+ This is the upper 32 bits of PEM_BAR_E::PEM()_PF_BAR0 */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg025_s cn; */
+};
+typedef union bdk_pciercx_cfg025 bdk_pciercx_cfg025_t;
+
+/* Address generator. Like CFG024, no CN88XX case: on cn88xx every index
+   falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG025(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG025(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000064ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000064ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG025", 1, a, 0, 0, 0);
+}
+
+/* Glue definitions consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG025(a) bdk_pciercx_cfg025_t
+#define bustype_BDK_PCIERCX_CFG025(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG025(a) "PCIERCX_CFG025"
+#define busnum_BDK_PCIERCX_CFG025(a) (a)
+#define arguments_BDK_PCIERCX_CFG025(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg028
+ *
+ * PCIe RC PCIe Capabilities/PCIe Capabilities List Register
+ * This register contains the twenty-ninth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg028
+{
+ uint32_t u;
+ /* Single layout shared by all models (see trailing "cn" alias comment). */
+ struct bdk_pciercx_cfg028_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t imn : 5; /**< [ 29: 25](RO/WRSL) Interrupt message number. Updated by hardware, writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t si : 1; /**< [ 24: 24](RO/WRSL) Slot implemented. This bit is writable through PEM()_CFG_WR. */
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device port type. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCI Express capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the MSI-X capability by default, writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCI Express capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCI Express capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the MSI-X capability by default, writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCI Express capability version. */
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device port type. */
+ uint32_t si : 1; /**< [ 24: 24](RO/WRSL) Slot implemented. This bit is writable through PEM()_CFG_WR. */
+ uint32_t imn : 5; /**< [ 29: 25](RO/WRSL) Interrupt message number. Updated by hardware, writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg028_s cn; */
+};
+typedef union bdk_pciercx_cfg028 bdk_pciercx_cfg028_t;
+
+/* Address generator; see BDK_PCIERCX_CFG021 for the indexing scheme.
+   Invalid indices route to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG028(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG028(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000070ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000070ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000070ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG028", 1, a, 0, 0, 0);
+}
+
+/* Glue definitions consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG028(a) bdk_pciercx_cfg028_t
+#define bustype_BDK_PCIERCX_CFG028(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG028(a) "PCIERCX_CFG028"
+#define busnum_BDK_PCIERCX_CFG028(a) (a)
+#define arguments_BDK_PCIERCX_CFG028(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg029
+ *
+ * PCIe RC Device Capabilities Register
+ * This register contains the thirtieth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg029
+{
+ uint32_t u;
+ /* Common view: includes FLR_CAP at bit 28 (matches the cn83xx layout). */
+ struct bdk_pciercx_cfg029_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t flr_cap : 1; /**< [ 28: 28](RO) Function level reset capability. This bit applies to endpoints only. */
+ uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+ uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+ PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+ must not write any value other than 0x0 to this field. */
+ uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+ PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+ must not write any value other than 0x0 to this field. */
+ uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+ uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+ uint32_t flr_cap : 1; /**< [ 28: 28](RO) Function level reset capability. This bit applies to endpoints only. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* cn81xx (and cn88xx, via the alias comment below): bit 28 is reserved,
+    i.e. no FLR_CAP field in this view. */
+ struct bdk_pciercx_cfg029_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+ uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t reserved_14 : 1;
+ uint32_t reserved_13 : 1;
+ uint32_t reserved_12 : 1;
+ uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+ PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+ must not write any value other than 0x0 to this field. */
+ uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+ PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+ must not write any value other than 0x0 to this field. */
+ uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t reserved_12 : 1;
+ uint32_t reserved_13 : 1;
+ uint32_t reserved_14 : 1;
+ uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+ uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pciercx_cfg029_cn81xx cn88xx; */
+ /* cn83xx: identical to cn81xx except FLR_CAP is defined at bit 28. */
+ struct bdk_pciercx_cfg029_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t flr_cap : 1; /**< [ 28: 28](RO) Function level reset capability. This bit applies to endpoints only. */
+ uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+ uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t reserved_14 : 1;
+ uint32_t reserved_13 : 1;
+ uint32_t reserved_12 : 1;
+ uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+ PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+ must not write any value other than 0x0 to this field. */
+ uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+ PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+ must not write any value other than 0x0 to this field. */
+ uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+ endpoint devices. */
+ uint32_t reserved_12 : 1;
+ uint32_t reserved_13 : 1;
+ uint32_t reserved_14 : 1;
+ uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t reserved_16_17 : 2;
+ uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+ uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+ uint32_t flr_cap : 1; /**< [ 28: 28](RO) Function level reset capability. This bit applies to endpoints only. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg029 bdk_pciercx_cfg029_t;
+
+/* Address generator; see BDK_PCIERCX_CFG021 for the indexing scheme.
+   Invalid indices route to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG029(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG029(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000074ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000074ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000074ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG029", 1, a, 0, 0, 0);
+}
+
+/* Glue definitions consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG029(a) bdk_pciercx_cfg029_t
+#define bustype_BDK_PCIERCX_CFG029(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG029(a) "PCIERCX_CFG029"
+#define busnum_BDK_PCIERCX_CFG029(a) (a)
+#define arguments_BDK_PCIERCX_CFG029(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg030
+ *
+ * PCIe RC Device Control/Device Status Register
+ * This register contains the thirty-first 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg030
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg030_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t tp : 1; /**< [ 21: 21](RO) Transaction pending. Hard-wired to 0. */
+ uint32_t ap_d : 1; /**< [ 20: 20](RO) AUX power detected. Set to 1 if AUX power detected. */
+ uint32_t ur_d : 1; /**< [ 19: 19](R/W1C/H) Unsupported request detected. Errors are logged in this register regardless of whether or
+ not error reporting is enabled in the device control register. UR_D occurs when we receive
+ something unsupported. Unsupported requests are nonfatal errors, so UR_D should cause
+ NFE_D. Receiving a vendor-defined message should cause an unsupported request. */
+ uint32_t fe_d : 1; /**< [ 18: 18](R/W1C/H) Fatal error detected. Errors are logged in this register regardless of whether or not
+ error reporting is enabled in the device control register. This field is set if we receive
+ any of the errors in PCIERC()_CFG066 that has a severity set to fatal. Malformed TLPs
+ generally fit into this category. */
+ uint32_t nfe_d : 1; /**< [ 17: 17](R/W1C/H) Nonfatal error detected. Errors are logged in this register regardless of whether or not
+ error reporting is enabled in the device control register. This field is set if we receive
+ any of the errors in PCIERC()_CFG066 that has a severity set to Nonfatal and does NOT
+ meet Advisory Nonfatal criteria, which most poisoned TLPs should. */
+ uint32_t ce_d : 1; /**< [ 16: 16](R/W1C/H) Correctable error detected. Errors are logged in this register regardless of whether or
+ not error reporting is enabled in the device control register. This field is set if we
+ receive any of the errors in PCIERC()_CFG068, for example, a replay timer timeout.
+ Also, it can be set if we get any of the errors in PCIERC()_CFG066 that has a severity
+ set to nonfatal and meets the advisory nonfatal criteria, which most ECRC errors should. */
+ uint32_t reserved_15 : 1;
+ uint32_t mrrs : 3; /**< [ 14: 12](R/W) Max read request size.
+ 0x0 =128 bytes.
+ 0x1 = 256 bytes.
+ 0x2 = 512 bytes.
+ 0x3 = 1024 bytes.
+ 0x4 = 2048 bytes.
+ 0x5 = 4096 bytes. */
+ uint32_t ns_en : 1; /**< [ 11: 11](R/W) Enable no snoop. */
+ uint32_t ap_en : 1; /**< [ 10: 10](R/W/H) AUX power PM enable. */
+ uint32_t pf_en : 1; /**< [ 9: 9](R/W/H) Phantom function enable. This bit should never be set; CNXXXX requests never uses phantom
+ functions. */
+ uint32_t etf_en : 1; /**< [ 8: 8](R/W) Extended tag field enable. Set this bit to enable extended tags. */
+ uint32_t mps : 3; /**< [ 7: 5](R/W) Max payload size. Legal values: 0x0 = 128 B, 0x1 = 256 B.
+ Larger sizes are not supported.
+ The payload size is the actual number of double-words transferred as indicated
+ in the TLP length field and does not take byte enables into account. */
+ uint32_t ro_en : 1; /**< [ 4: 4](R/W) Enable relaxed ordering. */
+ uint32_t ur_en : 1; /**< [ 3: 3](R/W) Unsupported request reporting enable. */
+ uint32_t fe_en : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+ uint32_t nfe_en : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+ uint32_t ce_en : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t ce_en : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+ uint32_t nfe_en : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+ uint32_t fe_en : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+ uint32_t ur_en : 1; /**< [ 3: 3](R/W) Unsupported request reporting enable. */
+ uint32_t ro_en : 1; /**< [ 4: 4](R/W) Enable relaxed ordering. */
+ uint32_t mps : 3; /**< [ 7: 5](R/W) Max payload size. Legal values: 0x0 = 128 B, 0x1 = 256 B.
+ Larger sizes are not supported.
+ The payload size is the actual number of double-words transferred as indicated
+ in the TLP length field and does not take byte enables into account. */
+ uint32_t etf_en : 1; /**< [ 8: 8](R/W) Extended tag field enable. Set this bit to enable extended tags. */
+ uint32_t pf_en : 1; /**< [ 9: 9](R/W/H) Phantom function enable. This bit should never be set; CNXXXX requests never uses phantom
+ functions. */
+ uint32_t ap_en : 1; /**< [ 10: 10](R/W/H) AUX power PM enable. */
+ uint32_t ns_en : 1; /**< [ 11: 11](R/W) Enable no snoop. */
+ uint32_t mrrs : 3; /**< [ 14: 12](R/W) Max read request size.
+ 0x0 =128 bytes.
+ 0x1 = 256 bytes.
+ 0x2 = 512 bytes.
+ 0x3 = 1024 bytes.
+ 0x4 = 2048 bytes.
+ 0x5 = 4096 bytes. */
+ uint32_t reserved_15 : 1;
+ uint32_t ce_d : 1; /**< [ 16: 16](R/W1C/H) Correctable error detected. Errors are logged in this register regardless of whether or
+ not error reporting is enabled in the device control register. This field is set if we
+ receive any of the errors in PCIERC()_CFG068, for example, a replay timer timeout.
+ Also, it can be set if we get any of the errors in PCIERC()_CFG066 that has a severity
+ set to nonfatal and meets the advisory nonfatal criteria, which most ECRC errors should. */
+ uint32_t nfe_d : 1; /**< [ 17: 17](R/W1C/H) Nonfatal error detected. Errors are logged in this register regardless of whether or not
+ error reporting is enabled in the device control register. This field is set if we receive
+ any of the errors in PCIERC()_CFG066 that has a severity set to Nonfatal and does NOT
+ meet Advisory Nonfatal criteria, which most poisoned TLPs should. */
+ uint32_t fe_d : 1; /**< [ 18: 18](R/W1C/H) Fatal error detected. Errors are logged in this register regardless of whether or not
+ error reporting is enabled in the device control register. This field is set if we receive
+ any of the errors in PCIERC()_CFG066 that has a severity set to fatal. Malformed TLPs
+ generally fit into this category. */
+ uint32_t ur_d : 1; /**< [ 19: 19](R/W1C/H) Unsupported request detected. Errors are logged in this register regardless of whether or
+ not error reporting is enabled in the device control register. UR_D occurs when we receive
+ something unsupported. Unsupported requests are nonfatal errors, so UR_D should cause
+ NFE_D. Receiving a vendor-defined message should cause an unsupported request. */
+ uint32_t ap_d : 1; /**< [ 20: 20](RO) AUX power detected. Set to 1 if AUX power detected. */
+ uint32_t tp : 1; /**< [ 21: 21](RO) Transaction pending. Hard-wired to 0. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg030_s cn81xx; */
+ /* struct bdk_pciercx_cfg030_s cn88xx; */
+ struct bdk_pciercx_cfg030_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t tp : 1; /**< [ 21: 21](RO) Transaction pending. Hard-wired to 0. */
+ uint32_t ap_d : 1; /**< [ 20: 20](RO) AUX power detected. Set to 1 if AUX power detected. */
+ uint32_t ur_d : 1; /**< [ 19: 19](R/W1C/H) Unsupported request detected. Errors are logged in this register regardless of whether or
+ not error reporting is enabled in the device control register. UR_D occurs when we receive
+ something unsupported. Unsupported requests are nonfatal errors, so UR_D should cause
+ NFE_D. Receiving a vendor-defined message should cause an unsupported request. */
+ uint32_t fe_d : 1; /**< [ 18: 18](R/W1C/H) Fatal error detected. Errors are logged in this register regardless of whether or not
+ error reporting is enabled in the device control register. This field is set if we receive
+ any of the errors in PCIERC()_CFG066 that has a severity set to fatal. Malformed TLPs
+ generally fit into this category. */
+ uint32_t nfe_d : 1; /**< [ 17: 17](R/W1C/H) Nonfatal error detected. Errors are logged in this register regardless of whether or not
+ error reporting is enabled in the device control register. This field is set if we receive
+ any of the errors in PCIERC()_CFG066 that has a severity set to Nonfatal and does NOT
+ meet Advisory Nonfatal criteria, which most poisoned TLPs should. */
+ uint32_t ce_d : 1; /**< [ 16: 16](R/W1C/H) Correctable error detected. Errors are logged in this register regardless of whether or
+ not error reporting is enabled in the device control register. This field is set if we
+ receive any of the errors in PCIERC()_CFG068, for example, a replay timer timeout.
+ Also, it can be set if we get any of the errors in PCIERC()_CFG066 that has a severity
+ set to nonfatal and meets the advisory nonfatal criteria, which most ECRC errors should. */
+ uint32_t reserved_15 : 1;
+ uint32_t mrrs : 3; /**< [ 14: 12](R/W) Max read request size.
+ 0x0 =128 bytes.
+ 0x1 = 256 bytes.
+ 0x2 = 512 bytes.
+ 0x3 = 1024 bytes.
+ 0x4 = 2048 bytes.
+ 0x5 = 4096 bytes.
+
+ DPI_SLI_PRT()_CFG[MRRS] must be set and properly must not exceed the desired
+ max read request size. */
+ uint32_t ns_en : 1; /**< [ 11: 11](R/W) Enable no snoop. */
+ uint32_t ap_en : 1; /**< [ 10: 10](RO) AUX power PM enable (Not supported). */
+ uint32_t pf_en : 1; /**< [ 9: 9](R/W/H) Phantom function enable. This bit should never be set; CNXXXX requests never uses phantom
+ functions. */
+ uint32_t etf_en : 1; /**< [ 8: 8](R/W) Extended tag field enable. Set this bit to enable extended tags. */
+ uint32_t mps : 3; /**< [ 7: 5](R/W) Max payload size. Legal values:
+ 0x0 = 128 bytes.
+ 0x1 = 256 bytes.
+ 0x2 = 512 bytes.
+ 0x3 = 1024 bytes.
+ Larger sizes are not supported by CNXXXX.
+
+ DPI_SLI_PRT()_CFG[MPS] must be set to the same value as this field for proper
+ functionality. */
+ uint32_t ro_en : 1; /**< [ 4: 4](R/W) Enable relaxed ordering. */
+ uint32_t ur_en : 1; /**< [ 3: 3](R/W) Unsupported request reporting enable. */
+ uint32_t fe_en : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+ uint32_t nfe_en : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+ uint32_t ce_en : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t ce_en : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+ uint32_t nfe_en : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+ uint32_t fe_en : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+ uint32_t ur_en : 1; /**< [ 3: 3](R/W) Unsupported request reporting enable. */
+ uint32_t ro_en : 1; /**< [ 4: 4](R/W) Enable relaxed ordering. */
+ uint32_t mps : 3; /**< [ 7: 5](R/W) Max payload size. Legal values:
+ 0x0 = 128 bytes.
+ 0x1 = 256 bytes.
+ 0x2 = 512 bytes.
+ 0x3 = 1024 bytes.
+ Larger sizes are not supported by CNXXXX.
+
+ DPI_SLI_PRT()_CFG[MPS] must be set to the same value as this field for proper
+ functionality. */
+ uint32_t etf_en : 1; /**< [ 8: 8](R/W) Extended tag field enable. Set this bit to enable extended tags. */
+ uint32_t pf_en : 1; /**< [ 9: 9](R/W/H) Phantom function enable. This bit should never be set; CNXXXX requests never uses phantom
+ functions. */
+ uint32_t ap_en : 1; /**< [ 10: 10](RO) AUX power PM enable (Not supported). */
+ uint32_t ns_en : 1; /**< [ 11: 11](R/W) Enable no snoop. */
+ uint32_t mrrs : 3; /**< [ 14: 12](R/W) Max read request size.
+ 0x0 =128 bytes.
+ 0x1 = 256 bytes.
+ 0x2 = 512 bytes.
+ 0x3 = 1024 bytes.
+ 0x4 = 2048 bytes.
+ 0x5 = 4096 bytes.
+
+ DPI_SLI_PRT()_CFG[MRRS] must be set and properly must not exceed the desired
+ max read request size. */
+ uint32_t reserved_15 : 1;
+ uint32_t ce_d : 1; /**< [ 16: 16](R/W1C/H) Correctable error detected. Errors are logged in this register regardless of whether or
+ not error reporting is enabled in the device control register. This field is set if we
+ receive any of the errors in PCIERC()_CFG068, for example, a replay timer timeout.
+ Also, it can be set if we get any of the errors in PCIERC()_CFG066 that has a severity
+ set to nonfatal and meets the advisory nonfatal criteria, which most ECRC errors should. */
+ uint32_t nfe_d : 1; /**< [ 17: 17](R/W1C/H) Nonfatal error detected. Errors are logged in this register regardless of whether or not
+ error reporting is enabled in the device control register. This field is set if we receive
+ any of the errors in PCIERC()_CFG066 that has a severity set to Nonfatal and does NOT
+ meet Advisory Nonfatal criteria, which most poisoned TLPs should. */
+ uint32_t fe_d : 1; /**< [ 18: 18](R/W1C/H) Fatal error detected. Errors are logged in this register regardless of whether or not
+ error reporting is enabled in the device control register. This field is set if we receive
+ any of the errors in PCIERC()_CFG066 that has a severity set to fatal. Malformed TLPs
+ generally fit into this category. */
+ uint32_t ur_d : 1; /**< [ 19: 19](R/W1C/H) Unsupported request detected. Errors are logged in this register regardless of whether or
+ not error reporting is enabled in the device control register. UR_D occurs when we receive
+ something unsupported. Unsupported requests are nonfatal errors, so UR_D should cause
+ NFE_D. Receiving a vendor-defined message should cause an unsupported request. */
+ uint32_t ap_d : 1; /**< [ 20: 20](RO) AUX power detected. Set to 1 if AUX power detected. */
+ uint32_t tp : 1; /**< [ 21: 21](RO) Transaction pending. Hard-wired to 0. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg030 bdk_pciercx_cfg030_t;
+
+/* Physical base address of PCIERC(a)_CFG030 (PCIe RC device control/status,
+   type 1 config space offset 0x78). Valid index range is model dependent:
+   CN81XX: a <= 2, CN83XX: a <= 3, CN88XX: a <= 5; any other combination
+   is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG030(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG030(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000078ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000078ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000078ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG030", 1, a, 0, 0, 0);
+ /* NOTE(review): no return after __bdk_csr_fatal(); presumably it is
+    declared noreturn -- confirm, otherwise falling off the end of this
+    non-void function is undefined behavior. */
+}
+
+/* Accessor plumbing consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_CFG030(a) bdk_pciercx_cfg030_t
+#define bustype_BDK_PCIERCX_CFG030(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG030(a) "PCIERCX_CFG030"
+#define busnum_BDK_PCIERCX_CFG030(a) (a)
+#define arguments_BDK_PCIERCX_CFG030(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg031
+ *
+ * PCIe RC Link Capabilities Register
+ * This register contains the thirty-second 32-bits of PCIe type 1 configuration space.
+ */
+/* NOTE(review): the generic 's' layout and the 'cn83xx' layout differ only in
+   [SDERC] (RO, hardwired 0 vs. RO/WRSL, 1 for RC) and in the documented [MLW]
+   reset polarity: 's' says LANES8 set => 0x4, 'cn83xx' says LANES8 set => 0x8.
+   One of the two sentences looks inverted -- confirm against the HRM; the
+   bitfield layout itself is identical, so only the comment is suspect. */
+union bdk_pciercx_cfg031
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg031_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pnum : 8; /**< [ 31: 24](RO/WRSL) Port number, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t aspm : 1; /**< [ 22: 22](RO/WRSL) ASPM optionality compliance. */
+ uint32_t lbnc : 1; /**< [ 21: 21](RO/WRSL) Link bandwidth notification capability. */
+ uint32_t dllarc : 1; /**< [ 20: 20](RO) Data link layer active reporting capable. Set to 1 for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t sderc : 1; /**< [ 19: 19](RO) Surprise down error reporting capable. Not supported; hardwired to 0. */
+ uint32_t cpm : 1; /**< [ 18: 18](RO) Clock power management. The default value is the value that software specifies during
+ hardware configuration, writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+ uint32_t l1el : 3; /**< [ 17: 15](RO/WRSL) L1 exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t l0el : 3; /**< [ 14: 12](RO/WRSL) L0s exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t aslpms : 2; /**< [ 11: 10](RO/WRSL) Active state link PM support. The default value is the value that software specifies
+ during hardware configuration, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t mlw : 6; /**< [ 9: 4](RO/WRSL/H) Maximum link width.
+ The reset value of this field is determined by the value read from
+ PEM()_CFG[LANES8]. If LANES8 is set the reset value is 0x4, otherwise 0x8.
+
+ This field is writable through PEM()_CFG_WR. */
+ uint32_t mls : 4; /**< [ 3: 0](RO/WRSL) Maximum link speed. The reset value of this field is controlled by the value read from
+ PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode).
+
+ This field is writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t mls : 4; /**< [ 3: 0](RO/WRSL) Maximum link speed. The reset value of this field is controlled by the value read from
+ PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode).
+
+ This field is writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t mlw : 6; /**< [ 9: 4](RO/WRSL/H) Maximum link width.
+ The reset value of this field is determined by the value read from
+ PEM()_CFG[LANES8]. If LANES8 is set the reset value is 0x4, otherwise 0x8.
+
+ This field is writable through PEM()_CFG_WR. */
+ uint32_t aslpms : 2; /**< [ 11: 10](RO/WRSL) Active state link PM support. The default value is the value that software specifies
+ during hardware configuration, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t l0el : 3; /**< [ 14: 12](RO/WRSL) L0s exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t l1el : 3; /**< [ 17: 15](RO/WRSL) L1 exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t cpm : 1; /**< [ 18: 18](RO) Clock power management. The default value is the value that software specifies during
+ hardware configuration, writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+ uint32_t sderc : 1; /**< [ 19: 19](RO) Surprise down error reporting capable. Not supported; hardwired to 0. */
+ uint32_t dllarc : 1; /**< [ 20: 20](RO) Data link layer active reporting capable. Set to 1 for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t lbnc : 1; /**< [ 21: 21](RO/WRSL) Link bandwidth notification capability. */
+ uint32_t aspm : 1; /**< [ 22: 22](RO/WRSL) ASPM optionality compliance. */
+ uint32_t reserved_23 : 1;
+ uint32_t pnum : 8; /**< [ 31: 24](RO/WRSL) Port number, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg031_s cn81xx; */
+ /* struct bdk_pciercx_cfg031_s cn88xx; */
+ struct bdk_pciercx_cfg031_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pnum : 8; /**< [ 31: 24](RO/WRSL) Port number, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t aspm : 1; /**< [ 22: 22](RO/WRSL) ASPM optionality compliance. */
+ uint32_t lbnc : 1; /**< [ 21: 21](RO/WRSL) Link bandwidth notification capability. */
+ uint32_t dllarc : 1; /**< [ 20: 20](RO) Data link layer active reporting capable. Set to 1 for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t sderc : 1; /**< [ 19: 19](RO/WRSL) Surprise down error reporting capable. Set to 1 for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t cpm : 1; /**< [ 18: 18](RO) Clock power management. The default value is the value that software specifies during
+ hardware configuration, writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+ uint32_t l1el : 3; /**< [ 17: 15](RO/WRSL) L1 exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t l0el : 3; /**< [ 14: 12](RO/WRSL) L0s exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t aslpms : 2; /**< [ 11: 10](RO/WRSL) Active state link PM support. The default value is the value that software specifies
+ during hardware configuration, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t mlw : 6; /**< [ 9: 4](RO/WRSL/H) Maximum link width.
+ The reset value of this field is determined by the value read from
+ PEM()_CFG[LANES8]. If LANES8 is set the reset value is 0x8, otherwise 0x4.
+
+ This field is writable through PEM()_CFG_WR. */
+ uint32_t mls : 4; /**< [ 3: 0](RO/WRSL) Maximum link speed. The reset value of this field is controlled by the value read from
+ PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode).
+
+ This field is writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t mls : 4; /**< [ 3: 0](RO/WRSL) Maximum link speed. The reset value of this field is controlled by the value read from
+ PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode).
+
+ This field is writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t mlw : 6; /**< [ 9: 4](RO/WRSL/H) Maximum link width.
+ The reset value of this field is determined by the value read from
+ PEM()_CFG[LANES8]. If LANES8 is set the reset value is 0x8, otherwise 0x4.
+
+ This field is writable through PEM()_CFG_WR. */
+ uint32_t aslpms : 2; /**< [ 11: 10](RO/WRSL) Active state link PM support. The default value is the value that software specifies
+ during hardware configuration, writable through PEM()_CFG_WR. However, the application
+ must not change this field. */
+ uint32_t l0el : 3; /**< [ 14: 12](RO/WRSL) L0s exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t l1el : 3; /**< [ 17: 15](RO/WRSL) L1 exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t cpm : 1; /**< [ 18: 18](RO) Clock power management. The default value is the value that software specifies during
+ hardware configuration, writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+ uint32_t sderc : 1; /**< [ 19: 19](RO/WRSL) Surprise down error reporting capable. Set to 1 for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t dllarc : 1; /**< [ 20: 20](RO) Data link layer active reporting capable. Set to 1 for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t lbnc : 1; /**< [ 21: 21](RO/WRSL) Link bandwidth notification capability. */
+ uint32_t aspm : 1; /**< [ 22: 22](RO/WRSL) ASPM optionality compliance. */
+ uint32_t reserved_23 : 1;
+ uint32_t pnum : 8; /**< [ 31: 24](RO/WRSL) Port number, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg031 bdk_pciercx_cfg031_t;
+
+/* Physical base address of PCIERC(a)_CFG031 (PCIe RC link capabilities,
+   type 1 config space offset 0x7C). Valid index range is model dependent:
+   CN81XX: a <= 2, CN83XX: a <= 3, CN88XX: a <= 5; any other combination
+   is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG031(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG031(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x2000000007cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000007cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x2000000007cll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG031", 1, a, 0, 0, 0);
+ /* NOTE(review): no return after __bdk_csr_fatal(); presumably it is
+    declared noreturn -- confirm, otherwise falling off the end of this
+    non-void function is undefined behavior. */
+}
+
+/* Accessor plumbing consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_CFG031(a) bdk_pciercx_cfg031_t
+#define bustype_BDK_PCIERCX_CFG031(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG031(a) "PCIERCX_CFG031"
+#define busnum_BDK_PCIERCX_CFG031(a) (a)
+#define arguments_BDK_PCIERCX_CFG031(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg032
+ *
+ * PCIe RC Link Control/Link Status Register
+ * This register contains the thirty-third 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg032
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg032_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lab : 1; /**< [ 31: 31](R/W1C/H) Link autonomous bandwidth status. This bit is set to indicate that hardware has
+ autonomously changed link speed or width, without the port transitioning through DL_Down
+ status, for reasons other than to attempt to correct unreliable link operation. */
+ uint32_t lbm : 1; /**< [ 30: 30](R/W1C/H) Link bandwidth management status. This bit is set to indicate either of the following has
+ occurred without the port transitioning through DL_DOWN status:
+
+ * A link retraining has completed following a write of 1b to the retrain link bit.
+
+ * Hardware has changed the Link speed or width to attempt to correct unreliable link
+ operation, either through a LTSSM timeout of higher level process. This bit must be set if
+ the physical layer reports a speed or width change was initiated by the downstream
+ component that was not indicated as an autonomous change. */
+ uint32_t dlla : 1; /**< [ 29: 29](RO/H) Data link layer active. */
+ uint32_t scc : 1; /**< [ 28: 28](RO/WRSL) Slot clock configuration. Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default value is the value you
+ select during hardware configuration, writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t lt : 1; /**< [ 27: 27](RO/H) Link training. */
+ uint32_t reserved_26 : 1;
+ uint32_t nlw : 6; /**< [ 25: 20](RO/H) Negotiated link width. Set automatically by hardware after link initialization. Value is
+ undefined when link is not up. */
+ uint32_t ls : 4; /**< [ 19: 16](RO/H) Current link speed. The encoded value specifies a bit location in the supported link
+ speeds vector (in the link capabilities 2 register) that corresponds to the current link
+ speed.
+ 0x1 = Supported link speeds vector field bit 0.
+ 0x2 = Supported link speeds vector field bit 1.
+ 0x3 = Supported link speeds vector field bit 2. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t lab_int_enb : 1; /**< [ 11: 11](R/W) Link autonomous bandwidth interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link autonomous bandwidth status bit has been set. */
+ uint32_t lbm_int_enb : 1; /**< [ 10: 10](R/W) Link bandwidth management interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link bandwidth management status bit has been set. */
+ uint32_t hawd : 1; /**< [ 9: 9](RO) Hardware autonomous width disable (not supported). */
+ uint32_t ecpm : 1; /**< [ 8: 8](R/W/H) Enable clock power management. Hardwired to 0 if clock power management is disabled in the
+ link capabilities register. */
+ uint32_t es : 1; /**< [ 7: 7](R/W) Extended synch. */
+ uint32_t ccc : 1; /**< [ 6: 6](R/W) Common clock configuration. */
+ uint32_t rl : 1; /**< [ 5: 5](R/W/H) Retrain link.
+ As per the PCIe specification this bit always reads as zero. */
+ uint32_t ld : 1; /**< [ 4: 4](R/W) Link disable. */
+ uint32_t rcb : 1; /**< [ 3: 3](RO/WRSL) Read completion boundary (RCB), writable through
+ PEM()_CFG_WR. However, the application must not change this field because an RCB of 64
+ bytes is not supported. */
+ uint32_t reserved_2 : 1;
+ uint32_t aslpc : 2; /**< [ 1: 0](R/W) Active state link PM control. */
+#else /* Word 0 - Little Endian */
+ uint32_t aslpc : 2; /**< [ 1: 0](R/W) Active state link PM control. */
+ uint32_t reserved_2 : 1;
+ uint32_t rcb : 1; /**< [ 3: 3](RO/WRSL) Read completion boundary (RCB), writable through
+ PEM()_CFG_WR. However, the application must not change this field because an RCB of 64
+ bytes is not supported. */
+ uint32_t ld : 1; /**< [ 4: 4](R/W) Link disable. */
+ uint32_t rl : 1; /**< [ 5: 5](R/W/H) Retrain link.
+ As per the PCIe specification this bit always reads as zero. */
+ uint32_t ccc : 1; /**< [ 6: 6](R/W) Common clock configuration. */
+ uint32_t es : 1; /**< [ 7: 7](R/W) Extended synch. */
+ uint32_t ecpm : 1; /**< [ 8: 8](R/W/H) Enable clock power management. Hardwired to 0 if clock power management is disabled in the
+ link capabilities register. */
+ uint32_t hawd : 1; /**< [ 9: 9](RO) Hardware autonomous width disable (not supported). */
+ uint32_t lbm_int_enb : 1; /**< [ 10: 10](R/W) Link bandwidth management interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link bandwidth management status bit has been set. */
+ uint32_t lab_int_enb : 1; /**< [ 11: 11](R/W) Link autonomous bandwidth interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link autonomous bandwidth status bit has been set. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t ls : 4; /**< [ 19: 16](RO/H) Current link speed. The encoded value specifies a bit location in the supported link
+ speeds vector (in the link capabilities 2 register) that corresponds to the current link
+ speed.
+ 0x1 = Supported link speeds vector field bit 0.
+ 0x2 = Supported link speeds vector field bit 1.
+ 0x3 = Supported link speeds vector field bit 2. */
+ uint32_t nlw : 6; /**< [ 25: 20](RO/H) Negotiated link width. Set automatically by hardware after link initialization. Value is
+ undefined when link is not up. */
+ uint32_t reserved_26 : 1;
+ uint32_t lt : 1; /**< [ 27: 27](RO/H) Link training. */
+ uint32_t scc : 1; /**< [ 28: 28](RO/WRSL) Slot clock configuration. Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default value is the value you
+ select during hardware configuration, writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t dlla : 1; /**< [ 29: 29](RO/H) Data link layer active. */
+ uint32_t lbm : 1; /**< [ 30: 30](R/W1C/H) Link bandwidth management status. This bit is set to indicate either of the following has
+ occurred without the port transitioning through DL_DOWN status:
+
+ * A link retraining has completed following a write of 1b to the retrain link bit.
+
+ * Hardware has changed the Link speed or width to attempt to correct unreliable link
+ operation, either through a LTSSM timeout of higher level process. This bit must be set if
+ the physical layer reports a speed or width change was initiated by the downstream
+ component that was not indicated as an autonomous change. */
+ uint32_t lab : 1; /**< [ 31: 31](R/W1C/H) Link autonomous bandwidth status. This bit is set to indicate that hardware has
+ autonomously changed link speed or width, without the port transitioning through DL_Down
+ status, for reasons other than to attempt to correct unreliable link operation. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg032_s cn81xx; */
+ struct bdk_pciercx_cfg032_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lab : 1; /**< [ 31: 31](R/W1C) Link autonomous bandwidth status. This bit is set to indicate that hardware has
+ autonomously changed link speed or width, without the port transitioning through DL_Down
+ status, for reasons other than to attempt to correct unreliable link operation. */
+ uint32_t lbm : 1; /**< [ 30: 30](R/W1C) Link bandwidth management status. This bit is set to indicate either of the following has
+ occurred without the port transitioning through DL_DOWN status:
+
+ * A link retraining has completed following a write of 1b to the retrain link bit.
+
+ * Hardware has changed the Link speed or width to attempt to correct unreliable link
+ operation, either through a LTSSM timeout of higher level process. This bit must be set if
+ the physical layer reports a speed or width change was initiated by the downstream
+ component that was not indicated as an autonomous change. */
+ uint32_t dlla : 1; /**< [ 29: 29](RO/H) Data link layer active. */
+ uint32_t scc : 1; /**< [ 28: 28](RO/WRSL) Slot clock configuration. Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default value is the value you
+ select during hardware configuration, writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t lt : 1; /**< [ 27: 27](RO/H) Link training. */
+ uint32_t reserved_26 : 1;
+ uint32_t nlw : 6; /**< [ 25: 20](RO/H) Negotiated link width. Set automatically by hardware after link initialization. Value is
+ undefined when link is not up. */
+ uint32_t ls : 4; /**< [ 19: 16](RO/H) Current link speed. The encoded value specifies a bit location in the supported link
+ speeds vector (in the link capabilities 2 register) that corresponds to the current link
+ speed.
+ 0x1 = Supported link speeds vector field bit 0.
+ 0x2 = Supported link speeds vector field bit 1.
+ 0x3 = Supported link speeds vector field bit 2. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t lab_int_enb : 1; /**< [ 11: 11](R/W) Link autonomous bandwidth interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link autonomous bandwidth status bit has been set. */
+ uint32_t lbm_int_enb : 1; /**< [ 10: 10](R/W) Link bandwidth management interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link bandwidth management status bit has been set. */
+ uint32_t hawd : 1; /**< [ 9: 9](RO) Hardware autonomous width disable (not supported). */
+ uint32_t ecpm : 1; /**< [ 8: 8](R/W/H) Enable clock power management. Hardwired to 0 if clock power management is disabled in the
+ link capabilities register. */
+ uint32_t es : 1; /**< [ 7: 7](R/W) Extended synch. */
+ uint32_t ccc : 1; /**< [ 6: 6](R/W) Common clock configuration. */
+ uint32_t rl : 1; /**< [ 5: 5](WO) Retrain link.
+ As per the PCIe specification this bit always reads as zero. */
+ uint32_t ld : 1; /**< [ 4: 4](R/W) Link disable. */
+ uint32_t rcb : 1; /**< [ 3: 3](RO/WRSL) Read completion boundary (RCB), writable through
+ PEM()_CFG_WR. However, the application must not change this field because an RCB of 64
+ bytes is not supported. */
+ uint32_t reserved_2 : 1;
+ uint32_t aslpc : 2; /**< [ 1: 0](R/W) Active state link PM control. */
+#else /* Word 0 - Little Endian */
+ uint32_t aslpc : 2; /**< [ 1: 0](R/W) Active state link PM control. */
+ uint32_t reserved_2 : 1;
+ uint32_t rcb : 1; /**< [ 3: 3](RO/WRSL) Read completion boundary (RCB), writable through
+ PEM()_CFG_WR. However, the application must not change this field because an RCB of 64
+ bytes is not supported. */
+ uint32_t ld : 1; /**< [ 4: 4](R/W) Link disable. */
+ uint32_t rl : 1; /**< [ 5: 5](WO) Retrain link.
+ As per the PCIe specification this bit always reads as zero. */
+ uint32_t ccc : 1; /**< [ 6: 6](R/W) Common clock configuration. */
+ uint32_t es : 1; /**< [ 7: 7](R/W) Extended synch. */
+ uint32_t ecpm : 1; /**< [ 8: 8](R/W/H) Enable clock power management. Hardwired to 0 if clock power management is disabled in the
+ link capabilities register. */
+ uint32_t hawd : 1; /**< [ 9: 9](RO) Hardware autonomous width disable (not supported). */
+ uint32_t lbm_int_enb : 1; /**< [ 10: 10](R/W) Link bandwidth management interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link bandwidth management status bit has been set. */
+ uint32_t lab_int_enb : 1; /**< [ 11: 11](R/W) Link autonomous bandwidth interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link autonomous bandwidth status bit has been set. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t ls : 4; /**< [ 19: 16](RO/H) Current link speed. The encoded value specifies a bit location in the supported link
+ speeds vector (in the link capabilities 2 register) that corresponds to the current link
+ speed.
+ 0x1 = Supported link speeds vector field bit 0.
+ 0x2 = Supported link speeds vector field bit 1.
+ 0x3 = Supported link speeds vector field bit 2. */
+ uint32_t nlw : 6; /**< [ 25: 20](RO/H) Negotiated link width. Set automatically by hardware after link initialization. Value is
+ undefined when link is not up. */
+ uint32_t reserved_26 : 1;
+ uint32_t lt : 1; /**< [ 27: 27](RO/H) Link training. */
+ uint32_t scc : 1; /**< [ 28: 28](RO/WRSL) Slot clock configuration. Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default value is the value you
+ select during hardware configuration, writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t dlla : 1; /**< [ 29: 29](RO/H) Data link layer active. */
+ uint32_t lbm : 1; /**< [ 30: 30](R/W1C) Link bandwidth management status. This bit is set to indicate either of the following has
+ occurred without the port transitioning through DL_DOWN status:
+
+ * A link retraining has completed following a write of 1b to the retrain link bit.
+
+ * Hardware has changed the Link speed or width to attempt to correct unreliable link
+ operation, either through a LTSSM timeout of higher level process. This bit must be set if
+ the physical layer reports a speed or width change was initiated by the downstream
+ component that was not indicated as an autonomous change. */
+ uint32_t lab : 1; /**< [ 31: 31](R/W1C) Link autonomous bandwidth status. This bit is set to indicate that hardware has
+ autonomously changed link speed or width, without the port transitioning through DL_Down
+ status, for reasons other than to attempt to correct unreliable link operation. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pciercx_cfg032_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lab : 1; /**< [ 31: 31](R/W1C/H) Link autonomous bandwidth status. This bit is set to indicate that hardware has
+ autonomously changed link speed or width, without the port transitioning through DL_Down
+ status, for reasons other than to attempt to correct unreliable link operation. */
+ uint32_t lbm : 1; /**< [ 30: 30](R/W1C/H) Link bandwidth management status. This bit is set to indicate either of the following has
+ occurred without the port transitioning through DL_DOWN status:
+
+ * A link retraining has completed following a write of 1b to the retrain link bit.
+
+ * Hardware has changed the Link speed or width to attempt to correct unreliable link
+ operation, either through a LTSSM timeout of higher level process. This bit must be set if
+ the physical layer reports a speed or width change was initiated by the downstream
+ component that was not indicated as an autonomous change. */
+ uint32_t dlla : 1; /**< [ 29: 29](RO/H) Data link layer active. */
+ uint32_t scc : 1; /**< [ 28: 28](RO/WRSL) Slot clock configuration. Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default value is the value you
+ select during hardware configuration, writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t lt : 1; /**< [ 27: 27](RO/H) Link training. */
+ uint32_t reserved_26 : 1;
+ uint32_t nlw : 6; /**< [ 25: 20](RO/H) Negotiated link width. Set automatically by hardware after link initialization. Value is
+ undefined when link is not up. */
+ uint32_t ls : 4; /**< [ 19: 16](RO/H) Current link speed. The encoded value specifies a bit location in the supported link
+ speeds vector (in the link capabilities 2 register) that corresponds to the current link
+ speed.
+ 0x1 = Supported link speeds vector field bit 0.
+ 0x2 = Supported link speeds vector field bit 1.
+ 0x3 = Supported link speeds vector field bit 2. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t lab_int_enb : 1; /**< [ 11: 11](R/W) Link autonomous bandwidth interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link autonomous bandwidth status bit has been set. */
+ uint32_t lbm_int_enb : 1; /**< [ 10: 10](R/W) Link bandwidth management interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link bandwidth management status bit has been set. */
+ uint32_t hawd : 1; /**< [ 9: 9](R/W) Hardware autonomous width disable (not supported). */
+ uint32_t ecpm : 1; /**< [ 8: 8](R/W/H) Enable clock power management. Hardwired to 0 if clock power management is disabled in the
+ link capabilities register. */
+ uint32_t es : 1; /**< [ 7: 7](R/W) Extended synch. */
+ uint32_t ccc : 1; /**< [ 6: 6](R/W) Common clock configuration. */
+ uint32_t rl : 1; /**< [ 5: 5](R/W/H) Retrain link.
+ As per the PCIe specification this bit always reads as zero. */
+ uint32_t ld : 1; /**< [ 4: 4](R/W) Link disable. */
+ uint32_t rcb : 1; /**< [ 3: 3](RO/WRSL) Read completion boundary (RCB), writable through
+ PEM()_CFG_WR. However, the application must not change this field because an RCB of 64
+ bytes is not supported. */
+ uint32_t reserved_2 : 1;
+ uint32_t aslpc : 2; /**< [ 1: 0](R/W) Active state link PM control. */
+#else /* Word 0 - Little Endian */
+ uint32_t aslpc : 2; /**< [ 1: 0](R/W) Active state link PM control. */
+ uint32_t reserved_2 : 1;
+ uint32_t rcb : 1; /**< [ 3: 3](RO/WRSL) Read completion boundary (RCB), writable through
+ PEM()_CFG_WR. However, the application must not change this field because an RCB of 64
+ bytes is not supported. */
+ uint32_t ld : 1; /**< [ 4: 4](R/W) Link disable. */
+ uint32_t rl : 1; /**< [ 5: 5](R/W/H) Retrain link.
+ As per the PCIe specification this bit always reads as zero. */
+ uint32_t ccc : 1; /**< [ 6: 6](R/W) Common clock configuration. */
+ uint32_t es : 1; /**< [ 7: 7](R/W) Extended synch. */
+ uint32_t ecpm : 1; /**< [ 8: 8](R/W/H) Enable clock power management. Hardwired to 0 if clock power management is disabled in the
+ link capabilities register. */
+ uint32_t hawd : 1; /**< [ 9: 9](R/W) Hardware autonomous width disable (not supported). */
+ uint32_t lbm_int_enb : 1; /**< [ 10: 10](R/W) Link bandwidth management interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link bandwidth management status bit has been set. */
+ uint32_t lab_int_enb : 1; /**< [ 11: 11](R/W) Link autonomous bandwidth interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link autonomous bandwidth status bit has been set. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t ls : 4; /**< [ 19: 16](RO/H) Current link speed. The encoded value specifies a bit location in the supported link
+ speeds vector (in the link capabilities 2 register) that corresponds to the current link
+ speed.
+ 0x1 = Supported link speeds vector field bit 0.
+ 0x2 = Supported link speeds vector field bit 1.
+ 0x3 = Supported link speeds vector field bit 2. */
+ uint32_t nlw : 6; /**< [ 25: 20](RO/H) Negotiated link width. Set automatically by hardware after link initialization. Value is
+ undefined when link is not up. */
+ uint32_t reserved_26 : 1;
+ uint32_t lt : 1; /**< [ 27: 27](RO/H) Link training. */
+ uint32_t scc : 1; /**< [ 28: 28](RO/WRSL) Slot clock configuration. Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default value is the value you
+ select during hardware configuration, writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t dlla : 1; /**< [ 29: 29](RO/H) Data link layer active. */
+ uint32_t lbm : 1; /**< [ 30: 30](R/W1C/H) Link bandwidth management status. This bit is set to indicate either of the following has
+ occurred without the port transitioning through DL_DOWN status:
+
+ * A link retraining has completed following a write of 1b to the retrain link bit.
+
+ * Hardware has changed the Link speed or width to attempt to correct unreliable link
+ operation, either through a LTSSM timeout of higher level process. This bit must be set if
+ the physical layer reports a speed or width change was initiated by the downstream
+ component that was not indicated as an autonomous change. */
+ uint32_t lab : 1; /**< [ 31: 31](R/W1C/H) Link autonomous bandwidth status. This bit is set to indicate that hardware has
+ autonomously changed link speed or width, without the port transitioning through DL_Down
+ status, for reasons other than to attempt to correct unreliable link operation. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg032 bdk_pciercx_cfg032_t;
+
+static inline uint64_t BDK_PCIERCX_CFG032(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG032(unsigned long a)
+{
+    /* Per-model address decode: the CSR sits at base 0x20000000080 with one
+       instance per controller index, spaced 0x100000000 apart.  CN81XX and
+       CN83XX share the two-bit index mask; CN88XX decodes three index bits. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2)) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3)))
+        return 0x20000000080ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000080ll + 0x100000000ll * ((a) & 0x7);
+    /* Out-of-range index or unknown model: report and abort. */
+    __bdk_csr_fatal("PCIERCX_CFG032", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG032(a) bdk_pciercx_cfg032_t
+#define bustype_BDK_PCIERCX_CFG032(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG032(a) "PCIERCX_CFG032"
+#define busnum_BDK_PCIERCX_CFG032(a) (a)
+#define arguments_BDK_PCIERCX_CFG032(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg033
+ *
+ * PCIe RC Slot Capabilities Register
+ * This register contains the thirty-fourth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg033
+{
+    uint32_t u;                          /* Raw 32-bit register value. */
+    struct bdk_pciercx_cfg033_s
+    {
+        /* The same fields are listed in both bit orders so absolute bit
+           positions are identical on either endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ps_num : 13; /**< [ 31: 19](RO/WRSL) Physical slot number, writable through PEM()_CFG_WR. */
+        uint32_t nccs : 1; /**< [ 18: 18](RO/WRSL) No command complete support, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t emip : 1; /**< [ 17: 17](RO/WRSL) Electromechanical interlock present, writable through PEM()_CFG_WR. However, the
+                                                                 application must not change this field. */
+        uint32_t sp_ls : 2; /**< [ 16: 15](RO/WRSL) Slot power limit scale, writable through PEM()_CFG_WR. */
+        uint32_t sp_lv : 8; /**< [ 14: 7](RO/WRSL) Slot power limit value, writable through PEM()_CFG_WR. */
+        uint32_t hp_c : 1; /**< [ 6: 6](RO/WRSL) Hot plug capable, writable through PEM()_CFG_WR. However, the application must not
+                                                                 change this field. */
+        uint32_t hp_s : 1; /**< [ 5: 5](RO/WRSL) Hot plug surprise, writable through PEM()_CFG_WR. However, the application must not
+                                                                 change this field. */
+        uint32_t pip : 1; /**< [ 4: 4](RO/WRSL) Power indicator present, writable through PEM()_CFG_WR. However, the application must
+                                                                 not change this field. */
+        uint32_t aip : 1; /**< [ 3: 3](RO/WRSL) Attention indicator present, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t mrlsp : 1; /**< [ 2: 2](RO/WRSL) MRL sensor present, writable through PEM()_CFG_WR. However, the application must not
+                                                                 change this field. */
+        uint32_t pcp : 1; /**< [ 1: 1](RO/WRSL) Power controller present, writable through PEM()_CFG_WR. However, the application must
+                                                                 not change this field. */
+        uint32_t abp : 1; /**< [ 0: 0](RO/WRSL) Attention button present, writable through PEM()_CFG_WR. However, the application must
+                                                                 not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t abp : 1; /**< [ 0: 0](RO/WRSL) Attention button present, writable through PEM()_CFG_WR. However, the application must
+                                                                 not change this field. */
+        uint32_t pcp : 1; /**< [ 1: 1](RO/WRSL) Power controller present, writable through PEM()_CFG_WR. However, the application must
+                                                                 not change this field. */
+        uint32_t mrlsp : 1; /**< [ 2: 2](RO/WRSL) MRL sensor present, writable through PEM()_CFG_WR. However, the application must not
+                                                                 change this field. */
+        uint32_t aip : 1; /**< [ 3: 3](RO/WRSL) Attention indicator present, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t pip : 1; /**< [ 4: 4](RO/WRSL) Power indicator present, writable through PEM()_CFG_WR. However, the application must
+                                                                 not change this field. */
+        uint32_t hp_s : 1; /**< [ 5: 5](RO/WRSL) Hot plug surprise, writable through PEM()_CFG_WR. However, the application must not
+                                                                 change this field. */
+        uint32_t hp_c : 1; /**< [ 6: 6](RO/WRSL) Hot plug capable, writable through PEM()_CFG_WR. However, the application must not
+                                                                 change this field. */
+        uint32_t sp_lv : 8; /**< [ 14: 7](RO/WRSL) Slot power limit value, writable through PEM()_CFG_WR. */
+        uint32_t sp_ls : 2; /**< [ 16: 15](RO/WRSL) Slot power limit scale, writable through PEM()_CFG_WR. */
+        uint32_t emip : 1; /**< [ 17: 17](RO/WRSL) Electromechanical interlock present, writable through PEM()_CFG_WR. However, the
+                                                                 application must not change this field. */
+        uint32_t nccs : 1; /**< [ 18: 18](RO/WRSL) No command complete support, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t ps_num : 13; /**< [ 31: 19](RO/WRSL) Physical slot number, writable through PEM()_CFG_WR. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg033_s cn; */
+    /* NOTE(review): the generator leaves the per-model alias commented out,
+       which appears to mean all supported models share layout 's' — confirm
+       against the BDK generator conventions. */
+};
+typedef union bdk_pciercx_cfg033 bdk_pciercx_cfg033_t;
+
+static inline uint64_t BDK_PCIERCX_CFG033(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG033(unsigned long a)
+{
+    /* Per-model address decode: base 0x20000000084, one instance per
+       controller index, 0x100000000 apart.  CN81XX/CN83XX share the two-bit
+       index mask; CN88XX decodes three index bits. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2)) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3)))
+        return 0x20000000084ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000084ll + 0x100000000ll * ((a) & 0x7);
+    /* Out-of-range index or unknown model: report and abort. */
+    __bdk_csr_fatal("PCIERCX_CFG033", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG033(a) bdk_pciercx_cfg033_t
+#define bustype_BDK_PCIERCX_CFG033(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG033(a) "PCIERCX_CFG033"
+#define busnum_BDK_PCIERCX_CFG033(a) (a)
+#define arguments_BDK_PCIERCX_CFG033(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg034
+ *
+ * PCIe RC Slot Control/Slot Status Register
+ * This register contains the thirty-fifth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg034
+{
+    uint32_t u;                          /* Raw 32-bit register value. */
+    struct bdk_pciercx_cfg034_s
+    {
+        /* The same fields are listed in both bit orders so absolute bit
+           positions are identical on either endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_25_31 : 7;
+        uint32_t dlls_c : 1; /**< [ 24: 24](R/W1C/H) Data link layer state changed. */
+        uint32_t emis : 1; /**< [ 23: 23](RO) Electromechanical interlock status. */
+        uint32_t pds : 1; /**< [ 22: 22](RO/H) Presence detect state. */
+        uint32_t mrlss : 1; /**< [ 21: 21](RO) MRL sensor state. */
+        uint32_t ccint_d : 1; /**< [ 20: 20](R/W1C/H) Command completed. */
+        uint32_t pd_c : 1; /**< [ 19: 19](R/W1C/H) Presence detect changed. */
+        uint32_t mrls_c : 1; /**< [ 18: 18](R/W1C/H) MRL sensor changed. */
+        uint32_t pf_d : 1; /**< [ 17: 17](R/W1C/H) Power fault detected. */
+        uint32_t abp_d : 1; /**< [ 16: 16](R/W1C/H) Attention button pressed. */
+        uint32_t reserved_13_15 : 3;
+        uint32_t dlls_en : 1; /**< [ 12: 12](R/W) Data link layer state changed enable. */
+        uint32_t emic : 1; /**< [ 11: 11](WO) Electromechanical interlock control. */
+        uint32_t pcc : 1; /**< [ 10: 10](R/W) Power controller control. */
+        uint32_t pic : 2; /**< [ 9: 8](R/W) Power indicator control. */
+        uint32_t aic : 2; /**< [ 7: 6](R/W) Attention indicator control. */
+        uint32_t hpint_en : 1; /**< [ 5: 5](R/W) Hot-plug interrupt enable. */
+        uint32_t ccint_en : 1; /**< [ 4: 4](R/W) Command completed interrupt enable. */
+        uint32_t pd_en : 1; /**< [ 3: 3](R/W) Presence detect changed enable. */
+        uint32_t mrls_en : 1; /**< [ 2: 2](R/W) MRL sensor changed enable. */
+        uint32_t pf_en : 1; /**< [ 1: 1](R/W) Power fault detected enable. */
+        uint32_t abp_en : 1; /**< [ 0: 0](R/W) Attention button pressed enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t abp_en : 1; /**< [ 0: 0](R/W) Attention button pressed enable. */
+        uint32_t pf_en : 1; /**< [ 1: 1](R/W) Power fault detected enable. */
+        uint32_t mrls_en : 1; /**< [ 2: 2](R/W) MRL sensor changed enable. */
+        uint32_t pd_en : 1; /**< [ 3: 3](R/W) Presence detect changed enable. */
+        uint32_t ccint_en : 1; /**< [ 4: 4](R/W) Command completed interrupt enable. */
+        uint32_t hpint_en : 1; /**< [ 5: 5](R/W) Hot-plug interrupt enable. */
+        uint32_t aic : 2; /**< [ 7: 6](R/W) Attention indicator control. */
+        uint32_t pic : 2; /**< [ 9: 8](R/W) Power indicator control. */
+        uint32_t pcc : 1; /**< [ 10: 10](R/W) Power controller control. */
+        uint32_t emic : 1; /**< [ 11: 11](WO) Electromechanical interlock control. */
+        uint32_t dlls_en : 1; /**< [ 12: 12](R/W) Data link layer state changed enable. */
+        uint32_t reserved_13_15 : 3;
+        uint32_t abp_d : 1; /**< [ 16: 16](R/W1C/H) Attention button pressed. */
+        uint32_t pf_d : 1; /**< [ 17: 17](R/W1C/H) Power fault detected. */
+        uint32_t mrls_c : 1; /**< [ 18: 18](R/W1C/H) MRL sensor changed. */
+        uint32_t pd_c : 1; /**< [ 19: 19](R/W1C/H) Presence detect changed. */
+        uint32_t ccint_d : 1; /**< [ 20: 20](R/W1C/H) Command completed. */
+        uint32_t mrlss : 1; /**< [ 21: 21](RO) MRL sensor state. */
+        uint32_t pds : 1; /**< [ 22: 22](RO/H) Presence detect state. */
+        uint32_t emis : 1; /**< [ 23: 23](RO) Electromechanical interlock status. */
+        uint32_t dlls_c : 1; /**< [ 24: 24](R/W1C/H) Data link layer state changed. */
+        uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg034_s cn81xx; */
+    /* CN88XX repeats the common bit layout; only the [PDS] access annotation
+       differs (RO here vs RO/H in the common struct above). */
+    struct bdk_pciercx_cfg034_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_25_31 : 7;
+        uint32_t dlls_c : 1; /**< [ 24: 24](R/W1C/H) Data link layer state changed. */
+        uint32_t emis : 1; /**< [ 23: 23](RO) Electromechanical interlock status. */
+        uint32_t pds : 1; /**< [ 22: 22](RO) Presence detect state. */
+        uint32_t mrlss : 1; /**< [ 21: 21](RO) MRL sensor state. */
+        uint32_t ccint_d : 1; /**< [ 20: 20](R/W1C/H) Command completed. */
+        uint32_t pd_c : 1; /**< [ 19: 19](R/W1C/H) Presence detect changed. */
+        uint32_t mrls_c : 1; /**< [ 18: 18](R/W1C/H) MRL sensor changed. */
+        uint32_t pf_d : 1; /**< [ 17: 17](R/W1C/H) Power fault detected. */
+        uint32_t abp_d : 1; /**< [ 16: 16](R/W1C/H) Attention button pressed. */
+        uint32_t reserved_13_15 : 3;
+        uint32_t dlls_en : 1; /**< [ 12: 12](R/W) Data link layer state changed enable. */
+        uint32_t emic : 1; /**< [ 11: 11](WO) Electromechanical interlock control. */
+        uint32_t pcc : 1; /**< [ 10: 10](R/W) Power controller control. */
+        uint32_t pic : 2; /**< [ 9: 8](R/W) Power indicator control. */
+        uint32_t aic : 2; /**< [ 7: 6](R/W) Attention indicator control. */
+        uint32_t hpint_en : 1; /**< [ 5: 5](R/W) Hot-plug interrupt enable. */
+        uint32_t ccint_en : 1; /**< [ 4: 4](R/W) Command completed interrupt enable. */
+        uint32_t pd_en : 1; /**< [ 3: 3](R/W) Presence detect changed enable. */
+        uint32_t mrls_en : 1; /**< [ 2: 2](R/W) MRL sensor changed enable. */
+        uint32_t pf_en : 1; /**< [ 1: 1](R/W) Power fault detected enable. */
+        uint32_t abp_en : 1; /**< [ 0: 0](R/W) Attention button pressed enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t abp_en : 1; /**< [ 0: 0](R/W) Attention button pressed enable. */
+        uint32_t pf_en : 1; /**< [ 1: 1](R/W) Power fault detected enable. */
+        uint32_t mrls_en : 1; /**< [ 2: 2](R/W) MRL sensor changed enable. */
+        uint32_t pd_en : 1; /**< [ 3: 3](R/W) Presence detect changed enable. */
+        uint32_t ccint_en : 1; /**< [ 4: 4](R/W) Command completed interrupt enable. */
+        uint32_t hpint_en : 1; /**< [ 5: 5](R/W) Hot-plug interrupt enable. */
+        uint32_t aic : 2; /**< [ 7: 6](R/W) Attention indicator control. */
+        uint32_t pic : 2; /**< [ 9: 8](R/W) Power indicator control. */
+        uint32_t pcc : 1; /**< [ 10: 10](R/W) Power controller control. */
+        uint32_t emic : 1; /**< [ 11: 11](WO) Electromechanical interlock control. */
+        uint32_t dlls_en : 1; /**< [ 12: 12](R/W) Data link layer state changed enable. */
+        uint32_t reserved_13_15 : 3;
+        uint32_t abp_d : 1; /**< [ 16: 16](R/W1C/H) Attention button pressed. */
+        uint32_t pf_d : 1; /**< [ 17: 17](R/W1C/H) Power fault detected. */
+        uint32_t mrls_c : 1; /**< [ 18: 18](R/W1C/H) MRL sensor changed. */
+        uint32_t pd_c : 1; /**< [ 19: 19](R/W1C/H) Presence detect changed. */
+        uint32_t ccint_d : 1; /**< [ 20: 20](R/W1C/H) Command completed. */
+        uint32_t mrlss : 1; /**< [ 21: 21](RO) MRL sensor state. */
+        uint32_t pds : 1; /**< [ 22: 22](RO) Presence detect state. */
+        uint32_t emis : 1; /**< [ 23: 23](RO) Electromechanical interlock status. */
+        uint32_t dlls_c : 1; /**< [ 24: 24](R/W1C/H) Data link layer state changed. */
+        uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_pciercx_cfg034_s cn83xx; */
+};
+typedef union bdk_pciercx_cfg034 bdk_pciercx_cfg034_t;
+
+static inline uint64_t BDK_PCIERCX_CFG034(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG034(unsigned long a)
+{
+    /* Per-model address decode: base 0x20000000088, one instance per
+       controller index, 0x100000000 apart.  CN81XX/CN83XX share the two-bit
+       index mask; CN88XX decodes three index bits. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2)) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3)))
+        return 0x20000000088ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000088ll + 0x100000000ll * ((a) & 0x7);
+    /* Out-of-range index or unknown model: report and abort. */
+    __bdk_csr_fatal("PCIERCX_CFG034", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG034(a) bdk_pciercx_cfg034_t
+#define bustype_BDK_PCIERCX_CFG034(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG034(a) "PCIERCX_CFG034"
+#define busnum_BDK_PCIERCX_CFG034(a) (a)
+#define arguments_BDK_PCIERCX_CFG034(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg035
+ *
+ * PCIe RC Root Control/Root Capabilities Register
+ * This register contains the thirty-sixth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg035
+{
+    uint32_t u;                          /* Raw 32-bit register value. */
+    struct bdk_pciercx_cfg035_s
+    {
+        /* The same fields are listed in both bit orders so absolute bit
+           positions are identical on either endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_17_31 : 15;
+        uint32_t crssv : 1; /**< [ 16: 16](RO) CRS software visibility. Not supported, hardwired to 0. */
+        uint32_t reserved_5_15 : 11;
+        uint32_t crssve : 1; /**< [ 4: 4](RO) CRS software visibility enable. Not supported, hardwired to 0. */
+        uint32_t pmeie : 1; /**< [ 3: 3](R/W) PME interrupt enable. */
+        uint32_t sefee : 1; /**< [ 2: 2](R/W) System error on fatal error enable. */
+        uint32_t senfee : 1; /**< [ 1: 1](R/W) System error on nonfatal error enable. */
+        uint32_t secee : 1; /**< [ 0: 0](R/W) System error on correctable error enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t secee : 1; /**< [ 0: 0](R/W) System error on correctable error enable. */
+        uint32_t senfee : 1; /**< [ 1: 1](R/W) System error on nonfatal error enable. */
+        uint32_t sefee : 1; /**< [ 2: 2](R/W) System error on fatal error enable. */
+        uint32_t pmeie : 1; /**< [ 3: 3](R/W) PME interrupt enable. */
+        uint32_t crssve : 1; /**< [ 4: 4](RO) CRS software visibility enable. Not supported, hardwired to 0. */
+        uint32_t reserved_5_15 : 11;
+        uint32_t crssv : 1; /**< [ 16: 16](RO) CRS software visibility. Not supported, hardwired to 0. */
+        uint32_t reserved_17_31 : 15;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg035_s cn; */
+    /* NOTE(review): the generator leaves the per-model alias commented out,
+       which appears to mean all supported models share layout 's'. */
+};
+typedef union bdk_pciercx_cfg035 bdk_pciercx_cfg035_t;
+
+static inline uint64_t BDK_PCIERCX_CFG035(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG035(unsigned long a)
+{
+    /* Per-model address decode: base 0x2000000008c, one instance per
+       controller index, 0x100000000 apart.  CN81XX/CN83XX share the two-bit
+       index mask; CN88XX decodes three index bits. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2)) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3)))
+        return 0x2000000008cll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x2000000008cll + 0x100000000ll * ((a) & 0x7);
+    /* Out-of-range index or unknown model: report and abort. */
+    __bdk_csr_fatal("PCIERCX_CFG035", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG035(a) bdk_pciercx_cfg035_t
+#define bustype_BDK_PCIERCX_CFG035(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG035(a) "PCIERCX_CFG035"
+#define busnum_BDK_PCIERCX_CFG035(a) (a)
+#define arguments_BDK_PCIERCX_CFG035(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg036
+ *
+ * PCIe RC Root Status Register
+ * This register contains the thirty-seventh 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg036
+{
+    uint32_t u;                          /* Raw 32-bit register value. */
+    struct bdk_pciercx_cfg036_s
+    {
+        /* The same fields are listed in both bit orders so absolute bit
+           positions are identical on either endianness. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_18_31 : 14;
+        uint32_t pme_pend : 1; /**< [ 17: 17](RO) PME pending. */
+        uint32_t pme_stat : 1; /**< [ 16: 16](R/W1C/H) PME status. */
+        uint32_t pme_rid : 16; /**< [ 15: 0](RO) PME requester ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t pme_rid : 16; /**< [ 15: 0](RO) PME requester ID. */
+        uint32_t pme_stat : 1; /**< [ 16: 16](R/W1C/H) PME status. */
+        uint32_t pme_pend : 1; /**< [ 17: 17](RO) PME pending. */
+        uint32_t reserved_18_31 : 14;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg036_s cn; */
+    /* NOTE(review): the generator leaves the per-model alias commented out,
+       which appears to mean all supported models share layout 's'. */
+};
+typedef union bdk_pciercx_cfg036 bdk_pciercx_cfg036_t;
+
+static inline uint64_t BDK_PCIERCX_CFG036(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG036(unsigned long a)
+{
+    /* Per-model address decode: base 0x20000000090, one instance per
+       controller index, 0x100000000 apart.  CN81XX/CN83XX share the two-bit
+       index mask; CN88XX decodes three index bits. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2)) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3)))
+        return 0x20000000090ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000090ll + 0x100000000ll * ((a) & 0x7);
+    /* Out-of-range index or unknown model: report and abort. */
+    __bdk_csr_fatal("PCIERCX_CFG036", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG036(a) bdk_pciercx_cfg036_t
+#define bustype_BDK_PCIERCX_CFG036(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG036(a) "PCIERCX_CFG036"
+#define busnum_BDK_PCIERCX_CFG036(a) (a)
+#define arguments_BDK_PCIERCX_CFG036(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg037
+ *
+ * PCIe RC Device Capabilities 2 Register
+ * This register contains the thirty-eighth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg037
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg037_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO) Extended fmt field supported (not supported). */
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t tag10b_req_supp : 1; /**< [ 17: 17](RO) 10-bit tag requestor supported (not supported). */
+ uint32_t tag10b_cpl_supp : 1; /**< [ 16: 16](RO) 10-bit tag completer supported (not supported). */
+ uint32_t ln_sys_cls : 2; /**< [ 15: 14](RO) LN System CLS (not supported). */
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported. */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t ln_sys_cls : 2; /**< [ 15: 14](RO) LN System CLS (not supported). */
+ uint32_t tag10b_cpl_supp : 1; /**< [ 16: 16](RO) 10-bit tag completer supported (not supported). */
+ uint32_t tag10b_req_supp : 1; /**< [ 17: 17](RO) 10-bit tag requestor supported (not supported). */
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO) Extended fmt field supported (not supported). */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg037_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO) Extended fmt field supported (not supported). */
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t reserved_14_17 : 4;
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported (not supported). */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported (not supported). */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t reserved_14_17 : 4;
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO) Extended fmt field supported (not supported). */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pciercx_cfg037_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO) Extended fmt field supported (not supported). */
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t reserved_14_17 : 4;
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported. */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t reserved_14_17 : 4;
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO) Extended fmt field supported (not supported). */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pciercx_cfg037_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO/WRSL) Extended fmt field supported (not supported). */
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t tag10b_req_supp : 1; /**< [ 17: 17](RO) 10-bit tag requestor supported (not supported). */
+ uint32_t tag10b_cpl_supp : 1; /**< [ 16: 16](RO) 10-bit tag completer supported (not supported). */
+ uint32_t ln_sys_cls : 2; /**< [ 15: 14](RO) LN System CLS (not supported). */
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported.
+ Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+ unsupported request. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported.
+ Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+ unsupported request. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported.
+ Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+ unsupported request. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported.
+ Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+ unsupported request. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported.
+ Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+ unsupported request. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported.
+ Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+ unsupported request. */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t ln_sys_cls : 2; /**< [ 15: 14](RO) LN System CLS (not supported). */
+ uint32_t tag10b_cpl_supp : 1; /**< [ 16: 16](RO) 10-bit tag completer supported (not supported). */
+ uint32_t tag10b_req_supp : 1; /**< [ 17: 17](RO) 10-bit tag requestor supported (not supported). */
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO/WRSL) Extended fmt field supported (not supported). */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_pciercx_cfg037_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO/WRSL) Extended fmt field supported. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t reserved_14_17 : 4;
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+ uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+ uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+ uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+ uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported. */
+ uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported. */
+ uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported. */
+ uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+ permitted in the relaxed ordering model. */
+ uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported (not supported). */
+ uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported (not supported). */
+ uint32_t reserved_14_17 : 4;
+ uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported (not supported). */
+ uint32_t effs : 1; /**< [ 20: 20](RO/WRSL) Extended fmt field supported. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t eetps : 1; /**< [ 21: 21](RO) End-end TLP prefix supported (not supported). */
+ uint32_t meetp : 2; /**< [ 23: 22](RO) Max end-end TLP prefixes.
+ 0x1 = 1.
+ 0x2 = 2.
+ 0x3 = 3.
+ 0x0 = 4. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pciercx_cfg037 bdk_pciercx_cfg037_t;
+
+static inline uint64_t BDK_PCIERCX_CFG037(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG037(unsigned long a)
+{
+ /* CSR address decode for PCIERC(a)_CFG037: one 4 GB window per controller,
+ starting at a common base. The valid range of 'a' depends on the model. */
+ const uint64_t base = 0x20000000094ll;
+ const uint64_t step = 0x100000000ll;
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+ return base + step * (a & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+ return base + step * (a & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+ return base + step * (a & 0x7);
+ /* Index out of range for every supported model: report and die. */
+ __bdk_csr_fatal("PCIERCX_CFG037", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG037(a) bdk_pciercx_cfg037_t
+#define bustype_BDK_PCIERCX_CFG037(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG037(a) "PCIERCX_CFG037"
+#define busnum_BDK_PCIERCX_CFG037(a) (a)
+#define arguments_BDK_PCIERCX_CFG037(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg038
+ *
+ * PCIe RC Device Control 2 Register/Device Status 2 Register
+ * This register contains the thirty-ninth 32-bits of PCIe type 1 configuration space.
+ * Field layout differs slightly per chip model; see the cn81xx, cn88xx and
+ * cn83xx variant structs below (access semantics of CTV and ATOM_OP_EB vary).
+ */
+union bdk_pciercx_cfg038
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg038_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t eetpb : 1; /**< [ 15: 15](RO) Unsupported end-end TLP prefix blocking. */
+ uint32_t obffe : 2; /**< [ 14: 13](RO) Optimized buffer flush fill (OBFF) enable (not supported). */
+ uint32_t reserved_12 : 1;
+ uint32_t tag10b_req_en : 1; /**< [ 11: 11](RO) 10-bit tag requestor enable (not supported). */
+ uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+ uint32_t id0_cp : 1; /**< [ 9: 9](RO) ID based ordering completion enable (not supported). */
+ uint32_t id0_rq : 1; /**< [ 8: 8](RO) ID based ordering request enable (not supported). */
+ uint32_t atom_op_eb : 1; /**< [ 7: 7](RO) AtomicOp egress blocking (not supported). */
+ uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+ uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+ uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+ uint32_t ctv : 4; /**< [ 3: 0](R/W) Completion timeout value.
+ 0x0 = Default range: 50 us to 50 ms.
+ 0x1 = 50 us to 100 us.
+ 0x2 = 1 ms to 10 ms.
+ 0x5 = 16 ms to 55 ms.
+ 0x6 = 65 ms to 210 ms.
+ 0x9 = 260 ms to 900 ms.
+ 0xA = 1 s to 3.5 s.
+ 0xD = 4 s to 13 s.
+ 0xE = 17 s to 64 s.
+
+ Values not defined are reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctv : 4; /**< [ 3: 0](R/W) Completion timeout value.
+ 0x0 = Default range: 50 us to 50 ms.
+ 0x1 = 50 us to 100 us.
+ 0x2 = 1 ms to 10 ms.
+ 0x5 = 16 ms to 55 ms.
+ 0x6 = 65 ms to 210 ms.
+ 0x9 = 260 ms to 900 ms.
+ 0xA = 1 s to 3.5 s.
+ 0xD = 4 s to 13 s.
+ 0xE = 17 s to 64 s.
+
+ Values not defined are reserved. */
+ uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+ uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+ uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+ uint32_t atom_op_eb : 1; /**< [ 7: 7](RO) AtomicOp egress blocking (not supported). */
+ uint32_t id0_rq : 1; /**< [ 8: 8](RO) ID based ordering request enable (not supported). */
+ uint32_t id0_cp : 1; /**< [ 9: 9](RO) ID based ordering completion enable (not supported). */
+ uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+ uint32_t tag10b_req_en : 1; /**< [ 11: 11](RO) 10-bit tag requestor enable (not supported). */
+ uint32_t reserved_12 : 1;
+ uint32_t obffe : 2; /**< [ 14: 13](RO) Optimized buffer flush fill (OBFF) enable (not supported). */
+ uint32_t eetpb : 1; /**< [ 15: 15](RO) Unsupported end-end TLP prefix blocking. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg038_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t eetpb : 1; /**< [ 15: 15](RO) Unsupported end-end TLP prefix blocking. */
+ uint32_t obffe : 2; /**< [ 14: 13](RO) Optimized buffer flush fill (OBFF) enable (not supported). */
+ uint32_t reserved_11_12 : 2;
+ uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+ uint32_t id0_cp : 1; /**< [ 9: 9](RO) ID based ordering completion enable (not supported). */
+ uint32_t id0_rq : 1; /**< [ 8: 8](RO) ID based ordering request enable (not supported). */
+ uint32_t atom_op_eb : 1; /**< [ 7: 7](RO) AtomicOp egress blocking (not supported). */
+ uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+ uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+ uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+ uint32_t ctv : 4; /**< [ 3: 0](R/W) Completion timeout value.
+ 0x0 = Default range: 50 us to 50 ms.
+ 0x1 = 50 us to 100 us.
+ 0x2 = 1 ms to 10 ms.
+ 0x5 = 16 ms to 55 ms.
+ 0x6 = 65 ms to 210 ms.
+ 0x9 = 260 ms to 900 ms.
+ 0xA = 1 s to 3.5 s.
+ 0xD = 4 s to 13 s.
+ 0xE = 17 s to 64 s.
+
+ Values not defined are reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctv : 4; /**< [ 3: 0](R/W) Completion timeout value.
+ 0x0 = Default range: 50 us to 50 ms.
+ 0x1 = 50 us to 100 us.
+ 0x2 = 1 ms to 10 ms.
+ 0x5 = 16 ms to 55 ms.
+ 0x6 = 65 ms to 210 ms.
+ 0x9 = 260 ms to 900 ms.
+ 0xA = 1 s to 3.5 s.
+ 0xD = 4 s to 13 s.
+ 0xE = 17 s to 64 s.
+
+ Values not defined are reserved. */
+ uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+ uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+ uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+ uint32_t atom_op_eb : 1; /**< [ 7: 7](RO) AtomicOp egress blocking (not supported). */
+ uint32_t id0_rq : 1; /**< [ 8: 8](RO) ID based ordering request enable (not supported). */
+ uint32_t id0_cp : 1; /**< [ 9: 9](RO) ID based ordering completion enable (not supported). */
+ uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+ uint32_t reserved_11_12 : 2;
+ uint32_t obffe : 2; /**< [ 14: 13](RO) Optimized buffer flush fill (OBFF) enable (not supported). */
+ uint32_t eetpb : 1; /**< [ 15: 15](RO) Unsupported end-end TLP prefix blocking. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pciercx_cfg038_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t eetpb : 1; /**< [ 15: 15](RO) Unsupported end-end TLP prefix blocking. */
+ uint32_t obffe : 2; /**< [ 14: 13](RO) Optimized buffer flush fill (OBFF) enable (not supported). */
+ uint32_t reserved_11_12 : 2;
+ uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+ uint32_t id0_cp : 1; /**< [ 9: 9](RO) ID based ordering completion enable (not supported). */
+ uint32_t id0_rq : 1; /**< [ 8: 8](RO) ID based ordering request enable (not supported). */
+ uint32_t atom_op_eb : 1; /**< [ 7: 7](RO) AtomicOp egress blocking (not supported). */
+ uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+ uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+ uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+ uint32_t ctv : 4; /**< [ 3: 0](RO/H) Completion timeout value.
+ 0x0 = Default range: 16 ms to 55 ms.
+ 0x1 = 50 us to 100 us.
+ 0x2 = 1 ms to 10 ms.
+ 0x3 = 16 ms to 55 ms.
+ 0x6 = 65 ms to 210 ms.
+ 0x9 = 260 ms to 900 ms.
+ 0xA = 1 s to 3.5 s.
+ 0xD = 4 s to 13 s.
+ 0xE = 17 s to 64 s.
+
+ Values not defined are reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctv : 4; /**< [ 3: 0](RO/H) Completion timeout value.
+ 0x0 = Default range: 16 ms to 55 ms.
+ 0x1 = 50 us to 100 us.
+ 0x2 = 1 ms to 10 ms.
+ 0x3 = 16 ms to 55 ms.
+ 0x6 = 65 ms to 210 ms.
+ 0x9 = 260 ms to 900 ms.
+ 0xA = 1 s to 3.5 s.
+ 0xD = 4 s to 13 s.
+ 0xE = 17 s to 64 s.
+
+ Values not defined are reserved. */
+ uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+ uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+ uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+ uint32_t atom_op_eb : 1; /**< [ 7: 7](RO) AtomicOp egress blocking (not supported). */
+ uint32_t id0_rq : 1; /**< [ 8: 8](RO) ID based ordering request enable (not supported). */
+ uint32_t id0_cp : 1; /**< [ 9: 9](RO) ID based ordering completion enable (not supported). */
+ uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+ uint32_t reserved_11_12 : 2;
+ uint32_t obffe : 2; /**< [ 14: 13](RO) Optimized buffer flush fill (OBFF) enable (not supported). */
+ uint32_t eetpb : 1; /**< [ 15: 15](RO) Unsupported end-end TLP prefix blocking. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pciercx_cfg038_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t eetpb : 1; /**< [ 15: 15](RO) Unsupported end-end TLP prefix blocking. */
+ uint32_t obffe : 2; /**< [ 14: 13](RO) Optimized buffer flush fill (OBFF) enable (not supported). */
+ uint32_t reserved_12 : 1;
+ uint32_t tag10b_req_en : 1; /**< [ 11: 11](RO) 10-bit tag requestor enable (not supported). */
+ uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+ uint32_t id0_cp : 1; /**< [ 9: 9](RO) ID based ordering completion enable (not supported). */
+ uint32_t id0_rq : 1; /**< [ 8: 8](RO) ID based ordering request enable (not supported). */
+ uint32_t atom_op_eb : 1; /**< [ 7: 7](R/W) AtomicOp egress blocking. */
+ uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+ uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+ uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+ uint32_t ctv : 4; /**< [ 3: 0](R/W/H) Completion timeout value.
+ 0x0 = Default range: 16 ms to 55 ms.
+ 0x1 = 50 us to 100 us.
+ 0x2 = 1 ms to 10 ms.
+ 0x3 = 16 ms to 55 ms.
+ 0x6 = 65 ms to 210 ms.
+ 0x9 = 260 ms to 900 ms.
+ 0xA = 1 s to 3.5 s.
+ 0xD = 4 s to 13 s.
+ 0xE = 17 s to 64 s.
+
+ Values not defined are reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctv : 4; /**< [ 3: 0](R/W/H) Completion timeout value.
+ 0x0 = Default range: 16 ms to 55 ms.
+ 0x1 = 50 us to 100 us.
+ 0x2 = 1 ms to 10 ms.
+ 0x3 = 16 ms to 55 ms.
+ 0x6 = 65 ms to 210 ms.
+ 0x9 = 260 ms to 900 ms.
+ 0xA = 1 s to 3.5 s.
+ 0xD = 4 s to 13 s.
+ 0xE = 17 s to 64 s.
+
+ Values not defined are reserved. */
+ uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+ uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+ uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+ uint32_t atom_op_eb : 1; /**< [ 7: 7](R/W) AtomicOp egress blocking. */
+ uint32_t id0_rq : 1; /**< [ 8: 8](RO) ID based ordering request enable (not supported). */
+ uint32_t id0_cp : 1; /**< [ 9: 9](RO) ID based ordering completion enable (not supported). */
+ uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+ uint32_t tag10b_req_en : 1; /**< [ 11: 11](RO) 10-bit tag requestor enable (not supported). */
+ uint32_t reserved_12 : 1;
+ uint32_t obffe : 2; /**< [ 14: 13](RO) Optimized buffer flush fill (OBFF) enable (not supported). */
+ uint32_t eetpb : 1; /**< [ 15: 15](RO) Unsupported end-end TLP prefix blocking. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg038 bdk_pciercx_cfg038_t;
+
+static inline uint64_t BDK_PCIERCX_CFG038(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG038(unsigned long a)
+{
+ /* CSR address decode for PCIERC(a)_CFG038: one 4 GB window per controller,
+ starting at a common base. The valid range of 'a' depends on the model. */
+ const uint64_t base = 0x20000000098ll;
+ const uint64_t step = 0x100000000ll;
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+ return base + step * (a & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+ return base + step * (a & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+ return base + step * (a & 0x7);
+ /* Index out of range for every supported model: report and die. */
+ __bdk_csr_fatal("PCIERCX_CFG038", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG038(a) bdk_pciercx_cfg038_t
+#define bustype_BDK_PCIERCX_CFG038(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG038(a) "PCIERCX_CFG038"
+#define busnum_BDK_PCIERCX_CFG038(a) (a)
+#define arguments_BDK_PCIERCX_CFG038(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg039
+ *
+ * PCIe RC Link Capabilities 2 Register
+ * This register contains the fortieth 32-bits of PCIe type 1 configuration space.
+ * Layout is identical on all supported models (single shared struct below).
+ */
+union bdk_pciercx_cfg039
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg039_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t cls : 1; /**< [ 8: 8](RO) Crosslink supported. */
+ uint32_t slsv : 7; /**< [ 7: 1](RO/WRSL) Supported link speeds vector. Indicates the supported link speeds of the associated port.
+ For each bit, a value of 1 indicates that the corresponding link speed is supported;
+ otherwise, the link speed is not supported. Bit definitions are:
+
+ _ Bit \<1\> = 2.5 GT/s.
+
+ _ Bit \<2\> = 5.0 GT/s.
+
+ _ Bit \<3\> = 8.0 GT/s.
+
+ _ Bits \<7:4\> are reserved.
+
+ The reset value of this field is controlled by the value read from PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x3: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x7: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x7: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t slsv : 7; /**< [ 7: 1](RO/WRSL) Supported link speeds vector. Indicates the supported link speeds of the associated port.
+ For each bit, a value of 1 indicates that the corresponding link speed is supported;
+ otherwise, the link speed is not supported. Bit definitions are:
+
+ _ Bit \<1\> = 2.5 GT/s.
+
+ _ Bit \<2\> = 5.0 GT/s.
+
+ _ Bit \<3\> = 8.0 GT/s.
+
+ _ Bits \<7:4\> are reserved.
+
+ The reset value of this field is controlled by the value read from PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x3: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x7: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x7: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+ uint32_t cls : 1; /**< [ 8: 8](RO) Crosslink supported. */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg039_s cn; */
+};
+typedef union bdk_pciercx_cfg039 bdk_pciercx_cfg039_t;
+
+static inline uint64_t BDK_PCIERCX_CFG039(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG039(unsigned long a)
+{
+ /* CSR address decode for PCIERC(a)_CFG039: one 4 GB window per controller,
+ starting at a common base. The valid range of 'a' depends on the model. */
+ const uint64_t base = 0x2000000009cll;
+ const uint64_t step = 0x100000000ll;
+
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+ return base + step * (a & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+ return base + step * (a & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+ return base + step * (a & 0x7);
+ /* Index out of range for every supported model: report and die. */
+ __bdk_csr_fatal("PCIERCX_CFG039", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG039(a) bdk_pciercx_cfg039_t
+#define bustype_BDK_PCIERCX_CFG039(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG039(a) "PCIERCX_CFG039"
+#define busnum_BDK_PCIERCX_CFG039(a) (a)
+#define arguments_BDK_PCIERCX_CFG039(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg040
+ *
+ * PCIe RC Link Control 2 Register/Link Status 2 Register
+ * This register contains the forty-first 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg040
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg040_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t rtd : 1; /**< [ 22: 22](RO) Retimer presence detected. */
+ uint32_t ler : 1; /**< [ 21: 21](R/W1C/H) Link equalization request */
+ uint32_t ep3s : 1; /**< [ 20: 20](RO/H) Equalization phase 3 successful */
+ uint32_t ep2s : 1; /**< [ 19: 19](RO/H) Equalization phase 2 successful */
+ uint32_t ep1s : 1; /**< [ 18: 18](RO/H) Equalization phase 1 successful */
+ uint32_t eqc : 1; /**< [ 17: 17](RO/H) Equalization complete */
+ uint32_t cdl : 1; /**< [ 16: 16](RO/H) Current deemphasis level. When the link is operating at 5 GT/s speed, this bit reflects
+ the level of deemphasis. Encodings:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ The value in this bit is undefined when the link is operating at 2.5 GT/s speed. */
+ uint32_t cde : 4; /**< [ 15: 12](R/W) Compliance deemphasis. This bit sets the deemphasis level in Polling.Compliance state if
+ the entry occurred due to the TX compliance receive bit being 1. Encodings:
+ 0x1 = -3.5 dB.
+ 0x0 = -6 dB.
+
+ When the Link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t csos : 1; /**< [ 11: 11](R/W) Compliance SOS. When set to 1, the LTSSM is required to send SKP ordered sets periodically
+ in between the (modified) compliance patterns.
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t emc : 1; /**< [ 10: 10](R/W) Enter modified compliance. When this bit is set to 1, the device transmits a modified
+ compliance pattern if the LTSSM enters Polling.Compliance state. */
+ uint32_t tm : 3; /**< [ 9: 7](R/W/H) Transmit margin. This field controls the value of the non-deemphasized voltage level at
+ the transmitter signals:
+ 0x0 = 800-1200 mV for full swing 400-600 mV for half-swing.
+ 0x1-0x2 = Values must be monotonic with a nonzero slope.
+ 0x3 = 200-400 mV for full-swing and 100-200 mV for half-swing.
+ 0x4-0x7 = Reserved.
+ This field is reset to 0x0 on entry to the LTSSM Polling.Compliance substate. When
+ operating in 5.0 GT/s mode with full swing, the deemphasis ratio must be maintained within
+ +/- 1 dB from the specification-defined operational value either -3.5 or -6 dB. */
+ uint32_t sde : 1; /**< [ 6: 6](RO/WRSL) Selectable deemphasis. When the link is operating at 5.0 GT/s speed, selects the level of
+ deemphasis:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ When the link is operating at 2.5 GT/s speed, the setting of this bit has no effect. */
+ uint32_t hasd : 1; /**< [ 5: 5](R/W) Hardware autonomous speed disable. When asserted, the application must disable hardware
+ from changing the link speed for device-specific reasons other than attempting to correct
+ unreliable link operation by reducing link speed. Initial transition to the highest
+ supported common link speed is not blocked by this signal. */
+ uint32_t ec : 1; /**< [ 4: 4](R/W) Enter compliance. Software is permitted to force a link to enter compliance mode at the
+ speed indicated in the target link speed field by setting this bit to 1 in both components
+ on a link and then initiating a hot reset on the link. */
+ uint32_t tls : 4; /**< [ 3: 0](R/W) Target link speed. For downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by the upstream component in its
+ training sequences:
+
+ 0x1 = 2.5 Gb/s target link speed.
+ 0x2 = 5 Gb/s target link speed.
+ 0x3 = 8 Gb/s target link speed.
+
+ All other encodings are reserved.
+
+ If a value is written to this field that does not correspond to a speed included in the
+ supported link speeds field, the result is undefined. For both upstream and downstream
+ ports, this field is used to set the target compliance mode speed when software is using
+ the enter compliance bit to force a link into compliance mode.
+ The reset value of this field is controlled by the value read from PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+#else /* Word 0 - Little Endian */
+ uint32_t tls : 4; /**< [ 3: 0](R/W) Target link speed. For downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by the upstream component in its
+ training sequences:
+
+ 0x1 = 2.5 Gb/s target link speed.
+ 0x2 = 5 Gb/s target link speed.
+ 0x3 = 8 Gb/s target link speed.
+
+ All other encodings are reserved.
+
+ If a value is written to this field that does not correspond to a speed included in the
+ supported link speeds field, the result is undefined. For both upstream and downstream
+ ports, this field is used to set the target compliance mode speed when software is using
+ the enter compliance bit to force a link into compliance mode.
+ The reset value of this field is controlled by the value read from PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+ uint32_t ec : 1; /**< [ 4: 4](R/W) Enter compliance. Software is permitted to force a link to enter compliance mode at the
+ speed indicated in the target link speed field by setting this bit to 1 in both components
+ on a link and then initiating a hot reset on the link. */
+ uint32_t hasd : 1; /**< [ 5: 5](R/W) Hardware autonomous speed disable. When asserted, the application must disable hardware
+ from changing the link speed for device-specific reasons other than attempting to correct
+ unreliable link operation by reducing link speed. Initial transition to the highest
+ supported common link speed is not blocked by this signal. */
+ uint32_t sde : 1; /**< [ 6: 6](RO/WRSL) Selectable deemphasis. When the link is operating at 5.0 GT/s speed, selects the level of
+ deemphasis:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ When the link is operating at 2.5 GT/s speed, the setting of this bit has no effect. */
+ uint32_t tm : 3; /**< [ 9: 7](R/W/H) Transmit margin. This field controls the value of the non-deemphasized voltage level at
+ the transmitter signals:
+ 0x0 = 800-1200 mV for full swing 400-600 mV for half-swing.
+ 0x1-0x2 = Values must be monotonic with a nonzero slope.
+ 0x3 = 200-400 mV for full-swing and 100-200 mV for half-swing.
+ 0x4-0x7 = Reserved.
+ This field is reset to 0x0 on entry to the LTSSM Polling.Compliance substate. When
+ operating in 5.0 GT/s mode with full swing, the deemphasis ratio must be maintained within
+ +/- 1 dB from the specification-defined operational value either -3.5 or -6 dB. */
+ uint32_t emc : 1; /**< [ 10: 10](R/W) Enter modified compliance. When this bit is set to 1, the device transmits a modified
+ compliance pattern if the LTSSM enters Polling.Compliance state. */
+ uint32_t csos : 1; /**< [ 11: 11](R/W) Compliance SOS. When set to 1, the LTSSM is required to send SKP ordered sets periodically
+ in between the (modified) compliance patterns.
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t cde : 4; /**< [ 15: 12](R/W) Compliance deemphasis. This bit sets the deemphasis level in Polling.Compliance state if
+ the entry occurred due to the TX compliance receive bit being 1. Encodings:
+ 0x1 = -3.5 dB.
+ 0x0 = -6 dB.
+
+ When the Link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t cdl : 1; /**< [ 16: 16](RO/H) Current deemphasis level. When the link is operating at 5 GT/s speed, this bit reflects
+ the level of deemphasis. Encodings:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ The value in this bit is undefined when the link is operating at 2.5 GT/s speed. */
+ uint32_t eqc : 1; /**< [ 17: 17](RO/H) Equalization complete */
+ uint32_t ep1s : 1; /**< [ 18: 18](RO/H) Equalization phase 1 successful */
+ uint32_t ep2s : 1; /**< [ 19: 19](RO/H) Equalization phase 2 successful */
+ uint32_t ep3s : 1; /**< [ 20: 20](RO/H) Equalization phase 3 successful */
+ uint32_t ler : 1; /**< [ 21: 21](R/W1C/H) Link equalization request */
+ uint32_t rtd : 1; /**< [ 22: 22](RO) Retimer presence detected. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg040_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t ler : 1; /**< [ 21: 21](R/W1C/H) Link equalization request */
+ uint32_t ep3s : 1; /**< [ 20: 20](RO/H) Equalization phase 3 successful */
+ uint32_t ep2s : 1; /**< [ 19: 19](RO/H) Equalization phase 2 successful */
+ uint32_t ep1s : 1; /**< [ 18: 18](RO/H) Equalization phase 1 successful */
+ uint32_t eqc : 1; /**< [ 17: 17](RO/H) Equalization complete */
+ uint32_t cdl : 1; /**< [ 16: 16](RO/H) Current deemphasis level. When the link is operating at 5 GT/s speed, this bit reflects
+ the level of deemphasis. Encodings:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ The value in this bit is undefined when the link is operating at 2.5 GT/s speed. */
+ uint32_t cde : 4; /**< [ 15: 12](R/W) Compliance deemphasis. This bit sets the deemphasis level in Polling.Compliance state if
+ the entry occurred due to the TX compliance receive bit being 1. Encodings:
+ 0x1 = -3.5 dB.
+ 0x0 = -6 dB.
+
+ When the Link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t csos : 1; /**< [ 11: 11](R/W) Compliance SOS. When set to 1, the LTSSM is required to send SKP ordered sets periodically
+ in between the (modified) compliance patterns.
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t emc : 1; /**< [ 10: 10](R/W) Enter modified compliance. When this bit is set to 1, the device transmits a modified
+ compliance pattern if the LTSSM enters Polling.Compliance state. */
+ uint32_t tm : 3; /**< [ 9: 7](R/W/H) Transmit margin. This field controls the value of the non-deemphasized voltage level at
+ the transmitter signals:
+ 0x0 = 800-1200 mV for full swing 400-600 mV for half-swing.
+ 0x1-0x2 = Values must be monotonic with a nonzero slope.
+ 0x3 = 200-400 mV for full-swing and 100-200 mV for half-swing.
+ 0x4-0x7 = Reserved.
+ This field is reset to 0x0 on entry to the LTSSM Polling.Compliance substate. When
+ operating in 5.0 GT/s mode with full swing, the deemphasis ratio must be maintained within
+ +/- 1 dB from the specification-defined operational value either -3.5 or -6 dB. */
+ uint32_t sde : 1; /**< [ 6: 6](RO/WRSL) Selectable deemphasis. When the link is operating at 5.0 GT/s speed, selects the level of
+ deemphasis:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ When the link is operating at 2.5 GT/s speed, the setting of this bit has no effect. */
+ uint32_t hasd : 1; /**< [ 5: 5](R/W) Hardware autonomous speed disable. When asserted, the application must disable hardware
+ from changing the link speed for device-specific reasons other than attempting to correct
+ unreliable link operation by reducing link speed. Initial transition to the highest
+ supported common link speed is not blocked by this signal. */
+ uint32_t ec : 1; /**< [ 4: 4](R/W) Enter compliance. Software is permitted to force a link to enter compliance mode at the
+ speed indicated in the target link speed field by setting this bit to 1 in both components
+ on a link and then initiating a hot reset on the link. */
+ uint32_t tls : 4; /**< [ 3: 0](R/W) Target link speed. For downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by the upstream component in its
+ training sequences:
+
+ 0x1 = 2.5 Gb/s target link speed.
+ 0x2 = 5 Gb/s target link speed.
+ 0x3 = 8 Gb/s target link speed.
+
+ All other encodings are reserved.
+
+ If a value is written to this field that does not correspond to a speed included in the
+ supported link speeds field, the result is undefined. For both upstream and downstream
+ ports, this field is used to set the target compliance mode speed when software is using
+ the enter compliance bit to force a link into compliance mode.
+ The reset value of this field is controlled by the value read from PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+#else /* Word 0 - Little Endian */
+ uint32_t tls : 4; /**< [ 3: 0](R/W) Target link speed. For downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by the upstream component in its
+ training sequences:
+
+ 0x1 = 2.5 Gb/s target link speed.
+ 0x2 = 5 Gb/s target link speed.
+ 0x3 = 8 Gb/s target link speed.
+
+ All other encodings are reserved.
+
+ If a value is written to this field that does not correspond to a speed included in the
+ supported link speeds field, the result is undefined. For both upstream and downstream
+ ports, this field is used to set the target compliance mode speed when software is using
+ the enter compliance bit to force a link into compliance mode.
+ The reset value of this field is controlled by the value read from PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+ uint32_t ec : 1; /**< [ 4: 4](R/W) Enter compliance. Software is permitted to force a link to enter compliance mode at the
+ speed indicated in the target link speed field by setting this bit to 1 in both components
+ on a link and then initiating a hot reset on the link. */
+ uint32_t hasd : 1; /**< [ 5: 5](R/W) Hardware autonomous speed disable. When asserted, the application must disable hardware
+ from changing the link speed for device-specific reasons other than attempting to correct
+ unreliable link operation by reducing link speed. Initial transition to the highest
+ supported common link speed is not blocked by this signal. */
+ uint32_t sde : 1; /**< [ 6: 6](RO/WRSL) Selectable deemphasis. When the link is operating at 5.0 GT/s speed, selects the level of
+ deemphasis:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ When the link is operating at 2.5 GT/s speed, the setting of this bit has no effect. */
+ uint32_t tm : 3; /**< [ 9: 7](R/W/H) Transmit margin. This field controls the value of the non-deemphasized voltage level at
+ the transmitter signals:
+ 0x0 = 800-1200 mV for full swing 400-600 mV for half-swing.
+ 0x1-0x2 = Values must be monotonic with a nonzero slope.
+ 0x3 = 200-400 mV for full-swing and 100-200 mV for half-swing.
+ 0x4-0x7 = Reserved.
+ This field is reset to 0x0 on entry to the LTSSM Polling.Compliance substate. When
+ operating in 5.0 GT/s mode with full swing, the deemphasis ratio must be maintained within
+ +/- 1 dB from the specification-defined operational value either -3.5 or -6 dB. */
+ uint32_t emc : 1; /**< [ 10: 10](R/W) Enter modified compliance. When this bit is set to 1, the device transmits a modified
+ compliance pattern if the LTSSM enters Polling.Compliance state. */
+ uint32_t csos : 1; /**< [ 11: 11](R/W) Compliance SOS. When set to 1, the LTSSM is required to send SKP ordered sets periodically
+ in between the (modified) compliance patterns.
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t cde : 4; /**< [ 15: 12](R/W) Compliance deemphasis. This bit sets the deemphasis level in Polling.Compliance state if
+ the entry occurred due to the TX compliance receive bit being 1. Encodings:
+ 0x1 = -3.5 dB.
+ 0x0 = -6 dB.
+
+ When the Link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t cdl : 1; /**< [ 16: 16](RO/H) Current deemphasis level. When the link is operating at 5 GT/s speed, this bit reflects
+ the level of deemphasis. Encodings:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ The value in this bit is undefined when the link is operating at 2.5 GT/s speed. */
+ uint32_t eqc : 1; /**< [ 17: 17](RO/H) Equalization complete */
+ uint32_t ep1s : 1; /**< [ 18: 18](RO/H) Equalization phase 1 successful */
+ uint32_t ep2s : 1; /**< [ 19: 19](RO/H) Equalization phase 2 successful */
+ uint32_t ep3s : 1; /**< [ 20: 20](RO/H) Equalization phase 3 successful */
+ uint32_t ler : 1; /**< [ 21: 21](R/W1C/H) Link equalization request */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pciercx_cfg040_cn81xx cn88xx; */
+ struct bdk_pciercx_cfg040_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t rtd : 1; /**< [ 22: 22](RO) Retimer presence detected. */
+ uint32_t ler : 1; /**< [ 21: 21](R/W1C/H) Link equalization request */
+ uint32_t ep3s : 1; /**< [ 20: 20](RO/H) Equalization phase 3 successful */
+ uint32_t ep2s : 1; /**< [ 19: 19](RO/H) Equalization phase 2 successful */
+ uint32_t ep1s : 1; /**< [ 18: 18](RO/H) Equalization phase 1 successful */
+ uint32_t eqc : 1; /**< [ 17: 17](RO/H) Equalization complete */
+ uint32_t cdl : 1; /**< [ 16: 16](RO/H) Current deemphasis level. When the link is operating at 5 GT/s speed, this bit reflects
+ the level of deemphasis. Encodings:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ The value in this bit is undefined when the link is operating at 2.5 GT/s speed. */
+ uint32_t cde : 4; /**< [ 15: 12](R/W) Compliance deemphasis. This bit sets the deemphasis level in Polling.Compliance state if
+ the entry occurred due to the TX compliance receive bit being 1. Encodings:
+ 0x1 = -3.5 dB.
+ 0x0 = -6 dB.
+
+ When the Link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t csos : 1; /**< [ 11: 11](R/W) Compliance SOS. When set to 1, the LTSSM is required to send SKP ordered sets periodically
+ in between the (modified) compliance patterns.
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t emc : 1; /**< [ 10: 10](R/W) Enter modified compliance. When this bit is set to 1, the device transmits a modified
+ compliance pattern if the LTSSM enters Polling.Compliance state. */
+ uint32_t tm : 3; /**< [ 9: 7](R/W/H) Transmit margin. This field controls the value of the non-deemphasized voltage level at
+ the transmitter signals:
+ 0x0 = 800-1200 mV for full swing 400-600 mV for half-swing.
+ 0x1-0x2 = Values must be monotonic with a nonzero slope.
+ 0x3 = 200-400 mV for full-swing and 100-200 mV for half-swing.
+ 0x4-0x7 = Reserved.
+ This field is reset to 0x0 on entry to the LTSSM Polling.Compliance substate. When
+ operating in 5.0 GT/s mode with full swing, the deemphasis ratio must be maintained within
+ +/- 1 dB from the specification-defined operational value either -3.5 or -6 dB. */
+ uint32_t sde : 1; /**< [ 6: 6](RO/WRSL) Selectable deemphasis. When the link is operating at 5.0 GT/s speed, selects the level of
+ deemphasis on the downstream device. Must be set prior to link training.
+ 0 = -6 dB.
+ 1 = -3.5 dB.
+
+ When the link is operating at 2.5 GT/s speed, the setting of this bit has no effect.
+
+ PCIERC()_CFG515[S_D_E] can be used to change the deemphasis on the upstream ports. */
+ uint32_t hasd : 1; /**< [ 5: 5](R/W) Hardware autonomous speed disable. When asserted, the application must disable hardware
+ from changing the link speed for device-specific reasons other than attempting to correct
+ unreliable link operation by reducing link speed. Initial transition to the highest
+ supported common link speed is not blocked by this signal. */
+ uint32_t ec : 1; /**< [ 4: 4](R/W) Enter compliance. Software is permitted to force a link to enter compliance mode at the
+ speed indicated in the target link speed field by setting this bit to 1 in both components
+ on a link and then initiating a hot reset on the link. */
+ uint32_t tls : 4; /**< [ 3: 0](R/W) Target link speed. For downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by the upstream component in its
+ training sequences:
+
+ 0x1 = 2.5 Gb/s target link speed.
+ 0x2 = 5 Gb/s target link speed.
+ 0x3 = 8 Gb/s target link speed.
+
+ All other encodings are reserved.
+
+ If a value is written to this field that does not correspond to a speed included in the
+ supported link speeds field, the result is undefined. For both upstream and downstream
+ ports, this field is used to set the target compliance mode speed when software is using
+ the enter compliance bit to force a link into compliance mode.
+ The reset value of this field is controlled by the value read from PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+#else /* Word 0 - Little Endian */
+ uint32_t tls : 4; /**< [ 3: 0](R/W) Target link speed. For downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by the upstream component in its
+ training sequences:
+
+ 0x1 = 2.5 Gb/s target link speed.
+ 0x2 = 5 Gb/s target link speed.
+ 0x3 = 8 Gb/s target link speed.
+
+ All other encodings are reserved.
+
+ If a value is written to this field that does not correspond to a speed included in the
+ supported link speeds field, the result is undefined. For both upstream and downstream
+ ports, this field is used to set the target compliance mode speed when software is using
+ the enter compliance bit to force a link into compliance mode.
+ The reset value of this field is controlled by the value read from PEM()_CFG[MD].
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+ uint32_t ec : 1; /**< [ 4: 4](R/W) Enter compliance. Software is permitted to force a link to enter compliance mode at the
+ speed indicated in the target link speed field by setting this bit to 1 in both components
+ on a link and then initiating a hot reset on the link. */
+ uint32_t hasd : 1; /**< [ 5: 5](R/W) Hardware autonomous speed disable. When asserted, the application must disable hardware
+ from changing the link speed for device-specific reasons other than attempting to correct
+ unreliable link operation by reducing link speed. Initial transition to the highest
+ supported common link speed is not blocked by this signal. */
+ uint32_t sde : 1; /**< [ 6: 6](RO/WRSL) Selectable deemphasis. When the link is operating at 5.0 GT/s speed, selects the level of
+ deemphasis on the downstream device. Must be set prior to link training.
+ 0 = -6 dB.
+ 1 = -3.5 dB.
+
+ When the link is operating at 2.5 GT/s speed, the setting of this bit has no effect.
+
+ PCIERC()_CFG515[S_D_E] can be used to change the deemphasis on the upstream ports. */
+ uint32_t tm : 3; /**< [ 9: 7](R/W/H) Transmit margin. This field controls the value of the non-deemphasized voltage level at
+ the transmitter signals:
+ 0x0 = 800-1200 mV for full swing 400-600 mV for half-swing.
+ 0x1-0x2 = Values must be monotonic with a nonzero slope.
+ 0x3 = 200-400 mV for full-swing and 100-200 mV for half-swing.
+ 0x4-0x7 = Reserved.
+ This field is reset to 0x0 on entry to the LTSSM Polling.Compliance substate. When
+ operating in 5.0 GT/s mode with full swing, the deemphasis ratio must be maintained within
+ +/- 1 dB from the specification-defined operational value either -3.5 or -6 dB. */
+ uint32_t emc : 1; /**< [ 10: 10](R/W) Enter modified compliance. When this bit is set to 1, the device transmits a modified
+ compliance pattern if the LTSSM enters Polling.Compliance state. */
+ uint32_t csos : 1; /**< [ 11: 11](R/W) Compliance SOS. When set to 1, the LTSSM is required to send SKP ordered sets periodically
+ in between the (modified) compliance patterns.
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t cde : 4; /**< [ 15: 12](R/W) Compliance deemphasis. This bit sets the deemphasis level in Polling.Compliance state if
+ the entry occurred due to the TX compliance receive bit being 1. Encodings:
+ 0x1 = -3.5 dB.
+ 0x0 = -6 dB.
+
+ When the Link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t cdl : 1; /**< [ 16: 16](RO/H) Current deemphasis level. When the link is operating at 5 GT/s speed, this bit reflects
+ the level of deemphasis. Encodings:
+ 1 = -3.5 dB.
+ 0 = -6 dB.
+
+ The value in this bit is undefined when the link is operating at 2.5 GT/s speed. */
+ uint32_t eqc : 1; /**< [ 17: 17](RO/H) Equalization complete */
+ uint32_t ep1s : 1; /**< [ 18: 18](RO/H) Equalization phase 1 successful */
+ uint32_t ep2s : 1; /**< [ 19: 19](RO/H) Equalization phase 2 successful */
+ uint32_t ep3s : 1; /**< [ 20: 20](RO/H) Equalization phase 3 successful */
+ uint32_t ler : 1; /**< [ 21: 21](R/W1C/H) Link equalization request */
+ uint32_t rtd : 1; /**< [ 22: 22](RO) Retimer presence detected. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg040 bdk_pciercx_cfg040_t;
+
+/* Return the physical CSR address of PCIERC(a)_CFG040 for root-complex
+   instance 'a'.  Valid instance ranges are model-specific (CN81XX: 0..2,
+   CN83XX: 0..3, CN88XX: 0..5); each instance sits at a 0x100000000
+   (4 GiB) stride above base 0x200000000a0.  Out-of-range indices fall
+   through to __bdk_csr_fatal() -- NOTE(review): assumed to be declared
+   noreturn (confirm in bdk-csr.h), otherwise this non-void function
+   would end without a return value. */
+static inline uint64_t BDK_PCIERCX_CFG040(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG040(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x200000000a0ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000000a0ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x200000000a0ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG040", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_PCIERCX_CFG040(a) bdk_pciercx_cfg040_t
+#define bustype_BDK_PCIERCX_CFG040(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG040(a) "PCIERCX_CFG040"
+#define busnum_BDK_PCIERCX_CFG040(a) (a)
+#define arguments_BDK_PCIERCX_CFG040(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg041
+ *
+ * PCIe RC Slot Capabilities 2 Register
+ * This register contains the forty-second 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg041
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg041_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31 : 32; /* all 32 bits are reserved on every modeled chip */
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg041_s cn; */
+};
+typedef union bdk_pciercx_cfg041 bdk_pciercx_cfg041_t;
+
+/* CSR address of PCIERC(a)_CFG041 for instance 'a' (CN81XX: 0..2,
+   CN83XX: 0..3, CN88XX: 0..5, 4 GiB stride per instance); calls
+   __bdk_csr_fatal() for out-of-range or unknown-model cases. */
+static inline uint64_t BDK_PCIERCX_CFG041(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG041(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x200000000a4ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000000a4ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x200000000a4ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG041", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_PCIERCX_CFG041(a) bdk_pciercx_cfg041_t
+#define bustype_BDK_PCIERCX_CFG041(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG041(a) "PCIERCX_CFG041"
+#define busnum_BDK_PCIERCX_CFG041(a) (a)
+#define arguments_BDK_PCIERCX_CFG041(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg042
+ *
+ * PCIe RC Slot Control 2 Register/Slot Status 2 Register
+ * This register contains the forty-third 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg042
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg042_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31 : 32; /* all 32 bits are reserved on every modeled chip */
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg042_s cn; */
+};
+typedef union bdk_pciercx_cfg042 bdk_pciercx_cfg042_t;
+
+/* CSR address of PCIERC(a)_CFG042 for instance 'a' (CN81XX: 0..2,
+   CN83XX: 0..3, CN88XX: 0..5, 4 GiB stride per instance); calls
+   __bdk_csr_fatal() for out-of-range or unknown-model cases. */
+static inline uint64_t BDK_PCIERCX_CFG042(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG042(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x200000000a8ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000000a8ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x200000000a8ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG042", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_PCIERCX_CFG042(a) bdk_pciercx_cfg042_t
+#define bustype_BDK_PCIERCX_CFG042(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG042(a) "PCIERCX_CFG042"
+#define busnum_BDK_PCIERCX_CFG042(a) (a)
+#define arguments_BDK_PCIERCX_CFG042(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg044
+ *
+ * PCIe RC PCI Express MSI-X Capability ID/MSI-X Next Item Pointer/MSI-X Control Register
+ * This register contains the forty-fifth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg044
+{
+    uint32_t u;
+    /* The per-chip variants below share an identical field layout; they
+       differ only in access-type annotations (e.g. RO vs. RO/H) and
+       description wording, so any variant may be used to decode 'u'. */
+    struct bdk_pciercx_cfg044_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixen : 1; /**< [ 31: 31](RO/WRSL) MSI-X enable. */
+        uint32_t funm : 1; /**< [ 30: 30](RO/WRSL) Function mask.
+                               0 = Each vectors mask bit determines whether the vector is masked or not.
+                               1 = All vectors associated with the function are masked, regardless of their respective
+                               per-vector mask bits. */
+        uint32_t reserved_27_29 : 3;
+        uint32_t msixts : 11; /**< [ 26: 16](RO/WRSL) MSI-X table size encoded as (table size - 1). */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) "Next capability pointer. Writable through PEM#_CFG_WR. However, the application must not
+                              change this field." */
+        uint32_t msixcid : 8; /**< [ 7: 0](RO) MSI-X capability ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t msixcid : 8; /**< [ 7: 0](RO) MSI-X capability ID. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) "Next capability pointer. Writable through PEM#_CFG_WR. However, the application must not
+                              change this field." */
+        uint32_t msixts : 11; /**< [ 26: 16](RO/WRSL) MSI-X table size encoded as (table size - 1). */
+        uint32_t reserved_27_29 : 3;
+        uint32_t funm : 1; /**< [ 30: 30](RO/WRSL) Function mask.
+                               0 = Each vectors mask bit determines whether the vector is masked or not.
+                               1 = All vectors associated with the function are masked, regardless of their respective
+                               per-vector mask bits. */
+        uint32_t msixen : 1; /**< [ 31: 31](RO/WRSL) MSI-X enable. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg044_s cn81xx; */
+    struct bdk_pciercx_cfg044_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixen : 1; /**< [ 31: 31](RO/WRSL) MSI-X enable. If MSI-X is enabled, MSI and INTx must be disabled. */
+        uint32_t funm : 1; /**< [ 30: 30](RO/WRSL) Function mask.
+                               0 = Each vectors mask bit determines whether the vector is masked or not.
+                               1 = All vectors associated with the function are masked, regardless of their respective
+                               per-vector mask bits. */
+        uint32_t reserved_27_29 : 3;
+        uint32_t msixts : 11; /**< [ 26: 16](RO/WRSL/H) MSI-X table size encoded as (table size - 1). */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) "Next capability pointer. Writable through PEM#_CFG_WR. However, the application must not
+                              change this field." */
+        uint32_t msixcid : 8; /**< [ 7: 0](RO/H) MSI-X capability ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t msixcid : 8; /**< [ 7: 0](RO/H) MSI-X capability ID. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) "Next capability pointer. Writable through PEM#_CFG_WR. However, the application must not
+                              change this field." */
+        uint32_t msixts : 11; /**< [ 26: 16](RO/WRSL/H) MSI-X table size encoded as (table size - 1). */
+        uint32_t reserved_27_29 : 3;
+        uint32_t funm : 1; /**< [ 30: 30](RO/WRSL) Function mask.
+                               0 = Each vectors mask bit determines whether the vector is masked or not.
+                               1 = All vectors associated with the function are masked, regardless of their respective
+                               per-vector mask bits. */
+        uint32_t msixen : 1; /**< [ 31: 31](RO/WRSL) MSI-X enable. If MSI-X is enabled, MSI and INTx must be disabled. */
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_pciercx_cfg044_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixen : 1; /**< [ 31: 31](RO/WRSL) MSI-X enable. If MSI-X is enabled, MSI and INTx must be disabled. */
+        uint32_t funm : 1; /**< [ 30: 30](RO/WRSL) Function mask.
+                               0 = Each vectors mask bit determines whether the vector is masked or not.
+                               1 = All vectors associated with the function are masked, regardless of their respective
+                               per-vector mask bits. */
+        uint32_t reserved_27_29 : 3;
+        uint32_t msixts : 11; /**< [ 26: 16](RO/WRSL) MSI-X table size encoded as (table size - 1). */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) "Next capability pointer. Writable through PEM#_CFG_WR. However, the application must not
+                              change this field." */
+        uint32_t msixcid : 8; /**< [ 7: 0](RO/H) MSI-X capability ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t msixcid : 8; /**< [ 7: 0](RO/H) MSI-X capability ID. */
+        uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) "Next capability pointer. Writable through PEM#_CFG_WR. However, the application must not
+                              change this field." */
+        uint32_t msixts : 11; /**< [ 26: 16](RO/WRSL) MSI-X table size encoded as (table size - 1). */
+        uint32_t reserved_27_29 : 3;
+        uint32_t funm : 1; /**< [ 30: 30](RO/WRSL) Function mask.
+                               0 = Each vectors mask bit determines whether the vector is masked or not.
+                               1 = All vectors associated with the function are masked, regardless of their respective
+                               per-vector mask bits. */
+        uint32_t msixen : 1; /**< [ 31: 31](RO/WRSL) MSI-X enable. If MSI-X is enabled, MSI and INTx must be disabled. */
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg044 bdk_pciercx_cfg044_t;
+
+/* CSR address of PCIERC(a)_CFG044 for instance 'a' (CN81XX: 0..2,
+   CN83XX: 0..3, CN88XX: 0..5, 4 GiB stride per instance); calls
+   __bdk_csr_fatal() for out-of-range or unknown-model cases. */
+static inline uint64_t BDK_PCIERCX_CFG044(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG044(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x200000000b0ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000000b0ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x200000000b0ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG044", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_PCIERCX_CFG044(a) bdk_pciercx_cfg044_t
+#define bustype_BDK_PCIERCX_CFG044(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG044(a) "PCIERCX_CFG044"
+#define busnum_BDK_PCIERCX_CFG044(a) (a)
+#define arguments_BDK_PCIERCX_CFG044(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg045
+ *
+ * PCIe RC PCI Express MSI-X Table Offset and BIR Register
+ * This register contains the forty-sixth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg045
+{
+    uint32_t u;
+    /* cn88xx differs from 's' only in the [MSIXTOFFS] access annotation
+       (RO/WRSL vs. RO/WRSL/H); the field layout is identical. */
+    struct bdk_pciercx_cfg045_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixtoffs : 29; /**< [ 31: 3](RO/WRSL) MSI-X table offset register. Base address of the MSI-X table, as an offset from the base
+                                     address of the BAR indicated by the table BIR bits. Writable through PEM()_CFG_WR.
+                                     However, the application must not change this field. */
+        uint32_t msixtbir : 3; /**< [ 2: 0](RO/WRSL) "MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                   table into memory space.
+                                   Writable through PEM()_CFG_WR. However, the application must not change this field." */
+#else /* Word 0 - Little Endian */
+        uint32_t msixtbir : 3; /**< [ 2: 0](RO/WRSL) "MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                   table into memory space.
+                                   Writable through PEM()_CFG_WR. However, the application must not change this field." */
+        uint32_t msixtoffs : 29; /**< [ 31: 3](RO/WRSL) MSI-X table offset register. Base address of the MSI-X table, as an offset from the base
+                                     address of the BAR indicated by the table BIR bits. Writable through PEM()_CFG_WR.
+                                     However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg045_s cn81xx; */
+    struct bdk_pciercx_cfg045_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixtoffs : 29; /**< [ 31: 3](RO/WRSL/H) MSI-X table offset register. Base address of the MSI-X table, as an offset from the base
+                                     address of the BAR indicated by the table BIR bits. Writable through PEM()_CFG_WR.
+                                     However, the application must not change this field. */
+        uint32_t msixtbir : 3; /**< [ 2: 0](RO/WRSL) "MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                   table into memory space.
+                                   Writable through PEM()_CFG_WR. However, the application must not change this field." */
+#else /* Word 0 - Little Endian */
+        uint32_t msixtbir : 3; /**< [ 2: 0](RO/WRSL) "MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                   table into memory space.
+                                   Writable through PEM()_CFG_WR. However, the application must not change this field." */
+        uint32_t msixtoffs : 29; /**< [ 31: 3](RO/WRSL/H) MSI-X table offset register. Base address of the MSI-X table, as an offset from the base
+                                     address of the BAR indicated by the table BIR bits. Writable through PEM()_CFG_WR.
+                                     However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_pciercx_cfg045_s cn83xx; */
+};
+typedef union bdk_pciercx_cfg045 bdk_pciercx_cfg045_t;
+
+/* CSR address of PCIERC(a)_CFG045 for instance 'a' (CN81XX: 0..2,
+   CN83XX: 0..3, CN88XX: 0..5, 4 GiB stride per instance); calls
+   __bdk_csr_fatal() for out-of-range or unknown-model cases. */
+static inline uint64_t BDK_PCIERCX_CFG045(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG045(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x200000000b4ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000000b4ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x200000000b4ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG045", 1, a, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* access macros. */
+#define typedef_BDK_PCIERCX_CFG045(a) bdk_pciercx_cfg045_t
+#define bustype_BDK_PCIERCX_CFG045(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG045(a) "PCIERCX_CFG045"
+#define busnum_BDK_PCIERCX_CFG045(a) (a)
+#define arguments_BDK_PCIERCX_CFG045(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg046
+ *
+ * PCIe RC PCI Express MSI-X PBA Offset and BIR Register
+ * This register contains the forty-seventh 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg046
+{
+    uint32_t u;
+    /* NOTE(review): the [MSIXPOFFS] description below reads "MSI-X table
+       offset" although, per the register title and field name, it is the
+       PBA offset; the wording follows the generated HRM text.  cn88xx
+       differs from 's' only in the [MSIXPOFFS] access annotation
+       (RO/WRSL vs. RO/WRSL/H); the field layout is identical. */
+    struct bdk_pciercx_cfg046_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixpoffs : 29; /**< [ 31: 3](RO/WRSL) MSI-X table offset register. Base address of the MSI-X PBA, as an offset from the base
+                                     address of the BAR indicated by the table PBA bits. Writable through PEM()_CFG_WR.
+                                     However, the application must not change this field. */
+        uint32_t msixpbir : 3; /**< [ 2: 0](RO/WRSL) "MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                   pending bit array into memory space.
+                                   Writable through PEM()_CFG_WR. However, the application must not change this field." */
+#else /* Word 0 - Little Endian */
+        uint32_t msixpbir : 3; /**< [ 2: 0](RO/WRSL) "MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                   pending bit array into memory space.
+                                   Writable through PEM()_CFG_WR. However, the application must not change this field." */
+        uint32_t msixpoffs : 29; /**< [ 31: 3](RO/WRSL) MSI-X table offset register. Base address of the MSI-X PBA, as an offset from the base
+                                     address of the BAR indicated by the table PBA bits. Writable through PEM()_CFG_WR.
+                                     However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg046_s cn81xx; */
+    struct bdk_pciercx_cfg046_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixpoffs : 29; /**< [ 31: 3](RO/WRSL/H) MSI-X table offset register. Base address of the MSI-X PBA, as an offset from the base
+                                     address of the BAR indicated by the table PBA bits. Writable through PEM()_CFG_WR.
+                                     However, the application must not change this field. */
+        uint32_t msixpbir : 3; /**< [ 2: 0](RO/WRSL) "MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                   pending bit array into memory space.
+                                   Writable through PEM()_CFG_WR. However, the application must not change this field." */
+#else /* Word 0 - Little Endian */
+        uint32_t msixpbir : 3; /**< [ 2: 0](RO/WRSL) "MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                   pending bit array into memory space.
+                                   Writable through PEM()_CFG_WR. However, the application must not change this field." */
+        uint32_t msixpoffs : 29; /**< [ 31: 3](RO/WRSL/H) MSI-X table offset register. Base address of the MSI-X PBA, as an offset from the base
+                                     address of the BAR indicated by the table PBA bits. Writable through PEM()_CFG_WR.
+                                     However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_pciercx_cfg046_cn88xx cn83xx; */
+};
+typedef union bdk_pciercx_cfg046 bdk_pciercx_cfg046_t;
+
+static inline uint64_t BDK_PCIERCX_CFG046(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG046(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000b8ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000b8ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x200000000b8ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG046", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG046(a) bdk_pciercx_cfg046_t
+#define bustype_BDK_PCIERCX_CFG046(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG046(a) "PCIERCX_CFG046"
+#define busnum_BDK_PCIERCX_CFG046(a) (a)
+#define arguments_BDK_PCIERCX_CFG046(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg047
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg047
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg047_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg047_s cn; */
+};
+typedef union bdk_pciercx_cfg047 bdk_pciercx_cfg047_t;
+
+/* Physical address of PCIERC(a)_CFG047: byte offset 0xbc inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal().
+ * NOTE(review): unlike CFG045/CFG046 there is no CN88XX case here, so
+ * CN88XX always hits the fatal path -- presumably intentional in the
+ * generated data; confirm against the hardware reference manual. */
+static inline uint64_t BDK_PCIERCX_CFG047(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG047(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000bcll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000bcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG047", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG047(a) bdk_pciercx_cfg047_t
+#define bustype_BDK_PCIERCX_CFG047(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG047(a) "PCIERCX_CFG047"
+#define busnum_BDK_PCIERCX_CFG047(a) (a)
+#define arguments_BDK_PCIERCX_CFG047(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg048
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg048
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg048_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg048_s cn; */
+};
+typedef union bdk_pciercx_cfg048 bdk_pciercx_cfg048_t;
+
+/* Physical address of PCIERC(a)_CFG048: byte offset 0xc0 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG048(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG048(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000c0ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000c0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG048", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG048(a) bdk_pciercx_cfg048_t
+#define bustype_BDK_PCIERCX_CFG048(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG048(a) "PCIERCX_CFG048"
+#define busnum_BDK_PCIERCX_CFG048(a) (a)
+#define arguments_BDK_PCIERCX_CFG048(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg049
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg049
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg049_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg049_s cn; */
+};
+typedef union bdk_pciercx_cfg049 bdk_pciercx_cfg049_t;
+
+/* Physical address of PCIERC(a)_CFG049: byte offset 0xc4 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG049(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG049(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000c4ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000c4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG049", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG049(a) bdk_pciercx_cfg049_t
+#define bustype_BDK_PCIERCX_CFG049(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG049(a) "PCIERCX_CFG049"
+#define busnum_BDK_PCIERCX_CFG049(a) (a)
+#define arguments_BDK_PCIERCX_CFG049(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg050
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg050
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg050_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg050_s cn; */
+};
+typedef union bdk_pciercx_cfg050 bdk_pciercx_cfg050_t;
+
+/* Physical address of PCIERC(a)_CFG050: byte offset 0xc8 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG050(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG050(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000c8ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000c8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG050", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG050(a) bdk_pciercx_cfg050_t
+#define bustype_BDK_PCIERCX_CFG050(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG050(a) "PCIERCX_CFG050"
+#define busnum_BDK_PCIERCX_CFG050(a) (a)
+#define arguments_BDK_PCIERCX_CFG050(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg051
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg051
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg051_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg051_s cn; */
+};
+typedef union bdk_pciercx_cfg051 bdk_pciercx_cfg051_t;
+
+/* Physical address of PCIERC(a)_CFG051: byte offset 0xcc inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG051(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG051(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000ccll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000ccll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG051", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG051(a) bdk_pciercx_cfg051_t
+#define bustype_BDK_PCIERCX_CFG051(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG051(a) "PCIERCX_CFG051"
+#define busnum_BDK_PCIERCX_CFG051(a) (a)
+#define arguments_BDK_PCIERCX_CFG051(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg052
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg052
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg052_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg052_s cn; */
+};
+typedef union bdk_pciercx_cfg052 bdk_pciercx_cfg052_t;
+
+/* Physical address of PCIERC(a)_CFG052: byte offset 0xd0 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG052(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG052(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000d0ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000d0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG052", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG052(a) bdk_pciercx_cfg052_t
+#define bustype_BDK_PCIERCX_CFG052(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG052(a) "PCIERCX_CFG052"
+#define busnum_BDK_PCIERCX_CFG052(a) (a)
+#define arguments_BDK_PCIERCX_CFG052(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg053
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg053
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg053_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg053_s cn; */
+};
+typedef union bdk_pciercx_cfg053 bdk_pciercx_cfg053_t;
+
+/* Physical address of PCIERC(a)_CFG053: byte offset 0xd4 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG053(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG053(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000d4ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000d4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG053", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG053(a) bdk_pciercx_cfg053_t
+#define bustype_BDK_PCIERCX_CFG053(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG053(a) "PCIERCX_CFG053"
+#define busnum_BDK_PCIERCX_CFG053(a) (a)
+#define arguments_BDK_PCIERCX_CFG053(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg054
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg054
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg054_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg054_s cn; */
+};
+typedef union bdk_pciercx_cfg054 bdk_pciercx_cfg054_t;
+
+/* Physical address of PCIERC(a)_CFG054: byte offset 0xd8 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG054(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG054(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000d8ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000d8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG054", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG054(a) bdk_pciercx_cfg054_t
+#define bustype_BDK_PCIERCX_CFG054(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG054(a) "PCIERCX_CFG054"
+#define busnum_BDK_PCIERCX_CFG054(a) (a)
+#define arguments_BDK_PCIERCX_CFG054(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg055
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg055
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg055_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg055_s cn; */
+};
+typedef union bdk_pciercx_cfg055 bdk_pciercx_cfg055_t;
+
+/* Physical address of PCIERC(a)_CFG055: byte offset 0xdc inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG055(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG055(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000dcll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000dcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG055", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG055(a) bdk_pciercx_cfg055_t
+#define bustype_BDK_PCIERCX_CFG055(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG055(a) "PCIERCX_CFG055"
+#define busnum_BDK_PCIERCX_CFG055(a) (a)
+#define arguments_BDK_PCIERCX_CFG055(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg056
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg056
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg056_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg056_s cn; */
+};
+typedef union bdk_pciercx_cfg056 bdk_pciercx_cfg056_t;
+
+/* Physical address of PCIERC(a)_CFG056: byte offset 0xe0 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG056(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG056(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000e0ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000e0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG056", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG056(a) bdk_pciercx_cfg056_t
+#define bustype_BDK_PCIERCX_CFG056(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG056(a) "PCIERCX_CFG056"
+#define busnum_BDK_PCIERCX_CFG056(a) (a)
+#define arguments_BDK_PCIERCX_CFG056(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg057
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg057
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg057_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg057_s cn; */
+};
+typedef union bdk_pciercx_cfg057 bdk_pciercx_cfg057_t;
+
+/* Physical address of PCIERC(a)_CFG057: byte offset 0xe4 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG057(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG057(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000e4ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000e4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG057", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG057(a) bdk_pciercx_cfg057_t
+#define bustype_BDK_PCIERCX_CFG057(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG057(a) "PCIERCX_CFG057"
+#define busnum_BDK_PCIERCX_CFG057(a) (a)
+#define arguments_BDK_PCIERCX_CFG057(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg058
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg058
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg058_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg058_s cn; */
+};
+typedef union bdk_pciercx_cfg058 bdk_pciercx_cfg058_t;
+
+/* Physical address of PCIERC(a)_CFG058: byte offset 0xe8 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG058(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG058(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000e8ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000e8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG058", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG058(a) bdk_pciercx_cfg058_t
+#define bustype_BDK_PCIERCX_CFG058(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG058(a) "PCIERCX_CFG058"
+#define busnum_BDK_PCIERCX_CFG058(a) (a)
+#define arguments_BDK_PCIERCX_CFG058(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg059
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg059
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg059_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg059_s cn; */
+};
+typedef union bdk_pciercx_cfg059 bdk_pciercx_cfg059_t;
+
+/* Physical address of PCIERC(a)_CFG059: byte offset 0xec inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG059(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG059(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000ecll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000ecll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG059", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG059(a) bdk_pciercx_cfg059_t
+#define bustype_BDK_PCIERCX_CFG059(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG059(a) "PCIERCX_CFG059"
+#define busnum_BDK_PCIERCX_CFG059(a) (a)
+#define arguments_BDK_PCIERCX_CFG059(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg060
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg060
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg060_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg060_s cn; */
+};
+typedef union bdk_pciercx_cfg060 bdk_pciercx_cfg060_t;
+
+/* Physical address of PCIERC(a)_CFG060: byte offset 0xf0 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG060(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG060(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000f0ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000f0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG060", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG060(a) bdk_pciercx_cfg060_t
+#define bustype_BDK_PCIERCX_CFG060(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG060(a) "PCIERCX_CFG060"
+#define busnum_BDK_PCIERCX_CFG060(a) (a)
+#define arguments_BDK_PCIERCX_CFG060(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg061
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg061
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg061_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg061_s cn; */
+};
+typedef union bdk_pciercx_cfg061 bdk_pciercx_cfg061_t;
+
+/* Physical address of PCIERC(a)_CFG061: byte offset 0xf4 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG061(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG061(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000f4ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000f4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG061", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG061(a) bdk_pciercx_cfg061_t
+#define bustype_BDK_PCIERCX_CFG061(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG061(a) "PCIERCX_CFG061"
+#define busnum_BDK_PCIERCX_CFG061(a) (a)
+#define arguments_BDK_PCIERCX_CFG061(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg062
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg062
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg062_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg062_s cn; */
+};
+typedef union bdk_pciercx_cfg062 bdk_pciercx_cfg062_t;
+
+/* Physical address of PCIERC(a)_CFG062: byte offset 0xf8 inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG062(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG062(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000f8ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000f8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG062", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG062(a) bdk_pciercx_cfg062_t
+#define bustype_BDK_PCIERCX_CFG062(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG062(a) "PCIERCX_CFG062"
+#define busnum_BDK_PCIERCX_CFG062(a) (a)
+#define arguments_BDK_PCIERCX_CFG062(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg063
+ *
+ * PCIe RC Unused Capability Registers
+ * This register contains 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg063
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg063_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC() hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg063_s cn; */
+};
+typedef union bdk_pciercx_cfg063 bdk_pciercx_cfg063_t;
+
+/* Physical address of PCIERC(a)_CFG063: byte offset 0xfc inside the
+ * controller's 0x100000000-byte config window; an index that is out of
+ * range for the detected chip model terminates via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG063(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG063(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x200000000fcll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000000fcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG063", 1, a, 0, 0, 0);
+}
+
+/* Lookup macros consumed by the generic BDK CSR access layer. */
+#define typedef_BDK_PCIERCX_CFG063(a) bdk_pciercx_cfg063_t
+#define bustype_BDK_PCIERCX_CFG063(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG063(a) "PCIERCX_CFG063"
+#define busnum_BDK_PCIERCX_CFG063(a) (a)
+#define arguments_BDK_PCIERCX_CFG063(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg064
+ *
+ * PCIe RC PCI Express Extended Capability Header Register
+ * This register contains the sixty-fifth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg064
+{
+    uint32_t u; /**< Whole register as a raw 32-bit value. */
+    /* Bit-field view; field declaration order depends on host endianness. */
+    struct bdk_pciercx_cfg064_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the secondary PCI Express capabilities by default.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the secondary PCI Express capabilities by default.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg064_s cn; */ /* All supported models use the common layout above. */
+};
+typedef union bdk_pciercx_cfg064 bdk_pciercx_cfg064_t;
+
+/**
+ * Compute the CSR address of PCIERC(a)_CFG064 for the running chip.
+ *
+ * Base 0x20000000100, 0x100000000 (4 GiB) stride per root-complex index.
+ * Out-of-range indices for the current model are fatal.
+ */
+static inline uint64_t BDK_PCIERCX_CFG064(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG064(unsigned long a)
+{
+    /* CN81XX accepts indices 0..2, CN83XX 0..3 (2-bit wrap); CN88XX
+       accepts 0..5 (3-bit wrap). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3))
+        return 0x20000000100ll + (a & 0x3) * 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return 0x20000000100ll + (a & 0x7) * 0x100000000ll;
+    __bdk_csr_fatal("PCIERCX_CFG064", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG064(a) bdk_pciercx_cfg064_t
+#define bustype_BDK_PCIERCX_CFG064(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG064(a) "PCIERCX_CFG064"
+#define busnum_BDK_PCIERCX_CFG064(a) (a)
+#define arguments_BDK_PCIERCX_CFG064(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg065
+ *
+ * PCIe RC Uncorrectable Error Status Register
+ * This register contains the sixty-sixth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg065
+{
+    uint32_t u; /**< Whole register as a raw 32-bit value. */
+    /* Common layout; field declaration order depends on host endianness. */
+    struct bdk_pciercx_cfg065_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31 : 6;
+        uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+        uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+        uint32_t reserved_23 : 1;
+        uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+        uint32_t reserved_21 : 1;
+        uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+        uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+        uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+        uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+        uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status */
+        uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+        uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+        uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+        uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error status (not supported). */
+        uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+        uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_3 : 4;
+        uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+        uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error status (not supported). */
+        uint32_t reserved_6_11 : 6;
+        uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+        uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+        uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+        uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+        uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status */
+        uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+        uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+        uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+        uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+        uint32_t reserved_21 : 1;
+        uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+        uint32_t reserved_23 : 1;
+        uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+        uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+        uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+    } s;
+    /* CN81XX variant: differs from the common layout only in how bits
+       [3:0] are split into reserved fields; [SDES] is read-only here. */
+    struct bdk_pciercx_cfg065_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31 : 6;
+        uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+        uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+        uint32_t reserved_23 : 1;
+        uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+        uint32_t reserved_21 : 1;
+        uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+        uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+        uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+        uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+        uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status */
+        uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+        uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+        uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+        uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error status (not supported). */
+        uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0 : 1;
+        uint32_t reserved_1_3 : 3;
+        uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+        uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error status (not supported). */
+        uint32_t reserved_6_11 : 6;
+        uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+        uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+        uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+        uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+        uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status */
+        uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+        uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+        uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+        uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+        uint32_t reserved_21 : 1;
+        uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+        uint32_t reserved_23 : 1;
+        uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+        uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+        uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_pciercx_cfg065_cn81xx cn88xx; */ /* CN88XX shares the CN81XX layout. */
+    /* CN83XX variant: [SDES] is R/W1C here (surprise down reporting). */
+    struct bdk_pciercx_cfg065_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31 : 6;
+        uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+        uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+        uint32_t reserved_23 : 1;
+        uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+        uint32_t reserved_21 : 1;
+        uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+        uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+        uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+        uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+        uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status */
+        uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+        uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+        uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+        uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t sdes : 1; /**< [ 5: 5](R/W1C/H) Surprise down error status. */
+        uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0 : 1;
+        uint32_t reserved_1_3 : 3;
+        uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+        uint32_t sdes : 1; /**< [ 5: 5](R/W1C/H) Surprise down error status. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+        uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+        uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+        uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+        uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status */
+        uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+        uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+        uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+        uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+        uint32_t reserved_21 : 1;
+        uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+        uint32_t reserved_23 : 1;
+        uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+        uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+        uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg065 bdk_pciercx_cfg065_t;
+
+/**
+ * Compute the CSR address of PCIERC(a)_CFG065 for the running chip.
+ *
+ * Base 0x20000000104, 0x100000000 (4 GiB) stride per root-complex index.
+ * Out-of-range indices for the current model are fatal.
+ */
+static inline uint64_t BDK_PCIERCX_CFG065(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG065(unsigned long a)
+{
+    /* CN81XX accepts indices 0..2, CN83XX 0..3 (2-bit wrap); CN88XX
+       accepts 0..5 (3-bit wrap). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3))
+        return 0x20000000104ll + (a & 0x3) * 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return 0x20000000104ll + (a & 0x7) * 0x100000000ll;
+    __bdk_csr_fatal("PCIERCX_CFG065", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG065(a) bdk_pciercx_cfg065_t
+#define bustype_BDK_PCIERCX_CFG065(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG065(a) "PCIERCX_CFG065"
+#define busnum_BDK_PCIERCX_CFG065(a) (a)
+#define arguments_BDK_PCIERCX_CFG065(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg066
+ *
+ * PCIe RC Uncorrectable Error Mask Register
+ * This register contains the sixty-seventh 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg066
+{
+    uint32_t u; /**< Whole register as a raw 32-bit value. */
+    /* Common layout; field declaration order depends on host endianness. */
+    struct bdk_pciercx_cfg066_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31 : 6;
+        uint32_t tpbem : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error mask. */
+        uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked status. */
+        uint32_t reserved_23 : 1;
+        uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+        uint32_t reserved_21 : 1;
+        uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+        uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+        uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+        uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+        uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+        uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+        uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+        uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+        uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t sdem : 1; /**< [ 5: 5](RO) Surprise down error mask (not supported). */
+        uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+        uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_3 : 4;
+        uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+        uint32_t sdem : 1; /**< [ 5: 5](RO) Surprise down error mask (not supported). */
+        uint32_t reserved_6_11 : 6;
+        uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+        uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+        uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+        uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+        uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+        uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+        uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+        uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+        uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+        uint32_t reserved_21 : 1;
+        uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+        uint32_t reserved_23 : 1;
+        uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked status. */
+        uint32_t tpbem : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error mask. */
+        uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+    } s;
+    /* CN88XX pass 1 variant: [UATOMBM] is RO/H instead of R/W. */
+    struct bdk_pciercx_cfg066_cn88xxp1
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31 : 6;
+        uint32_t tpbem : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error mask. */
+        uint32_t uatombm : 1; /**< [ 24: 24](RO/H) Unsupported AtomicOp egress blocked status. */
+        uint32_t reserved_23 : 1;
+        uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+        uint32_t reserved_21 : 1;
+        uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+        uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+        uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+        uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+        uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+        uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+        uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+        uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+        uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t sdem : 1; /**< [ 5: 5](RO) Surprise down error mask (not supported). */
+        uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0 : 1;
+        uint32_t reserved_1_3 : 3;
+        uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+        uint32_t sdem : 1; /**< [ 5: 5](RO) Surprise down error mask (not supported). */
+        uint32_t reserved_6_11 : 6;
+        uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+        uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+        uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+        uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+        uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+        uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+        uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+        uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+        uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+        uint32_t reserved_21 : 1;
+        uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+        uint32_t reserved_23 : 1;
+        uint32_t uatombm : 1; /**< [ 24: 24](RO/H) Unsupported AtomicOp egress blocked status. */
+        uint32_t tpbem : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error mask. */
+        uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+    } cn88xxp1;
+    /* CN81XX variant: like the common layout but with bits [3:0] split
+       into separate reserved fields. */
+    struct bdk_pciercx_cfg066_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31 : 6;
+        uint32_t tpbem : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error mask. */
+        uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked status. */
+        uint32_t reserved_23 : 1;
+        uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+        uint32_t reserved_21 : 1;
+        uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+        uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+        uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+        uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+        uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+        uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+        uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+        uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+        uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t sdem : 1; /**< [ 5: 5](RO) Surprise down error mask (not supported). */
+        uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0 : 1;
+        uint32_t reserved_1_3 : 3;
+        uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+        uint32_t sdem : 1; /**< [ 5: 5](RO) Surprise down error mask (not supported). */
+        uint32_t reserved_6_11 : 6;
+        uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+        uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+        uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+        uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+        uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+        uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+        uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+        uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+        uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+        uint32_t reserved_21 : 1;
+        uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+        uint32_t reserved_23 : 1;
+        uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked status. */
+        uint32_t tpbem : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error mask. */
+        uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* CN83XX variant: [SDEM] is R/W, gated by PCIERC()_CFG031[SDERC]. */
+    struct bdk_pciercx_cfg066_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31 : 6;
+        uint32_t tpbem : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error mask. */
+        uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked status. */
+        uint32_t reserved_23 : 1;
+        uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+        uint32_t reserved_21 : 1;
+        uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+        uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+        uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+        uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+        uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+        uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+        uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+        uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+        uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t sdem : 1; /**< [ 5: 5](R/W) Surprise down error mask. Writeable when PCIERC()_CFG031[SDERC] is set.
+                                                                 When PCIERC()_CFG031[SDERC] is clear, will always read as clear. */
+        uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0 : 1;
+        uint32_t reserved_1_3 : 3;
+        uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+        uint32_t sdem : 1; /**< [ 5: 5](R/W) Surprise down error mask. Writeable when PCIERC()_CFG031[SDERC] is set.
+                                                                 When PCIERC()_CFG031[SDERC] is clear, will always read as clear. */
+        uint32_t reserved_6_11 : 6;
+        uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+        uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+        uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+        uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+        uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+        uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+        uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+        uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+        uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+        uint32_t reserved_21 : 1;
+        uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+        uint32_t reserved_23 : 1;
+        uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked status. */
+        uint32_t tpbem : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error mask. */
+        uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+    } cn83xx;
+    /* struct bdk_pciercx_cfg066_cn81xx cn88xxp2; */ /* CN88XX pass 2 shares the CN81XX layout. */
+};
+typedef union bdk_pciercx_cfg066 bdk_pciercx_cfg066_t;
+
+/**
+ * Compute the CSR address of PCIERC(a)_CFG066 for the running chip.
+ *
+ * Base 0x20000000108, 0x100000000 (4 GiB) stride per root-complex index.
+ * Out-of-range indices for the current model are fatal.
+ */
+static inline uint64_t BDK_PCIERCX_CFG066(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG066(unsigned long a)
+{
+    /* CN81XX accepts indices 0..2, CN83XX 0..3 (2-bit wrap); CN88XX
+       accepts 0..5 (3-bit wrap). */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3))
+        return 0x20000000108ll + (a & 0x3) * 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return 0x20000000108ll + (a & 0x7) * 0x100000000ll;
+    __bdk_csr_fatal("PCIERCX_CFG066", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG066(a) bdk_pciercx_cfg066_t
+#define bustype_BDK_PCIERCX_CFG066(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG066(a) "PCIERCX_CFG066"
+#define busnum_BDK_PCIERCX_CFG066(a) (a)
+#define arguments_BDK_PCIERCX_CFG066(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg067
+ *
+ * PCIe RC Uncorrectable Error Severity Register
+ * This register contains the sixty-eighth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg067
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg067_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error severity. */
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t unsuperr : 3; /**< [ 23: 21](RO/H) Reserved. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error severity (not supported). */
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error severity (not supported). */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t unsuperr : 3; /**< [ 23: 21](RO/H) Reserved. */
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error severity. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg067_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error severity. */
+ uint32_t uatombs : 1; /**< [ 24: 24](RO/H) Unsupported AtomicOp egress blocked severity. */
+ uint32_t unsuperr : 3; /**< [ 23: 21](RO/H) Reserved. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error severity (not supported). */
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error severity (not supported). */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t unsuperr : 3; /**< [ 23: 21](RO/H) Reserved. */
+ uint32_t uatombs : 1; /**< [ 24: 24](RO/H) Unsupported AtomicOp egress blocked severity. */
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error severity. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ struct bdk_pciercx_cfg067_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error severity. */
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t unsuperr : 3; /**< [ 23: 21](RO/H) Reserved. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error severity (not supported). */
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t sdes : 1; /**< [ 5: 5](RO) Surprise down error severity (not supported). */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t unsuperr : 3; /**< [ 23: 21](RO/H) Reserved. */
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error severity. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pciercx_cfg067_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error severity. */
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t unsuperr : 3; /**< [ 23: 21](RO/H) Reserved. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< [ 5: 5](R/W) Surprise down error severity. Writeable when PCIERC()_CFG031[SDERC] is set.
+ When PCIERC()_CFG031[SDERC] is clear, will always read as set. */
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t sdes : 1; /**< [ 5: 5](R/W) Surprise down error severity. Writeable when PCIERC()_CFG031[SDERC] is set.
+ When PCIERC()_CFG031[SDERC] is clear, will always read as set. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t unsuperr : 3; /**< [ 23: 21](RO/H) Reserved. */
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error severity. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } cn83xx;
+ /* struct bdk_pciercx_cfg067_cn81xx cn88xxp2; */
+};
+typedef union bdk_pciercx_cfg067 bdk_pciercx_cfg067_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG067.  Per-port stride
+ * is 0x100000000; the port index is range-checked per chip model and then
+ * masked.  An unknown model or out-of-range index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG067(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG067(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x2000000010cll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x2000000010cll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x2000000010cll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG067", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG067(a) bdk_pciercx_cfg067_t
+#define bustype_BDK_PCIERCX_CFG067(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG067(a) "PCIERCX_CFG067"
+#define busnum_BDK_PCIERCX_CFG067(a) (a)
+#define arguments_BDK_PCIERCX_CFG067(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg068
+ *
+ * PCIe RC Correctable Error Status Register
+ * This register contains the sixty-ninth 32-bits of PCIe type 1 configuration space.
+ *
+ * NOTE(review): auto-generated CSR layout -- field order encodes hardware bit
+ * positions and must not be hand-edited.  CN81XX/CN88XX lack the CHLO bit
+ * ([15]); CN83XX matches the generic layout.
+ */
+union bdk_pciercx_cfg068
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg068_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31 : 16;
+        uint32_t chlo : 1; /**< [ 15: 15](R/W1C/H) Corrected header log overflow status. */
+        uint32_t cies : 1; /**< [ 14: 14](R/W1C) Corrected internal error status. */
+        uint32_t anfes : 1; /**< [ 13: 13](R/W1C/H) Advisory nonfatal error status. */
+        uint32_t rtts : 1; /**< [ 12: 12](R/W1C/H) Replay timer timeout status. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rnrs : 1; /**< [ 8: 8](R/W1C/H) REPLAY_NUM rollover status. */
+        uint32_t bdllps : 1; /**< [ 7: 7](R/W1C/H) Bad DLLP status. */
+        uint32_t btlps : 1; /**< [ 6: 6](R/W1C/H) Bad TLP status. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t res : 1; /**< [ 0: 0](R/W1C/H) Receiver error status. */
+#else /* Word 0 - Little Endian */
+        uint32_t res : 1; /**< [ 0: 0](R/W1C/H) Receiver error status. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t btlps : 1; /**< [ 6: 6](R/W1C/H) Bad TLP status. */
+        uint32_t bdllps : 1; /**< [ 7: 7](R/W1C/H) Bad DLLP status. */
+        uint32_t rnrs : 1; /**< [ 8: 8](R/W1C/H) REPLAY_NUM rollover status. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rtts : 1; /**< [ 12: 12](R/W1C/H) Replay timer timeout status. */
+        uint32_t anfes : 1; /**< [ 13: 13](R/W1C/H) Advisory nonfatal error status. */
+        uint32_t cies : 1; /**< [ 14: 14](R/W1C) Corrected internal error status. */
+        uint32_t chlo : 1; /**< [ 15: 15](R/W1C/H) Corrected header log overflow status. */
+        uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_pciercx_cfg068_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_15_31 : 17;
+        uint32_t cies : 1; /**< [ 14: 14](R/W1C) Corrected internal error status. */
+        uint32_t anfes : 1; /**< [ 13: 13](R/W1C/H) Advisory nonfatal error status. */
+        uint32_t rtts : 1; /**< [ 12: 12](R/W1C/H) Replay timer timeout status. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rnrs : 1; /**< [ 8: 8](R/W1C/H) REPLAY_NUM rollover status. */
+        uint32_t bdllps : 1; /**< [ 7: 7](R/W1C/H) Bad DLLP status. */
+        uint32_t btlps : 1; /**< [ 6: 6](R/W1C/H) Bad TLP status. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t res : 1; /**< [ 0: 0](R/W1C/H) Receiver error status. */
+#else /* Word 0 - Little Endian */
+        uint32_t res : 1; /**< [ 0: 0](R/W1C/H) Receiver error status. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t btlps : 1; /**< [ 6: 6](R/W1C/H) Bad TLP status. */
+        uint32_t bdllps : 1; /**< [ 7: 7](R/W1C/H) Bad DLLP status. */
+        uint32_t rnrs : 1; /**< [ 8: 8](R/W1C/H) REPLAY_NUM rollover status. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rtts : 1; /**< [ 12: 12](R/W1C/H) Replay timer timeout status. */
+        uint32_t anfes : 1; /**< [ 13: 13](R/W1C/H) Advisory nonfatal error status. */
+        uint32_t cies : 1; /**< [ 14: 14](R/W1C) Corrected internal error status. */
+        uint32_t reserved_15_31 : 17;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_pciercx_cfg068_cn81xx cn88xx; */
+    struct bdk_pciercx_cfg068_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31 : 16;
+        uint32_t chlo : 1; /**< [ 15: 15](R/W1C/H) Corrected header log overflow status. */
+        uint32_t cies : 1; /**< [ 14: 14](R/W1C/H) Corrected internal error status. */
+        uint32_t anfes : 1; /**< [ 13: 13](R/W1C/H) Advisory nonfatal error status. */
+        uint32_t rtts : 1; /**< [ 12: 12](R/W1C/H) Replay timer timeout status. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rnrs : 1; /**< [ 8: 8](R/W1C/H) REPLAY_NUM rollover status. */
+        uint32_t bdllps : 1; /**< [ 7: 7](R/W1C/H) Bad DLLP status. */
+        uint32_t btlps : 1; /**< [ 6: 6](R/W1C/H) Bad TLP status. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t res : 1; /**< [ 0: 0](R/W1C/H) Receiver error status. */
+#else /* Word 0 - Little Endian */
+        uint32_t res : 1; /**< [ 0: 0](R/W1C/H) Receiver error status. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t btlps : 1; /**< [ 6: 6](R/W1C/H) Bad TLP status. */
+        uint32_t bdllps : 1; /**< [ 7: 7](R/W1C/H) Bad DLLP status. */
+        uint32_t rnrs : 1; /**< [ 8: 8](R/W1C/H) REPLAY_NUM rollover status. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rtts : 1; /**< [ 12: 12](R/W1C/H) Replay timer timeout status. */
+        uint32_t anfes : 1; /**< [ 13: 13](R/W1C/H) Advisory nonfatal error status. */
+        uint32_t cies : 1; /**< [ 14: 14](R/W1C/H) Corrected internal error status. */
+        uint32_t chlo : 1; /**< [ 15: 15](R/W1C/H) Corrected header log overflow status. */
+        uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg068 bdk_pciercx_cfg068_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG068; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG068(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG068(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000110ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000110ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000110ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG068", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG068(a) bdk_pciercx_cfg068_t
+#define bustype_BDK_PCIERCX_CFG068(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG068(a) "PCIERCX_CFG068"
+#define busnum_BDK_PCIERCX_CFG068(a) (a)
+#define arguments_BDK_PCIERCX_CFG068(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg069
+ *
+ * PCIe RC Correctable Error Mask Register
+ * This register contains the seventieth 32-bits of PCIe type 1 configuration space.
+ *
+ * NOTE(review): auto-generated CSR layout; mask bits mirror the status bits of
+ * pcierc#_cfg068.  CN81XX/CN88XX lack the CHLOM bit ([15]).
+ */
+union bdk_pciercx_cfg069
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg069_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31 : 16;
+        uint32_t chlom : 1; /**< [ 15: 15](R/W) Corrected header log overflow error mask. */
+        uint32_t ciem : 1; /**< [ 14: 14](R/W) Corrected internal error mask. */
+        uint32_t anfem : 1; /**< [ 13: 13](R/W) Advisory nonfatal error mask. */
+        uint32_t rttm : 1; /**< [ 12: 12](R/W) Replay timer timeout mask. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rnrm : 1; /**< [ 8: 8](R/W) REPLAY_NUM rollover mask. */
+        uint32_t bdllpm : 1; /**< [ 7: 7](R/W) Bad DLLP mask. */
+        uint32_t btlpm : 1; /**< [ 6: 6](R/W) Bad TLP mask. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t rem : 1; /**< [ 0: 0](R/W) Receiver error mask. */
+#else /* Word 0 - Little Endian */
+        uint32_t rem : 1; /**< [ 0: 0](R/W) Receiver error mask. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t btlpm : 1; /**< [ 6: 6](R/W) Bad TLP mask. */
+        uint32_t bdllpm : 1; /**< [ 7: 7](R/W) Bad DLLP mask. */
+        uint32_t rnrm : 1; /**< [ 8: 8](R/W) REPLAY_NUM rollover mask. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rttm : 1; /**< [ 12: 12](R/W) Replay timer timeout mask. */
+        uint32_t anfem : 1; /**< [ 13: 13](R/W) Advisory nonfatal error mask. */
+        uint32_t ciem : 1; /**< [ 14: 14](R/W) Corrected internal error mask. */
+        uint32_t chlom : 1; /**< [ 15: 15](R/W) Corrected header log overflow error mask. */
+        uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_pciercx_cfg069_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_15_31 : 17;
+        uint32_t ciem : 1; /**< [ 14: 14](R/W) Corrected internal error mask. */
+        uint32_t anfem : 1; /**< [ 13: 13](R/W) Advisory nonfatal error mask. */
+        uint32_t rttm : 1; /**< [ 12: 12](R/W) Replay timer timeout mask. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rnrm : 1; /**< [ 8: 8](R/W) REPLAY_NUM rollover mask. */
+        uint32_t bdllpm : 1; /**< [ 7: 7](R/W) Bad DLLP mask. */
+        uint32_t btlpm : 1; /**< [ 6: 6](R/W) Bad TLP mask. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t rem : 1; /**< [ 0: 0](R/W) Receiver error mask. */
+#else /* Word 0 - Little Endian */
+        uint32_t rem : 1; /**< [ 0: 0](R/W) Receiver error mask. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t btlpm : 1; /**< [ 6: 6](R/W) Bad TLP mask. */
+        uint32_t bdllpm : 1; /**< [ 7: 7](R/W) Bad DLLP mask. */
+        uint32_t rnrm : 1; /**< [ 8: 8](R/W) REPLAY_NUM rollover mask. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rttm : 1; /**< [ 12: 12](R/W) Replay timer timeout mask. */
+        uint32_t anfem : 1; /**< [ 13: 13](R/W) Advisory nonfatal error mask. */
+        uint32_t ciem : 1; /**< [ 14: 14](R/W) Corrected internal error mask. */
+        uint32_t reserved_15_31 : 17;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_pciercx_cfg069_cn81xx cn88xx; */
+    /* struct bdk_pciercx_cfg069_s cn83xx; */
+};
+typedef union bdk_pciercx_cfg069 bdk_pciercx_cfg069_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG069; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG069(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG069(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000114ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000114ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000114ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG069", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG069(a) bdk_pciercx_cfg069_t
+#define bustype_BDK_PCIERCX_CFG069(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG069(a) "PCIERCX_CFG069"
+#define busnum_BDK_PCIERCX_CFG069(a) (a)
+#define arguments_BDK_PCIERCX_CFG069(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg070
+ *
+ * PCIe RC Advanced Capabilities and Control Register
+ * This register contains the seventy-first 32-bits of PCIe type 1 configuration space.
+ *
+ * NOTE(review): auto-generated CSR layout.  Bit [11] is named TPLP on
+ * CN81XX/CN88XX but TLP_PLP on CN83XX; CN81XX/CN88XX also lack the
+ * MULT_HDR_* bits ([10:9]).
+ */
+union bdk_pciercx_cfg070
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg070_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_11_31 : 21;
+        uint32_t mult_hdr_en : 1; /**< [ 10: 10](RO) Multiple header recording enable (not supported). */
+        uint32_t mult_hdr_cap : 1; /**< [ 9: 9](RO) Multiple header recording capability (not supported). */
+        uint32_t ce : 1; /**< [ 8: 8](R/W) ECRC check enable. */
+        uint32_t cc : 1; /**< [ 7: 7](RO) ECRC check capable. */
+        uint32_t ge : 1; /**< [ 6: 6](R/W) ECRC generation enable. */
+        uint32_t gc : 1; /**< [ 5: 5](RO) ECRC generation capability. */
+        uint32_t fep : 5; /**< [ 4: 0](RO) First error pointer. */
+#else /* Word 0 - Little Endian */
+        uint32_t fep : 5; /**< [ 4: 0](RO) First error pointer. */
+        uint32_t gc : 1; /**< [ 5: 5](RO) ECRC generation capability. */
+        uint32_t ge : 1; /**< [ 6: 6](R/W) ECRC generation enable. */
+        uint32_t cc : 1; /**< [ 7: 7](RO) ECRC check capable. */
+        uint32_t ce : 1; /**< [ 8: 8](R/W) ECRC check enable. */
+        uint32_t mult_hdr_cap : 1; /**< [ 9: 9](RO) Multiple header recording capability (not supported). */
+        uint32_t mult_hdr_en : 1; /**< [ 10: 10](RO) Multiple header recording enable (not supported). */
+        uint32_t reserved_11_31 : 21;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_pciercx_cfg070_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_12_31 : 20;
+        uint32_t tplp : 1; /**< [ 11: 11](RO) TLP prefix log present. */
+        uint32_t reserved_9_10 : 2;
+        uint32_t ce : 1; /**< [ 8: 8](R/W) ECRC check enable. */
+        uint32_t cc : 1; /**< [ 7: 7](RO) ECRC check capable. */
+        uint32_t ge : 1; /**< [ 6: 6](R/W) ECRC generation enable. */
+        uint32_t gc : 1; /**< [ 5: 5](RO) ECRC generation capability. */
+        uint32_t fep : 5; /**< [ 4: 0](RO) First error pointer. */
+#else /* Word 0 - Little Endian */
+        uint32_t fep : 5; /**< [ 4: 0](RO) First error pointer. */
+        uint32_t gc : 1; /**< [ 5: 5](RO) ECRC generation capability. */
+        uint32_t ge : 1; /**< [ 6: 6](R/W) ECRC generation enable. */
+        uint32_t cc : 1; /**< [ 7: 7](RO) ECRC check capable. */
+        uint32_t ce : 1; /**< [ 8: 8](R/W) ECRC check enable. */
+        uint32_t reserved_9_10 : 2;
+        uint32_t tplp : 1; /**< [ 11: 11](RO) TLP prefix log present. */
+        uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_pciercx_cfg070_cn81xx cn88xx; */
+    struct bdk_pciercx_cfg070_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_12_31 : 20;
+        uint32_t tlp_plp : 1; /**< [ 11: 11](RO) TLP prefix log present. */
+        uint32_t mult_hdr_en : 1; /**< [ 10: 10](RO) Multiple header recording enable (not supported). */
+        uint32_t mult_hdr_cap : 1; /**< [ 9: 9](RO) Multiple header recording capability (not supported). */
+        uint32_t ce : 1; /**< [ 8: 8](R/W) ECRC check enable. */
+        uint32_t cc : 1; /**< [ 7: 7](RO) ECRC check capable. */
+        uint32_t ge : 1; /**< [ 6: 6](R/W) ECRC generation enable. */
+        uint32_t gc : 1; /**< [ 5: 5](RO) ECRC generation capability. */
+        uint32_t fep : 5; /**< [ 4: 0](RO) First error pointer. */
+#else /* Word 0 - Little Endian */
+        uint32_t fep : 5; /**< [ 4: 0](RO) First error pointer. */
+        uint32_t gc : 1; /**< [ 5: 5](RO) ECRC generation capability. */
+        uint32_t ge : 1; /**< [ 6: 6](R/W) ECRC generation enable. */
+        uint32_t cc : 1; /**< [ 7: 7](RO) ECRC check capable. */
+        uint32_t ce : 1; /**< [ 8: 8](R/W) ECRC check enable. */
+        uint32_t mult_hdr_cap : 1; /**< [ 9: 9](RO) Multiple header recording capability (not supported). */
+        uint32_t mult_hdr_en : 1; /**< [ 10: 10](RO) Multiple header recording enable (not supported). */
+        uint32_t tlp_plp : 1; /**< [ 11: 11](RO) TLP prefix log present. */
+        uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg070 bdk_pciercx_cfg070_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG070; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG070(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG070(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000118ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000118ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000118ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG070", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG070(a) bdk_pciercx_cfg070_t
+#define bustype_BDK_PCIERCX_CFG070(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG070(a) "PCIERCX_CFG070"
+#define busnum_BDK_PCIERCX_CFG070(a) (a)
+#define arguments_BDK_PCIERCX_CFG070(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg071
+ *
+ * PCIe RC Header Log Register 1
+ * This register contains the seventy-second 32-bits of PCIe type 1 configuration space. The
+ * header log registers collect the header for the TLP corresponding to a detected error.
+ */
+union bdk_pciercx_cfg071
+{
+    /* Whole-register view; single read-only dword, identical on all models. */
+    uint32_t u;
+    struct bdk_pciercx_cfg071_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dword1 : 32; /**< [ 31: 0](RO) Header log register (first DWORD). */
+#else /* Word 0 - Little Endian */
+        uint32_t dword1 : 32; /**< [ 31: 0](RO) Header log register (first DWORD). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg071_s cn; */
+};
+typedef union bdk_pciercx_cfg071 bdk_pciercx_cfg071_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG071; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG071(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG071(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x2000000011cll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x2000000011cll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x2000000011cll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG071", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG071(a) bdk_pciercx_cfg071_t
+#define bustype_BDK_PCIERCX_CFG071(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG071(a) "PCIERCX_CFG071"
+#define busnum_BDK_PCIERCX_CFG071(a) (a)
+#define arguments_BDK_PCIERCX_CFG071(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg072
+ *
+ * PCIe RC Header Log Register 2
+ * This register contains the seventy-third 32-bits of PCIe type 1 configuration space. The
+ * header log registers collect the header for the TLP corresponding to a detected error.
+ */
+union bdk_pciercx_cfg072
+{
+    /* Whole-register view; single read-only dword, identical on all models. */
+    uint32_t u;
+    struct bdk_pciercx_cfg072_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dword2 : 32; /**< [ 31: 0](RO) Header log register (second DWORD). */
+#else /* Word 0 - Little Endian */
+        uint32_t dword2 : 32; /**< [ 31: 0](RO) Header log register (second DWORD). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg072_s cn; */
+};
+typedef union bdk_pciercx_cfg072 bdk_pciercx_cfg072_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG072; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG072(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG072(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000120ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000120ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000120ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG072", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG072(a) bdk_pciercx_cfg072_t
+#define bustype_BDK_PCIERCX_CFG072(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG072(a) "PCIERCX_CFG072"
+#define busnum_BDK_PCIERCX_CFG072(a) (a)
+#define arguments_BDK_PCIERCX_CFG072(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg073
+ *
+ * PCIe RC Header Log Register 3
+ * This register contains the seventy-fourth 32-bits of PCIe type 1 configuration space. The
+ * header log registers collect the header for the TLP corresponding to a detected error.
+ */
+union bdk_pciercx_cfg073
+{
+    /* Whole-register view; single read-only dword, identical on all models. */
+    uint32_t u;
+    struct bdk_pciercx_cfg073_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dword3 : 32; /**< [ 31: 0](RO) Header log register (third DWORD). */
+#else /* Word 0 - Little Endian */
+        uint32_t dword3 : 32; /**< [ 31: 0](RO) Header log register (third DWORD). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg073_s cn; */
+};
+typedef union bdk_pciercx_cfg073 bdk_pciercx_cfg073_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG073; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG073(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG073(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000124ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000124ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000124ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG073", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG073(a) bdk_pciercx_cfg073_t
+#define bustype_BDK_PCIERCX_CFG073(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG073(a) "PCIERCX_CFG073"
+#define busnum_BDK_PCIERCX_CFG073(a) (a)
+#define arguments_BDK_PCIERCX_CFG073(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg074
+ *
+ * PCIe RC Header Log Register 4
+ * This register contains the seventy-fifth 32-bits of PCIe type 1 configuration space. The
+ * header log registers collect the header for the TLP corresponding to a detected error.
+ */
+union bdk_pciercx_cfg074
+{
+    /* Whole-register view; single read-only dword, identical on all models. */
+    uint32_t u;
+    struct bdk_pciercx_cfg074_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dword4 : 32; /**< [ 31: 0](RO) Header log register (fourth DWORD). */
+#else /* Word 0 - Little Endian */
+        uint32_t dword4 : 32; /**< [ 31: 0](RO) Header log register (fourth DWORD). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg074_s cn; */
+};
+typedef union bdk_pciercx_cfg074 bdk_pciercx_cfg074_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG074; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG074(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG074(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000128ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000128ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000128ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG074", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG074(a) bdk_pciercx_cfg074_t
+#define bustype_BDK_PCIERCX_CFG074(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG074(a) "PCIERCX_CFG074"
+#define busnum_BDK_PCIERCX_CFG074(a) (a)
+#define arguments_BDK_PCIERCX_CFG074(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg075
+ *
+ * PCIe RC Root Error Command Register
+ * This register contains the seventy-sixth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg075
+{
+    /* Whole-register view; layout identical on all models. */
+    uint32_t u;
+    struct bdk_pciercx_cfg075_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_3_31 : 29;
+        uint32_t fere : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+        uint32_t nfere : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+        uint32_t cere : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t cere : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+        uint32_t nfere : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+        uint32_t fere : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+        uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg075_s cn; */
+};
+typedef union bdk_pciercx_cfg075 bdk_pciercx_cfg075_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG075; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG075(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG075(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x2000000012cll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x2000000012cll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x2000000012cll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG075", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG075(a) bdk_pciercx_cfg075_t
+#define bustype_BDK_PCIERCX_CFG075(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG075(a) "PCIERCX_CFG075"
+#define busnum_BDK_PCIERCX_CFG075(a) (a)
+#define arguments_BDK_PCIERCX_CFG075(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg076
+ *
+ * PCIe RC Root Error Status Register
+ * This register contains the seventy-seventh 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg076
+{
+    /* Whole-register view; layout identical on all models. */
+    uint32_t u;
+    struct bdk_pciercx_cfg076_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t aeimn : 5; /**< [ 31: 27](RO/WRSL) Advanced error interrupt message number, writable through
+                                                                 PEM()_CFG_WR. */
+        uint32_t reserved_7_26 : 20;
+        uint32_t femr : 1; /**< [ 6: 6](R/W1C/H) Fatal error messages received. */
+        uint32_t nfemr : 1; /**< [ 5: 5](R/W1C/H) Nonfatal error messages received. */
+        uint32_t fuf : 1; /**< [ 4: 4](R/W1C/H) First uncorrectable fatal. */
+        uint32_t multi_efnfr : 1; /**< [ 3: 3](R/W1C/H) Multiple ERR_FATAL/NONFATAL received. */
+        uint32_t efnfr : 1; /**< [ 2: 2](R/W1C/H) ERR_FATAL/NONFATAL received. */
+        uint32_t multi_ecr : 1; /**< [ 1: 1](R/W1C/H) Multiple ERR_COR received. */
+        uint32_t ecr : 1; /**< [ 0: 0](R/W1C/H) ERR_COR received. */
+#else /* Word 0 - Little Endian */
+        uint32_t ecr : 1; /**< [ 0: 0](R/W1C/H) ERR_COR received. */
+        uint32_t multi_ecr : 1; /**< [ 1: 1](R/W1C/H) Multiple ERR_COR received. */
+        uint32_t efnfr : 1; /**< [ 2: 2](R/W1C/H) ERR_FATAL/NONFATAL received. */
+        uint32_t multi_efnfr : 1; /**< [ 3: 3](R/W1C/H) Multiple ERR_FATAL/NONFATAL received. */
+        uint32_t fuf : 1; /**< [ 4: 4](R/W1C/H) First uncorrectable fatal. */
+        uint32_t nfemr : 1; /**< [ 5: 5](R/W1C/H) Nonfatal error messages received. */
+        uint32_t femr : 1; /**< [ 6: 6](R/W1C/H) Fatal error messages received. */
+        uint32_t reserved_7_26 : 20;
+        uint32_t aeimn : 5; /**< [ 31: 27](RO/WRSL) Advanced error interrupt message number, writable through
+                                                                 PEM()_CFG_WR. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg076_s cn; */
+};
+typedef union bdk_pciercx_cfg076 bdk_pciercx_cfg076_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG076; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG076(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG076(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000130ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000130ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000130ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG076", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG076(a) bdk_pciercx_cfg076_t
+#define bustype_BDK_PCIERCX_CFG076(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG076(a) "PCIERCX_CFG076"
+#define busnum_BDK_PCIERCX_CFG076(a) (a)
+#define arguments_BDK_PCIERCX_CFG076(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg077
+ *
+ * PCIe RC Error Source Identification Register
+ * This register contains the seventy-eighth 32-bits of PCIe type 1 configuration space.
+ *
+ * NOTE(review): same bit layout on all models; CN88XX differs only in the
+ * access attribute (RO instead of RO/H).
+ */
+union bdk_pciercx_cfg077
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg077_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t efnfsi : 16; /**< [ 31: 16](RO/H) ERR_FATAL/NONFATAL source identification. */
+        uint32_t ecsi : 16; /**< [ 15: 0](RO/H) ERR_COR source identification. */
+#else /* Word 0 - Little Endian */
+        uint32_t ecsi : 16; /**< [ 15: 0](RO/H) ERR_COR source identification. */
+        uint32_t efnfsi : 16; /**< [ 31: 16](RO/H) ERR_FATAL/NONFATAL source identification. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg077_s cn81xx; */
+    struct bdk_pciercx_cfg077_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t efnfsi : 16; /**< [ 31: 16](RO) ERR_FATAL/NONFATAL source identification. */
+        uint32_t ecsi : 16; /**< [ 15: 0](RO) ERR_COR source identification. */
+#else /* Word 0 - Little Endian */
+        uint32_t ecsi : 16; /**< [ 15: 0](RO) ERR_COR source identification. */
+        uint32_t efnfsi : 16; /**< [ 31: 16](RO) ERR_FATAL/NONFATAL source identification. */
+#endif /* Word 0 - End */
+    } cn88xx;
+    /* struct bdk_pciercx_cfg077_s cn83xx; */
+};
+typedef union bdk_pciercx_cfg077 bdk_pciercx_cfg077_t;
+
+/* Model-dependent config-space address of PCIERC(a)_CFG077; invalid
+ * (model, index) combinations end in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG077(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG077(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000134ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000134ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000134ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG077", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_PCIERCX_CFG077(a) bdk_pciercx_cfg077_t
+#define bustype_BDK_PCIERCX_CFG077(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG077(a) "PCIERCX_CFG077"
+#define busnum_BDK_PCIERCX_CFG077(a) (a)
+#define arguments_BDK_PCIERCX_CFG077(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg086
+ *
+ * PCIe RC PCI Express Secondary Capability (Gen3) Header Register
+ * This register contains the eighty-ninth 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg086
+{
+ uint32_t u;
+ /* Generic layout. The model-specific variants below keep identical bit
+ positions; they differ only in the "/H" annotation on [NCO] and in the
+ field description text. */
+ struct bdk_pciercx_cfg086_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCIE Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCIE Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg086_s cn81xx; */
+ struct bdk_pciercx_cfg086_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL/H) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCIE Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCIE Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL/H) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pciercx_cfg086_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the Vendor Specific capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCIE Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCIE Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the Vendor Specific capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg086 bdk_pciercx_cfg086_t;
+
+/* Byte address of PCIERC(a)_CFG086 in PCI config space. The valid index
+ range is model dependent (CN81XX: a<=2, CN83XX: a<=3, CN88XX: a<=5);
+ an out-of-range index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG086(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG086(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000158ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000158ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000158ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG086", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG086(a) bdk_pciercx_cfg086_t
+#define bustype_BDK_PCIERCX_CFG086(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG086(a) "PCIERCX_CFG086"
+#define busnum_BDK_PCIERCX_CFG086(a) (a)
+#define arguments_BDK_PCIERCX_CFG086(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg087
+ *
+ * PCIe RC Link Control 3 Register
+ * This register contains the eighty-eighth 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg087
+{
+ uint32_t u;
+ /* Layout is identical on all supported models (single "cn" variant). */
+ struct bdk_pciercx_cfg087_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t ler : 1; /**< [ 1: 1](RO/WRSL) Link equalization request interrupt enable. */
+ uint32_t pe : 1; /**< [ 0: 0](RO/WRSL) Perform equalization. */
+#else /* Word 0 - Little Endian */
+ uint32_t pe : 1; /**< [ 0: 0](RO/WRSL) Perform equalization. */
+ uint32_t ler : 1; /**< [ 1: 1](RO/WRSL) Link equalization request interrupt enable. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg087_s cn; */
+};
+typedef union bdk_pciercx_cfg087 bdk_pciercx_cfg087_t;
+
+/* Byte address of PCIERC(a)_CFG087 in PCI config space. The valid index
+ range is model dependent (CN81XX: a<=2, CN83XX: a<=3, CN88XX: a<=5);
+ an out-of-range index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG087(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG087(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x2000000015cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000015cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x2000000015cll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG087", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG087(a) bdk_pciercx_cfg087_t
+#define bustype_BDK_PCIERCX_CFG087(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG087(a) "PCIERCX_CFG087"
+#define busnum_BDK_PCIERCX_CFG087(a) (a)
+#define arguments_BDK_PCIERCX_CFG087(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg088
+ *
+ * PCIe RC Link Control 4 Register
+ * This register contains the eighty-ninth 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg088
+{
+ uint32_t u;
+ /* Layout is identical on all supported models (single "cn" variant). */
+ struct bdk_pciercx_cfg088_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t les : 8; /**< [ 7: 0](R/W1C) Lane error status bits. */
+#else /* Word 0 - Little Endian */
+ uint32_t les : 8; /**< [ 7: 0](R/W1C) Lane error status bits. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg088_s cn; */
+};
+typedef union bdk_pciercx_cfg088 bdk_pciercx_cfg088_t;
+
+/* Byte address of PCIERC(a)_CFG088 in PCI config space. The valid index
+ range is model dependent (CN81XX: a<=2, CN83XX: a<=3, CN88XX: a<=5);
+ an out-of-range index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG088(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG088(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000160ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000160ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000160ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG088", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG088(a) bdk_pciercx_cfg088_t
+#define bustype_BDK_PCIERCX_CFG088(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG088(a) "PCIERCX_CFG088"
+#define busnum_BDK_PCIERCX_CFG088(a) (a)
+#define arguments_BDK_PCIERCX_CFG088(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg089
+ *
+ * PCIe RC Equalization Control Lane 0/1 Register
+ * This register contains the ninetieth 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg089
+{
+ uint32_t u;
+ /* NOTE(review): the cn81xx/cn88xx variant defines [L1DDTP] at bits
+ <19:16> where this generic layout leaves bits <19:15> reserved; the
+ cn83xx variant matches cn81xx field positions but drops the "/H"
+ annotation on the upstream preset fields. */
+ struct bdk_pciercx_cfg089_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l1urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 1 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l1utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 1 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t l1drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 1 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_15_19 : 5;
+ uint32_t l0urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 0 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 0 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t l0drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 0 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 0 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t l0dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 0 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 0 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t l0utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 0 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t l0urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 0 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_15_19 : 5;
+ uint32_t l1drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 1 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t l1utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 1 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t l1urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 1 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg089_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l1urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 1 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l1utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 1 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t l1drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 1 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l1ddtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 1 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_15 : 1;
+ uint32_t l0urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 0 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 0 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t l0drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 0 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 0 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t l0dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 0 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l0drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 0 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t l0utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 0 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t l0urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 0 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_15 : 1;
+ uint32_t l1ddtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 1 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l1drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 1 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t l1utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 1 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t l1urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 1 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pciercx_cfg089_cn81xx cn88xx; */
+ struct bdk_pciercx_cfg089_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l1urph : 3; /**< [ 30: 28](RO/WRSL) Lane 1 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l1utp : 4; /**< [ 27: 24](RO/WRSL) Lane 1 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l1drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 1 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l1dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 1 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l0urph : 3; /**< [ 14: 12](RO/WRSL) Lane 0 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l0utp : 4; /**< [ 11: 8](RO/WRSL) Lane 0 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l0drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 0 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l0dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 0 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+ uint32_t l0dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 0 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l0drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 0 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l0utp : 4; /**< [ 11: 8](RO/WRSL) Lane 0 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l0urph : 3; /**< [ 14: 12](RO/WRSL) Lane 0 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l1dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 1 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l1drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 1 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l1utp : 4; /**< [ 27: 24](RO/WRSL) Lane 1 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l1urph : 3; /**< [ 30: 28](RO/WRSL) Lane 1 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg089 bdk_pciercx_cfg089_t;
+
+/* Byte address of PCIERC(a)_CFG089 in PCI config space. The valid index
+ range is model dependent (CN81XX: a<=2, CN83XX: a<=3, CN88XX: a<=5);
+ an out-of-range index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG089(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG089(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000164ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000164ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000164ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG089", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG089(a) bdk_pciercx_cfg089_t
+#define bustype_BDK_PCIERCX_CFG089(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG089(a) "PCIERCX_CFG089"
+#define busnum_BDK_PCIERCX_CFG089(a) (a)
+#define arguments_BDK_PCIERCX_CFG089(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg090
+ *
+ * PCIe RC Equalization Control Lane 2/3 Register
+ * This register contains the ninety-first 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg090
+{
+ uint32_t u;
+ /* Generic layout (shared by cn81xx/cn88xx); the cn83xx variant keeps the
+ same bit positions but drops the "/H" annotation on the upstream
+ preset fields. */
+ struct bdk_pciercx_cfg090_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l3urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 3 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l3utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 3 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t l3drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 3 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l3dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 3 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_15 : 1;
+ uint32_t l2urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 2 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l2utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 2 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t l2drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 2 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l2dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 2 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t l2dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 2 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l2drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 2 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t l2utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 2 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t l2urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 2 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_15 : 1;
+ uint32_t l3dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 3 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l3drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 3 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t l3utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 3 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t l3urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 3 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg090_s cn81xx; */
+ /* struct bdk_pciercx_cfg090_s cn88xx; */
+ struct bdk_pciercx_cfg090_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l3urph : 3; /**< [ 30: 28](RO/WRSL) Lane 3 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l3utp : 4; /**< [ 27: 24](RO/WRSL) Lane 3 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l3drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 3 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l3dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 3 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l2urph : 3; /**< [ 14: 12](RO/WRSL) Lane 2 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l2utp : 4; /**< [ 11: 8](RO/WRSL) Lane 2 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l2drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 2 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l2dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 2 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+ uint32_t l2dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 2 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l2drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 2 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l2utp : 4; /**< [ 11: 8](RO/WRSL) Lane 2 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l2urph : 3; /**< [ 14: 12](RO/WRSL) Lane 2 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l3dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 3 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l3drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 3 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l3utp : 4; /**< [ 27: 24](RO/WRSL) Lane 3 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l3urph : 3; /**< [ 30: 28](RO/WRSL) Lane 3 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg090 bdk_pciercx_cfg090_t;
+
+/* Byte address of PCIERC(a)_CFG090 in PCI config space. The valid index
+ range is model dependent (CN81XX: a<=2, CN83XX: a<=3, CN88XX: a<=5);
+ an out-of-range index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG090(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG090(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000168ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000168ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000168ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG090", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG090(a) bdk_pciercx_cfg090_t
+#define bustype_BDK_PCIERCX_CFG090(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG090(a) "PCIERCX_CFG090"
+#define busnum_BDK_PCIERCX_CFG090(a) (a)
+#define arguments_BDK_PCIERCX_CFG090(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg091
+ *
+ * PCIe RC Equalization Control Lane 4/5 Register
+ * This register contains the ninety-second 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg091
+{
+ uint32_t u;
+ /* Generic layout (shared by cn81xx/cn88xx); the cn83xx variant keeps the
+ same bit positions but drops the "/H" annotation on the upstream
+ preset fields. */
+ struct bdk_pciercx_cfg091_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l5urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 5 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l5utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 5 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t l5drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 5 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l5dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 5 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_15 : 1;
+ uint32_t l4urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 4 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l4utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 4 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t l4drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 4 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l4dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 4 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t l4dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 4 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l4drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 4 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t l4utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 4 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t l4urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 4 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_15 : 1;
+ uint32_t l5dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 5 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t l5drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 5 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t l5utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 5 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+ the application must not change this field. */
+ uint32_t l5urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 5 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg091_s cn81xx; */
+ /* struct bdk_pciercx_cfg091_s cn88xx; */
+ struct bdk_pciercx_cfg091_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l5urph : 3; /**< [ 30: 28](RO/WRSL) Lane 5 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l5utp : 4; /**< [ 27: 24](RO/WRSL) Lane 5 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l5drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 5 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l5dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 5 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l4urph : 3; /**< [ 14: 12](RO/WRSL) Lane 4 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l4utp : 4; /**< [ 11: 8](RO/WRSL) Lane 4 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l4drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 4 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l4dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 4 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+ uint32_t l4dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 4 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l4drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 4 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l4utp : 4; /**< [ 11: 8](RO/WRSL) Lane 4 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l4urph : 3; /**< [ 14: 12](RO/WRSL) Lane 4 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l5dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 5 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l5drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 5 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l5utp : 4; /**< [ 27: 24](RO/WRSL) Lane 5 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l5urph : 3; /**< [ 30: 28](RO/WRSL) Lane 5 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg091 bdk_pciercx_cfg091_t;
+
+/* Byte address of PCIERC(a)_CFG091 in PCI config space. The valid index
+ range is model dependent (CN81XX: a<=2, CN83XX: a<=3, CN88XX: a<=5);
+ an out-of-range index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG091(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG091(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x2000000016cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000016cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x2000000016cll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG091", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG091(a) bdk_pciercx_cfg091_t
+#define bustype_BDK_PCIERCX_CFG091(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG091(a) "PCIERCX_CFG091"
+#define busnum_BDK_PCIERCX_CFG091(a) (a)
+#define arguments_BDK_PCIERCX_CFG091(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg092
+ *
+ * PCIe RC Equalization Control Lane 6/7 Register
+ * This register contains the ninety-third 32-bits of type 0 PCIe configuration space.
+ * Holds the per-lane equalization transmitter presets and receiver preset hints for
+ * lanes 6 and 7. Valid index ranges per chip model are encoded in
+ * BDK_PCIERCX_CFG092() below (CN81XX: a<=2, CN83XX: a<=3, CN88XX: a<=5).
+ */
+union bdk_pciercx_cfg092
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg092_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t l7urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 7 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t l7utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 7 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+                                 the application must not change this field. */
+        uint32_t reserved_23 : 1;
+        uint32_t l7drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 7 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t l7dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 7 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t reserved_15 : 1;
+        uint32_t l6urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 6 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t l6utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 6 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+                                 the application must not change this field. */
+        uint32_t reserved_7 : 1;
+        uint32_t l6drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 6 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t l6dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 6 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t l6dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 6 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t l6drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 6 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t reserved_7 : 1;
+        uint32_t l6utp : 4; /**< [ 11: 8](RO/WRSL/H) Lane 6 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+                                 the application must not change this field. */
+        uint32_t l6urph : 3; /**< [ 14: 12](RO/WRSL/H) Lane 6 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t reserved_15 : 1;
+        uint32_t l7dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 7 downstream component transmitter preset. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t l7drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 7 downstream component receiver preset hint. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t reserved_23 : 1;
+        uint32_t l7utp : 4; /**< [ 27: 24](RO/WRSL/H) Lane 7 upstream component transmitter preset. Writable through PEM()_CFG_WR. However,
+                                 the application must not change this field. */
+        uint32_t l7urph : 3; /**< [ 30: 28](RO/WRSL/H) Lane 7 upstream component receiver preset hint. Writable through PEM()_CFG_WR.
+                                 However, the application must not change this field. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg092_s cn81xx; */
+    /* struct bdk_pciercx_cfg092_s cn88xx; */
+    struct bdk_pciercx_cfg092_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t l7urph : 3; /**< [ 30: 28](RO/WRSL) Lane 7 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l7utp : 4; /**< [ 27: 24](RO/WRSL) Lane 7 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l7drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 7 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l7dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 7 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l6urph : 3; /**< [ 14: 12](RO/WRSL) Lane 6 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l6utp : 4; /**< [ 11: 8](RO/WRSL) Lane 6 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l6drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 6 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l6dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 6 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+        uint32_t l6dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 6 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l6drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 6 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l6utp : 4; /**< [ 11: 8](RO/WRSL) Lane 6 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l6urph : 3; /**< [ 14: 12](RO/WRSL) Lane 6 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l7dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 7 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l7drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 7 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l7utp : 4; /**< [ 27: 24](RO/WRSL) Lane 7 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l7urph : 3; /**< [ 30: 28](RO/WRSL) Lane 7 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg092 bdk_pciercx_cfg092_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG092 for the running chip model.
+   NOTE(review): falls off the end after __bdk_csr_fatal() on an invalid index;
+   presumably __bdk_csr_fatal() does not return -- confirm its declaration. */
+static inline uint64_t BDK_PCIERCX_CFG092(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG092(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+        return 0x20000000170ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000170ll + 0x100000000ll * ((a) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+        return 0x20000000170ll + 0x100000000ll * ((a) & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG092", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG092(a) bdk_pciercx_cfg092_t
+#define bustype_BDK_PCIERCX_CFG092(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG092(a) "PCIERCX_CFG092"
+#define busnum_BDK_PCIERCX_CFG092(a) (a)
+#define arguments_BDK_PCIERCX_CFG092(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg110
+ *
+ * PCIe RC Vendor Specific RAS DES Capability Header Register
+ * This register contains the one hundred eleventh 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg110
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg110_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the Vendor Specific RAS Data Path Protection
+                               capabilities.
+                               Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                             Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                  Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                  Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                             Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the Vendor Specific RAS Data Path Protection
+                               capabilities.
+                               Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg110_s cn; */
+};
+typedef union bdk_pciercx_cfg110 bdk_pciercx_cfg110_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG110; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG110(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG110(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001b8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG110", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG110(a) bdk_pciercx_cfg110_t
+#define bustype_BDK_PCIERCX_CFG110(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG110(a) "PCIERCX_CFG110"
+#define busnum_BDK_PCIERCX_CFG110(a) (a)
+#define arguments_BDK_PCIERCX_CFG110(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg111
+ *
+ * PCIe RC Vendor RAS DES Header Register
+ * This register contains the one hundred twelfth 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below).
+ * NOTE(review): the per-field descriptions below mirror the capability-header wording of
+ * CFG110; per the field names these hold the VSEC ID/revision/length -- confirm with HRM.
+ */
+union bdk_pciercx_cfg111
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg111_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vsec_length : 12; /**< [ 31: 20](RO) VSEC length. */
+        uint32_t vsec_rev : 4; /**< [ 19: 16](RO) Capability version. */
+        uint32_t vsec_id : 16; /**< [ 15: 0](RO) PCI Express extended capability. */
+#else /* Word 0 - Little Endian */
+        uint32_t vsec_id : 16; /**< [ 15: 0](RO) PCI Express extended capability. */
+        uint32_t vsec_rev : 4; /**< [ 19: 16](RO) Capability version. */
+        uint32_t vsec_length : 12; /**< [ 31: 20](RO) VSEC length. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg111_s cn; */
+};
+typedef union bdk_pciercx_cfg111 bdk_pciercx_cfg111_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG111; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG111(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG111(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001bcll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG111", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG111(a) bdk_pciercx_cfg111_t
+#define bustype_BDK_PCIERCX_CFG111(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG111(a) "PCIERCX_CFG111"
+#define busnum_BDK_PCIERCX_CFG111(a) (a)
+#define arguments_BDK_PCIERCX_CFG111(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg112
+ *
+ * PCIe RC Vendor RAS DES Event Counter Control Register
+ * This register contains the one hundred thirteenth 32-bits of PCIe type 0 configuration space.
+ * Selects/enables/clears the RAS DES event counters; counter data is read back through
+ * PCIERC()_CFG113. Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg112
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg112_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31 : 4;
+        uint32_t ev_cntr_data_sel : 12; /**< [ 27: 16](R/W) Event counter data select. This field in conjunction with [EV_CNTR_LANE_SEL]
+                                            selects PCIERC()_CFG113[EV_CNTR_DATA].
+                                            _ \<27:24\> = Group number (0..0x7).
+                                            _ \<23:16\> = Event number (0..0x13). */
+        uint32_t reserved_12_15 : 4;
+        uint32_t ev_cntr_lane_sel : 4; /**< [ 11: 8](R/W) Event counter lane select. This field in conjunction with [EV_CNTR_DATA_SEL]
+                                           indexes the event counter data returned in the PCIERC()_CFG113[EV_CNTR_DATA].
+
+                                           0x0-0x7 = Lane number.
+                                           0x8-0xF = Reserved. */
+        uint32_t ev_cntr_stat : 1; /**< [ 7: 7](RO/H) Event counter status. Returns the Enable status of the event counter
+                                       selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL]. */
+        uint32_t reserved_5_6 : 2;
+        uint32_t ev_cntr_en : 3; /**< [ 4: 2](WO) Event counter enable. Enables/disables the event counter
+                                     selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL].
+                                     By default, all event counters are disabled. This field
+                                     always reads zeros.
+
+                                     0x0 = No change.
+                                     0x1 = Per event off.
+                                     0x2 = No change.
+                                     0x3 = Per event on.
+                                     0x4 = No change.
+                                     0x5 = All off.
+                                     0x6 = No change.
+                                     0x7 = All on. */
+        uint32_t ev_cntr_clr : 2; /**< [ 1: 0](WO) Event counter clear. Clears the event counters
+                                      selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL].
+                                      By default, all event counters are disabled. This field
+                                      always reads zeros.
+
+                                      0x0 = No change.
+                                      0x1 = Per clear.
+                                      0x2 = No change.
+                                      0x3 = All clear. */
+#else /* Word 0 - Little Endian */
+        uint32_t ev_cntr_clr : 2; /**< [ 1: 0](WO) Event counter clear. Clears the event counters
+                                      selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL].
+                                      By default, all event counters are disabled. This field
+                                      always reads zeros.
+
+                                      0x0 = No change.
+                                      0x1 = Per clear.
+                                      0x2 = No change.
+                                      0x3 = All clear. */
+        uint32_t ev_cntr_en : 3; /**< [ 4: 2](WO) Event counter enable. Enables/disables the event counter
+                                     selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL].
+                                     By default, all event counters are disabled. This field
+                                     always reads zeros.
+
+                                     0x0 = No change.
+                                     0x1 = Per event off.
+                                     0x2 = No change.
+                                     0x3 = Per event on.
+                                     0x4 = No change.
+                                     0x5 = All off.
+                                     0x6 = No change.
+                                     0x7 = All on. */
+        uint32_t reserved_5_6 : 2;
+        uint32_t ev_cntr_stat : 1; /**< [ 7: 7](RO/H) Event counter status. Returns the Enable status of the event counter
+                                       selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL]. */
+        uint32_t ev_cntr_lane_sel : 4; /**< [ 11: 8](R/W) Event counter lane select. This field in conjunction with [EV_CNTR_DATA_SEL]
+                                           indexes the event counter data returned in the PCIERC()_CFG113[EV_CNTR_DATA].
+
+                                           0x0-0x7 = Lane number.
+                                           0x8-0xF = Reserved. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t ev_cntr_data_sel : 12; /**< [ 27: 16](R/W) Event counter data select. This field in conjunction with [EV_CNTR_LANE_SEL]
+                                            selects PCIERC()_CFG113[EV_CNTR_DATA].
+                                            _ \<27:24\> = Group number (0..0x7).
+                                            _ \<23:16\> = Event number (0..0x13). */
+        uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg112_s cn; */
+};
+typedef union bdk_pciercx_cfg112 bdk_pciercx_cfg112_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG112; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG112(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG112(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001c0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG112", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG112(a) bdk_pciercx_cfg112_t
+#define bustype_BDK_PCIERCX_CFG112(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG112(a) "PCIERCX_CFG112"
+#define busnum_BDK_PCIERCX_CFG112(a) (a)
+#define arguments_BDK_PCIERCX_CFG112(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg113
+ *
+ * PCIe RC Vendor RAS DES Data Register
+ * This register contains the one hundred fourteenth 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg113
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg113_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ev_cntr_data : 32; /**< [ 31: 0](RO) Event counter data. This field returns data selected by PCIERC()_CFG112[EV_CNTR_DATA_SEL]
+                                        and PCIERC()_CFG112[EV_CNTR_LANE_SEL]. */
+#else /* Word 0 - Little Endian */
+        uint32_t ev_cntr_data : 32; /**< [ 31: 0](RO) Event counter data. This field returns data selected by PCIERC()_CFG112[EV_CNTR_DATA_SEL]
+                                        and PCIERC()_CFG112[EV_CNTR_LANE_SEL]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg113_s cn; */
+};
+typedef union bdk_pciercx_cfg113 bdk_pciercx_cfg113_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG113; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG113(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG113(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001c4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG113", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG113(a) bdk_pciercx_cfg113_t
+#define bustype_BDK_PCIERCX_CFG113(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG113(a) "PCIERCX_CFG113"
+#define busnum_BDK_PCIERCX_CFG113(a) (a)
+#define arguments_BDK_PCIERCX_CFG113(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg114
+ *
+ * PCIe RC Vendor RAS DES Time Based Analysis Control Register
+ * This register contains the one hundred fifteenth 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below). Results are read back
+ * through PCIERC()_CFG115[TBASE_DATA].
+ * NOTE(review): the [TIMER_START] value encoding below (0x0 = Start/Restart, 0x1 = Stop)
+ * reads inverted relative to the field name -- confirm against the hardware manual.
+ */
+union bdk_pciercx_cfg114
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg114_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t tbase_rpt_sel : 8; /**< [ 31: 24](R/W) Time-based report select. Selects what type of data is measured for the selected
+                                        duration.
+                                        TBASE_DUR_SEL. Data is returned in PCIERC()_CFG115[TBASE_DATA].
+
+                                        Each type of data is measured using one of three types of units.
+
+                                        Core clock cycles.
+                                        0x0 = Duration of 1 cycle.
+                                        0x1 = TxL0s.
+                                        0x2 = RxL0s.
+                                        0x3 = L0.
+                                        0x4 = L1.
+                                        0x7 = Configuration/recovery.
+
+                                        Aux_clk cycles.
+                                        0x5 = L1.1.
+                                        0x6 = L1.2.
+
+                                        Data bytes. Actual amount is 16x value.
+                                        0x20 = TX TLP Bytes.
+                                        0x21 = RX TLP Bytes. */
+        uint32_t reserved_16_23 : 8;
+        uint32_t tbase_dur_sel : 8; /**< [ 15: 8](R/W) Time-based duration select. Selects the duration of time-based
+                                        analysis.
+
+                                        0x0 = Manual control. Analysis controlled by [TIMER_START].
+                                        0x1 = 1ms.
+                                        0x2 = 10ms.
+                                        0x3 = 100ms.
+                                        0x4 = 1s.
+                                        0x5 = 2s.
+                                        0x6 = 4s.
+                                        0x7 - 0xF = Reserved. */
+        uint32_t reserved_1_7 : 7;
+        uint32_t timer_start : 1; /**< [ 0: 0](R/W) Timer start.
+
+                                      0x0 = Start/Restart
+                                      0x1 = Stop.
+
+                                      This bit will be cleared automatically when the measurement is finished. */
+#else /* Word 0 - Little Endian */
+        uint32_t timer_start : 1; /**< [ 0: 0](R/W) Timer start.
+
+                                      0x0 = Start/Restart
+                                      0x1 = Stop.
+
+                                      This bit will be cleared automatically when the measurement is finished. */
+        uint32_t reserved_1_7 : 7;
+        uint32_t tbase_dur_sel : 8; /**< [ 15: 8](R/W) Time-based duration select. Selects the duration of time-based
+                                        analysis.
+
+                                        0x0 = Manual control. Analysis controlled by [TIMER_START].
+                                        0x1 = 1ms.
+                                        0x2 = 10ms.
+                                        0x3 = 100ms.
+                                        0x4 = 1s.
+                                        0x5 = 2s.
+                                        0x6 = 4s.
+                                        0x7 - 0xF = Reserved. */
+        uint32_t reserved_16_23 : 8;
+        uint32_t tbase_rpt_sel : 8; /**< [ 31: 24](R/W) Time-based report select. Selects what type of data is measured for the selected
+                                        duration.
+                                        TBASE_DUR_SEL. Data is returned in PCIERC()_CFG115[TBASE_DATA].
+
+                                        Each type of data is measured using one of three types of units.
+
+                                        Core clock cycles.
+                                        0x0 = Duration of 1 cycle.
+                                        0x1 = TxL0s.
+                                        0x2 = RxL0s.
+                                        0x3 = L0.
+                                        0x4 = L1.
+                                        0x7 = Configuration/recovery.
+
+                                        Aux_clk cycles.
+                                        0x5 = L1.1.
+                                        0x6 = L1.2.
+
+                                        Data bytes. Actual amount is 16x value.
+                                        0x20 = TX TLP Bytes.
+                                        0x21 = RX TLP Bytes. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg114_s cn; */
+};
+typedef union bdk_pciercx_cfg114 bdk_pciercx_cfg114_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG114; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG114(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG114(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001c8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG114", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG114(a) bdk_pciercx_cfg114_t
+#define bustype_BDK_PCIERCX_CFG114(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG114(a) "PCIERCX_CFG114"
+#define busnum_BDK_PCIERCX_CFG114(a) (a)
+#define arguments_BDK_PCIERCX_CFG114(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg115
+ *
+ * PCIe RC Vendor RAS DES Time Based Analysis Data Register
+ * This register contains the one hundred sixteenth 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg115
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg115_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t tbase_data : 32; /**< [ 31: 0](RO/H) Time-based analysis data. This register returns data selected in the
+                                      PCIERC()_CFG114[TBASE_RPT_SEL] field. The results are cleared when
+                                      the next measurement starts. */
+#else /* Word 0 - Little Endian */
+        uint32_t tbase_data : 32; /**< [ 31: 0](RO/H) Time-based analysis data. This register returns data selected in the
+                                      PCIERC()_CFG114[TBASE_RPT_SEL] field. The results are cleared when
+                                      the next measurement starts. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg115_s cn; */
+};
+typedef union bdk_pciercx_cfg115 bdk_pciercx_cfg115_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG115; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG115(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG115(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001ccll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG115", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG115(a) bdk_pciercx_cfg115_t
+#define bustype_BDK_PCIERCX_CFG115(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG115(a) "PCIERCX_CFG115"
+#define busnum_BDK_PCIERCX_CFG115(a) (a)
+#define arguments_BDK_PCIERCX_CFG115(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg121
+ *
+ * PCIe RC Vendor RAS DES Error Injection Enable Register
+ * This register contains the one hundred twenty-first 32-bits of PCIe type 0 configuration space.
+ * Master enable bits for the per-type error injectors configured in
+ * PCIERC()_CFG122..CFG128. Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg121
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg121_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_7_31 : 25;
+        uint32_t einj6_en : 1; /**< [ 6: 6](R/W) Specific TLP error injection enable. Enables insertion of errors into the
+                                   packet selected. For more details, refer to PCIERC()_CFG128. */
+        uint32_t einj5_en : 1; /**< [ 5: 5](R/W) TLP duplicate/nullify error injection enable. Enables insertion of duplicate/nullified
+                                   TLPs. For more details, refer to PCIERC()_CFG127. */
+        uint32_t einj4_en : 1; /**< [ 4: 4](R/W) FC credit update error injection enable. Enables insertion of errors into
+                                   Updated FCs. See PCIERC()_CFG126. */
+        uint32_t einj3_en : 1; /**< [ 3: 3](R/W) Symbol datak mask or sync header error enable. Enables data masking of special
+                                   symbols or the breaking of the sync header. See PCIERC()_CFG125. */
+        uint32_t einj2_en : 1; /**< [ 2: 2](R/W) DLLP error injection enable. Enables insertion of DLLP errors.
+                                   See PCIERC()_CFG124. */
+        uint32_t einj1_en : 1; /**< [ 1: 1](R/W) Sequence number error injection enable. Enables insertion of errors into
+                                   sequence numbers.
+                                   See PCIERC()_CFG123. */
+        uint32_t einj0_en : 1; /**< [ 0: 0](R/W) CRC error injection enable. Enables insertion of errors into various CRC.
+                                   See PCIERC()_CFG122. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj0_en : 1; /**< [ 0: 0](R/W) CRC error injection enable. Enables insertion of errors into various CRC.
+                                   See PCIERC()_CFG122. */
+        uint32_t einj1_en : 1; /**< [ 1: 1](R/W) Sequence number error injection enable. Enables insertion of errors into
+                                   sequence numbers.
+                                   See PCIERC()_CFG123. */
+        uint32_t einj2_en : 1; /**< [ 2: 2](R/W) DLLP error injection enable. Enables insertion of DLLP errors.
+                                   See PCIERC()_CFG124. */
+        uint32_t einj3_en : 1; /**< [ 3: 3](R/W) Symbol datak mask or sync header error enable. Enables data masking of special
+                                   symbols or the breaking of the sync header. See PCIERC()_CFG125. */
+        uint32_t einj4_en : 1; /**< [ 4: 4](R/W) FC credit update error injection enable. Enables insertion of errors into
+                                   Updated FCs. See PCIERC()_CFG126. */
+        uint32_t einj5_en : 1; /**< [ 5: 5](R/W) TLP duplicate/nullify error injection enable. Enables insertion of duplicate/nullified
+                                   TLPs. For more details, refer to PCIERC()_CFG127. */
+        uint32_t einj6_en : 1; /**< [ 6: 6](R/W) Specific TLP error injection enable. Enables insertion of errors into the
+                                   packet selected. For more details, refer to PCIERC()_CFG128. */
+        uint32_t reserved_7_31 : 25;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg121_s cn; */
+};
+typedef union bdk_pciercx_cfg121 bdk_pciercx_cfg121_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG121; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG121(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG121(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001e8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG121", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG121(a) bdk_pciercx_cfg121_t
+#define bustype_BDK_PCIERCX_CFG121(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG121(a) "PCIERCX_CFG121"
+#define busnum_BDK_PCIERCX_CFG121(a) (a)
+#define arguments_BDK_PCIERCX_CFG121(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg122
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 0 (CRC) Register
+ * This register contains the one hundred twenty-third 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg122
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg122_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_12_31 : 20;
+        uint32_t einj0_crc_type : 4; /**< [ 11: 8](R/W) Error injection type. Selects the type of CRC error to be inserted.
+
+                                         TX path:
+                                         0x0 = New TLP's LCRC error injection.
+                                         0x1 = 16bCRC error injection of ACK/NAK DLLP.
+                                         0x2 = 16bCRC error injection of Update-FC DLLP.
+                                         0x3 = New TLP's ECRC error injection.
+                                         0x4 = TLP's FCRC error injection (128b/130b).
+                                         0x5 = Parity error of TSOS (128b/130b).
+                                         0x6 = Parity error of SKPOS (128b/130b).
+                                         0x7 = Reserved.
+
+                                         RX Path:
+                                         0x8 = LCRC error injection.
+                                         0x9 = ECRC error injection.
+                                         0xA - 0xF = Reserved. */
+        uint32_t einj0_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+                                    This register is decremented when errors are inserted.
+
+                                    If the counter value is 0x1 and error is inserted,
+                                    PCIERC()_CFG121[EINJ0_EN] returns zero.
+
+                                    If the counter value is 0x0 and PCIERC()_CFG121[EINJ0_EN] is set,
+                                    errors are inserted until PCIERC()_CFG121[EINJ0_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj0_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+                                    This register is decremented when errors are inserted.
+
+                                    If the counter value is 0x1 and error is inserted,
+                                    PCIERC()_CFG121[EINJ0_EN] returns zero.
+
+                                    If the counter value is 0x0 and PCIERC()_CFG121[EINJ0_EN] is set,
+                                    errors are inserted until PCIERC()_CFG121[EINJ0_EN] is cleared. */
+        uint32_t einj0_crc_type : 4; /**< [ 11: 8](R/W) Error injection type. Selects the type of CRC error to be inserted.
+
+                                         TX path:
+                                         0x0 = New TLP's LCRC error injection.
+                                         0x1 = 16bCRC error injection of ACK/NAK DLLP.
+                                         0x2 = 16bCRC error injection of Update-FC DLLP.
+                                         0x3 = New TLP's ECRC error injection.
+                                         0x4 = TLP's FCRC error injection (128b/130b).
+                                         0x5 = Parity error of TSOS (128b/130b).
+                                         0x6 = Parity error of SKPOS (128b/130b).
+                                         0x7 = Reserved.
+
+                                         RX Path:
+                                         0x8 = LCRC error injection.
+                                         0x9 = ECRC error injection.
+                                         0xA - 0xF = Reserved. */
+        uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg122_s cn; */
+};
+typedef union bdk_pciercx_cfg122 bdk_pciercx_cfg122_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG122; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG122(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG122(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001ecll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG122", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG122(a) bdk_pciercx_cfg122_t
+#define bustype_BDK_PCIERCX_CFG122(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG122(a) "PCIERCX_CFG122"
+#define busnum_BDK_PCIERCX_CFG122(a) (a)
+#define arguments_BDK_PCIERCX_CFG122(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg123
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 1 (SEQNUM) Register
+ * This register contains the one hundred twenty-fourth 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg123
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg123_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_29_31 : 3;
+        uint32_t einj1_bad_seqnum : 13; /**< [ 28: 16](R/W) Bad sequence number. Indicates the value to add/subtract
+                                            from the naturally-assigned sequence numbers. This value is
+                                            represented by two's complement.
+
+                                            0x0FFF = +4095.
+
+                                            0x0002 = +2.
+                                            0x0001 = +1.
+                                            0x0000 = 0.
+                                            0x1FFF = -1.
+                                            0x1FFE = -2.
+
+                                            0x1001 = -4095. */
+        uint32_t reserved_9_15 : 7;
+        uint32_t einj1_seqnum_type : 1; /**< [ 8: 8](R/W) Sequence number type. Selects the type of sequence number.
+
+                                            0x0 = Insertion of New TLP's SEQ error.
+                                            0x1 = Insertion of ACK/NAK DLLP's SEQ error. */
+        uint32_t einj1_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+                                    This register is decremented when errors are inserted.
+
+                                    If the counter value is 0x1 and error is inserted,
+                                    PCIERC()_CFG121[EINJ1_EN] returns zero.
+
+                                    If the counter value is 0x0 and PCIERC()_CFG121[EINJ1_EN] is set,
+                                    errors are inserted until PCIERC()_CFG121[EINJ1_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj1_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+                                    This register is decremented when errors are inserted.
+
+                                    If the counter value is 0x1 and error is inserted,
+                                    PCIERC()_CFG121[EINJ1_EN] returns zero.
+
+                                    If the counter value is 0x0 and PCIERC()_CFG121[EINJ1_EN] is set,
+                                    errors are inserted until PCIERC()_CFG121[EINJ1_EN] is cleared. */
+        uint32_t einj1_seqnum_type : 1; /**< [ 8: 8](R/W) Sequence number type. Selects the type of sequence number.
+
+                                            0x0 = Insertion of New TLP's SEQ error.
+                                            0x1 = Insertion of ACK/NAK DLLP's SEQ error. */
+        uint32_t reserved_9_15 : 7;
+        uint32_t einj1_bad_seqnum : 13; /**< [ 28: 16](R/W) Bad sequence number. Indicates the value to add/subtract
+                                            from the naturally-assigned sequence numbers. This value is
+                                            represented by two's complement.
+
+                                            0x0FFF = +4095.
+
+                                            0x0002 = +2.
+                                            0x0001 = +1.
+                                            0x0000 = 0.
+                                            0x1FFF = -1.
+                                            0x1FFE = -2.
+
+                                            0x1001 = -4095. */
+        uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg123_s cn; */
+};
+typedef union bdk_pciercx_cfg123 bdk_pciercx_cfg123_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG123; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG123(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG123(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001f0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG123", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG123(a) bdk_pciercx_cfg123_t
+#define bustype_BDK_PCIERCX_CFG123(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG123(a) "PCIERCX_CFG123"
+#define busnum_BDK_PCIERCX_CFG123(a) (a)
+#define arguments_BDK_PCIERCX_CFG123(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg124
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 2 (DLLP) Register
+ * This register contains the one hundred twenty-fifth 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg124
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg124_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_10_31 : 22;
+        uint32_t einj2_dllp_type : 2; /**< [ 9: 8](R/W) DLLP type. Selects the type of DLLP errors to be inserted.
+
+                                          0x0 = ACK/NAK DLLP transmission block.
+                                          0x1 = Update FC DLLP's transmission block.
+                                          0x2 = Always transmission for NAK DLLP.
+                                          0x3 = Reserved. */
+        uint32_t einj2_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+                                    This register is decremented when errors are inserted.
+
+                                    If the counter value is 0x1 and error is inserted,
+                                    PCIERC()_CFG121[EINJ2_EN] returns zero.
+
+                                    If the counter value is 0x0 and PCIERC()_CFG121[EINJ2_EN] is set,
+                                    errors are inserted until PCIERC()_CFG121[EINJ2_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj2_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+                                    This register is decremented when errors are inserted.
+
+                                    If the counter value is 0x1 and error is inserted,
+                                    PCIERC()_CFG121[EINJ2_EN] returns zero.
+
+                                    If the counter value is 0x0 and PCIERC()_CFG121[EINJ2_EN] is set,
+                                    errors are inserted until PCIERC()_CFG121[EINJ2_EN] is cleared. */
+        uint32_t einj2_dllp_type : 2; /**< [ 9: 8](R/W) DLLP type. Selects the type of DLLP errors to be inserted.
+
+                                          0x0 = ACK/NAK DLLP transmission block.
+                                          0x1 = Update FC DLLP's transmission block.
+                                          0x2 = Always transmission for NAK DLLP.
+                                          0x3 = Reserved. */
+        uint32_t reserved_10_31 : 22;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg124_s cn; */
+};
+typedef union bdk_pciercx_cfg124 bdk_pciercx_cfg124_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG124; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG124(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG124(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001f4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG124", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG124(a) bdk_pciercx_cfg124_t
+#define bustype_BDK_PCIERCX_CFG124(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG124(a) "PCIERCX_CFG124"
+#define busnum_BDK_PCIERCX_CFG124(a) (a)
+#define arguments_BDK_PCIERCX_CFG124(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg125
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 3 (Symbol) Register
+ * This register contains the one hundred twenty-sixth 32-bits of PCIe type 0 configuration space.
+ * Present only on CN83XX (see the address function below).
+ */
+union bdk_pciercx_cfg125
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg125_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_11_31 : 21;
+        uint32_t einj3_symbol_type : 3; /**< [ 10: 8](R/W) Error type, 8 b/10 b encoding - Mask K symbol.
+
+                                            0x0 = Reserved.
+                                            0x1 = COM/PAD(TS1 Order Set).
+                                            0x2 = COM/PAD(TS2 Order Set).
+                                            0x3 = COM/FTS(FTS Order Set).
+                                            0x4 = COM/IDLE(E-Idle Order Set).
+                                            0x5 = END/EDB Symbol.
+                                            0x6 = STP/SDP Symbol.
+                                            0x7 = COM/SKP(SKP Order set). */
+        uint32_t einj3_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+                                    This register is decremented when errors are inserted.
+
+                                    If the counter value is 0x1 and error is inserted,
+                                    PCIERC()_CFG121[EINJ3_EN] returns zero.
+
+                                    If the counter value is 0x0 and PCIERC()_CFG121[EINJ3_EN] is set,
+                                    errors are inserted until PCIERC()_CFG121[EINJ3_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj3_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+                                    This register is decremented when errors are inserted.
+
+                                    If the counter value is 0x1 and error is inserted,
+                                    PCIERC()_CFG121[EINJ3_EN] returns zero.
+
+                                    If the counter value is 0x0 and PCIERC()_CFG121[EINJ3_EN] is set,
+                                    errors are inserted until PCIERC()_CFG121[EINJ3_EN] is cleared. */
+        uint32_t einj3_symbol_type : 3; /**< [ 10: 8](R/W) Error type, 8 b/10 b encoding - Mask K symbol.
+
+                                            0x0 = Reserved.
+                                            0x1 = COM/PAD(TS1 Order Set).
+                                            0x2 = COM/PAD(TS2 Order Set).
+                                            0x3 = COM/FTS(FTS Order Set).
+                                            0x4 = COM/IDLE(E-Idle Order Set).
+                                            0x5 = END/EDB Symbol.
+                                            0x6 = STP/SDP Symbol.
+                                            0x7 = COM/SKP(SKP Order set). */
+        uint32_t reserved_11_31 : 21;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg125_s cn; */
+};
+typedef union bdk_pciercx_cfg125 bdk_pciercx_cfg125_t;
+
+/* Returns the CSR address of PCIERC(a)_CFG125; fatal for non-CN83XX or a>3. */
+static inline uint64_t BDK_PCIERCX_CFG125(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG125(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000001f8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG125", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG125(a) bdk_pciercx_cfg125_t
+#define bustype_BDK_PCIERCX_CFG125(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG125(a) "PCIERCX_CFG125"
+#define busnum_BDK_PCIERCX_CFG125(a) (a)
+#define arguments_BDK_PCIERCX_CFG125(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg126
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 4 (FC Credit) Register
+ * This register contains the one hundred twenty-seventh 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg126
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg126_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t einj4_bad_updfc_val : 13; /**< [ 28: 16](R/W) Bad update-FC credit value. Indicates the value to add/subtract
+ from the UpdateFC credit. The value is represented by two's
+ complement.
+
+ 0x0FFF = +4095.
+
+ 0x0002 = +2.
+ 0x0001 = +1.
+ 0x0000 = 0.
+ 0x1FFF = -1.
+ 0x1FFE = -2.
+
+ 0x1001 = -4095. */
+ uint32_t reserved_15 : 1;
+ uint32_t einj4_vc_num : 3; /**< [ 14: 12](R/W) VC number. Indicates the target VC Number. */
+ uint32_t reserved_11 : 1;
+ uint32_t einj4_vc_type : 3; /**< [ 10: 8](R/W) Update-FC type. Selects the credit type.
+
+ 0x0 = Posted TLP header credit value control.
+ 0x1 = Non-Posted TLP header credit value control.
+ 0x2 = Completion TLP header credit value control.
+ 0x3 = Reserved.
+ 0x4 = Posted TLP data credit value control.
+ 0x5 = Non-Posted TLP data credit value control.
+ 0x6 = Completion TLP data credit value control.
+ 0x7 = Reserved. */
+ uint32_t einj4_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC()_CFG121[EINJ4_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC()_CFG121[EINJ4_EN] is set,
+ errors are inserted until PCIERC()_CFG121[EINJ4_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj4_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC()_CFG121[EINJ4_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC()_CFG121[EINJ4_EN] is set,
+ errors are inserted until PCIERC()_CFG121[EINJ4_EN] is cleared. */
+ uint32_t einj4_vc_type : 3; /**< [ 10: 8](R/W) Update-FC type. Selects the credit type.
+
+ 0x0 = Posted TLP header credit value control.
+ 0x1 = Non-Posted TLP header credit value control.
+ 0x2 = Completion TLP header credit value control.
+ 0x3 = Reserved.
+ 0x4 = Posted TLP data credit value control.
+ 0x5 = Non-Posted TLP data credit value control.
+ 0x6 = Completion TLP data credit value control.
+ 0x7 = Reserved. */
+ uint32_t reserved_11 : 1;
+ uint32_t einj4_vc_num : 3; /**< [ 14: 12](R/W) VC number. Indicates the target VC Number. */
+ uint32_t reserved_15 : 1;
+ uint32_t einj4_bad_updfc_val : 13; /**< [ 28: 16](R/W) Bad update-FC credit value. Indicates the value to add/subtract
+ from the UpdateFC credit. The value is represented by two's
+ complement.
+
+ 0x0FFF = +4095.
+
+ 0x0002 = +2.
+ 0x0001 = +1.
+ 0x0000 = 0.
+ 0x1FFF = -1.
+ 0x1FFE = -2.
+
+ 0x1001 = -4095. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg126_s cn; */
+};
+typedef union bdk_pciercx_cfg126 bdk_pciercx_cfg126_t;
+
+static inline uint64_t BDK_PCIERCX_CFG126(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG126(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000001fcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG126", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG126(a) bdk_pciercx_cfg126_t
+#define bustype_BDK_PCIERCX_CFG126(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG126(a) "PCIERCX_CFG126"
+#define busnum_BDK_PCIERCX_CFG126(a) (a)
+#define arguments_BDK_PCIERCX_CFG126(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg127
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 5 (Specific TLP) Register
+ * This register contains the one hundred twenty-eighth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg127
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg127_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t einj5_sp_tlp : 1; /**< [ 8: 8](R/W) Specified TLP. Selects the specified TLP to be inserted.
+
+ 0x0 = Generates duplicate TLPs by handling ACK DLLP as NAK DLLP.
+ 0x1 = Generates nullified TLP (Original TLP will be stored in retry buffer). */
+ uint32_t einj5_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC()_CFG121[EINJ5_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC()_CFG121[EINJ5_EN] is set,
+ errors are inserted until PCIERC()_CFG121[EINJ5_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj5_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC()_CFG121[EINJ5_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC()_CFG121[EINJ5_EN] is set,
+ errors are inserted until PCIERC()_CFG121[EINJ5_EN] is cleared. */
+ uint32_t einj5_sp_tlp : 1; /**< [ 8: 8](R/W) Specified TLP. Selects the specified TLP to be inserted.
+
+ 0x0 = Generates duplicate TLPs by handling ACK DLLP as NAK DLLP.
+ 0x1 = Generates nullified TLP (Original TLP will be stored in retry buffer). */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg127_s cn; */
+};
+typedef union bdk_pciercx_cfg127 bdk_pciercx_cfg127_t;
+
+static inline uint64_t BDK_PCIERCX_CFG127(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG127(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000200ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG127", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG127(a) bdk_pciercx_cfg127_t
+#define bustype_BDK_PCIERCX_CFG127(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG127(a) "PCIERCX_CFG127"
+#define busnum_BDK_PCIERCX_CFG127(a) (a)
+#define arguments_BDK_PCIERCX_CFG127(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg128
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Point H0) Register
+ * This register contains the one hundred twenty-ninth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg128
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg128_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg128_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the secondary PCI Express capabilities by default.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the secondary PCI Express capabilities by default.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pciercx_cfg128_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_com_pt_h0 : 32; /**< [ 31: 0](R/W) Packet compare point first DWORD.
+ Specifies which TX TLP header DWORD0 bits to compare
+ with the corresponding bits in PCIERC()_CFG132[EINJ6_COM_VAL_H0].
+ When all specified bits (in the TX TLP header and
+ PCIERC()_CFG132[EINJ6_COM_VAL_H0]) match, an error is inserted into the TLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_com_pt_h0 : 32; /**< [ 31: 0](R/W) Packet compare point first DWORD.
+ Specifies which TX TLP header DWORD0 bits to compare
+ with the corresponding bits in PCIERC()_CFG132[EINJ6_COM_VAL_H0].
+ When all specified bits (in the TX TLP header and
+ PCIERC()_CFG132[EINJ6_COM_VAL_H0]) match, an error is inserted into the TLP. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg128 bdk_pciercx_cfg128_t;
+
+static inline uint64_t BDK_PCIERCX_CFG128(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG128(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000200ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000204ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG128", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG128(a) bdk_pciercx_cfg128_t
+#define bustype_BDK_PCIERCX_CFG128(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG128(a) "PCIERCX_CFG128"
+#define busnum_BDK_PCIERCX_CFG128(a) (a)
+#define arguments_BDK_PCIERCX_CFG128(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg129
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Point H1) Register
+ * This register contains the one hundred thirtieth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg129
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg129_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg129_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t dte : 1; /**< [ 22: 22](R/W) ACS direct translated P2P enable. */
+ uint32_t ece : 1; /**< [ 21: 21](R/W) ACS P2P egress control enable. */
+ uint32_t ufe : 1; /**< [ 20: 20](R/W) ACS upstream forwarding enable. */
+ uint32_t cre : 1; /**< [ 19: 19](R/W) ACS P2P completion redirect enable. */
+ uint32_t rre : 1; /**< [ 18: 18](R/W) ACS P2P request redirect enable. */
+ uint32_t tbe : 1; /**< [ 17: 17](R/W) ACS translation blocking enable. */
+ uint32_t sve : 1; /**< [ 16: 16](R/W) ACS source validation enable. */
+ uint32_t ecvs : 8; /**< [ 15: 8](RO/WRSL) Egress control vector size.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t dt : 1; /**< [ 6: 6](RO/WRSL) ACS direct translated P2P.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t ec : 1; /**< [ 5: 5](RO/WRSL) ACS P2P egress control.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t uf : 1; /**< [ 4: 4](RO/WRSL) ACS upstream forwarding.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cr : 1; /**< [ 3: 3](RO/WRSL) ACS P2P completion redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t rr : 1; /**< [ 2: 2](RO/WRSL) ACS P2P request redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t tb : 1; /**< [ 1: 1](RO/WRSL) ACS translation blocking.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t sv : 1; /**< [ 0: 0](RO/WRSL) ACS source validation.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sv : 1; /**< [ 0: 0](RO/WRSL) ACS source validation.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t tb : 1; /**< [ 1: 1](RO/WRSL) ACS translation blocking.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t rr : 1; /**< [ 2: 2](RO/WRSL) ACS P2P request redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cr : 1; /**< [ 3: 3](RO/WRSL) ACS P2P completion redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t uf : 1; /**< [ 4: 4](RO/WRSL) ACS upstream forwarding.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t ec : 1; /**< [ 5: 5](RO/WRSL) ACS P2P egress control.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t dt : 1; /**< [ 6: 6](RO/WRSL) ACS direct translated P2P.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t ecvs : 8; /**< [ 15: 8](RO/WRSL) Egress control vector size.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t sve : 1; /**< [ 16: 16](R/W) ACS source validation enable. */
+ uint32_t tbe : 1; /**< [ 17: 17](R/W) ACS translation blocking enable. */
+ uint32_t rre : 1; /**< [ 18: 18](R/W) ACS P2P request redirect enable. */
+ uint32_t cre : 1; /**< [ 19: 19](R/W) ACS P2P completion redirect enable. */
+ uint32_t ufe : 1; /**< [ 20: 20](R/W) ACS upstream forwarding enable. */
+ uint32_t ece : 1; /**< [ 21: 21](R/W) ACS P2P egress control enable. */
+ uint32_t dte : 1; /**< [ 22: 22](R/W) ACS direct translated P2P enable. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pciercx_cfg129_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_com_pt_h1 : 32; /**< [ 31: 0](R/W) Packet compare point second DWORD.
+ Specifies which TX TLP header DWORD1 bits to compare
+ with the corresponding bits in PCIERC()_CFG133[EINJ6_COM_VAL_H1].
+ When all specified bits (in the TX TLP header and
+ PCIERC()_CFG133[EINJ6_COM_VAL_H1]) match, an error is inserted into the TLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_com_pt_h1 : 32; /**< [ 31: 0](R/W) Packet compare point second DWORD.
+ Specifies which TX TLP header DWORD1 bits to compare
+ with the corresponding bits in PCIERC()_CFG133[EINJ6_COM_VAL_H1].
+ When all specified bits (in the TX TLP header and
+ PCIERC()_CFG133[EINJ6_COM_VAL_H1]) match, an error is inserted into the TLP. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg129 bdk_pciercx_cfg129_t;
+
+static inline uint64_t BDK_PCIERCX_CFG129(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG129(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000204ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000208ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG129", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG129(a) bdk_pciercx_cfg129_t
+#define bustype_BDK_PCIERCX_CFG129(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG129(a) "PCIERCX_CFG129"
+#define busnum_BDK_PCIERCX_CFG129(a) (a)
+#define arguments_BDK_PCIERCX_CFG129(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg130
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Point H2) Register
+ * This register contains the one hundred thirty-first 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg130
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg130_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg130_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ecv : 32; /**< [ 31: 0](R/W) Egress control vector. */
+#else /* Word 0 - Little Endian */
+ uint32_t ecv : 32; /**< [ 31: 0](R/W) Egress control vector. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pciercx_cfg130_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_com_pt_h2 : 32; /**< [ 31: 0](R/W) Packet compare point third DWORD.
+ Specifies which TX TLP header DWORD2 bits to compare
+ with the corresponding bits in PCIERC()_CFG134[EINJ6_COM_VAL_H2].
+ When all specified bits (in the TX TLP header and
+ PCIERC()_CFG134[EINJ6_COM_VAL_H2]) match, an error is inserted into the TLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_com_pt_h2 : 32; /**< [ 31: 0](R/W) Packet compare point third DWORD.
+ Specifies which TX TLP header DWORD2 bits to compare
+ with the corresponding bits in PCIERC()_CFG134[EINJ6_COM_VAL_H2].
+ When all specified bits (in the TX TLP header and
+ PCIERC()_CFG134[EINJ6_COM_VAL_H2]) match, an error is inserted into the TLP. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg130 bdk_pciercx_cfg130_t;
+
+static inline uint64_t BDK_PCIERCX_CFG130(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG130(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000208ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000020cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG130", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG130(a) bdk_pciercx_cfg130_t
+#define bustype_BDK_PCIERCX_CFG130(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG130(a) "PCIERCX_CFG130"
+#define busnum_BDK_PCIERCX_CFG130(a) (a)
+#define arguments_BDK_PCIERCX_CFG130(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg131
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Point H3) Register
+ * This register contains the one hundred thirty-second 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg131
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg131_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_com_pt_h3 : 32; /**< [ 31: 0](R/W) Packet compare point fourth DWORD.
+ Specifies which TX TLP header DWORD3 bits to compare
+ with the corresponding bits in PCIERC()_CFG135[EINJ6_COM_VAL_H3].
+ When all specified bits (in the TX TLP header and
+ PCIERC()_CFG135[EINJ6_COM_VAL_H3]) match, an error is inserted into the TLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_com_pt_h3 : 32; /**< [ 31: 0](R/W) Packet compare point fourth DWORD.
+ Specifies which TX TLP header DWORD3 bits to compare
+ with the corresponding bits in PCIERC()_CFG135[EINJ6_COM_VAL_H3].
+ When all specified bits (in the TX TLP header and
+ PCIERC()_CFG135[EINJ6_COM_VAL_H3]) match, an error is inserted into the TLP. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg131_s cn; */
+};
+typedef union bdk_pciercx_cfg131 bdk_pciercx_cfg131_t;
+
+static inline uint64_t BDK_PCIERCX_CFG131(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG131(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000210ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG131", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG131(a) bdk_pciercx_cfg131_t
+#define bustype_BDK_PCIERCX_CFG131(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG131(a) "PCIERCX_CFG131"
+#define busnum_BDK_PCIERCX_CFG131(a) (a)
+#define arguments_BDK_PCIERCX_CFG131(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg132
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Value H0) Register
+ * This register contains the one hundred thirty-third 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg132
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg132_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_com_val_h0 : 32; /**< [ 31: 0](R/W) Packet compare value first DWORD.
+ Specifies the value to compare against the TX TLP header
+ DWORD0 bits specified in PCIERC()_CFG128[EINJ6_COM_PT_H0]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_com_val_h0 : 32; /**< [ 31: 0](R/W) Packet compare value first DWORD.
+ Specifies the value to compare against the TX TLP header
+ DWORD0 bits specified in PCIERC()_CFG128[EINJ6_COM_PT_H0]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg132_s cn; */
+};
+typedef union bdk_pciercx_cfg132 bdk_pciercx_cfg132_t;
+
+static inline uint64_t BDK_PCIERCX_CFG132(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG132(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000214ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG132", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG132(a) bdk_pciercx_cfg132_t
+#define bustype_BDK_PCIERCX_CFG132(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG132(a) "PCIERCX_CFG132"
+#define busnum_BDK_PCIERCX_CFG132(a) (a)
+#define arguments_BDK_PCIERCX_CFG132(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg133
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Value H1) Register
+ * This register contains the one hundred thirty-fourth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg133
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg133_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_com_val_h1 : 32; /**< [ 31: 0](R/W) Packet compare value second DWORD.
+ Specifies the value to compare against the TX TLP header
+ DWORD1 bits specified in PCIERC()_CFG129[EINJ6_COM_PT_H1]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_com_val_h1 : 32; /**< [ 31: 0](R/W) Packet compare value second DWORD.
+ Specifies the value to compare against the TX TLP header
+ DWORD1 bits specified in PCIERC()_CFG129[EINJ6_COM_PT_H1]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg133_s cn; */
+};
+typedef union bdk_pciercx_cfg133 bdk_pciercx_cfg133_t;
+
+static inline uint64_t BDK_PCIERCX_CFG133(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG133(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000218ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG133", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG133(a) bdk_pciercx_cfg133_t
+#define bustype_BDK_PCIERCX_CFG133(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG133(a) "PCIERCX_CFG133"
+#define busnum_BDK_PCIERCX_CFG133(a) (a)
+#define arguments_BDK_PCIERCX_CFG133(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg134
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Value H2) Register
+ * This register contains the one hundred thirty-fifth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg134
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg134_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_com_val_h2 : 32; /**< [ 31: 0](R/W) Packet compare value third DWORD.
+ Specifies the value to compare against the TX TLP header
+ DWORD2 bits specified in PCIERC()_CFG130[EINJ6_COM_PT_H2]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_com_val_h2 : 32; /**< [ 31: 0](R/W) Packet compare value third DWORD.
+ Specifies the value to compare against the TX TLP header
+ DWORD2 bits specified in PCIERC()_CFG130[EINJ6_COM_PT_H2]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg134_s cn; */
+};
+typedef union bdk_pciercx_cfg134 bdk_pciercx_cfg134_t;
+
+static inline uint64_t BDK_PCIERCX_CFG134(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG134(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000021cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG134", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG134(a) bdk_pciercx_cfg134_t
+#define bustype_BDK_PCIERCX_CFG134(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG134(a) "PCIERCX_CFG134"
+#define busnum_BDK_PCIERCX_CFG134(a) (a)
+#define arguments_BDK_PCIERCX_CFG134(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg135
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Value H3) Register
+ * This register contains the one hundred thirty-sixth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg135
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg135_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_com_val_h3 : 32; /**< [ 31: 0](R/W) Packet compare value fourth DWORD.
+ Specifies the value to compare against the TX TLP header
+ DWORD3 bits specified in PCIERC()_CFG131[EINJ6_COM_PT_H3]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_com_val_h3 : 32; /**< [ 31: 0](R/W) Packet compare value fourth DWORD.
+ Specifies the value to compare against the TX TLP header
+ DWORD3 bits specified in PCIERC()_CFG131[EINJ6_COM_PT_H3]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg135_s cn; */
+};
+typedef union bdk_pciercx_cfg135 bdk_pciercx_cfg135_t;
+
+static inline uint64_t BDK_PCIERCX_CFG135(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG135(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000220ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG135", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG135(a) bdk_pciercx_cfg135_t
+#define bustype_BDK_PCIERCX_CFG135(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG135(a) "PCIERCX_CFG135"
+#define busnum_BDK_PCIERCX_CFG135(a) (a)
+#define arguments_BDK_PCIERCX_CFG135(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg136
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Point H0) Register
+ * This register contains the one hundred thirty-seventh 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg136
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg136_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_pt_h0 : 32; /**< [ 31: 0](R/W) Packet change point first DWORD.
+ Specifies which TX TLP header DWORD0 bits to replace
+ with the corresponding bits in PCIERC()_CFG140[EINJ6_CHG_VAL_H0]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_pt_h0 : 32; /**< [ 31: 0](R/W) Packet change point first DWORD.
+ Specifies which TX TLP header DWORD0 bits to replace
+ with the corresponding bits in PCIERC()_CFG140[EINJ6_CHG_VAL_H0]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg136_s cn; */
+};
+typedef union bdk_pciercx_cfg136 bdk_pciercx_cfg136_t;
+
+static inline uint64_t BDK_PCIERCX_CFG136(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG136(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000224ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG136", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG136(a) bdk_pciercx_cfg136_t
+#define bustype_BDK_PCIERCX_CFG136(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG136(a) "PCIERCX_CFG136"
+#define busnum_BDK_PCIERCX_CFG136(a) (a)
+#define arguments_BDK_PCIERCX_CFG136(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg137
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Point H1) Register
+ * This register contains the one hundred thirty-eighth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg137
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg137_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_pt_h1 : 32; /**< [ 31: 0](R/W) Packet change point second DWORD.
+ Specifies which TX TLP header DWORD0 bits to replace
+ with the corresponding bits in PCIERC()_CFG141[EINJ6_CHG_VAL_H1]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_pt_h1 : 32; /**< [ 31: 0](R/W) Packet change point second DWORD.
+ Specifies which TX TLP header DWORD0 bits to replace
+ with the corresponding bits in PCIERC()_CFG141[EINJ6_CHG_VAL_H1]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg137_s cn; */
+};
+typedef union bdk_pciercx_cfg137 bdk_pciercx_cfg137_t;
+
+static inline uint64_t BDK_PCIERCX_CFG137(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG137(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000228ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG137", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG137(a) bdk_pciercx_cfg137_t
+#define bustype_BDK_PCIERCX_CFG137(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG137(a) "PCIERCX_CFG137"
+#define busnum_BDK_PCIERCX_CFG137(a) (a)
+#define arguments_BDK_PCIERCX_CFG137(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg138
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Point H2) Register
+ * This register contains the one hundred thirty-ninth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg138
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg138_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_pt_h2 : 32; /**< [ 31: 0](R/W) Packet change point third DWORD.
+ Specifies which TX TLP header DWORD2 bits to replace
+ with the corresponding bits in PCIERC()_CFG142[EINJ6_CHG_VAL_H2]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_pt_h2 : 32; /**< [ 31: 0](R/W) Packet change point third DWORD.
+ Specifies which TX TLP header DWORD2 bits to replace
+ with the corresponding bits in PCIERC()_CFG142[EINJ6_CHG_VAL_H2]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg138_s cn; */
+};
+typedef union bdk_pciercx_cfg138 bdk_pciercx_cfg138_t;
+
+static inline uint64_t BDK_PCIERCX_CFG138(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG138(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000022cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG138", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG138(a) bdk_pciercx_cfg138_t
+#define bustype_BDK_PCIERCX_CFG138(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG138(a) "PCIERCX_CFG138"
+#define busnum_BDK_PCIERCX_CFG138(a) (a)
+#define arguments_BDK_PCIERCX_CFG138(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg139
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Point H3) Register
+ * This register contains the one hundred fortieth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg139
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg139_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_pt_h3 : 32; /**< [ 31: 0](R/W) Packet change point first DWORD.
+ Specifies which TX TLP header DWORD3 bits to replace
+ with the corresponding bits in PCIERC()_CFG143[EINJ6_CHG_VAL_H3]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_pt_h3 : 32; /**< [ 31: 0](R/W) Packet change point first DWORD.
+ Specifies which TX TLP header DWORD3 bits to replace
+ with the corresponding bits in PCIERC()_CFG143[EINJ6_CHG_VAL_H3]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg139_s cn; */
+};
+typedef union bdk_pciercx_cfg139 bdk_pciercx_cfg139_t;
+
+static inline uint64_t BDK_PCIERCX_CFG139(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG139(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000230ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG139", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG139(a) bdk_pciercx_cfg139_t
+#define bustype_BDK_PCIERCX_CFG139(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG139(a) "PCIERCX_CFG139"
+#define busnum_BDK_PCIERCX_CFG139(a) (a)
+#define arguments_BDK_PCIERCX_CFG139(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg140
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Value H0) Register
+ * This register contains the one hundred forty-first 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg140
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg140_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_val_h0 : 32; /**< [ 31: 0](R/W) Packet change value first DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD0 bits defined in the PCIERC()_CFG136[EINJ6_CHG_PT_H0].
+ Only applies when PCIERC()_CFG144[EINJ6_INV_CNTL] is not set. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_val_h0 : 32; /**< [ 31: 0](R/W) Packet change value first DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD0 bits defined in the PCIERC()_CFG136[EINJ6_CHG_PT_H0].
+ Only applies when PCIERC()_CFG144[EINJ6_INV_CNTL] is not set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg140_s cn; */
+};
+typedef union bdk_pciercx_cfg140 bdk_pciercx_cfg140_t;
+
+static inline uint64_t BDK_PCIERCX_CFG140(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG140(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000234ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG140", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG140(a) bdk_pciercx_cfg140_t
+#define bustype_BDK_PCIERCX_CFG140(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG140(a) "PCIERCX_CFG140"
+#define busnum_BDK_PCIERCX_CFG140(a) (a)
+#define arguments_BDK_PCIERCX_CFG140(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg141
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Value H1) Register
+ * This register contains the one hundred forty-second 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg141
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg141_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_val_h1 : 32; /**< [ 31: 0](R/W) Packet change value second DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD1 bits defined in the PCIERC()_CFG137[EINJ6_CHG_PT_H1].
+ Only applies when PCIERC()_CFG144[EINJ6_INV_CNTL] is not set. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_val_h1 : 32; /**< [ 31: 0](R/W) Packet change value second DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD1 bits defined in the PCIERC()_CFG137[EINJ6_CHG_PT_H1].
+ Only applies when PCIERC()_CFG144[EINJ6_INV_CNTL] is not set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg141_s cn; */
+};
+typedef union bdk_pciercx_cfg141 bdk_pciercx_cfg141_t;
+
+static inline uint64_t BDK_PCIERCX_CFG141(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG141(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000238ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG141", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG141(a) bdk_pciercx_cfg141_t
+#define bustype_BDK_PCIERCX_CFG141(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG141(a) "PCIERCX_CFG141"
+#define busnum_BDK_PCIERCX_CFG141(a) (a)
+#define arguments_BDK_PCIERCX_CFG141(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg142
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Value H2) Register
+ * This register contains the one hundred forty-third 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg142
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg142_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_val_h2 : 32; /**< [ 31: 0](R/W) Packet change value third DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD2 bits defined in the PCIERC()_CFG138[EINJ6_CHG_PT_H2].
+ Only applies when PCIERC()_CFG144[EINJ6_INV_CNTL] is not set." */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_val_h2 : 32; /**< [ 31: 0](R/W) Packet change value third DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD2 bits defined in the PCIERC()_CFG138[EINJ6_CHG_PT_H2].
+ Only applies when PCIERC()_CFG144[EINJ6_INV_CNTL] is not set." */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg142_s cn; */
+};
+typedef union bdk_pciercx_cfg142 bdk_pciercx_cfg142_t;
+
+static inline uint64_t BDK_PCIERCX_CFG142(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG142(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000023cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG142", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG142(a) bdk_pciercx_cfg142_t
+#define bustype_BDK_PCIERCX_CFG142(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG142(a) "PCIERCX_CFG142"
+#define busnum_BDK_PCIERCX_CFG142(a) (a)
+#define arguments_BDK_PCIERCX_CFG142(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg143
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Value H3) Register
+ * This register contains the one hundred forty-forth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg143
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg143_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_val_h3 : 32; /**< [ 31: 0](R/W) Packet change value fourth DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD3 bits defined in the PCIERC()_CFG139[EINJ6_CHG_PT_H3].
+ Only applies when PCIERC()_CFG144[EINJ6_INV_CNTL] is not set. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_val_h3 : 32; /**< [ 31: 0](R/W) Packet change value fourth DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD3 bits defined in the PCIERC()_CFG139[EINJ6_CHG_PT_H3].
+ Only applies when PCIERC()_CFG144[EINJ6_INV_CNTL] is not set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg143_s cn; */
+};
+typedef union bdk_pciercx_cfg143 bdk_pciercx_cfg143_t;
+
+static inline uint64_t BDK_PCIERCX_CFG143(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG143(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000240ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG143", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG143(a) bdk_pciercx_cfg143_t
+#define bustype_BDK_PCIERCX_CFG143(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG143(a) "PCIERCX_CFG143"
+#define busnum_BDK_PCIERCX_CFG143(a) (a)
+#define arguments_BDK_PCIERCX_CFG143(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg144
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Packet Error) Register
+ * This register contains the one hundred forty-fifth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg144
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg144_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t einj6_pkt_typ : 3; /**< [ 11: 9](R/W) Packet type. Selects the TLP packets to inject errors into.
+
+ 0x0 = TLP Header.
+ 0x1 = TLP Prefix 1st 4-DWORDs.
+ 0x2 = TLP Prefix 2nd 4-DWORDs.
+ 0x3 - 0x7 = Reserved. */
+ uint32_t einj6_inv_cntrl : 1; /**< [ 8: 8](R/W) Inverted error injection control.
+
+ 0x0 = EINJ6_CHG_VAL_H[0/1/2/3] is used to replace bits specified by
+ EINJ6_CHG_PT_H[0/1/2/3].
+ 0x1 = EINJ6_CHG_VAL_H[0/1/2/3] is ignored and inverts bits specified by
+ EINJ6_CHG_PT_H[0/1/2/3]. */
+ uint32_t einj6_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC()_CFG122[EINJ6_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC()_CFG122[EINJ6_EN] is set,
+ errors are inserted until PCIERC()_CFG122[EINJ6_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC()_CFG122[EINJ6_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC()_CFG122[EINJ6_EN] is set,
+ errors are inserted until PCIERC()_CFG122[EINJ6_EN] is cleared. */
+ uint32_t einj6_inv_cntrl : 1; /**< [ 8: 8](R/W) Inverted error injection control.
+
+ 0x0 = EINJ6_CHG_VAL_H[0/1/2/3] is used to replace bits specified by
+ EINJ6_CHG_PT_H[0/1/2/3].
+ 0x1 = EINJ6_CHG_VAL_H[0/1/2/3] is ignored and inverts bits specified by
+ EINJ6_CHG_PT_H[0/1/2/3]. */
+ uint32_t einj6_pkt_typ : 3; /**< [ 11: 9](R/W) Packet type. Selects the TLP packets to inject errors into.
+
+ 0x0 = TLP Header.
+ 0x1 = TLP Prefix 1st 4-DWORDs.
+ 0x2 = TLP Prefix 2nd 4-DWORDs.
+ 0x3 - 0x7 = Reserved. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg144_s cn; */
+};
+typedef union bdk_pciercx_cfg144 bdk_pciercx_cfg144_t;
+
+static inline uint64_t BDK_PCIERCX_CFG144(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG144(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000244ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG144", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG144(a) bdk_pciercx_cfg144_t
+#define bustype_BDK_PCIERCX_CFG144(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG144(a) "PCIERCX_CFG144"
+#define busnum_BDK_PCIERCX_CFG144(a) (a)
+#define arguments_BDK_PCIERCX_CFG144(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg149
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Control 1 Register
+ * This register contains the one hundred fiftyith 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg149
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg149_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t lp_intv : 2; /**< [ 23: 22](R/W) Low power entry interval time.
+ Interval time that the core starts monitoring RXELECIDLE
+ signal after L0s/L1/L2 entry. You should set the value
+ according to the latency from receiving EIOS to,
+ RXELECIDLE assertion at the PHY
+
+ 0x0 = 40ns.
+ 0x1 = 160ns.
+ 0x2 = 320ns.
+ 0x3 - 640ns. */
+ uint32_t tx_eios_num : 2; /**< [ 21: 20](R/W) Number of TX EIOS.
+ This register sets the number of transmit EIOS for L0s/L1
+ entry and disable/loopback/hot-reset exit. The core selects
+ the greater value between this register and the value defined
+ by the PCI-SIG specification.
+
+ Gen1 or Gen3
+ 0x0 = 1.
+ 0x1 = 4.
+ 0x2 = 8.
+ 0x3 - 16.
+
+ Gen2
+ 0x0 = 2.
+ 0x1 = 8.
+ 0x2 = 16.
+ 0x3 - 32. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t force_detect_lane_en : 1; /**< [ 16: 16](R/W) Force detect lane enable.
+ When this bit is set, the core ignores receiver detection from
+ PHY during LTSSM detect state and uses
+ [FORCE_DETECT_LANE]. */
+ uint32_t force_detect_lane : 16; /**< [ 15: 0](R/W) Force detect lane.
+ When set, the core
+ ignores receiver detection from PHY during LTSSM detect
+ state and uses this value instead.
+ 0x0 = Lane0.
+ 0x1 = Lane1.
+ 0x2 = Lane2.
+
+ 0x7 = Lane7. */
+#else /* Word 0 - Little Endian */
+ uint32_t force_detect_lane : 16; /**< [ 15: 0](R/W) Force detect lane.
+ When set, the core
+ ignores receiver detection from PHY during LTSSM detect
+ state and uses this value instead.
+ 0x0 = Lane0.
+ 0x1 = Lane1.
+ 0x2 = Lane2.
+
+ 0x7 = Lane7. */
+ uint32_t force_detect_lane_en : 1; /**< [ 16: 16](R/W) Force detect lane enable.
+ When this bit is set, the core ignores receiver detection from
+ PHY during LTSSM detect state and uses
+ [FORCE_DETECT_LANE]. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t tx_eios_num : 2; /**< [ 21: 20](R/W) Number of TX EIOS.
+ This register sets the number of transmit EIOS for L0s/L1
+ entry and disable/loopback/hot-reset exit. The core selects
+ the greater value between this register and the value defined
+ by the PCI-SIG specification.
+
+ Gen1 or Gen3
+ 0x0 = 1.
+ 0x1 = 4.
+ 0x2 = 8.
+ 0x3 - 16.
+
+ Gen2
+ 0x0 = 2.
+ 0x1 = 8.
+ 0x2 = 16.
+ 0x3 - 32. */
+ uint32_t lp_intv : 2; /**< [ 23: 22](R/W) Low power entry interval time.
+ Interval time that the core starts monitoring RXELECIDLE
+ signal after L0s/L1/L2 entry. You should set the value
+ according to the latency from receiving EIOS to,
+ RXELECIDLE assertion at the PHY
+
+ 0x0 = 40ns.
+ 0x1 = 160ns.
+ 0x2 = 320ns.
+ 0x3 - 640ns. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg149_s cn; */
+};
+typedef union bdk_pciercx_cfg149 bdk_pciercx_cfg149_t;
+
+static inline uint64_t BDK_PCIERCX_CFG149(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG149(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000258ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG149", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG149(a) bdk_pciercx_cfg149_t
+#define bustype_BDK_PCIERCX_CFG149(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG149(a) "PCIERCX_CFG149"
+#define busnum_BDK_PCIERCX_CFG149(a) (a)
+#define arguments_BDK_PCIERCX_CFG149(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg150
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Control 2 Register
+ * This register contains the one hundred fifty-first 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg150
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg150_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_17_31 : 15;
+ uint32_t fr_err_rcvy_dis : 1; /**< [ 16: 16](R/W) Framing error recovery disable.
+ This bit disables a transition to recovery state when a framing
+ error has occurred. */
+ uint32_t reserved_11_15 : 5;
+ uint32_t dir_lpbslv_to_exit : 1; /**< [ 10: 10](R/W) Direct loopback slave to exit.
+ When set and the LTSSM is in loopback slave active state,
+ the LTSSM transitions to the loopback slave exit state. */
+ uint32_t dir_polcmp_to_det : 1; /**< [ 9: 9](R/W) Direct Polling.Compliance to detect.
+ When this bit is set and the LTSSM is in polling compliance
+ state, the LTSSM transitions to detect state. */
+ uint32_t dir_recidle_config : 1; /**< [ 8: 8](R/W) Direct Recovery.Idle to configuration.
+ When this bit is set and the LTSSM is in recovery idle state,
+ the LTSSM transitions to configuration state. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t noack_force_lnkdn : 1; /**< [ 2: 2](R/W) Force link down.
+ When this bit is set and the core detects REPLY_NUM rolling
+ over 4 times, the LTSSM transitions to detect state. */
+ uint32_t rcry_req : 1; /**< [ 1: 1](WO) Recovery request.
+ When this bit is set in L0 or L0s, the LTSSM starts
+ transitioning to recovery state. This request does not cause
+ a speed change or re-equalization. This bit always reads
+ a zero. */
+ uint32_t hold_ltssm : 1; /**< [ 0: 0](R/W) Hold and release LTSSM.
+ For as long as this is set, the core stays in the current
+ LTSSM. */
+#else /* Word 0 - Little Endian */
+ uint32_t hold_ltssm : 1; /**< [ 0: 0](R/W) Hold and release LTSSM.
+ For as long as this is set, the core stays in the current
+ LTSSM. */
+ uint32_t rcry_req : 1; /**< [ 1: 1](WO) Recovery request.
+ When this bit is set in L0 or L0s, the LTSSM starts
+ transitioning to recovery state. This request does not cause
+ a speed change or re-equalization. This bit always reads
+ a zero. */
+ uint32_t noack_force_lnkdn : 1; /**< [ 2: 2](R/W) Force link down.
+ When this bit is set and the core detects REPLY_NUM rolling
+ over 4 times, the LTSSM transitions to detect state. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t dir_recidle_config : 1; /**< [ 8: 8](R/W) Direct Recovery.Idle to configuration.
+ When this bit is set and the LTSSM is in recovery idle state,
+ the LTSSM transitions to configuration state. */
+ uint32_t dir_polcmp_to_det : 1; /**< [ 9: 9](R/W) Direct Polling.Compliance to detect.
+ When this bit is set and the LTSSM is in polling compliance
+ state, the LTSSM transitions to detect state. */
+ uint32_t dir_lpbslv_to_exit : 1; /**< [ 10: 10](R/W) Direct loopback slave to exit.
+ When set and the LTSSM is in loopback slave active state,
+ the LTSSM transitions to the loopback slave exit state. */
+ uint32_t reserved_11_15 : 5;
+ uint32_t fr_err_rcvy_dis : 1; /**< [ 16: 16](R/W) Framing error recovery disable.
+ This bit disables a transition to recovery state when a framing
+ error has occurred. */
+ uint32_t reserved_17_31 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg150_s cn; */
+};
+typedef union bdk_pciercx_cfg150 bdk_pciercx_cfg150_t;
+
+static inline uint64_t BDK_PCIERCX_CFG150(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG150(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000025cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG150", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG150(a) bdk_pciercx_cfg150_t
+#define bustype_BDK_PCIERCX_CFG150(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG150(a) "PCIERCX_CFG150"
+#define busnum_BDK_PCIERCX_CFG150(a) (a)
+#define arguments_BDK_PCIERCX_CFG150(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg153
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L1Lane Register
+ * This register contains the one hundred fifty-forth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg153
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg153_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t deskew_ptr : 8; /**< [ 31: 24](RO/H) Deskew pointer.
+ Indicates deskew pointer of internal deskew buffer of
+ selected lane number (LANE_SELECT). */
+ uint32_t reserved_21_23 : 3;
+ uint32_t pipe_txelecidle : 1; /**< [ 20: 20](RO/H) PIPE:TxElecIdle.
+ Indicates PIPE TXELECIDLE signal of selected lane
+ number ([LANE_SELECT]). */
+ uint32_t pipe_rxelecidle : 1; /**< [ 19: 19](RO/H) PIPE:RxElecIdle.
+ Indicates PIPE RXELECIDLE signal of selected lane
+ number ([LANE_SELECT]). */
+ uint32_t pipe_rxvalid : 1; /**< [ 18: 18](RO/H) PIPE:RxValid.
+ Indicates PIPE RXVALID signal of selected lane
+ number ([LANE_SELECT]). */
+ uint32_t pipe_det_lane : 1; /**< [ 17: 17](RO/H) PIPE:Detect Lane.
+ Indicates whether PHY indicates receiver detection or not on
+ selected lane number ([LANE_SELECT]). */
+ uint32_t pipe_rxpol : 1; /**< [ 16: 16](RO/H) PIPE:RxPolarity.
+ Indicates PIPE RXPOLARITY signal of selected lane
+ number ([LANE_SELECT]). */
+ uint32_t reserved_4_15 : 12;
+ uint32_t lane_select : 4; /**< [ 3: 0](R/W) Lane select.
+ Lane select register for silicon debug status register of
+ Layer1-PerLane.
+ 0x0 = Lane0.
+ 0x1 = Lane1.
+ 0x2 = Lane2.
+
+ 0x7 = Lane7.
+ 0x8-0xF = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t lane_select : 4; /**< [ 3: 0](R/W) Lane select.
+ Lane select register for silicon debug status register of
+ Layer1-PerLane.
+ 0x0 = Lane0.
+ 0x1 = Lane1.
+ 0x2 = Lane2.
+
+ 0x7 = Lane7.
+ 0x8-0xF = Reserved. */
+ uint32_t reserved_4_15 : 12;
+ uint32_t pipe_rxpol : 1; /**< [ 16: 16](RO/H) PIPE:RxPolarity.
+ Indicates PIPE RXPOLARITY signal of selected lane
+ number ([LANE_SELECT]). */
+ uint32_t pipe_det_lane : 1; /**< [ 17: 17](RO/H) PIPE:Detect Lane.
+ Indicates whether PHY indicates receiver detection or not on
+ selected lane number ([LANE_SELECT]). */
+ uint32_t pipe_rxvalid : 1; /**< [ 18: 18](RO/H) PIPE:RxValid.
+ Indicates PIPE RXVALID signal of selected lane
+ number ([LANE_SELECT]). */
+ uint32_t pipe_rxelecidle : 1; /**< [ 19: 19](RO/H) PIPE:RxElecIdle.
+ Indicates PIPE RXELECIDLE signal of selected lane
+ number ([LANE_SELECT]). */
+ uint32_t pipe_txelecidle : 1; /**< [ 20: 20](RO/H) PIPE:TxElecIdle.
+ Indicates PIPE TXELECIDLE signal of selected lane
+ number ([LANE_SELECT]). */
+ uint32_t reserved_21_23 : 3;
+ uint32_t deskew_ptr : 8; /**< [ 31: 24](RO/H) Deskew pointer.
+ Indicates deskew pointer of internal deskew buffer of
+ selected lane number (LANE_SELECT). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg153_s cn; */
+};
+typedef union bdk_pciercx_cfg153 bdk_pciercx_cfg153_t;
+
+static inline uint64_t BDK_PCIERCX_CFG153(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG153(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000268ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG153", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG153(a) bdk_pciercx_cfg153_t
+#define bustype_BDK_PCIERCX_CFG153(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG153(a) "PCIERCX_CFG153"
+#define busnum_BDK_PCIERCX_CFG153(a) (a)
+#define arguments_BDK_PCIERCX_CFG153(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg154
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L1LTSSM Register
+ * This register contains the one hundred fifty-fifth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg154
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg154_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ltssm_var : 16; /**< [ 31: 16](RO/H) LTSSM variable.
+ Indicates internal LTSSM variables defined in the PCI
+ Express base specification.
+ 0x0 = directed_speed change.
+ 0x1 = changed_speed_recovery.
+ 0x2 = successful_speed_negotiation.
+ 0x3 = upconfigure_capable; Set to 1 if both ports advertised
+ the UpConfigure capability in the last Config.Complete.
+ 0x4 = select_deemphasis.
+ 0x5 = start_equalization_w_preset.
+ 0x6 = equalization_done_8GT_data_rate.
+ 0x7 = equalization_done_16GT_data_rate.
+ 0x8-0xF = idle_to_rlock_transitioned. */
+ uint32_t lane_rev : 1; /**< [ 15: 15](RO/H) Lane reversal operation.
+ Receiver detected lane reversal. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t pipe_pwr_dwn : 3; /**< [ 10: 8](RO/H) PIPE:PowerDown.
+ Indicates PIPE PowerDown signal. */
+ uint32_t framing_err : 1; /**< [ 7: 7](R/W1C) Framing error.
+ Indicates framing error detection status. */
+ uint32_t framing_err_ptr : 7; /**< [ 6: 0](RO) First framing error pointer.
+ Identifies the first framing error using the following
+ encoding. The field contents are only valid value when
+ FRAMING_ERR =1.
+
+ Received unexpected framing token
+ 0x1 = When non-STP/SDP/IDL token was received and it
+ was not in TLP/DLLP reception.
+ 0x02 = When current token was not a valid EDB token and
+ previous token was an EDB. (128/256 bit core only).
+ 0x03 = When SDP token was received but not expected.
+ 0x04 = When STP token was received but not expected.
+ 0x05 = When EDS token was expected but not received or
+ whenever an EDS token was received but not expected.
+ 0x06 = When a framing error was detected in the deskew
+ block while a packet has been in progress in token_finder.
+ Received Unexpected STP Token
+ 0x11 = When framing CRC in STP token did not match.
+ 0x12 = When framing parity in STP token did not match.
+ 0x13 = When framing TLP length in STP token was
+ smaller than 5 DWORDs.
+
+ Received unexpected block
+ 0x21 = When receiving an OS block following SDS in datastream state.n.
+ 0x22 = When data block followed by OS block different
+ from SKP, EI, EIE in Datastream state.
+ 0x23 = When block with an undefined block type in datastream state.
+ 0x24 = When data stream without data over three cycles in datastream state.
+ 0x25 = When OS block during data stream in datastream state.
+ 0x26 = When RxStatus error was detected in datastream state.
+ 0x27 = When not all active lanes receiving SKP OS starting
+ at same cycle time in SKPOS state.
+ 0x28 = When a 2-block timeout occurs for SKP OS in SKPOS state.
+ 0x29 = When receiving consecutive OS blocks within a data stream in SKPOS state.n.
+ 0x2A = When Phy status error was detected in SKPOS state.
+ 0x2B = When not all active lanes receiving EIOS starting at
+ same cycle time in EIOS state.
+ 0x2C = When at least one symbol from the first 4 symbols
+ is not EIOS Symbol in EIOS state (CX_NB=2 only).
+ 0x2D = When not all active lanes receiving EIEOS starting
+ at same cycle time in EIEOS state.
+ 0x2E = When not full 16 eieos symbols are received in EIEOS state.
+
+ All other values not listed above are reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t framing_err_ptr : 7; /**< [ 6: 0](RO) First framing error pointer.
+ Identifies the first framing error using the following
+ encoding. The field contents are only valid value when
+ FRAMING_ERR =1.
+
+ Received unexpected framing token
+ 0x1 = When non-STP/SDP/IDL token was received and it
+ was not in TLP/DLLP reception.
+ 0x02 = When current token was not a valid EDB token and
+ previous token was an EDB. (128/256 bit core only).
+ 0x03 = When SDP token was received but not expected.
+ 0x04 = When STP token was received but not expected.
+ 0x05 = When EDS token was expected but not received or
+ whenever an EDS token was received but not expected.
+ 0x06 = When a framing error was detected in the deskew
+ block while a packet has been in progress in token_finder.
+ Received Unexpected STP Token
+ 0x11 = When framing CRC in STP token did not match.
+ 0x12 = When framing parity in STP token did not match.
+ 0x13 = When framing TLP length in STP token was
+ smaller than 5 DWORDs.
+
+ Received unexpected block
+ 0x21 = When receiving an OS block following SDS in datastream state.n.
+ 0x22 = When data block followed by OS block different
+ from SKP, EI, EIE in Datastream state.
+ 0x23 = When block with an undefined block type in datastream state.
+ 0x24 = When data stream without data over three cycles in datastream state.
+ 0x25 = When OS block during data stream in datastream state.
+ 0x26 = When RxStatus error was detected in datastream state.
+ 0x27 = When not all active lanes receiving SKP OS starting
+ at same cycle time in SKPOS state.
+ 0x28 = When a 2-block timeout occurs for SKP OS in SKPOS state.
+ 0x29 = When receiving consecutive OS blocks within a data stream in SKPOS state.n.
+ 0x2A = When Phy status error was detected in SKPOS state.
+ 0x2B = When not all active lanes receiving EIOS starting at
+ same cycle time in EIOS state.
+ 0x2C = When at least one symbol from the first 4 symbols
+ is not EIOS Symbol in EIOS state (CX_NB=2 only).
+ 0x2D = When not all active lanes receiving EIEOS starting
+ at same cycle time in EIEOS state.
+ 0x2E = When not full 16 eieos symbols are received in EIEOS state.
+
+ All other values not listed above are reserved. */
+ uint32_t framing_err : 1; /**< [ 7: 7](R/W1C) Framing error.
+ Indicates framing error detection status. */
+ uint32_t pipe_pwr_dwn : 3; /**< [ 10: 8](RO/H) PIPE:PowerDown.
+ Indicates PIPE PowerDown signal. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t lane_rev : 1; /**< [ 15: 15](RO/H) Lane reversal operation.
+ Receiver detected lane reversal. */
+ uint32_t ltssm_var : 16; /**< [ 31: 16](RO/H) LTSSM variable.
+ Indicates internal LTSSM variables defined in the PCI
+ Express base specification.
+ 0x0 = directed_speed change.
+ 0x1 = changed_speed_recovery.
+ 0x2 = successful_speed_negotiation.
+ 0x3 = upconfigure_capable; Set to 1 if both ports advertised
+ the UpConfigure capability in the last Config.Complete.
+ 0x4 = select_deemphasis.
+ 0x5 = start_equalization_w_preset.
+ 0x6 = equalization_done_8GT_data_rate.
+ 0x7 = equalization_done_16GT_data_rate.
+ 0x8-0xF = idle_to_rlock_transitioned. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg154_s cn; */
+};
+typedef union bdk_pciercx_cfg154 bdk_pciercx_cfg154_t;
+
+static inline uint64_t BDK_PCIERCX_CFG154(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG154(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000026cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG154", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG154(a) bdk_pciercx_cfg154_t
+#define bustype_BDK_PCIERCX_CFG154(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG154(a) "PCIERCX_CFG154"
+#define busnum_BDK_PCIERCX_CFG154(a) (a)
+#define arguments_BDK_PCIERCX_CFG154(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg155
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status PM Register
+ * This register contains the one hundred fifty-sixth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg155
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg155_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t latched_nfts : 8; /**< [ 23: 16](RO/H) Latched N_FTS.
+ Indicates the value of N_FTS in the received TS ordered
+ sets from the link partner. */
+ uint32_t l1sub_state : 3; /**< [ 15: 13](RO/H) Indicates the internal L1Sub state machine state.
+ Internal:
+ 0x0 = Idle state.
+ 0x1 = Wait for aux_clk_active.
+ 0x2 = Wait for pclkack.
+ 0x3 = Wait for clkreq.
+ 0x4 = Check clkreq_in_n is de-asserted for t_power_off time.
+ 0x5 = L1 substate, turn off txcommonmode circuits (L1.2 only)
+ and rx electrical idle detection circuits.
+ 0x6 = Locally/remotely initiated exit, assert pclkreq, wait for pclkack.
+ 0x7 = Wait for pclkack when aborting an attempt to enter L1_N. */
+ uint32_t pme_rsnd_flag : 1; /**< [ 12: 12](RO) PME re-send flag.
+ When the DUT sends a PM_PME message TLP, the DUT
+ sets PME_Status bit. If host software does not clear
+ PME_Status bit for 100ms (+50%/-5%), the DUT resends the
+ PM_PME message. This bit indicates that a PM_PME was
+ resent. */
+ uint32_t int_pm_sstate : 4; /**< [ 11: 8](RO/H) Internal PM state (slave).
+ Indicates internal state machine of power management
+ slave controller.
+ 0x00 = S_IDLE.
+ 0x01 = S_RESPOND_NAK.
+ 0x02 = S_BLOCK_TLP.
+ 0x03 = S_WAIT_LAST_TLP_ACK.
+ 0x04 = S_WAIT_EIDLE.
+ 0x08 = S_LINK_ENTR_L1.
+ 0x09 = S_L1.
+ 0x0A = S_L1_EXIT.
+ 0x0B = S_L23RDY.
+ 0x0C = S_LINK_ENTR_L23.
+ 0x0D = S_L23RDY_WAIT4ALIVE.
+ 0x0F = S_L23RDY_WAIT4IDLE.
+ 0x10 = S_WAIT_LAST_PMDLLP.
+ 0x11-0x1F = Reserved. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t int_pm_mstate : 5; /**< [ 4: 0](RO/H) Internal PM state (master).
+ Indicates internal state machine of power management
+ master controller.
+ 0x00 = IDLE.
+ 0x01 = L0.
+ 0x02 = L0S.
+ 0x03 = ENTER_L0S.
+ 0x04 = L0S_EXIT.
+ 0x08 = L1.
+ 0x09 = L1_BLOCK_TLP.
+ 0x0A = L1_WAIT_LAST_TLP_ACK.
+ 0x0B = L1_WAIT_PMDLLP_ACK.
+ 0x0C = L1_LINK_ENTR_L1.
+ 0x0D = L1_EXIT.
+ 0x0F = PREP_4L1.
+ 0x10 = L23_BLOCK_TLP.
+ 0x11 = L23_WAIT_LAST_TLP_ACK.
+ 0x12 = L23_WAIT_PMDLLP_ACK.
+ 0x13 = L23_ENTR_L23.
+ 0x14 = L23RDY.
+ 0x15 = PREP_4L23.
+ 0x16 = L23RDY_WAIT4ALIVE.
+ 0x17 = L0S_BLOCK_TLP.
+ 0x18 = WAIT_LAST_PMDLLP.
+ 0x19 = WAIT_DSTATE_UPDATE.
+ 0x1A-0x1F = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t int_pm_mstate : 5; /**< [ 4: 0](RO/H) Internal PM state (master).
+ Indicates internal state machine of power management
+ master controller.
+ 0x00 = IDLE.
+ 0x01 = L0.
+ 0x02 = L0S.
+ 0x03 = ENTER_L0S.
+ 0x04 = L0S_EXIT.
+ 0x08 = L1.
+ 0x09 = L1_BLOCK_TLP.
+ 0x0A = L1_WAIT_LAST_TLP_ACK.
+ 0x0B = L1_WAIT_PMDLLP_ACK.
+ 0x0C = L1_LINK_ENTR_L1.
+ 0x0D = L1_EXIT.
+ 0x0F = PREP_4L1.
+ 0x10 = L23_BLOCK_TLP.
+ 0x11 = L23_WAIT_LAST_TLP_ACK.
+ 0x12 = L23_WAIT_PMDLLP_ACK.
+ 0x13 = L23_ENTR_L23.
+ 0x14 = L23RDY.
+ 0x15 = PREP_4L23.
+ 0x16 = L23RDY_WAIT4ALIVE.
+ 0x17 = L0S_BLOCK_TLP.
+ 0x18 = WAIT_LAST_PMDLLP.
+ 0x19 = WAIT_DSTATE_UPDATE.
+ 0x1A-0x1F = Reserved. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t int_pm_sstate : 4; /**< [ 11: 8](RO/H) Internal PM state (slave).
+ Indicates internal state machine of power management
+ slave controller.
+ 0x00 = S_IDLE.
+ 0x01 = S_RESPOND_NAK.
+ 0x02 = S_BLOCK_TLP.
+ 0x03 = S_WAIT_LAST_TLP_ACK.
+ 0x04 = S_WAIT_EIDLE.
+ 0x08 = S_LINK_ENTR_L1.
+ 0x09 = S_L1.
+ 0x0A = S_L1_EXIT.
+ 0x0B = S_L23RDY.
+ 0x0C = S_LINK_ENTR_L23.
+ 0x0D = S_L23RDY_WAIT4ALIVE.
+ 0x0F = S_L23RDY_WAIT4IDLE.
+ 0x10 = S_WAIT_LAST_PMDLLP.
+ 0x11-0x1F = Reserved. */
+ uint32_t pme_rsnd_flag : 1; /**< [ 12: 12](RO) PME re-send flag.
+ When the DUT sends a PM_PME message TLP, the DUT
+ sets PME_Status bit. If host software does not clear
+ PME_Status bit for 100ms (+50%/-5%), the DUT resends the
+ PM_PME message. This bit indicates that a PM_PME was
+ resent. */
+ uint32_t l1sub_state : 3; /**< [ 15: 13](RO/H) Indicates the internal L1Sub state machine state.
+ Internal:
+ 0x0 = Idle state.
+ 0x1 = Wait for aux_clk_active.
+ 0x2 = Wait for pclkack.
+ 0x3 = Wait for clkreq.
+ 0x4 = Check clkreq_in_n is de-asserted for t_power_off time.
+ 0x5 = L1 substate, turn off txcommonmode circuits (L1.2 only)
+ and rx electrical idle detection circuits.
+ 0x6 = Locally/remotely initiated exit, assert pclkreq, wait for pclkack.
+ 0x7 = Wait for pclkack when aborting an attempt to enter L1_N. */
+ uint32_t latched_nfts : 8; /**< [ 23: 16](RO/H) Latched N_FTS.
+ Indicates the value of N_FTS in the received TS ordered
+ sets from the link partner. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg155_s cn; */
+};
+typedef union bdk_pciercx_cfg155 bdk_pciercx_cfg155_t;
+
+/* Byte address of PCIERC(a)_CFG155; valid only on CN83XX with a <= 3,
+   any other (model, index) combination raises a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_CFG155(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG155(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000270ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG155", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG155(a) bdk_pciercx_cfg155_t
+#define bustype_BDK_PCIERCX_CFG155(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG155(a) "PCIERCX_CFG155"
+#define busnum_BDK_PCIERCX_CFG155(a) (a)
+#define arguments_BDK_PCIERCX_CFG155(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg156
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L2 Register
+ * This register contains the one hundred fifty-seventh 32-bits of PCIe type 0 configuration space.
+ * All fields are read-only data link layer status (flow-control init state,
+ * DLCMSM, and ACK/NAK sequence numbers).
+ */
+union bdk_pciercx_cfg156
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg156_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t fc_init2 : 1; /**< [ 27: 27](RO) FC_INIT2. Indicates the core is in FC_INIT2(VC0) state. */
+ uint32_t fc_init1 : 1; /**< [ 26: 26](RO) FC_INIT1. Indicates the core is in FC_INIT1(VC0) state. */
+ uint32_t dlcmsm : 2; /**< [ 25: 24](RO/H) DLCMSM.
+ Indicates the current DLCMSM.
+ 0x0 = DL_INACTIVE.
+ 0x1 = DL_FC_INIT.
+ 0x2 = Reserved.
+ 0x3 = DL_ACTIVE. */
+ uint32_t rx_ack_seq_no : 12; /**< [ 23: 12](RO/H) RX ACK sequence number.
+ Indicates ACKD_SEQ which is updated by receiving
+ ACK/NAK DLLP. */
+ uint32_t tx_ack_seq_no : 12; /**< [ 11: 0](RO/H) TX ACK sequence number.
+ Indicates next transmit sequence number for transmit TLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t tx_ack_seq_no : 12; /**< [ 11: 0](RO/H) TX ACK sequence number.
+ Indicates next transmit sequence number for transmit TLP. */
+ uint32_t rx_ack_seq_no : 12; /**< [ 23: 12](RO/H) RX ACK sequence number.
+ Indicates ACKD_SEQ which is updated by receiving
+ ACK/NAK DLLP. */
+ uint32_t dlcmsm : 2; /**< [ 25: 24](RO/H) DLCMSM.
+ Indicates the current DLCMSM.
+ 0x0 = DL_INACTIVE.
+ 0x1 = DL_FC_INIT.
+ 0x2 = Reserved.
+ 0x3 = DL_ACTIVE. */
+ uint32_t fc_init1 : 1; /**< [ 26: 26](RO) FC_INIT1. Indicates the core is in FC_INIT1(VC0) state. */
+ uint32_t fc_init2 : 1; /**< [ 27: 27](RO) FC_INIT2. Indicates the core is in FC_INIT2(VC0) state. */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg156_s cn; */
+};
+typedef union bdk_pciercx_cfg156 bdk_pciercx_cfg156_t;
+
+/* Byte address of PCIERC(a)_CFG156; valid only on CN83XX with a <= 3,
+   any other (model, index) combination raises a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_CFG156(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG156(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000274ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG156", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG156(a) bdk_pciercx_cfg156_t
+#define bustype_BDK_PCIERCX_CFG156(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG156(a) "PCIERCX_CFG156"
+#define busnum_BDK_PCIERCX_CFG156(a) (a)
+#define arguments_BDK_PCIERCX_CFG156(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg157
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L2 Register
+ * This register contains the one hundred fifty-eighth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg157
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg157_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t credit_data1 : 12; /**< [ 31: 20](RO/H) Credit data 1.
+ Current FC credit data selected by the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields.
+ RX = Credit allocated value.
+ TX = Credit limit value. This value is valid when DLCMSM=0x3(DL_ACTIVE). */
+ uint32_t credit_data0 : 12; /**< [ 19: 8](RO/H) Credit data 0.
+ Current FC credit data selected by the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields.
+ RX = Credit received value.
+ TX = Credit consumed value. */
+ uint32_t reserved_7 : 1;
+ uint32_t credit_sel_hd : 1; /**< [ 6: 6](R/W) Credit select (HeaderData).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], and
+ [CREDIT_SEL_TLP_TYPE] viewport-select fields determines
+ that data that is returned by the [CREDIT_DATA0] and
+ [CREDIT_DATA1] data fields.
+ 0x0 = Header credit.
+ 0x1 = Data credit. */
+ uint32_t credit_sel_tlp_type : 2; /**< [ 5: 4](R/W) Credit select (TLP Type).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], and [CREDIT_SEL_HD]
+ viewport-select fields determines that data that is returned
+ by the [CREDIT_DATA0] and [CREDIT_DATA1] data fields.
+ 0x0 = Posted.
+ 0x1 = Non-posted.
+ 0x2 = Completion.
+ 0x3 = Reserved. */
+ uint32_t credit_sel_credit_type : 1; /**< [ 3: 3](R/W) Credit select (credit type).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_TLP_TYPE], and [CREDIT_SEL_HD] viewport-select
+ fields determines that data that is returned by the
+ [CREDIT_DATA0] and [CREDIT_DATA1] data fields.
+ 0x0 = RX.
+ 0x1 = TX. */
+ uint32_t credit_sel_vc : 3; /**< [ 2: 0](R/W) Credit select (VC).
+ This field in conjunction with the
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields determines that
+ data that is returned by the [CREDIT_DATA0] and
+ [CREDIT_DATA1] data fields.
+ 0x0 = VC0.
+ 0x1 = VC1.
+ 0x2 = VC2.
+ _ ...
+ 0x7 = VC7. */
+#else /* Word 0 - Little Endian */
+ uint32_t credit_sel_vc : 3; /**< [ 2: 0](R/W) Credit select (VC).
+ This field in conjunction with the
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields determines that
+ data that is returned by the [CREDIT_DATA0] and
+ [CREDIT_DATA1] data fields.
+ 0x0 = VC0.
+ 0x1 = VC1.
+ 0x2 = VC2.
+ _ ...
+ 0x7 = VC7. */
+ uint32_t credit_sel_credit_type : 1; /**< [ 3: 3](R/W) Credit select (credit type).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_TLP_TYPE], and [CREDIT_SEL_HD] viewport-select
+ fields determines that data that is returned by the
+ [CREDIT_DATA0] and [CREDIT_DATA1] data fields.
+ 0x0 = RX.
+ 0x1 = TX. */
+ uint32_t credit_sel_tlp_type : 2; /**< [ 5: 4](R/W) Credit select (TLP Type).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], and [CREDIT_SEL_HD]
+ viewport-select fields determines that data that is returned
+ by the [CREDIT_DATA0] and [CREDIT_DATA1] data fields.
+ 0x0 = Posted.
+ 0x1 = Non-posted.
+ 0x2 = Completion.
+ 0x3 = Reserved. */
+ uint32_t credit_sel_hd : 1; /**< [ 6: 6](R/W) Credit select (HeaderData).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], and
+ [CREDIT_SEL_TLP_TYPE] viewport-select fields determines
+ that data that is returned by the [CREDIT_DATA0] and
+ [CREDIT_DATA1] data fields.
+ 0x0 = Header credit.
+ 0x1 = Data credit. */
+ uint32_t reserved_7 : 1;
+ uint32_t credit_data0 : 12; /**< [ 19: 8](RO/H) Credit data 0.
+ Current FC credit data selected by the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields.
+ RX = Credit received value.
+ TX = Credit consumed value. */
+ uint32_t credit_data1 : 12; /**< [ 31: 20](RO/H) Credit data 1.
+ Current FC credit data selected by the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields.
+ RX = Credit allocated value.
+ TX = Credit limit value. This value is valid when DLCMSM=0x3(DL_ACTIVE). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg157_s cn; */
+};
+typedef union bdk_pciercx_cfg157 bdk_pciercx_cfg157_t;
+
+/* Byte address of PCIERC(a)_CFG157; valid only on CN83XX with a <= 3,
+   any other (model, index) combination raises a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_CFG157(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG157(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000278ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG157", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG157(a) bdk_pciercx_cfg157_t
+#define bustype_BDK_PCIERCX_CFG157(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG157(a) "PCIERCX_CFG157"
+#define busnum_BDK_PCIERCX_CFG157(a) (a)
+#define arguments_BDK_PCIERCX_CFG157(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg158
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L3 Register
+ * This register contains the one hundred fifty-ninth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg158
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg158_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t mftlp_status : 1; /**< [ 7: 7](R/W1C) Malformed TLP status.
+ Indicates malformed TLP has occurred. */
+ uint32_t mftlp_ptr : 7; /**< [ 6: 0](RO) First malformed TLP error pointer.
+ Indicates the element of the received first malformed TLP.
+ This pointer is validated by [MFTLP_STATUS].
+ 0x01 = AtomicOp address alignment.
+ 0x02 = AtomicOp operand.
+ 0x03 = AtomicOp byte enable.
+ 0x04 = TLP length mismatch.
+ 0x05 = Max payload size.
+ 0x06 = Message TLP without TC0.
+ 0x07 = Invalid TC.
+ 0x08 = Unexpected route bit in message TLP.
+ 0x09 = Unexpected CRS status in completion TLP.
+ 0x0A = Byte enable.
+ 0x0B = Memory address 4KB boundary.
+ 0x0C = TLP prefix rules.
+ 0x0D = Translation request rules.
+ 0x0E = Invalid TLP type.
+ 0x0F = Completion rules.
+ 0x10-0x7E = Reserved.
+ 0x7F = Application. */
+#else /* Word 0 - Little Endian */
+ uint32_t mftlp_ptr : 7; /**< [ 6: 0](RO) First malformed TLP error pointer.
+ Indicates the element of the received first malformed TLP.
+ This pointer is validated by [MFTLP_STATUS].
+ 0x01 = AtomicOp address alignment.
+ 0x02 = AtomicOp operand.
+ 0x03 = AtomicOp byte enable.
+ 0x04 = TLP length mismatch.
+ 0x05 = Max payload size.
+ 0x06 = Message TLP without TC0.
+ 0x07 = Invalid TC.
+ 0x08 = Unexpected route bit in message TLP.
+ 0x09 = Unexpected CRS status in completion TLP.
+ 0x0A = Byte enable.
+ 0x0B = Memory address 4KB boundary.
+ 0x0C = TLP prefix rules.
+ 0x0D = Translation request rules.
+ 0x0E = Invalid TLP type.
+ 0x0F = Completion rules.
+ 0x10-0x7E = Reserved.
+ 0x7F = Application. */
+ uint32_t mftlp_status : 1; /**< [ 7: 7](R/W1C) Malformed TLP status.
+ Indicates malformed TLP has occurred. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg158_s cn; */
+};
+typedef union bdk_pciercx_cfg158 bdk_pciercx_cfg158_t;
+
+/* Byte address of PCIERC(a)_CFG158; valid only on CN83XX with a <= 3,
+   any other (model, index) combination raises a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_CFG158(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG158(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000027cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG158", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG158(a) bdk_pciercx_cfg158_t
+#define bustype_BDK_PCIERCX_CFG158(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG158(a) "PCIERCX_CFG158"
+#define busnum_BDK_PCIERCX_CFG158(a) (a)
+#define arguments_BDK_PCIERCX_CFG158(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg161
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Control 1 Register
+ * This register contains the one hundred sixty-second 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg161
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg161_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t fom_target : 8; /**< [ 31: 24](R/W) FOM target.
+ Indicates figure of merit target criteria value of EQ
+ master (DSP in EQ Phase3/USP in EQ Phase2).
+ This field is only valid when [GEN3_EQ_FB_MODE] is
+ 0x1 (Figure Of Merit). */
+ uint32_t fom_target_en : 1; /**< [ 23: 23](R/W) FOM target enable.
+ Enables the FOM_TARGET fields. */
+ uint32_t reserved_18_22 : 5;
+ uint32_t eval_interval_time : 2; /**< [ 17: 16](R/W) Eval interval time.
+ Indicates interval time of RxEqEval assertion.
+ 0x0 = 500ns.
+ 0x1 = 1us.
+ 0x2 = 2us.
+ 0x3 = 4us.
+
+ This field is used for EQ master (DSP in EQ Phase3/USP in
+ EQ Phase2). */
+ uint32_t reserved_10_15 : 6;
+ uint32_t ext_eq_timeout : 2; /**< [ 9: 8](R/W) Extends EQ Phase2/3 Timeout.
+ This field is used when the ltssm is in Recovery.EQ2/3.
+ When this field is set, the value of the EQ2/3 timeout is
+ extended.
+
+ EQ Master (DSP in EQ Phase 3/USP in EQ Phase2)
+ 0x0 = 24ms (default).
+ 0x1 = 48ms.
+ 0x2 = 240ms.
+ 0x3 = No timeout.
+
+ EQ Slave (DSP in EQ Phase 2/USP in EQ Phase3)
+ 0x0 = 32ms (default).
+ 0x1 = 56ms.
+ 0x2 = 248ms.
+ 0x3 = No timeout. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t eq_rate_sel : 1; /**< [ 4: 4](R/W) EQ status rate select.
+ Setting this field in conjunction with [EQ_LANE_SEL]
+ determines the per-lane silicon debug EQ status data
+ returned by the SD_EQ_CONTROL[2/3] and
+ SD_EQ_STATUS[1/2/3] viewport registers.
+ 0x0 = 8.0GT/s Speed
+ 0x1 = 16.0GT/s Speed (Not supported). */
+ uint32_t eq_lane_sel : 4; /**< [ 3: 0](R/W) EQ status lane select.
+ Setting this field in conjunction with [EQ_RATE_SEL]
+ determines the per-lane silicon debug EQ status data
+ returned by the SD_EQ_CONTROL[2/3] and
+ SD_EQ_STATUS[1/2/3] viewport registers.
+ 0x0 = Lane0.
+ 0x1 = Lane1.
+ 0x2 = Lane2.
+ _ ...
+ 0x7 = Lane7.
+ 0x8-0xF = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t eq_lane_sel : 4; /**< [ 3: 0](R/W) EQ status lane select.
+ Setting this field in conjunction with [EQ_RATE_SEL]
+ determines the per-lane silicon debug EQ status data
+ returned by the SD_EQ_CONTROL[2/3] and
+ SD_EQ_STATUS[1/2/3] viewport registers.
+ 0x0 = Lane0.
+ 0x1 = Lane1.
+ 0x2 = Lane2.
+ _ ...
+ 0x7 = Lane7.
+ 0x8-0xF = Reserved. */
+ uint32_t eq_rate_sel : 1; /**< [ 4: 4](R/W) EQ status rate select.
+ Setting this field in conjunction with [EQ_LANE_SEL]
+ determines the per-lane silicon debug EQ status data
+ returned by the SD_EQ_CONTROL[2/3] and
+ SD_EQ_STATUS[1/2/3] viewport registers.
+ 0x0 = 8.0GT/s Speed
+ 0x1 = 16.0GT/s Speed (Not supported). */
+ uint32_t reserved_5_7 : 3;
+ uint32_t ext_eq_timeout : 2; /**< [ 9: 8](R/W) Extends EQ Phase2/3 Timeout.
+ This field is used when the ltssm is in Recovery.EQ2/3.
+ When this field is set, the value of the EQ2/3 timeout is
+ extended.
+
+ EQ Master (DSP in EQ Phase 3/USP in EQ Phase2)
+ 0x0 = 24ms (default).
+ 0x1 = 48ms.
+ 0x2 = 240ms.
+ 0x3 = No timeout.
+
+ EQ Slave (DSP in EQ Phase 2/USP in EQ Phase3)
+ 0x0 = 32ms (default).
+ 0x1 = 56ms.
+ 0x2 = 248ms.
+ 0x3 = No timeout. */
+ uint32_t reserved_10_15 : 6;
+ uint32_t eval_interval_time : 2; /**< [ 17: 16](R/W) Eval interval time.
+ Indicates interval time of RxEqEval assertion.
+ 0x0 = 500ns.
+ 0x1 = 1us.
+ 0x2 = 2us.
+ 0x3 = 4us.
+
+ This field is used for EQ master (DSP in EQ Phase3/USP in
+ EQ Phase2). */
+ uint32_t reserved_18_22 : 5;
+ uint32_t fom_target_en : 1; /**< [ 23: 23](R/W) FOM target enable.
+ Enables the FOM_TARGET fields. */
+ uint32_t fom_target : 8; /**< [ 31: 24](R/W) FOM target.
+ Indicates figure of merit target criteria value of EQ
+ master (DSP in EQ Phase3/USP in EQ Phase2).
+ This field is only valid when [GEN3_EQ_FB_MODE] is
+ 0x1 (Figure Of Merit). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg161_s cn; */
+};
+typedef union bdk_pciercx_cfg161 bdk_pciercx_cfg161_t;
+
+/* Byte address of PCIERC(a)_CFG161; valid only on CN83XX with a <= 3,
+   any other (model, index) combination raises a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_CFG161(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG161(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000288ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG161", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG161(a) bdk_pciercx_cfg161_t
+#define bustype_BDK_PCIERCX_CFG161(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG161(a) "PCIERCX_CFG161"
+#define busnum_BDK_PCIERCX_CFG161(a) (a)
+#define arguments_BDK_PCIERCX_CFG161(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg162
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Control 2 Register
+ * This register contains the one hundred sixty-third 32-bits of PCIe type 0 configuration space.
+ * All fields force local equalization parameters, used instead of values
+ * received from (or negotiated with) the link partner.
+ */
+union bdk_pciercx_cfg162
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg162_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t force_loc_txpre_en : 1; /**< [ 30: 30](R/W) Force local transmitter preset enable.
+ Enables the FORCE_LOCAL_TX_PRESET field. */
+ uint32_t force_loc_rxhint_en : 1; /**< [ 29: 29](R/W) Force local receiver preset hint enable.
+ Enables the FORCE_LOCAL_RX_HINT field. */
+ uint32_t force_loc_txcoef_en : 1; /**< [ 28: 28](R/W) Force local transmitter coefficient enable.
+ Enables the following fields:
+ FORCE_LOCAL_TX_PRE_CURSOR.
+ FORCE_LOCAL_TX_CURSOR.
+ FORCE_LOCAL_TX_POST_CURSOR. */
+ uint32_t force_loc_txpre : 4; /**< [ 27: 24](R/W) Force local transmitter preset.
+ Indicates initial preset value of USP in EQ slave (EQ Phase2)
+ instead of receiving EQ TS2. */
+ uint32_t reserved_21_23 : 3;
+ uint32_t force_loc_rxhint : 3; /**< [ 20: 18](R/W) Force local receiver preset hint.
+ Indicates the RxPresetHint value of EQ slave (DSP in EQ
+ Phase2/USP in EQ Phase3), instead of received or set value. */
+ uint32_t force_loc_txpost_cur : 6; /**< [ 17: 12](R/W) Force local transmitter postcursor.
+ Indicates the coefficient value of EQ slave (DSP in EQ
+ Phase2/USP in EQ Phase3), instead of the value instructed
+ from link partner. */
+ uint32_t force_loc_tx_cur : 6; /**< [ 11: 6](R/W) Force local transmitter cursor.
+ Indicates the coefficient value of EQ slave (DSP in EQ
+ Phase2/USP in EQ Phase3), instead of the value instructed
+ from link partner. */
+ uint32_t force_loc_txpre_cur : 6; /**< [ 5: 0](R/W) Force local transmitter precursor.
+ Indicates the coefficient value of EQ slave (DSP in EQ
+ Phase2/USP in EQ Phase3), instead of the value instructed
+ from link partner. */
+#else /* Word 0 - Little Endian */
+ uint32_t force_loc_txpre_cur : 6; /**< [ 5: 0](R/W) Force local transmitter precursor.
+ Indicates the coefficient value of EQ slave (DSP in EQ
+ Phase2/USP in EQ Phase3), instead of the value instructed
+ from link partner. */
+ uint32_t force_loc_tx_cur : 6; /**< [ 11: 6](R/W) Force local transmitter cursor.
+ Indicates the coefficient value of EQ slave (DSP in EQ
+ Phase2/USP in EQ Phase3), instead of the value instructed
+ from link partner. */
+ uint32_t force_loc_txpost_cur : 6; /**< [ 17: 12](R/W) Force local transmitter postcursor.
+ Indicates the coefficient value of EQ slave (DSP in EQ
+ Phase2/USP in EQ Phase3), instead of the value instructed
+ from link partner. */
+ uint32_t force_loc_rxhint : 3; /**< [ 20: 18](R/W) Force local receiver preset hint.
+ Indicates the RxPresetHint value of EQ slave (DSP in EQ
+ Phase2/USP in EQ Phase3), instead of received or set value. */
+ uint32_t reserved_21_23 : 3;
+ uint32_t force_loc_txpre : 4; /**< [ 27: 24](R/W) Force local transmitter preset.
+ Indicates initial preset value of USP in EQ slave (EQ Phase2)
+ instead of receiving EQ TS2. */
+ uint32_t force_loc_txcoef_en : 1; /**< [ 28: 28](R/W) Force local transmitter coefficient enable.
+ Enables the following fields:
+ FORCE_LOCAL_TX_PRE_CURSOR.
+ FORCE_LOCAL_TX_CURSOR.
+ FORCE_LOCAL_TX_POST_CURSOR. */
+ uint32_t force_loc_rxhint_en : 1; /**< [ 29: 29](R/W) Force local receiver preset hint enable.
+ Enables the FORCE_LOCAL_RX_HINT field. */
+ uint32_t force_loc_txpre_en : 1; /**< [ 30: 30](R/W) Force local transmitter preset enable.
+ Enables the FORCE_LOCAL_TX_PRESET field. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg162_s cn; */
+};
+typedef union bdk_pciercx_cfg162 bdk_pciercx_cfg162_t;
+
+/* Byte address of PCIERC(a)_CFG162; valid only on CN83XX with a <= 3,
+   any other (model, index) combination raises a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_CFG162(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG162(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000028cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG162", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG162(a) bdk_pciercx_cfg162_t
+#define bustype_BDK_PCIERCX_CFG162(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG162(a) "PCIERCX_CFG162"
+#define busnum_BDK_PCIERCX_CFG162(a) (a)
+#define arguments_BDK_PCIERCX_CFG162(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg163
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Control 3 Register
+ * This register contains the one hundred sixty-fourth 32-bits of PCIe type 0 configuration space.
+ *
+ * NOTE(review): [FORCE_REM_TXPRE_CUR] is annotated (RAZ) while the other
+ * coefficient fields are (R/W); this looks like a generator artifact --
+ * confirm against the CN83XX HRM.
+ */
+union bdk_pciercx_cfg163
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg163_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t force_rem_txcoef_en : 1; /**< [ 28: 28](R/W) Force remote transmitter coefficient enable as selected by
+ PCIERC()_CFG161[EQ_LANE_SEL][RATE_SEL].
+ Enables the following fields:
+ FORCE_REMOTE_TX_PRE_CURSOR
+ FORCE_REMOTE_TX_CURSOR
+ FORCE_REMOTE_TX_POST_CURSOR */
+ uint32_t reserved_18_27 : 10;
+ uint32_t force_rem_txpost_cur : 6; /**< [ 17: 12](R/W) Force remote transmitter postcursor as selected by
+ PCIERC()_CFG161[EQ_LANE_SEL][RATE_SEL].
+ Indicates the coefficient value of EQ master (DSP in EQ
+ Phase3/USP in EQ Phase2), instead of the value instructed
+ from link partner. */
+ uint32_t force_rem_tx_cur : 6; /**< [ 11: 6](R/W) Force remote transmitter cursors selected by
+ PCIERC()_CFG161[EQ_LANE_SEL][RATE_SEL].
+ Indicates the coefficient value of EQ master (DSP in EQ
+ Phase3/USP in EQ Phase2), instead of the value instructed
+ from link partner. */
+ uint32_t force_rem_txpre_cur : 6; /**< [ 5: 0](RAZ) Force remote transmitter precursors selected by
+ PCIERC()_CFG161[EQ_LANE_SEL][RATE_SEL].
+ Indicates the coefficient value of EQ master (DSP in EQ
+ Phase3/USP in EQ Phase2), instead of the value instructed
+ from link partner. */
+#else /* Word 0 - Little Endian */
+ uint32_t force_rem_txpre_cur : 6; /**< [ 5: 0](RAZ) Force remote transmitter precursors selected by
+ PCIERC()_CFG161[EQ_LANE_SEL][RATE_SEL].
+ Indicates the coefficient value of EQ master (DSP in EQ
+ Phase3/USP in EQ Phase2), instead of the value instructed
+ from link partner. */
+ uint32_t force_rem_tx_cur : 6; /**< [ 11: 6](R/W) Force remote transmitter cursors selected by
+ PCIERC()_CFG161[EQ_LANE_SEL][RATE_SEL].
+ Indicates the coefficient value of EQ master (DSP in EQ
+ Phase3/USP in EQ Phase2), instead of the value instructed
+ from link partner. */
+ uint32_t force_rem_txpost_cur : 6; /**< [ 17: 12](R/W) Force remote transmitter postcursor as selected by
+ PCIERC()_CFG161[EQ_LANE_SEL][RATE_SEL].
+ Indicates the coefficient value of EQ master (DSP in EQ
+ Phase3/USP in EQ Phase2), instead of the value instructed
+ from link partner. */
+ uint32_t reserved_18_27 : 10;
+ uint32_t force_rem_txcoef_en : 1; /**< [ 28: 28](R/W) Force remote transmitter coefficient enable as selected by
+ PCIERC()_CFG161[EQ_LANE_SEL][RATE_SEL].
+ Enables the following fields:
+ FORCE_REMOTE_TX_PRE_CURSOR
+ FORCE_REMOTE_TX_CURSOR
+ FORCE_REMOTE_TX_POST_CURSOR */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg163_s cn; */
+};
+typedef union bdk_pciercx_cfg163 bdk_pciercx_cfg163_t;
+
+/* Byte address of PCIERC(a)_CFG163; valid only on CN83XX with a <= 3,
+   any other (model, index) combination raises a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_CFG163(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG163(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000290ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG163", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG163(a) bdk_pciercx_cfg163_t
+#define bustype_BDK_PCIERCX_CFG163(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG163(a) "PCIERCX_CFG163"
+#define busnum_BDK_PCIERCX_CFG163(a) (a)
+#define arguments_BDK_PCIERCX_CFG163(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg165
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Status 1 Register
+ * This register contains the one hundred sixty-sixth 32-bits of PCIe type 0 configuration space.
+ * All fields are read-only equalization status bits.
+ */
+union bdk_pciercx_cfg165
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg165_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t eq_reject_event : 1; /**< [ 7: 7](RO/H) EQ reject event.
+ Indicates that the core receives two consecutive TS1 OS
+ w/Reject=1b during EQ master phase (DSP in EQ
+ Phase3/USP in EQ Phase2). This bit is automatically cleared
+ when the core starts EQ master phase again. */
+ uint32_t eq_rulec_viol : 1; /**< [ 6: 6](RO/H) EQ rule C violation.
+ Indicates that coefficient rule C violation is detected in the
+ values provided by PHY using direction change method
+ during EQ master phase (DSP in EQ Phase3/USP in EQ
+ Phase2). The coefficients rule C
+ correspond to the rules c) from section "Rules for
+ Transmitter Coefficients" in the PCI Express Base Specification.
+ This bit is automatically cleared when the controller starts
+ EQ Master phase again. */
+ uint32_t eq_ruleb_viol : 1; /**< [ 5: 5](RO/H) EQ rule B violation.
+ Indicates that coefficients rule B violation is detected in the
+ values provided by PHY using direction change method
+ during EQ master phase (DSP in EQ Phase3/USP in EQ
+ Phase2). The coefficients rules B
+ correspond to the rules b) from section "Rules for
+ Transmitter Coefficients" in the PCI Express Base Specification.
+ This bit is automatically cleared when the controller starts
+ EQ Master phase again. */
+ uint32_t eq_rulea_viol : 1; /**< [ 4: 4](RO/H) EQ rule A violation.
+ Indicates that coefficients rule A violation is detected in the
+ values provided by PHY using direction change method
+ during EQ master phase (DSP in EQ Phase3/USP in EQ
+ Phase2). The coefficients rules A
+ correspond to the rules a) from section "Rules for
+ Transmitter Coefficients" in the PCI Express Base Specification.
+ This bit is automatically cleared when the controller starts
+ EQ Master phase again. */
+ uint32_t reserved_3 : 1;
+ uint32_t eq_conv_info : 2; /**< [ 2: 1](RO/H) EQ convergence info.
+ Indicates equalization convergence information.
+ 0x0 = Equalization is not attempted.
+ 0x1 = Equalization finished successfully.
+ 0x2 = Equalization finished unsuccessfully.
+ 0x3 = Reserved.
+ This bit is automatically cleared when the core starts EQ
+ master phase again. */
+ uint32_t eq_sequence : 1; /**< [ 0: 0](RO) EQ sequence.
+ Indicates that the core is starting the equalization sequence. */
+#else /* Word 0 - Little Endian */
+ uint32_t eq_sequence : 1; /**< [ 0: 0](RO) EQ sequence.
+ Indicates that the core is starting the equalization sequence. */
+ uint32_t eq_conv_info : 2; /**< [ 2: 1](RO/H) EQ convergence info.
+ Indicates equalization convergence information.
+ 0x0 = Equalization is not attempted.
+ 0x1 = Equalization finished successfully.
+ 0x2 = Equalization finished unsuccessfully.
+ 0x3 = Reserved.
+ This bit is automatically cleared when the core starts EQ
+ master phase again. */
+ uint32_t reserved_3 : 1;
+ uint32_t eq_rulea_viol : 1; /**< [ 4: 4](RO/H) EQ rule A violation.
+ Indicates that coefficients rule A violation is detected in the
+ values provided by PHY using direction change method
+ during EQ master phase (DSP in EQ Phase3/USP in EQ
+ Phase2). The coefficients rules A
+ correspond to the rules a) from section "Rules for
+ Transmitter Coefficients" in the PCI Express Base Specification.
+ This bit is automatically cleared when the controller starts
+ EQ Master phase again. */
+ uint32_t eq_ruleb_viol : 1; /**< [ 5: 5](RO/H) EQ rule B violation.
+ Indicates that coefficients rule B violation is detected in the
+ values provided by PHY using direction change method
+ during EQ master phase (DSP in EQ Phase3/USP in EQ
+ Phase2). The coefficients rules B
+ correspond to the rules b) from section "Rules for
+ Transmitter Coefficients" in the PCI Express Base Specification.
+ This bit is automatically cleared when the controller starts
+ EQ Master phase again. */
+ uint32_t eq_rulec_viol : 1; /**< [ 6: 6](RO/H) EQ rule C violation.
+ Indicates that coefficient rule C violation is detected in the
+ values provided by PHY using direction change method
+ during EQ master phase (DSP in EQ Phase3/USP in EQ
+ Phase2). The coefficients rule C
+ correspond to the rules c) from section "Rules for
+ Transmitter Coefficients" in the PCI Express Base Specification.
+ This bit is automatically cleared when the controller starts
+ EQ Master phase again. */
+ uint32_t eq_reject_event : 1; /**< [ 7: 7](RO/H) EQ reject event.
+ Indicates that the core receives two consecutive TS1 OS
+ w/Reject=1b during EQ master phase (DSP in EQ
+ Phase3/USP in EQ Phase2). This bit is automatically cleared
+ when the core starts EQ master phase again. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg165_s cn; */
+};
+typedef union bdk_pciercx_cfg165 bdk_pciercx_cfg165_t;
+
+/* Config-space address of PCIERC(a)_CFG165. CN83XX only; one RC config
+   space per controller at a 0x100000000-byte stride. Any other (model,
+   index) combination is fatal. */
+static inline uint64_t BDK_PCIERCX_CFG165(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG165(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x20000000298ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG165", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG165(a) bdk_pciercx_cfg165_t
+#define bustype_BDK_PCIERCX_CFG165(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG165(a) "PCIERCX_CFG165"
+#define busnum_BDK_PCIERCX_CFG165(a) (a)
+#define arguments_BDK_PCIERCX_CFG165(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg166
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Status 2 Register
+ * This register contains the one hundred sixty-seventh 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg166
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg166_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t eq_loc_fom_val : 8; /**< [ 31: 24](RO/H) EQ local figure of merit.
+                                         Indicates local maximum figure of merit value. */
+        uint32_t reserved_21_23 : 3;
+        uint32_t eq_loc_rxhint : 3; /**< [ 20: 18](RO/H) EQ local receiver preset hint.
+                                        Indicates local receiver preset hint value. */
+        uint32_t eq_loc_post_cur : 6; /**< [ 17: 12](RO/H) EQ local postcursor.
+                                          Indicates local post cursor coefficient value. */
+        uint32_t eq_loc_cur : 6; /**< [ 11: 6](RO/H) EQ local cursor.
+                                     Indicates local cursor coefficient value. */
+        uint32_t eq_loc_pre_cur : 6; /**< [ 5: 0](RO/H) EQ local precursor.
+                                         Indicates local precursor coefficient value. */
+#else /* Word 0 - Little Endian */
+        uint32_t eq_loc_pre_cur : 6; /**< [ 5: 0](RO/H) EQ local precursor.
+                                         Indicates local precursor coefficient value. */
+        uint32_t eq_loc_cur : 6; /**< [ 11: 6](RO/H) EQ local cursor.
+                                     Indicates local cursor coefficient value. */
+        uint32_t eq_loc_post_cur : 6; /**< [ 17: 12](RO/H) EQ local postcursor.
+                                          Indicates local post cursor coefficient value. */
+        uint32_t eq_loc_rxhint : 3; /**< [ 20: 18](RO/H) EQ local receiver preset hint.
+                                        Indicates local receiver preset hint value. */
+        uint32_t reserved_21_23 : 3;
+        uint32_t eq_loc_fom_val : 8; /**< [ 31: 24](RO/H) EQ local figure of merit.
+                                         Indicates local maximum figure of merit value. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg166_s cn; */
+};
+typedef union bdk_pciercx_cfg166 bdk_pciercx_cfg166_t;
+
+/* Config-space address of PCIERC(a)_CFG166. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG166(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG166(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x2000000029cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG166", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG166(a) bdk_pciercx_cfg166_t
+#define bustype_BDK_PCIERCX_CFG166(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG166(a) "PCIERCX_CFG166"
+#define busnum_BDK_PCIERCX_CFG166(a) (a)
+#define arguments_BDK_PCIERCX_CFG166(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg167
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Status 3 Register
+ * This register contains the one hundred sixty-eighth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg167
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg167_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_30_31 : 2;
+        uint32_t eq_rem_fs : 6; /**< [ 29: 24](RO/H) EQ remote FS.
+                                    Indicates remote FS value. */
+        uint32_t eq_rem_lf : 6; /**< [ 23: 18](RO/H) EQ remote LF.
+                                    Indicates remote LF value. */
+        uint32_t eq_rem_post_cur : 6; /**< [ 17: 12](RO/H) EQ remote postcursor.
+                                          Indicates remote postcursor coefficient value. */
+        uint32_t eq_rem_cur : 6; /**< [ 11: 6](RO/H) EQ remote cursor.
+                                     Indicates remote cursor coefficient value. */
+        uint32_t eq_rem_pre_cur : 6; /**< [ 5: 0](RO/H) EQ remote precursor.
+                                         Indicates remote precursor coefficient value. */
+#else /* Word 0 - Little Endian */
+        uint32_t eq_rem_pre_cur : 6; /**< [ 5: 0](RO/H) EQ remote precursor.
+                                         Indicates remote precursor coefficient value. */
+        uint32_t eq_rem_cur : 6; /**< [ 11: 6](RO/H) EQ remote cursor.
+                                     Indicates remote cursor coefficient value. */
+        uint32_t eq_rem_post_cur : 6; /**< [ 17: 12](RO/H) EQ remote postcursor.
+                                          Indicates remote postcursor coefficient value. */
+        uint32_t eq_rem_lf : 6; /**< [ 23: 18](RO/H) EQ remote LF.
+                                    Indicates remote LF value. */
+        uint32_t eq_rem_fs : 6; /**< [ 29: 24](RO/H) EQ remote FS.
+                                    Indicates remote FS value. */
+        uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg167_s cn; */
+};
+typedef union bdk_pciercx_cfg167 bdk_pciercx_cfg167_t;
+
+/* Config-space address of PCIERC(a)_CFG167. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG167(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG167(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002a0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG167", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG167(a) bdk_pciercx_cfg167_t
+#define bustype_BDK_PCIERCX_CFG167(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG167(a) "PCIERCX_CFG167"
+#define busnum_BDK_PCIERCX_CFG167(a) (a)
+#define arguments_BDK_PCIERCX_CFG167(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg174
+ *
+ * PCIe RC Vendor RAS Data Path Protection Header Register
+ * This register contains the one hundred seventy-fifth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg174
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg174_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the ACS extended capabilities.
+                               Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                             Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                  Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                  Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                             Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset. Points to the ACS extended capabilities.
+                               Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg174_s cn; */
+};
+typedef union bdk_pciercx_cfg174 bdk_pciercx_cfg174_t;
+
+/* Config-space address of PCIERC(a)_CFG174. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG174(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG174(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002b8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG174", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG174(a) bdk_pciercx_cfg174_t
+#define bustype_BDK_PCIERCX_CFG174(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG174(a) "PCIERCX_CFG174"
+#define busnum_BDK_PCIERCX_CFG174(a) (a)
+#define arguments_BDK_PCIERCX_CFG174(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg175
+ *
+ * PCIe RC RAS Data Path Extended Capability Register
+ * This register contains the one hundred seventy-sixth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg175
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg175_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t vsec_length : 12; /**< [ 31: 20](RO) VSEC length. */
+        uint32_t vsec_rev : 4; /**< [ 19: 16](RO) Capability version. */
+        uint32_t vsec_id : 16; /**< [ 15: 0](RO) VSEC ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t vsec_id : 16; /**< [ 15: 0](RO) VSEC ID. */
+        uint32_t vsec_rev : 4; /**< [ 19: 16](RO) Capability version. */
+        uint32_t vsec_length : 12; /**< [ 31: 20](RO) VSEC length. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg175_s cn; */
+};
+typedef union bdk_pciercx_cfg175 bdk_pciercx_cfg175_t;
+
+/* Config-space address of PCIERC(a)_CFG175. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG175(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG175(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002bcll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG175", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG175(a) bdk_pciercx_cfg175_t
+#define bustype_BDK_PCIERCX_CFG175(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG175(a) "PCIERCX_CFG175"
+#define busnum_BDK_PCIERCX_CFG175(a) (a)
+#define arguments_BDK_PCIERCX_CFG175(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg176
+ *
+ * PCIe RC RAS Data Path Error Protection Control Register
+ * This register contains the one hundred seventy-seventh 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg176
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg176_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_23_31 : 9;
+        uint32_t ep_dis_adm_rx : 1; /**< [ 22: 22](R/W) Error correction disable for ADM RX path. */
+        uint32_t ep_dis_l3_rx : 1; /**< [ 21: 21](R/W) Error correction disable for layer 3 RX path. */
+        uint32_t ep_dis_l2_rx : 1; /**< [ 20: 20](R/W) Error correction disable for layer 2 RX path. */
+        uint32_t ep_dis_dma_rd : 1; /**< [ 19: 19](R/W) Error correction disable for DMA read (not supported). */
+        uint32_t ep_dis_axib_inbr : 1; /**< [ 18: 18](R/W) Error correction disable for AXI bridge inbound request path (not supported). */
+        uint32_t ep_dis_axib_inbc : 1; /**< [ 17: 17](R/W) Error correction disable for AXI bridge inbound completion composer (not supported). */
+        uint32_t ep_dis_rx : 1; /**< [ 16: 16](R/W) Global error correction disable for all RX layers. */
+        uint32_t reserved_7_15 : 9;
+        uint32_t ep_dis_adm_tx : 1; /**< [ 6: 6](R/W) Error correction disable for ADM TX path. */
+        uint32_t ep_dis_l3_tx : 1; /**< [ 5: 5](R/W) Error correction disable for layer 3 TX path. */
+        uint32_t ep_dis_l2_tx : 1; /**< [ 4: 4](R/W) Error correction disable for layer 2 TX path. */
+        uint32_t ep_dis_dma_wr : 1; /**< [ 3: 3](R/W) Error correction disable for DMA write (not supported). */
+        uint32_t ep_dis_axib_outb : 1; /**< [ 2: 2](R/W) Error correction disable for AXI bridge outbound request path (not supported). */
+        uint32_t ep_dis_axib_masc : 1; /**< [ 1: 1](R/W) Error correction disable for AXI bridge master completion buffer (not supported). */
+        uint32_t ep_dis_tx : 1; /**< [ 0: 0](R/W) Global error correction disable for all TX layers. */
+#else /* Word 0 - Little Endian */
+        uint32_t ep_dis_tx : 1; /**< [ 0: 0](R/W) Global error correction disable for all TX layers. */
+        uint32_t ep_dis_axib_masc : 1; /**< [ 1: 1](R/W) Error correction disable for AXI bridge master completion buffer (not supported). */
+        uint32_t ep_dis_axib_outb : 1; /**< [ 2: 2](R/W) Error correction disable for AXI bridge outbound request path (not supported). */
+        uint32_t ep_dis_dma_wr : 1; /**< [ 3: 3](R/W) Error correction disable for DMA write (not supported). */
+        uint32_t ep_dis_l2_tx : 1; /**< [ 4: 4](R/W) Error correction disable for layer 2 TX path. */
+        uint32_t ep_dis_l3_tx : 1; /**< [ 5: 5](R/W) Error correction disable for layer 3 TX path. */
+        uint32_t ep_dis_adm_tx : 1; /**< [ 6: 6](R/W) Error correction disable for ADM TX path. */
+        uint32_t reserved_7_15 : 9;
+        uint32_t ep_dis_rx : 1; /**< [ 16: 16](R/W) Global error correction disable for all RX layers. */
+        uint32_t ep_dis_axib_inbc : 1; /**< [ 17: 17](R/W) Error correction disable for AXI bridge inbound completion composer (not supported). */
+        uint32_t ep_dis_axib_inbr : 1; /**< [ 18: 18](R/W) Error correction disable for AXI bridge inbound request path (not supported). */
+        uint32_t ep_dis_dma_rd : 1; /**< [ 19: 19](R/W) Error correction disable for DMA read (not supported). */
+        uint32_t ep_dis_l2_rx : 1; /**< [ 20: 20](R/W) Error correction disable for layer 2 RX path. */
+        uint32_t ep_dis_l3_rx : 1; /**< [ 21: 21](R/W) Error correction disable for layer 3 RX path. */
+        uint32_t ep_dis_adm_rx : 1; /**< [ 22: 22](R/W) Error correction disable for ADM RX path. */
+        uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg176_s cn; */
+};
+typedef union bdk_pciercx_cfg176 bdk_pciercx_cfg176_t;
+
+/* Config-space address of PCIERC(a)_CFG176. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG176(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG176(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002c0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG176", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG176(a) bdk_pciercx_cfg176_t
+#define bustype_BDK_PCIERCX_CFG176(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG176(a) "PCIERCX_CFG176"
+#define busnum_BDK_PCIERCX_CFG176(a) (a)
+#define arguments_BDK_PCIERCX_CFG176(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg177
+ *
+ * PCIe RC RAS Data Path Correctable Error Control Register
+ * This register contains the one hundred seventy-eighth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg177
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg177_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t corr_cnt_sel : 8; /**< [ 31: 24](R/W) Counter selection. This field selects the counter ID (within
+                                       the region defined by CORR_CNT_SEL_REG) whose contents
+                                       can be read from the CFG114 register. You can
+                                       cycle this field value from 0 to 255 to access all counters. */
+        uint32_t corr_cnt_sel_reg : 4; /**< [ 23: 20](R/W) Selected correctable counter region.
+                                           0x0 = ADM RX path.
+                                           0x1 = Layer 3 RX path.
+                                           0x2 = Layer 2 RX path.
+                                           0x3 = DMA read engine inbound (not supported).
+                                           0x4 = AXI bridge inbound request path (not supported).
+                                           0x5 = AXI bridge inbound completion composer (not supported).
+                                           0x6 = ADM TX path.
+                                           0x7 = Layer 3 TX path.
+                                           0x8 = Layer 2 TX path.
+                                           0x9 = DMA outbound path (not supported).
+                                           0xA = AXI bridge outbound request path (not supported).
+                                           0xB = AXI bridge outbound master completion buffer path (not supported).
+                                           0xC - 0xF = Reserved. */
+        uint32_t reserved_5_19 : 15;
+        uint32_t corr_en_cntrs : 1; /**< [ 4: 4](R/W) Error correction disable for ADM RX path.
+                                        NOTE(review): description appears copy-pasted from CFG176;
+                                        the field name suggests "enable correctable error counters"
+                                        -- confirm against the hardware reference manual. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t ep_dis_l3_rx : 1; /**< [ 0: 0](R/W1C) Clears all correctable error counters.
+                                       NOTE(review): field name does not match the description;
+                                       likely a generator artifact -- verify before relying on it. */
+#else /* Word 0 - Little Endian */
+        uint32_t ep_dis_l3_rx : 1; /**< [ 0: 0](R/W1C) Clears all correctable error counters.
+                                       NOTE(review): field name does not match the description;
+                                       likely a generator artifact -- verify before relying on it. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t corr_en_cntrs : 1; /**< [ 4: 4](R/W) Error correction disable for ADM RX path.
+                                        NOTE(review): description appears copy-pasted from CFG176;
+                                        the field name suggests "enable correctable error counters"
+                                        -- confirm against the hardware reference manual. */
+        uint32_t reserved_5_19 : 15;
+        uint32_t corr_cnt_sel_reg : 4; /**< [ 23: 20](R/W) Selected correctable counter region.
+                                           0x0 = ADM RX path.
+                                           0x1 = Layer 3 RX path.
+                                           0x2 = Layer 2 RX path.
+                                           0x3 = DMA read engine inbound (not supported).
+                                           0x4 = AXI bridge inbound request path (not supported).
+                                           0x5 = AXI bridge inbound completion composer (not supported).
+                                           0x6 = ADM TX path.
+                                           0x7 = Layer 3 TX path.
+                                           0x8 = Layer 2 TX path.
+                                           0x9 = DMA outbound path (not supported).
+                                           0xA = AXI bridge outbound request path (not supported).
+                                           0xB = AXI bridge outbound master completion buffer path (not supported).
+                                           0xC - 0xF = Reserved. */
+        uint32_t corr_cnt_sel : 8; /**< [ 31: 24](R/W) Counter selection. This field selects the counter ID (within
+                                       the region defined by CORR_CNT_SEL_REG) whose contents
+                                       can be read from the CFG114 register. You can
+                                       cycle this field value from 0 to 255 to access all counters. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg177_s cn; */
+};
+typedef union bdk_pciercx_cfg177 bdk_pciercx_cfg177_t;
+
+/* Config-space address of PCIERC(a)_CFG177. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG177(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG177(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002c4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG177", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG177(a) bdk_pciercx_cfg177_t
+#define bustype_BDK_PCIERCX_CFG177(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG177(a) "PCIERCX_CFG177"
+#define busnum_BDK_PCIERCX_CFG177(a) (a)
+#define arguments_BDK_PCIERCX_CFG177(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg178
+ *
+ * PCIe RC RAS Data Path Correctable Error Report Register
+ * This register contains the one hundred seventy-ninth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg178
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg178_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t corr_cnt_sel : 8; /**< [ 31: 24](RO/H) Counter selection. Returns the value set in the CFG113[CORR_CNT_SEL] register. */
+        uint32_t corr_cnt_sel_reg : 4; /**< [ 23: 20](RO/H) Selected correctable counter region.
+                                           0x0 = ADM RX path.
+                                           0x1 = Layer 3 RX path.
+                                           0x2 = Layer 2 RX path.
+                                           0x3 = DMA inbound path (not supported).
+                                           0x4 = AXI bridge inbound request path (not supported).
+                                           0x5 = AXI bridge inbound completion composer path (not supported).
+                                           0x6 = ADM TX path.
+                                           0x7 = Layer 3 TX path.
+                                           0x8 = Layer 2 TX path.
+                                           0x9 = DMA outbound path (not supported).
+                                           0xA = AXI bridge outbound request path (not supported).
+                                           0xB = AXI bridge outbound master completion (not supported).
+                                           0xC - 0xF = Reserved. */
+        uint32_t reserved_8_19 : 12;
+        uint32_t corr_count : 8; /**< [ 7: 0](RO) Current corrected count for the selected counter. */
+#else /* Word 0 - Little Endian */
+        uint32_t corr_count : 8; /**< [ 7: 0](RO) Current corrected count for the selected counter. */
+        uint32_t reserved_8_19 : 12;
+        uint32_t corr_cnt_sel_reg : 4; /**< [ 23: 20](RO/H) Selected correctable counter region.
+                                           0x0 = ADM RX path.
+                                           0x1 = Layer 3 RX path.
+                                           0x2 = Layer 2 RX path.
+                                           0x3 = DMA inbound path (not supported).
+                                           0x4 = AXI bridge inbound request path (not supported).
+                                           0x5 = AXI bridge inbound completion composer path (not supported).
+                                           0x6 = ADM TX path.
+                                           0x7 = Layer 3 TX path.
+                                           0x8 = Layer 2 TX path.
+                                           0x9 = DMA outbound path (not supported).
+                                           0xA = AXI bridge outbound request path (not supported).
+                                           0xB = AXI bridge outbound master completion (not supported).
+                                           0xC - 0xF = Reserved. */
+        uint32_t corr_cnt_sel : 8; /**< [ 31: 24](RO/H) Counter selection. Returns the value set in the CFG113[CORR_CNT_SEL] register. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg178_s cn; */
+};
+typedef union bdk_pciercx_cfg178 bdk_pciercx_cfg178_t;
+
+/* Config-space address of PCIERC(a)_CFG178. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG178(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG178(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002c8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG178", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG178(a) bdk_pciercx_cfg178_t
+#define bustype_BDK_PCIERCX_CFG178(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG178(a) "PCIERCX_CFG178"
+#define busnum_BDK_PCIERCX_CFG178(a) (a)
+#define arguments_BDK_PCIERCX_CFG178(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg179
+ *
+ * PCIe RC RAS Data Path Uncorrectable Error Control Register
+ * This register contains the one hundred eightieth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg179
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg179_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ucorr_cnt_sel : 8; /**< [ 31: 24](R/W) Counter selection. This field selects the counter ID (within
+                                        the region defined by [UCORR_CNT_SEL_REG]) whose contents
+                                        can be read from the CFG114 register. You can
+                                        cycle this field value from 0 to 255 to access all counters. */
+        uint32_t ucorr_cnt_sel_reg : 4; /**< [ 23: 20](R/W) Selected correctable counter region.
+                                            0x0 = ADM RX path.
+                                            0x1 = Layer 3 RX path.
+                                            0x2 = Layer 2 RX path.
+                                            0x3 = DMA inbound path (not supported).
+                                            0x4 = AXI bridge inbound request path (not supported).
+                                            0x5 = AXI bridge inbound completion composer path (not supported).
+                                            0x6 = ADM TX path.
+                                            0x7 = Layer 3 TX path.
+                                            0x8 = Layer 2 TX path.
+                                            0x9 = DMA outbound path (not supported).
+                                            0xA = AXI bridge outbound request path (not supported).
+                                            0xB = AXI bridge outbound master completion path (not supported).
+                                            0xC - 0xF = Reserved. */
+        uint32_t reserved_5_19 : 15;
+        uint32_t ucorr_en_cntrs : 1; /**< [ 4: 4](R/W) Error correction disable for ADM RX path.
+                                         NOTE(review): description appears copy-pasted from CFG176;
+                                         the field name suggests "enable uncorrectable error counters"
+                                         -- confirm against the hardware reference manual. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t ep_dis_l3_rx : 1; /**< [ 0: 0](R/W1C) Clears all uncorrectable error counters.
+                                       NOTE(review): field name does not match the description;
+                                       likely a generator artifact -- verify before relying on it. */
+#else /* Word 0 - Little Endian */
+        uint32_t ep_dis_l3_rx : 1; /**< [ 0: 0](R/W1C) Clears all uncorrectable error counters.
+                                       NOTE(review): field name does not match the description;
+                                       likely a generator artifact -- verify before relying on it. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t ucorr_en_cntrs : 1; /**< [ 4: 4](R/W) Error correction disable for ADM RX path.
+                                         NOTE(review): description appears copy-pasted from CFG176;
+                                         the field name suggests "enable uncorrectable error counters"
+                                         -- confirm against the hardware reference manual. */
+        uint32_t reserved_5_19 : 15;
+        uint32_t ucorr_cnt_sel_reg : 4; /**< [ 23: 20](R/W) Selected correctable counter region.
+                                            0x0 = ADM RX path.
+                                            0x1 = Layer 3 RX path.
+                                            0x2 = Layer 2 RX path.
+                                            0x3 = DMA inbound path (not supported).
+                                            0x4 = AXI bridge inbound request path (not supported).
+                                            0x5 = AXI bridge inbound completion composer path (not supported).
+                                            0x6 = ADM TX path.
+                                            0x7 = Layer 3 TX path.
+                                            0x8 = Layer 2 TX path.
+                                            0x9 = DMA outbound path (not supported).
+                                            0xA = AXI bridge outbound request path (not supported).
+                                            0xB = AXI bridge outbound master completion path (not supported).
+                                            0xC - 0xF = Reserved. */
+        uint32_t ucorr_cnt_sel : 8; /**< [ 31: 24](R/W) Counter selection. This field selects the counter ID (within
+                                        the region defined by [UCORR_CNT_SEL_REG]) whose contents
+                                        can be read from the CFG114 register. You can
+                                        cycle this field value from 0 to 255 to access all counters. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg179_s cn; */
+};
+typedef union bdk_pciercx_cfg179 bdk_pciercx_cfg179_t;
+
+/* Config-space address of PCIERC(a)_CFG179. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG179(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG179(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002ccll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG179", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG179(a) bdk_pciercx_cfg179_t
+#define bustype_BDK_PCIERCX_CFG179(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG179(a) "PCIERCX_CFG179"
+#define busnum_BDK_PCIERCX_CFG179(a) (a)
+#define arguments_BDK_PCIERCX_CFG179(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg180
+ *
+ * PCIe RC RAS Data Path Uncorrectable Error Report Register
+ * This register contains the one hundred eighty-first 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg180
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg180_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ucorr_cnt_sel : 8; /**< [ 31: 24](RO/H) Counter selection. Returns the value set in the CFG113[UCORR_CNT_SEL] register. */
+        uint32_t ucorr_cnt_sel_reg : 4; /**< [ 23: 20](RO/H) Selected correctable counter region.
+                                            0x0 = ADM RX path.
+                                            0x1 = Layer 3 RX path.
+                                            0x2 = Layer 2 RX path.
+                                            0x3 = DMA inbound path (not supported).
+                                            0x4 = AXI bridge inbound request path (not supported).
+                                            0x5 = AXI bridge inbound completion composer path (not supported).
+                                            0x6 = ADM TX path.
+                                            0x7 = Layer 3 TX path.
+                                            0x8 = Layer 2 TX path.
+                                            0x9 = DMA outbound path (not supported).
+                                            0xA = AXI bridge outbound request path (not supported).
+                                            0xB = AXI bridge outbound master completion buffer path (not supported).
+                                            0xC - 0xF = Reserved. */
+        uint32_t reserved_8_19 : 12;
+        uint32_t ucorr_count : 8; /**< [ 7: 0](RO) Current uncorrected count for the selected counter. */
+#else /* Word 0 - Little Endian */
+        uint32_t ucorr_count : 8; /**< [ 7: 0](RO) Current uncorrected count for the selected counter. */
+        uint32_t reserved_8_19 : 12;
+        uint32_t ucorr_cnt_sel_reg : 4; /**< [ 23: 20](RO/H) Selected correctable counter region.
+                                            0x0 = ADM RX path.
+                                            0x1 = Layer 3 RX path.
+                                            0x2 = Layer 2 RX path.
+                                            0x3 = DMA inbound path (not supported).
+                                            0x4 = AXI bridge inbound request path (not supported).
+                                            0x5 = AXI bridge inbound completion composer path (not supported).
+                                            0x6 = ADM TX path.
+                                            0x7 = Layer 3 TX path.
+                                            0x8 = Layer 2 TX path.
+                                            0x9 = DMA outbound path (not supported).
+                                            0xA = AXI bridge outbound request path (not supported).
+                                            0xB = AXI bridge outbound master completion buffer path (not supported).
+                                            0xC - 0xF = Reserved. */
+        uint32_t ucorr_cnt_sel : 8; /**< [ 31: 24](RO/H) Counter selection. Returns the value set in the CFG113[UCORR_CNT_SEL] register. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg180_s cn; */
+};
+typedef union bdk_pciercx_cfg180 bdk_pciercx_cfg180_t;
+
+/* Config-space address of PCIERC(a)_CFG180. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG180(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG180(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002d0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG180", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG180(a) bdk_pciercx_cfg180_t
+#define bustype_BDK_PCIERCX_CFG180(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG180(a) "PCIERCX_CFG180"
+#define busnum_BDK_PCIERCX_CFG180(a) (a)
+#define arguments_BDK_PCIERCX_CFG180(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg181
+ *
+ * PCIe RC RAS Data Correctable Error Injection Control Register
+ * This register contains the one hundred eighty-second 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg181
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg181_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_24_31 : 8;
+        uint32_t err_inj_loc : 8; /**< [ 23: 16](R/W) Error injection location. Selects where error injection takes place. You
+                                      can cycle this field value from 0 to 255 to access all locations. */
+        uint32_t err_inj_cnt : 8; /**< [ 15: 8](R/W) Error injection count.
+                                      0x0 = errors are injected in every TLP until [ERR_INJ_EN] is cleared.
+                                      0x1 - 0xFF = number of errors injected. */
+        uint32_t reserved_6_7 : 2;
+        uint32_t err_inj_type : 2; /**< [ 5: 4](R/W) Error injection type.
+                                       0x0 = None.
+                                       0x1 = 1-bit.
+                                       0x2 = 2-bit.
+                                       0x3 = Reserved. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t err_inj_en : 1; /**< [ 0: 0](R/W) Error injection global enable. When set, enables the error
+                                     insertion logic. */
+#else /* Word 0 - Little Endian */
+        uint32_t err_inj_en : 1; /**< [ 0: 0](R/W) Error injection global enable. When set, enables the error
+                                     insertion logic. */
+        uint32_t reserved_1_3 : 3;
+        uint32_t err_inj_type : 2; /**< [ 5: 4](R/W) Error injection type.
+                                       0x0 = None.
+                                       0x1 = 1-bit.
+                                       0x2 = 2-bit.
+                                       0x3 = Reserved. */
+        uint32_t reserved_6_7 : 2;
+        uint32_t err_inj_cnt : 8; /**< [ 15: 8](R/W) Error injection count.
+                                      0x0 = errors are injected in every TLP until [ERR_INJ_EN] is cleared.
+                                      0x1 - 0xFF = number of errors injected. */
+        uint32_t err_inj_loc : 8; /**< [ 23: 16](R/W) Error injection location. Selects where error injection takes place. You
+                                      can cycle this field value from 0 to 255 to access all locations. */
+        uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg181_s cn; */
+};
+typedef union bdk_pciercx_cfg181 bdk_pciercx_cfg181_t;
+
+/* Config-space address of PCIERC(a)_CFG181. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG181(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG181(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002d4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG181", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG181(a) bdk_pciercx_cfg181_t
+#define bustype_BDK_PCIERCX_CFG181(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG181(a) "PCIERCX_CFG181"
+#define busnum_BDK_PCIERCX_CFG181(a) (a)
+#define arguments_BDK_PCIERCX_CFG181(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg182
+ *
+ * PCIe RC RAS Data Correctable Error Location Register
+ * This register contains the one hundred eighty-third 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg182
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg182_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t loc_last_corr_err : 8; /**< [ 31: 24](RO) Location/ID of the last corrected error within the region defined by
+                                            [REG_LAST_CORR_ERR]. */
+        uint32_t reg_last_corr_err : 4; /**< [ 23: 20](RO) Region of last corrected error
+                                            0x0 = ADM RX path.
+                                            0x1 = Layer 3 RX path.
+                                            0x2 = Layer 2 RX path.
+                                            0x3 = DMA inbound path (not supported).
+                                            0x4 = AXI bridge inbound request path (not supported).
+                                            0x5 = AXI bridge inbound completion composer path (not supported).
+                                            0x6 = ADM TX path.
+                                            0x7 = Layer 3 TX path.
+                                            0x8 = Layer 2 TX path.
+                                            0x9 = DMA outbound path (not supported).
+                                            0xA = AXI bridge outbound request path (not supported).
+                                            0xB = AXI bridge outbound master completion path (not supported).
+                                            0xC - 0xF = Reserved. */
+        uint32_t reserved_16_19 : 4;
+        uint32_t loc_first_corr_err : 8; /**< [ 15: 8](RO) Location/ID of the first corrected error within the region defined by
+                                             [REG_FIRST_CORR_ERR]. */
+        uint32_t reg_first_corr_err : 4; /**< [ 7: 4](RO) Region of first corrected error
+                                             0x0 = ADM RX path.
+                                             0x1 = Layer 3 RX path.
+                                             0x2 = Layer 2 RX path.
+                                             0x3 = DMA read engine (not supported).
+                                             0x4 = AXI bridge inbound request path (not supported).
+                                             0x5 = AXI bridge inbound completion composer (not supported).
+                                             0x6 = ADM TX path.
+                                             0x7 = Layer 3 TX path.
+                                             0x8 = Layer 2 TX path.
+                                             0x9 = DMA write engine (not supported).
+                                             0xA = AXI bridge outbound request path (not supported).
+                                             0xB = AXI bridge outbound master completion (not supported).
+                                             0xC - 0xF = Reserved. */
+        uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_3 : 4;
+        uint32_t reg_first_corr_err : 4; /**< [ 7: 4](RO) Region of first corrected error
+                                             0x0 = ADM RX path.
+                                             0x1 = Layer 3 RX path.
+                                             0x2 = Layer 2 RX path.
+                                             0x3 = DMA read engine (not supported).
+                                             0x4 = AXI bridge inbound request path (not supported).
+                                             0x5 = AXI bridge inbound completion composer (not supported).
+                                             0x6 = ADM TX path.
+                                             0x7 = Layer 3 TX path.
+                                             0x8 = Layer 2 TX path.
+                                             0x9 = DMA write engine (not supported).
+                                             0xA = AXI bridge outbound request path (not supported).
+                                             0xB = AXI bridge outbound master completion (not supported).
+                                             0xC - 0xF = Reserved. */
+        uint32_t loc_first_corr_err : 8; /**< [ 15: 8](RO) Location/ID of the first corrected error within the region defined by
+                                             [REG_FIRST_CORR_ERR]. */
+        uint32_t reserved_16_19 : 4;
+        uint32_t reg_last_corr_err : 4; /**< [ 23: 20](RO) Region of last corrected error
+                                            0x0 = ADM RX path.
+                                            0x1 = Layer 3 RX path.
+                                            0x2 = Layer 2 RX path.
+                                            0x3 = DMA inbound path (not supported).
+                                            0x4 = AXI bridge inbound request path (not supported).
+                                            0x5 = AXI bridge inbound completion composer path (not supported).
+                                            0x6 = ADM TX path.
+                                            0x7 = Layer 3 TX path.
+                                            0x8 = Layer 2 TX path.
+                                            0x9 = DMA outbound path (not supported).
+                                            0xA = AXI bridge outbound request path (not supported).
+                                            0xB = AXI bridge outbound master completion path (not supported).
+                                            0xC - 0xF = Reserved. */
+        uint32_t loc_last_corr_err : 8; /**< [ 31: 24](RO) Location/ID of the last corrected error within the region defined by
+                                            [REG_LAST_CORR_ERR]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg182_s cn; */
+};
+typedef union bdk_pciercx_cfg182 bdk_pciercx_cfg182_t;
+
+/* Config-space address of PCIERC(a)_CFG182. CN83XX only; stride 0x100000000 per controller. */
+static inline uint64_t BDK_PCIERCX_CFG182(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG182(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+        return 0x200000002d8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG182", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG182(a) bdk_pciercx_cfg182_t
+#define bustype_BDK_PCIERCX_CFG182(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG182(a) "PCIERCX_CFG182"
+#define busnum_BDK_PCIERCX_CFG182(a) (a)
+#define arguments_BDK_PCIERCX_CFG182(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg183
+ *
+ * PCIe RC RAS Data Uncorrectable Error Location Register
+ * This register contains the one hundred eighty-fourth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg183
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg183_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t loc_last_ucorr_err : 8; /**< [ 31: 24](RO) Location/ID of the last uncorrected error within the region defined by
+                                                                 [REG_LAST_UCORR_ERR]. */
+ uint32_t reg_last_ucorr_err : 4; /**< [ 23: 20](RO) Region of last uncorrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_16_19 : 4;
+        uint32_t loc_first_ucorr_err : 8; /**< [ 15: 8](RO) Location/ID of the first uncorrected error within the region defined by
+                                                                 [REG_FIRST_UCORR_ERR]. */
+ uint32_t reg_first_ucorr_err : 4; /**< [ 7: 4](RO) Region of first uncorrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t reg_first_ucorr_err : 4; /**< [ 7: 4](RO) Region of first uncorrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+        uint32_t loc_first_ucorr_err : 8; /**< [ 15: 8](RO) Location/ID of the first uncorrected error within the region defined by
+                                                                 [REG_FIRST_UCORR_ERR]. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t reg_last_ucorr_err : 4; /**< [ 23: 20](RO) Region of last uncorrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+        uint32_t loc_last_ucorr_err : 8; /**< [ 31: 24](RO) Location/ID of the last uncorrected error within the region defined by
+                                                                 [REG_LAST_UCORR_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg183_s cn; */
+};
+typedef union bdk_pciercx_cfg183 bdk_pciercx_cfg183_t;
+
+static inline uint64_t BDK_PCIERCX_CFG183(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG183(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000002dcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG183", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG183(a) bdk_pciercx_cfg183_t
+#define bustype_BDK_PCIERCX_CFG183(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG183(a) "PCIERCX_CFG183"
+#define busnum_BDK_PCIERCX_CFG183(a) (a)
+#define arguments_BDK_PCIERCX_CFG183(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg184
+ *
+ * PCIe RC RAS Data Error Mode Enable Register
+ * This register contains the one hundred eighty-fifth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg184
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg184_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+        uint32_t auto_lnk_dn_en : 1; /**< [ 1: 1](R/W) Set this bit to enable the core to bring the link down when RASDP error mode is entered.
+                                                                 */
+ uint32_t err_mode_en : 1; /**< [ 0: 0](R/W) Set this bit to enable the core to enter RASDP error mode when it detects an uncorrectable error. */
+#else /* Word 0 - Little Endian */
+ uint32_t err_mode_en : 1; /**< [ 0: 0](R/W) Set this bit to enable the core to enter RASDP error mode when it detects an uncorrectable error. */
+        uint32_t auto_lnk_dn_en : 1; /**< [ 1: 1](R/W) Set this bit to enable the core to bring the link down when RASDP error mode is entered.
+                                                                 */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg184_s cn; */
+};
+typedef union bdk_pciercx_cfg184 bdk_pciercx_cfg184_t;
+
+static inline uint64_t BDK_PCIERCX_CFG184(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG184(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000002e0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG184", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG184(a) bdk_pciercx_cfg184_t
+#define bustype_BDK_PCIERCX_CFG184(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG184(a) "PCIERCX_CFG184"
+#define busnum_BDK_PCIERCX_CFG184(a) (a)
+#define arguments_BDK_PCIERCX_CFG184(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg185
+ *
+ * PCIe RC RAS Data Error Mode Clear Register
+ * This register contains the one hundred eighty-sixth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg185
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg185_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t err_mode_clr : 1; /**< [ 0: 0](R/W1C) Set this bit to take the core out of RASDP error mode. The core will then report
+ uncorrectable
+ errors (through AER internal error reporting) and also stop nullifying/discarding TLPs. */
+#else /* Word 0 - Little Endian */
+ uint32_t err_mode_clr : 1; /**< [ 0: 0](R/W1C) Set this bit to take the core out of RASDP error mode. The core will then report
+ uncorrectable
+ errors (through AER internal error reporting) and also stop nullifying/discarding TLPs. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg185_s cn; */
+};
+typedef union bdk_pciercx_cfg185 bdk_pciercx_cfg185_t;
+
+static inline uint64_t BDK_PCIERCX_CFG185(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG185(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000002e4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG185", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG185(a) bdk_pciercx_cfg185_t
+#define bustype_BDK_PCIERCX_CFG185(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG185(a) "PCIERCX_CFG185"
+#define busnum_BDK_PCIERCX_CFG185(a) (a)
+#define arguments_BDK_PCIERCX_CFG185(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg186
+ *
+ * PCIe RC RAS RAM Address Corrected Error Register
+ * This register contains the one hundred eighty-seventh 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg186
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg186_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ram_idx_corr_err : 4; /**< [ 31: 28](RO) RAM index where a corrected error has been detected. */
+ uint32_t reserved_27 : 1;
+ uint32_t ram_addr_corr_err : 27; /**< [ 26: 0](RO) RAM address where a corrected error has been detected. */
+#else /* Word 0 - Little Endian */
+ uint32_t ram_addr_corr_err : 27; /**< [ 26: 0](RO) RAM address where a corrected error has been detected. */
+ uint32_t reserved_27 : 1;
+ uint32_t ram_idx_corr_err : 4; /**< [ 31: 28](RO) RAM index where a corrected error has been detected. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg186_s cn; */
+};
+typedef union bdk_pciercx_cfg186 bdk_pciercx_cfg186_t;
+
+static inline uint64_t BDK_PCIERCX_CFG186(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG186(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000002e8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG186", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG186(a) bdk_pciercx_cfg186_t
+#define bustype_BDK_PCIERCX_CFG186(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG186(a) "PCIERCX_CFG186"
+#define busnum_BDK_PCIERCX_CFG186(a) (a)
+#define arguments_BDK_PCIERCX_CFG186(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg187
+ *
+ * PCIe RC RAS RAM Address Uncorrected Error Register
+ * This register contains the one hundred eighty-eighth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg187
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg187_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ram_idx_ucorr_err : 4; /**< [ 31: 28](RO) RAM index where an uncorrected error has been detected. */
+        uint32_t reserved_27 : 1;
+        uint32_t ram_addr_ucorr_err : 27; /**< [ 26: 0](RO) RAM address where an uncorrected error has been detected. */
+#else /* Word 0 - Little Endian */
+        uint32_t ram_addr_ucorr_err : 27; /**< [ 26: 0](RO) RAM address where an uncorrected error has been detected. */
+        uint32_t reserved_27 : 1;
+        uint32_t ram_idx_ucorr_err : 4; /**< [ 31: 28](RO) RAM index where an uncorrected error has been detected. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg187_s cn; */
+};
+typedef union bdk_pciercx_cfg187 bdk_pciercx_cfg187_t;
+
+static inline uint64_t BDK_PCIERCX_CFG187(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG187(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x200000002ecll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG187", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG187(a) bdk_pciercx_cfg187_t
+#define bustype_BDK_PCIERCX_CFG187(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG187(a) "PCIERCX_CFG187"
+#define busnum_BDK_PCIERCX_CFG187(a) (a)
+#define arguments_BDK_PCIERCX_CFG187(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg192
+ *
+ * PCIe RC PCI Express ACS Extended Capability Header Register
+ * This register contains the one hundred ninety-third 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg192
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg192_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg192_s cn; */
+};
+typedef union bdk_pciercx_cfg192 bdk_pciercx_cfg192_t;
+
+static inline uint64_t BDK_PCIERCX_CFG192(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG192(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000300ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG192", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG192(a) bdk_pciercx_cfg192_t
+#define bustype_BDK_PCIERCX_CFG192(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG192(a) "PCIERCX_CFG192"
+#define busnum_BDK_PCIERCX_CFG192(a) (a)
+#define arguments_BDK_PCIERCX_CFG192(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg193
+ *
+ * PCIe RC ACS Capability and Control Register
+ * This register contains the one hundred ninety-fourth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg193
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg193_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t dte : 1; /**< [ 22: 22](R/W) ACS direct translated P2P enable. */
+ uint32_t ece : 1; /**< [ 21: 21](R/W) ACS P2P egress control enable. */
+ uint32_t ufe : 1; /**< [ 20: 20](R/W) ACS upstream forwarding enable. */
+ uint32_t cre : 1; /**< [ 19: 19](R/W) ACS P2P completion redirect enable. */
+ uint32_t rre : 1; /**< [ 18: 18](R/W) ACS P2P request redirect enable. */
+ uint32_t tbe : 1; /**< [ 17: 17](R/W) ACS translation blocking enable. */
+ uint32_t sve : 1; /**< [ 16: 16](R/W) ACS source validation enable. */
+ uint32_t ecvs : 8; /**< [ 15: 8](RO/WRSL) Egress control vector size.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t dt : 1; /**< [ 6: 6](RO/WRSL) ACS direct translated P2P.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t ec : 1; /**< [ 5: 5](RO/WRSL) ACS P2P egress control.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t uf : 1; /**< [ 4: 4](RO/WRSL) ACS upstream forwarding.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cr : 1; /**< [ 3: 3](RO/WRSL) ACS P2P completion redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t rr : 1; /**< [ 2: 2](RO/WRSL) ACS P2P request redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t tb : 1; /**< [ 1: 1](RO/WRSL) ACS translation blocking.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t sv : 1; /**< [ 0: 0](RO/WRSL) ACS source validation.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sv : 1; /**< [ 0: 0](RO/WRSL) ACS source validation.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t tb : 1; /**< [ 1: 1](RO/WRSL) ACS translation blocking.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t rr : 1; /**< [ 2: 2](RO/WRSL) ACS P2P request redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cr : 1; /**< [ 3: 3](RO/WRSL) ACS P2P completion redirect.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t uf : 1; /**< [ 4: 4](RO/WRSL) ACS upstream forwarding.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t ec : 1; /**< [ 5: 5](RO/WRSL) ACS P2P egress control.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t dt : 1; /**< [ 6: 6](RO/WRSL) ACS direct translated P2P.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_7 : 1;
+ uint32_t ecvs : 8; /**< [ 15: 8](RO/WRSL) Egress control vector size.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t sve : 1; /**< [ 16: 16](R/W) ACS source validation enable. */
+ uint32_t tbe : 1; /**< [ 17: 17](R/W) ACS translation blocking enable. */
+ uint32_t rre : 1; /**< [ 18: 18](R/W) ACS P2P request redirect enable. */
+ uint32_t cre : 1; /**< [ 19: 19](R/W) ACS P2P completion redirect enable. */
+ uint32_t ufe : 1; /**< [ 20: 20](R/W) ACS upstream forwarding enable. */
+ uint32_t ece : 1; /**< [ 21: 21](R/W) ACS P2P egress control enable. */
+ uint32_t dte : 1; /**< [ 22: 22](R/W) ACS direct translated P2P enable. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg193_s cn; */
+};
+typedef union bdk_pciercx_cfg193 bdk_pciercx_cfg193_t;
+
+static inline uint64_t BDK_PCIERCX_CFG193(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG193(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000304ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG193", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG193(a) bdk_pciercx_cfg193_t
+#define bustype_BDK_PCIERCX_CFG193(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG193(a) "PCIERCX_CFG193"
+#define busnum_BDK_PCIERCX_CFG193(a) (a)
+#define arguments_BDK_PCIERCX_CFG193(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg194
+ *
+ * PCIe RC Egress Control Vector Register
+ * This register contains the one hundred ninety-fifth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg194
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg194_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ecv : 32; /**< [ 31: 0](R/W) Egress control vector. */
+#else /* Word 0 - Little Endian */
+ uint32_t ecv : 32; /**< [ 31: 0](R/W) Egress control vector. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg194_s cn; */
+};
+typedef union bdk_pciercx_cfg194 bdk_pciercx_cfg194_t;
+
+static inline uint64_t BDK_PCIERCX_CFG194(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG194(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000308ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_CFG194", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG194(a) bdk_pciercx_cfg194_t
+#define bustype_BDK_PCIERCX_CFG194(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG194(a) "PCIERCX_CFG194"
+#define busnum_BDK_PCIERCX_CFG194(a) (a)
+#define arguments_BDK_PCIERCX_CFG194(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg448
+ *
+ * PCIe RC Ack Latency Timer/Replay Timer Register
+ * This register contains the four hundred forty-ninth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg448
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg448_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rtl : 16; /**< [ 31: 16](R/W/H) Replay time limit. The replay timer expires when it reaches this limit. The PCI Express
+ bus initiates a replay upon reception of a NAK or when the replay timer expires. This
+ value is set correctly by the hardware out of reset or when the negotiated link width or
+ payload size changes. If the user changes this value
+ they should refer to the PCIe specification for the correct value. */
+ uint32_t rtltl : 16; /**< [ 15: 0](R/W/H) Round trip latency time limit. The ACK/NAK latency timer expires when it reaches this
+ limit. This value is set correctly by the hardware out of reset or when the negotiated
+ link width or payload size changes. If the user changes this value
+ they should refer to the PCIe specification for the correct value. */
+#else /* Word 0 - Little Endian */
+ uint32_t rtltl : 16; /**< [ 15: 0](R/W/H) Round trip latency time limit. The ACK/NAK latency timer expires when it reaches this
+ limit. This value is set correctly by the hardware out of reset or when the negotiated
+ link width or payload size changes. If the user changes this value
+ they should refer to the PCIe specification for the correct value. */
+ uint32_t rtl : 16; /**< [ 31: 16](R/W/H) Replay time limit. The replay timer expires when it reaches this limit. The PCI Express
+ bus initiates a replay upon reception of a NAK or when the replay timer expires. This
+ value is set correctly by the hardware out of reset or when the negotiated link width or
+ payload size changes. If the user changes this value
+ they should refer to the PCIe specification for the correct value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg448_s cn; */
+};
+typedef union bdk_pciercx_cfg448 bdk_pciercx_cfg448_t;
+
+static inline uint64_t BDK_PCIERCX_CFG448(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG448(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000700ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000700ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000700ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG448", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG448(a) bdk_pciercx_cfg448_t
+#define bustype_BDK_PCIERCX_CFG448(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG448(a) "PCIERCX_CFG448"
+#define busnum_BDK_PCIERCX_CFG448(a) (a)
+#define arguments_BDK_PCIERCX_CFG448(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg449
+ *
+ * PCIe RC Other Message Register
+ * This register contains the four hundred fiftieth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg449
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg449_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t omr : 32; /**< [ 31: 0](R/W) Other message register. This register can be used for either of the following purposes:
+ * To send a specific PCI Express message, the application writes the payload of the
+ message into this register, then sets bit 0 of the port link control register to send the
+ message.
+ * To store a corruption pattern for corrupting the LCRC on all TLPs, the application
+ places a 32-bit corruption pattern into this register and enables this function by setting
+ bit 25 of the port link control register. When enabled, the transmit LCRC result is XORed
+ with this pattern before inserting it into the packet. */
+#else /* Word 0 - Little Endian */
+ uint32_t omr : 32; /**< [ 31: 0](R/W) Other message register. This register can be used for either of the following purposes:
+ * To send a specific PCI Express message, the application writes the payload of the
+ message into this register, then sets bit 0 of the port link control register to send the
+ message.
+ * To store a corruption pattern for corrupting the LCRC on all TLPs, the application
+ places a 32-bit corruption pattern into this register and enables this function by setting
+ bit 25 of the port link control register. When enabled, the transmit LCRC result is XORed
+ with this pattern before inserting it into the packet. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg449_s cn; */
+};
+typedef union bdk_pciercx_cfg449 bdk_pciercx_cfg449_t;
+
+static inline uint64_t BDK_PCIERCX_CFG449(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG449(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000704ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000704ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000704ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG449", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG449(a) bdk_pciercx_cfg449_t
+#define bustype_BDK_PCIERCX_CFG449(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG449(a) "PCIERCX_CFG449"
+#define busnum_BDK_PCIERCX_CFG449(a) (a)
+#define arguments_BDK_PCIERCX_CFG449(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg450
+ *
+ * PCIe RC Port Force Link Register
+ * This register contains the four hundred fifty-first 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg450
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg450_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lpec : 8; /**< [ 31: 24](R/W) Low power entrance count. The power management state waits this many clock cycles for the
+ associated completion of a CfgWr to PCIERC()_CFG017 register, power state (PS) field
+ register
+ to go low-power. This register is intended for applications that do not let the PCI
+ Express bus handle a completion for configuration request to the power management control
+ and status (PCIRC()_CFG017) register. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t link_state : 6; /**< [ 21: 16](R/W) Link state. The link state that the PCI Express bus is forced to when bit 15 (force link)
+ is set. State encoding:
+ 0x0 = DETECT_QUIET.
+ 0x1 = DETECT_ACT.
+ 0x2 = POLL_ACTIVE.
+ 0x3 = POLL_COMPLIANCE.
+ 0x4 = POLL_CONFIG.
+ 0x5 = PRE_DETECT_QUIET.
+ 0x6 = DETECT_WAIT.
+ 0x7 = CFG_LINKWD_START.
+ 0x8 = CFG_LINKWD_ACEPT.
+ 0x9 = CFG_LANENUM_WAIT.
+ 0xA = CFG_LANENUM_ACEPT.
+ 0xB = CFG_COMPLETE.
+ 0xC = CFG_IDLE.
+ 0xD = RCVRY_LOCK.
+ 0xE = RCVRY_SPEED.
+ 0xF = RCVRY_RCVRCFG.
+ 0x10 = RCVRY_IDLE.
+ 0x11 = L0.
+ 0x12 = L0S.
+ 0x13 = L123_SEND_EIDLE.
+ 0x14 = L1_IDLE.
+ 0x15 = L2_IDLE.
+ 0x16 = L2_WAKE.
+ 0x17 = DISABLED_ENTRY.
+ 0x18 = DISABLED_IDLE.
+ 0x19 = DISABLED.
+ 0x1A = LPBK_ENTRY.
+ 0x1B = LPBK_ACTIVE.
+ 0x1C = LPBK_EXIT.
+ 0x1D = LPBK_EXIT_TIMEOUT.
+ 0x1E = HOT_RESET_ENTRY.
+ 0x1F = HOT_RESET. */
+ uint32_t force_link : 1; /**< [ 15: 15](WO/H) Force link. Forces the link to the state specified by [LINK_STATE]. The force link
+ pulse triggers link renegotiation.
+ As the force link is a pulse, writing a 1 to it does trigger the forced link state event,
+ even though reading it always returns a 0. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t forced_ltssm : 4; /**< [ 11: 8](R/W) Forced link command. */
+ uint32_t link_num : 8; /**< [ 7: 0](R/W) Link number. */
+#else /* Word 0 - Little Endian */
+ uint32_t link_num : 8; /**< [ 7: 0](R/W) Link number. */
+ uint32_t forced_ltssm : 4; /**< [ 11: 8](R/W) Forced link command. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t force_link : 1; /**< [ 15: 15](WO/H) Force link. Forces the link to the state specified by [LINK_STATE]. The force link
+ pulse triggers link renegotiation.
+ As the force link is a pulse, writing a 1 to it does trigger the forced link state event,
+ even though reading it always returns a 0. */
+ uint32_t link_state : 6; /**< [ 21: 16](R/W) Link state. The link state that the PCI Express bus is forced to when bit 15 (force link)
+ is set. State encoding:
+ 0x0 = DETECT_QUIET.
+ 0x1 = DETECT_ACT.
+ 0x2 = POLL_ACTIVE.
+ 0x3 = POLL_COMPLIANCE.
+ 0x4 = POLL_CONFIG.
+ 0x5 = PRE_DETECT_QUIET.
+ 0x6 = DETECT_WAIT.
+ 0x7 = CFG_LINKWD_START.
+ 0x8 = CFG_LINKWD_ACEPT.
+ 0x9 = CFG_LANENUM_WAIT.
+ 0xA = CFG_LANENUM_ACEPT.
+ 0xB = CFG_COMPLETE.
+ 0xC = CFG_IDLE.
+ 0xD = RCVRY_LOCK.
+ 0xE = RCVRY_SPEED.
+ 0xF = RCVRY_RCVRCFG.
+ 0x10 = RCVRY_IDLE.
+ 0x11 = L0.
+ 0x12 = L0S.
+ 0x13 = L123_SEND_EIDLE.
+ 0x14 = L1_IDLE.
+ 0x15 = L2_IDLE.
+ 0x16 = L2_WAKE.
+ 0x17 = DISABLED_ENTRY.
+ 0x18 = DISABLED_IDLE.
+ 0x19 = DISABLED.
+ 0x1A = LPBK_ENTRY.
+ 0x1B = LPBK_ACTIVE.
+ 0x1C = LPBK_EXIT.
+ 0x1D = LPBK_EXIT_TIMEOUT.
+ 0x1E = HOT_RESET_ENTRY.
+ 0x1F = HOT_RESET. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t lpec : 8; /**< [ 31: 24](R/W) Low power entrance count. The power management state waits this many clock cycles for the
+ associated completion of a CfgWr to PCIERC()_CFG017 register, power state (PS) field
+ register
+ to go low-power. This register is intended for applications that do not let the PCI
+ Express bus handle a completion for configuration request to the power management control
+ and status (PCIRC()_CFG017) register. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg450_s cn81xx; */
+ struct bdk_pciercx_cfg450_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lpec : 8; /**< [ 31: 24](R/W) Low power entrance count. The power management state waits this many clock cycles for the
+ associated completion of a CfgWr to PCIEEP()_CFG017 register, power state (PS) field
+ register
+ to go low-power. This register is intended for applications that do not let the PCI
+ Express bus handle a completion for configuration request to the power management control
+ and status (PCIEP()_CFG017) register. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t link_state : 6; /**< [ 21: 16](R/W) Link state. The link state that the PCI Express bus is forced to when bit 15 (force link)
+ is set. State encoding:
+ 0x0 = DETECT_QUIET.
+ 0x1 = DETECT_ACT.
+ 0x2 = POLL_ACTIVE.
+ 0x3 = POLL_COMPLIANCE.
+ 0x4 = POLL_CONFIG.
+ 0x5 = PRE_DETECT_QUIET.
+ 0x6 = DETECT_WAIT.
+ 0x7 = CFG_LINKWD_START.
+ 0x8 = CFG_LINKWD_ACEPT.
+ 0x9 = CFG_LANENUM_WAIT.
+ 0xA = CFG_LANENUM_ACEPT.
+ 0xB = CFG_COMPLETE.
+ 0xC = CFG_IDLE.
+ 0xD = RCVRY_LOCK.
+ 0xE = RCVRY_SPEED.
+ 0xF = RCVRY_RCVRCFG.
+ 0x10 = RCVRY_IDLE.
+ 0x11 = L0.
+ 0x12 = L0S.
+ 0x13 = L123_SEND_EIDLE.
+ 0x14 = L1_IDLE.
+ 0x15 = L2_IDLE.
+ 0x16 = L2_WAKE.
+ 0x17 = DISABLED_ENTRY.
+ 0x18 = DISABLED_IDLE.
+ 0x19 = DISABLED.
+ 0x1A = LPBK_ENTRY.
+ 0x1B = LPBK_ACTIVE.
+ 0x1C = LPBK_EXIT.
+ 0x1D = LPBK_EXIT_TIMEOUT.
+ 0x1E = HOT_RESET_ENTRY.
+ 0x1F = HOT_RESET. */
+ uint32_t force_link : 1; /**< [ 15: 15](WO/H) Force link. Forces the link to the state specified by [LINK_STATE]. The force link
+ pulse triggers link renegotiation.
+ As the force link is a pulse, writing a 1 to it does trigger the forced link state event,
+ even though reading it always returns a 0. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t forced_ltssm : 4; /**< [ 11: 8](R/W) Forced link command. */
+ uint32_t link_num : 8; /**< [ 7: 0](R/W) Link number. */
+#else /* Word 0 - Little Endian */
+ uint32_t link_num : 8; /**< [ 7: 0](R/W) Link number. */
+ uint32_t forced_ltssm : 4; /**< [ 11: 8](R/W) Forced link command. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t force_link : 1; /**< [ 15: 15](WO/H) Force link. Forces the link to the state specified by [LINK_STATE]. The force link
+ pulse triggers link renegotiation.
+ As the force link is a pulse, writing a 1 to it does trigger the forced link state event,
+ even though reading it always returns a 0. */
+ uint32_t link_state : 6; /**< [ 21: 16](R/W) Link state. The link state that the PCI Express bus is forced to when bit 15 (force link)
+ is set. State encoding:
+ 0x0 = DETECT_QUIET.
+ 0x1 = DETECT_ACT.
+ 0x2 = POLL_ACTIVE.
+ 0x3 = POLL_COMPLIANCE.
+ 0x4 = POLL_CONFIG.
+ 0x5 = PRE_DETECT_QUIET.
+ 0x6 = DETECT_WAIT.
+ 0x7 = CFG_LINKWD_START.
+ 0x8 = CFG_LINKWD_ACEPT.
+ 0x9 = CFG_LANENUM_WAIT.
+ 0xA = CFG_LANENUM_ACEPT.
+ 0xB = CFG_COMPLETE.
+ 0xC = CFG_IDLE.
+ 0xD = RCVRY_LOCK.
+ 0xE = RCVRY_SPEED.
+ 0xF = RCVRY_RCVRCFG.
+ 0x10 = RCVRY_IDLE.
+ 0x11 = L0.
+ 0x12 = L0S.
+ 0x13 = L123_SEND_EIDLE.
+ 0x14 = L1_IDLE.
+ 0x15 = L2_IDLE.
+ 0x16 = L2_WAKE.
+ 0x17 = DISABLED_ENTRY.
+ 0x18 = DISABLED_IDLE.
+ 0x19 = DISABLED.
+ 0x1A = LPBK_ENTRY.
+ 0x1B = LPBK_ACTIVE.
+ 0x1C = LPBK_EXIT.
+ 0x1D = LPBK_EXIT_TIMEOUT.
+ 0x1E = HOT_RESET_ENTRY.
+ 0x1F = HOT_RESET. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t lpec : 8; /**< [ 31: 24](R/W) Low power entrance count. The power management state waits this many clock cycles for the
+ associated completion of a CfgWr to PCIEEP()_CFG017 register, power state (PS) field
+ register
+ to go low-power. This register is intended for applications that do not let the PCI
+ Express bus handle a completion for configuration request to the power management control
+ and status (PCIEP()_CFG017) register. */
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pciercx_cfg450_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t link_state : 6; /**< [ 21: 16](R/W) Link state. The link state that the PCI Express bus is forced to when bit 15 (force link)
+ is set. State encoding:
+ 0x0 = DETECT_QUIET.
+ 0x1 = DETECT_ACT.
+ 0x2 = POLL_ACTIVE.
+ 0x3 = POLL_COMPLIANCE.
+ 0x4 = POLL_CONFIG.
+ 0x5 = PRE_DETECT_QUIET.
+ 0x6 = DETECT_WAIT.
+ 0x7 = CFG_LINKWD_START.
+ 0x8 = CFG_LINKWD_ACEPT.
+ 0x9 = CFG_LANENUM_WAIT.
+ 0xA = CFG_LANENUM_ACEPT.
+ 0xB = CFG_COMPLETE.
+ 0xC = CFG_IDLE.
+ 0xD = RCVRY_LOCK.
+ 0xE = RCVRY_SPEED.
+ 0xF = RCVRY_RCVRCFG.
+ 0x10 = RCVRY_IDLE.
+ 0x11 = L0.
+ 0x12 = L0S.
+ 0x13 = L123_SEND_EIDLE.
+ 0x14 = L1_IDLE.
+ 0x15 = L2_IDLE.
+ 0x16 = L2_WAKE.
+ 0x17 = DISABLED_ENTRY.
+ 0x18 = DISABLED_IDLE.
+ 0x19 = DISABLED.
+ 0x1A = LPBK_ENTRY.
+ 0x1B = LPBK_ACTIVE.
+ 0x1C = LPBK_EXIT.
+ 0x1D = LPBK_EXIT_TIMEOUT.
+ 0x1E = HOT_RESET_ENTRY.
+ 0x1F = HOT_RESET. */
+ uint32_t force_link : 1; /**< [ 15: 15](WO/H) Force link. Forces the link to the state specified by [LINK_STATE]. The force link
+ pulse triggers link renegotiation.
+ As the force link is a pulse, writing a 1 to it does trigger the forced link state event,
+ even though reading it always returns a 0. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t forced_ltssm : 4; /**< [ 11: 8](R/W) Forced link command. */
+ uint32_t link_num : 8; /**< [ 7: 0](R/W) Link number. */
+#else /* Word 0 - Little Endian */
+ uint32_t link_num : 8; /**< [ 7: 0](R/W) Link number. */
+ uint32_t forced_ltssm : 4; /**< [ 11: 8](R/W) Forced link command. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t force_link : 1; /**< [ 15: 15](WO/H) Force link. Forces the link to the state specified by [LINK_STATE]. The force link
+ pulse triggers link renegotiation.
+ As the force link is a pulse, writing a 1 to it does trigger the forced link state event,
+ even though reading it always returns a 0. */
+ uint32_t link_state : 6; /**< [ 21: 16](R/W) Link state. The link state that the PCI Express bus is forced to when bit 15 (force link)
+ is set. State encoding:
+ 0x0 = DETECT_QUIET.
+ 0x1 = DETECT_ACT.
+ 0x2 = POLL_ACTIVE.
+ 0x3 = POLL_COMPLIANCE.
+ 0x4 = POLL_CONFIG.
+ 0x5 = PRE_DETECT_QUIET.
+ 0x6 = DETECT_WAIT.
+ 0x7 = CFG_LINKWD_START.
+ 0x8 = CFG_LINKWD_ACEPT.
+ 0x9 = CFG_LANENUM_WAIT.
+ 0xA = CFG_LANENUM_ACEPT.
+ 0xB = CFG_COMPLETE.
+ 0xC = CFG_IDLE.
+ 0xD = RCVRY_LOCK.
+ 0xE = RCVRY_SPEED.
+ 0xF = RCVRY_RCVRCFG.
+ 0x10 = RCVRY_IDLE.
+ 0x11 = L0.
+ 0x12 = L0S.
+ 0x13 = L123_SEND_EIDLE.
+ 0x14 = L1_IDLE.
+ 0x15 = L2_IDLE.
+ 0x16 = L2_WAKE.
+ 0x17 = DISABLED_ENTRY.
+ 0x18 = DISABLED_IDLE.
+ 0x19 = DISABLED.
+ 0x1A = LPBK_ENTRY.
+ 0x1B = LPBK_ACTIVE.
+ 0x1C = LPBK_EXIT.
+ 0x1D = LPBK_EXIT_TIMEOUT.
+ 0x1E = HOT_RESET_ENTRY.
+ 0x1F = HOT_RESET. */
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg450 bdk_pciercx_cfg450_t;
+
+/* Compute the config-space address of PCIERC(a)_CFG450 for the running model.
+   Calls __bdk_csr_fatal() when the index is out of range for every model. */
+static inline uint64_t BDK_PCIERCX_CFG450(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG450(unsigned long a)
+{
+    const uint64_t base = 0x20000000708ll;  /* CFG450 offset in RC config space */
+    const uint64_t step = 0x100000000ll;    /* per-port address stride */
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + step * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG450", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG450(a) bdk_pciercx_cfg450_t
+#define bustype_BDK_PCIERCX_CFG450(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG450(a) "PCIERCX_CFG450"
+#define busnum_BDK_PCIERCX_CFG450(a) (a)
+#define arguments_BDK_PCIERCX_CFG450(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg451
+ *
+ * PCIe RC Ack Frequency Register
+ * This register contains the four hundred fifty-second 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg451
+{
+    uint32_t u;
+    /* NOTE(review): the two preprocessor arms declare the same fields in
+     * mirrored order so the bitfield packing matches the hardware word on
+     * either host byte order; only one arm is ever compiled. */
+    struct bdk_pciercx_cfg451_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t easpml1 : 1; /**< [ 30: 30](R/W/H) Enter ASPM L1 without receive in L0s. Allow core to enter ASPM L1 even when link partner
+                                                                 did not go to L0s (receive is not in L0s). When not set, core goes to ASPM L1 only after
+                                                                 idle period, during which both receive and transmit are in L0s. */
+        uint32_t l1el : 3; /**< [ 29: 27](R/W) L1 entrance latency. Values correspond to:
+                                                                 0x0 = 1 ms.
+                                                                 0x1 = 2 ms.
+                                                                 0x2 = 4 ms.
+                                                                 0x3 = 8 ms.
+                                                                 0x4 = 16 ms.
+                                                                 0x5 = 32 ms.
+                                                                 0x6 or 0x7 = 64 ms. */
+        uint32_t l0el : 3; /**< [ 26: 24](R/W) L0s entrance latency. Values correspond to:
+                                                                 0x0 = 1 ms.
+                                                                 0x1 = 2 ms.
+                                                                 0x2 = 3 ms.
+                                                                 0x3 = 4 ms.
+                                                                 0x4 = 5 ms.
+                                                                 0x5 = 6 ms.
+                                                                 0x6 or 0x7 = 7 ms. */
+        uint32_t n_fts_cc : 8; /**< [ 23: 16](RO) N_FTS when common clock is used.
+                                                                 The number of fast training sequence (FTS) ordered sets to be transmitted when
+                                                                 transitioning from L0s to L0. The maximum number of FTS ordered sets that a component can
+                                                                 request is 255.
+                                                                 A value of zero is not supported; a value of zero can cause the LTSSM to go into the
+                                                                 recovery state when exiting from L0s. */
+        uint32_t n_fts : 8; /**< [ 15: 8](R/W) N_FTS. The number of fast training sequence (FTS) ordered sets to be transmitted when
+                                                                 transitioning from L0s to L0. The maximum number of FTS ordered sets that a component can
+                                                                 request is 255.
+                                                                 A value of zero is not supported; a value of zero can cause the LTSSM to go into the
+                                                                 recovery state when exiting from L0s. */
+        uint32_t ack_freq : 8; /**< [ 7: 0](R/W) ACK frequency. The number of pending ACKs specified here (up to 255) before sending an ACK. */
+#else /* Word 0 - Little Endian */
+        uint32_t ack_freq : 8; /**< [ 7: 0](R/W) ACK frequency. The number of pending ACKs specified here (up to 255) before sending an ACK. */
+        uint32_t n_fts : 8; /**< [ 15: 8](R/W) N_FTS. The number of fast training sequence (FTS) ordered sets to be transmitted when
+                                                                 transitioning from L0s to L0. The maximum number of FTS ordered sets that a component can
+                                                                 request is 255.
+                                                                 A value of zero is not supported; a value of zero can cause the LTSSM to go into the
+                                                                 recovery state when exiting from L0s. */
+        uint32_t n_fts_cc : 8; /**< [ 23: 16](RO) N_FTS when common clock is used.
+                                                                 The number of fast training sequence (FTS) ordered sets to be transmitted when
+                                                                 transitioning from L0s to L0. The maximum number of FTS ordered sets that a component can
+                                                                 request is 255.
+                                                                 A value of zero is not supported; a value of zero can cause the LTSSM to go into the
+                                                                 recovery state when exiting from L0s. */
+        uint32_t l0el : 3; /**< [ 26: 24](R/W) L0s entrance latency. Values correspond to:
+                                                                 0x0 = 1 ms.
+                                                                 0x1 = 2 ms.
+                                                                 0x2 = 3 ms.
+                                                                 0x3 = 4 ms.
+                                                                 0x4 = 5 ms.
+                                                                 0x5 = 6 ms.
+                                                                 0x6 or 0x7 = 7 ms. */
+        uint32_t l1el : 3; /**< [ 29: 27](R/W) L1 entrance latency. Values correspond to:
+                                                                 0x0 = 1 ms.
+                                                                 0x1 = 2 ms.
+                                                                 0x2 = 4 ms.
+                                                                 0x3 = 8 ms.
+                                                                 0x4 = 16 ms.
+                                                                 0x5 = 32 ms.
+                                                                 0x6 or 0x7 = 64 ms. */
+        uint32_t easpml1 : 1; /**< [ 30: 30](R/W/H) Enter ASPM L1 without receive in L0s. Allow core to enter ASPM L1 even when link partner
+                                                                 did not go to L0s (receive is not in L0s). When not set, core goes to ASPM L1 only after
+                                                                 idle period, during which both receive and transmit are in L0s. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg451_s cn; */
+};
+typedef union bdk_pciercx_cfg451 bdk_pciercx_cfg451_t;
+
+/* Compute the config-space address of PCIERC(a)_CFG451 for the running model.
+   Calls __bdk_csr_fatal() when the index is out of range for every model. */
+static inline uint64_t BDK_PCIERCX_CFG451(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG451(unsigned long a)
+{
+    const uint64_t base = 0x2000000070cll;  /* CFG451 offset in RC config space */
+    const uint64_t step = 0x100000000ll;    /* per-port address stride */
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + step * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG451", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG451(a) bdk_pciercx_cfg451_t
+#define bustype_BDK_PCIERCX_CFG451(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG451(a) "PCIERCX_CFG451"
+#define busnum_BDK_PCIERCX_CFG451(a) (a)
+#define arguments_BDK_PCIERCX_CFG451(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg452
+ *
+ * PCIe RC Port Link Control Register
+ * This register contains the four hundred fifty-third 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg452
+{
+    uint32_t u;
+    /* NOTE(review): model variants below differ only in which bits are
+     * implemented: cn81xx/cn88xx keep bits 22..27 reserved where the
+     * common layout (s) and cn83xx implement CLE/BEACON_EN/CLCRC_EN/
+     * EX_SYNCH/XLR_EN, and the LME documentation differs on x8 support.
+     * Each #if arm mirrors the other so bitfield packing matches the
+     * hardware word on either host byte order. */
+    struct bdk_pciercx_cfg452_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31 : 4;
+        uint32_t xlr_en : 1; /**< [ 27: 27](R/W) Transmit lane reversible enable. Internally reserved field, do not set. */
+        uint32_t ex_synch : 1; /**< [ 26: 26](R/W) Extended synch. Internally reserved field, do not set. */
+        uint32_t clcrc_en : 1; /**< [ 25: 25](R/W) Corrupt LCRC enable. Internally reserved field, do not set. */
+        uint32_t beacon_en : 1; /**< [ 24: 24](R/W) Beacon enable. Internally reserved field, do not set. */
+        uint32_t cle : 2; /**< [ 23: 22](RAZ) Reserved. */
+        uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+                                                                 0x1 = x1.
+                                                                 0x3 = x2.
+                                                                 0x7 = x4.
+                                                                 0xF = x8 (not supported).
+                                                                 0x1F = x16 (not supported).
+                                                                 0x3F = x32 (not supported).
+
+                                                                 This field indicates the maximum number of lanes supported by the PCIe port. The value can
+                                                                 be set less than 0xF to limit the number of lanes the PCIe will attempt to use. The
+                                                                 programming of this field needs to be done by software before enabling the link. See also
+                                                                 PCIERC()_CFG031[MLW].
+                                                                 The value of this field does not indicate the number of lanes in use by the PCIe. This
+                                                                 field sets the maximum number of lanes in the PCIe core that could be used. As per the
+                                                                 PCIe specification, the PCIe core can negotiate a smaller link width, so all of x4,
+                                                                 x2, and x1 are supported when
+                                                                 LME = 0x7, for example. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+        uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes. */
+        uint32_t ldis : 1; /**< [ 6: 6](R/W) Link disable. Internally reserved field, do not set. */
+        uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+                                                                 does not transmit InitFC DLLPs and does not establish a link. */
+        uint32_t reserved_4 : 1;
+        uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+                                                                 port only). */
+        uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+                                                                 sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+                                                                 loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+                                                                 mode, take the link through a reset sequence. */
+        uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+        uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a 1 to this bit, the PCI Express bus transmits
+                                                                 the message contained in the other message register. */
+#else /* Word 0 - Little Endian */
+        uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a 1 to this bit, the PCI Express bus transmits
+                                                                 the message contained in the other message register. */
+        uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+        uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+                                                                 sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+                                                                 loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+                                                                 mode, take the link through a reset sequence. */
+        uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+                                                                 port only). */
+        uint32_t reserved_4 : 1;
+        uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+                                                                 does not transmit InitFC DLLPs and does not establish a link. */
+        uint32_t ldis : 1; /**< [ 6: 6](R/W) Link disable. Internally reserved field, do not set. */
+        uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes. */
+        uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+                                                                 0x1 = x1.
+                                                                 0x3 = x2.
+                                                                 0x7 = x4.
+                                                                 0xF = x8 (not supported).
+                                                                 0x1F = x16 (not supported).
+                                                                 0x3F = x32 (not supported).
+
+                                                                 This field indicates the maximum number of lanes supported by the PCIe port. The value can
+                                                                 be set less than 0xF to limit the number of lanes the PCIe will attempt to use. The
+                                                                 programming of this field needs to be done by software before enabling the link. See also
+                                                                 PCIERC()_CFG031[MLW].
+                                                                 The value of this field does not indicate the number of lanes in use by the PCIe. This
+                                                                 field sets the maximum number of lanes in the PCIe core that could be used. As per the
+                                                                 PCIe specification, the PCIe core can negotiate a smaller link width, so all of x4,
+                                                                 x2, and x1 are supported when
+                                                                 LME = 0x7, for example. */
+        uint32_t cle : 2; /**< [ 23: 22](RAZ) Reserved. */
+        uint32_t beacon_en : 1; /**< [ 24: 24](R/W) Beacon enable. Internally reserved field, do not set. */
+        uint32_t clcrc_en : 1; /**< [ 25: 25](R/W) Corrupt LCRC enable. Internally reserved field, do not set. */
+        uint32_t ex_synch : 1; /**< [ 26: 26](R/W) Extended synch. Internally reserved field, do not set. */
+        uint32_t xlr_en : 1; /**< [ 27: 27](R/W) Transmit lane reversible enable. Internally reserved field, do not set. */
+        uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_pciercx_cfg452_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31 : 4;
+        uint32_t reserved_24_27 : 4;
+        uint32_t reserved_22_23 : 2;
+        uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+                                                                 0x1 = x1.
+                                                                 0x3 = x2.
+                                                                 0x7 = x4.
+                                                                 0xF = x8 (not supported).
+                                                                 0x1F = x16 (not supported).
+                                                                 0x3F = x32 (not supported).
+
+                                                                 This field indicates the maximum number of lanes supported by the PCIe port. The value can
+                                                                 be set less than 0xF to limit the number of lanes the PCIe will attempt to use. The
+                                                                 programming of this field needs to be done by software before enabling the link. See also
+                                                                 PCIERC()_CFG031[MLW].
+                                                                 The value of this field does not indicate the number of lanes in use by the PCIe. This
+                                                                 field sets the maximum number of lanes in the PCIe core that could be used. As per the
+                                                                 PCIe specification, the PCIe core can negotiate a smaller link width, so all of x4,
+                                                                 x2, and x1 are supported when
+                                                                 LME = 0x7, for example. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+        uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes. */
+        uint32_t reserved_6 : 1;
+        uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+                                                                 does not transmit InitFC DLLPs and does not establish a link. */
+        uint32_t reserved_4 : 1;
+        uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+                                                                 port only). */
+        uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+                                                                 sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+                                                                 loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+                                                                 mode, take the link through a reset sequence. */
+        uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+        uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a 1 to this bit, the PCI Express bus transmits
+                                                                 the message contained in the other message register. */
+#else /* Word 0 - Little Endian */
+        uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a 1 to this bit, the PCI Express bus transmits
+                                                                 the message contained in the other message register. */
+        uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+        uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+                                                                 sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+                                                                 loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+                                                                 mode, take the link through a reset sequence. */
+        uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+                                                                 port only). */
+        uint32_t reserved_4 : 1;
+        uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+                                                                 does not transmit InitFC DLLPs and does not establish a link. */
+        uint32_t reserved_6 : 1;
+        uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes. */
+        uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+                                                                 0x1 = x1.
+                                                                 0x3 = x2.
+                                                                 0x7 = x4.
+                                                                 0xF = x8 (not supported).
+                                                                 0x1F = x16 (not supported).
+                                                                 0x3F = x32 (not supported).
+
+                                                                 This field indicates the maximum number of lanes supported by the PCIe port. The value can
+                                                                 be set less than 0xF to limit the number of lanes the PCIe will attempt to use. The
+                                                                 programming of this field needs to be done by software before enabling the link. See also
+                                                                 PCIERC()_CFG031[MLW].
+                                                                 The value of this field does not indicate the number of lanes in use by the PCIe. This
+                                                                 field sets the maximum number of lanes in the PCIe core that could be used. As per the
+                                                                 PCIe specification, the PCIe core can negotiate a smaller link width, so all of x4,
+                                                                 x2, and x1 are supported when
+                                                                 LME = 0x7, for example. */
+        uint32_t reserved_22_23 : 2;
+        uint32_t reserved_24_27 : 4;
+        uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+    } cn81xx;
+    struct bdk_pciercx_cfg452_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31 : 4;
+        uint32_t reserved_24_27 : 4;
+        uint32_t reserved_22_23 : 2;
+        uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+                                                                 0x1 = x1.
+                                                                 0x3 = x2.
+                                                                 0x7 = x4.
+                                                                 0xF = x8.
+                                                                 0x1F = x16 (not supported).
+                                                                 0x3F = x32 (not supported).
+
+                                                                 This field indicates the maximum number of lanes supported by the PCIe port. The value can
+                                                                 be set less than 0xF to limit the number of lanes the PCIe will attempt to use. The
+                                                                 programming of this field needs to be done by software before enabling the link. See also
+                                                                 PCIERC()_CFG031[MLW].
+                                                                 The value of this field does not indicate the number of lanes in use by the PCIe. This
+                                                                 field sets the maximum number of lanes in the PCIe core that could be used. As per the
+                                                                 PCIe specification, the PCIe core can negotiate a smaller link width, so all of x8, x4,
+                                                                 x2, and x1 are supported when
+                                                                 LME = 0xF, for example. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+        uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes. */
+        uint32_t reserved_6 : 1;
+        uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+                                                                 does not transmit InitFC DLLPs and does not establish a link. */
+        uint32_t reserved_4 : 1;
+        uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+                                                                 port only). */
+        uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+                                                                 sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+                                                                 loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+                                                                 mode, take the link through a reset sequence. */
+        uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+        uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a 1 to this bit, the PCI Express bus transmits
+                                                                 the message contained in the other message register. */
+#else /* Word 0 - Little Endian */
+        uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a 1 to this bit, the PCI Express bus transmits
+                                                                 the message contained in the other message register. */
+        uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+        uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+                                                                 sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+                                                                 loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+                                                                 mode, take the link through a reset sequence. */
+        uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+                                                                 port only). */
+        uint32_t reserved_4 : 1;
+        uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+                                                                 does not transmit InitFC DLLPs and does not establish a link. */
+        uint32_t reserved_6 : 1;
+        uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes. */
+        uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+                                                                 0x1 = x1.
+                                                                 0x3 = x2.
+                                                                 0x7 = x4.
+                                                                 0xF = x8.
+                                                                 0x1F = x16 (not supported).
+                                                                 0x3F = x32 (not supported).
+
+                                                                 This field indicates the maximum number of lanes supported by the PCIe port. The value can
+                                                                 be set less than 0xF to limit the number of lanes the PCIe will attempt to use. The
+                                                                 programming of this field needs to be done by software before enabling the link. See also
+                                                                 PCIERC()_CFG031[MLW].
+                                                                 The value of this field does not indicate the number of lanes in use by the PCIe. This
+                                                                 field sets the maximum number of lanes in the PCIe core that could be used. As per the
+                                                                 PCIe specification, the PCIe core can negotiate a smaller link width, so all of x8, x4,
+                                                                 x2, and x1 are supported when
+                                                                 LME = 0xF, for example. */
+        uint32_t reserved_22_23 : 2;
+        uint32_t reserved_24_27 : 4;
+        uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_pciercx_cfg452_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31 : 4;
+        uint32_t xlr_en : 1; /**< [ 27: 27](R/W) Transmit lane reversible enable. Internally reserved field, do not set. */
+        uint32_t ex_synch : 1; /**< [ 26: 26](R/W) Extended synch. Internally reserved field, do not set. */
+        uint32_t clcrc_en : 1; /**< [ 25: 25](R/W) Corrupt LCRC enable. Internally reserved field, do not set. */
+        uint32_t beacon_en : 1; /**< [ 24: 24](R/W) Beacon enable. Internally reserved field, do not set. */
+        uint32_t cle : 2; /**< [ 23: 22](RAZ) Reserved. */
+        uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+                                                                 0x1 = x1.
+                                                                 0x3 = x2.
+                                                                 0x7 = x4.
+                                                                 0xF = x8.
+                                                                 0x1F = x16 (not supported).
+                                                                 0x3F = x32 (not supported).
+
+                                                                 This field indicates the maximum number of lanes supported by the PCIe port. The value can
+                                                                 be set less than 0xF to limit the number of lanes the PCIe will attempt to use. The
+                                                                 programming of this field needs to be done by software before enabling the link. See also
+                                                                 PCIERC()_CFG031[MLW].
+                                                                 The value of this field does not indicate the number of lanes in use by the PCIe. This
+                                                                 field sets the maximum number of lanes in the PCIe core that could be used. As per the
+                                                                 PCIe specification, the PCIe core can negotiate a smaller link width, so all of x8, x4,
+                                                                 x2, and x1 are supported when
+                                                                 LME = 0xF, for example. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+        uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes.
+                                                                 The scaling factor is configured by PCIEEP()_CFG454[FLMSF]. */
+        uint32_t ldis : 1; /**< [ 6: 6](R/W) Link disable. Internally reserved field, do not set. */
+        uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+                                                                 does not transmit InitFC DLLPs and does not establish a link. */
+        uint32_t reserved_4 : 1;
+        uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+                                                                 port only). */
+        uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+                                                                 sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+                                                                 loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+                                                                 mode, take the link through a reset sequence. */
+        uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+        uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a 1 to this bit, the PCI Express bus transmits
+                                                                 the message contained in the other message register. */
+#else /* Word 0 - Little Endian */
+        uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a 1 to this bit, the PCI Express bus transmits
+                                                                 the message contained in the other message register. */
+        uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+        uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+                                                                 sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+                                                                 loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+                                                                 mode, take the link through a reset sequence. */
+        uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+                                                                 port only). */
+        uint32_t reserved_4 : 1;
+        uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+                                                                 does not transmit InitFC DLLPs and does not establish a link. */
+        uint32_t ldis : 1; /**< [ 6: 6](R/W) Link disable. Internally reserved field, do not set. */
+        uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes.
+                                                                 The scaling factor is configured by PCIEEP()_CFG454[FLMSF]. */
+        uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+        uint32_t reserved_12_15 : 4;
+        uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+                                                                 0x1 = x1.
+                                                                 0x3 = x2.
+                                                                 0x7 = x4.
+                                                                 0xF = x8.
+                                                                 0x1F = x16 (not supported).
+                                                                 0x3F = x32 (not supported).
+
+                                                                 This field indicates the maximum number of lanes supported by the PCIe port. The value can
+                                                                 be set less than 0xF to limit the number of lanes the PCIe will attempt to use. The
+                                                                 programming of this field needs to be done by software before enabling the link. See also
+                                                                 PCIERC()_CFG031[MLW].
+                                                                 The value of this field does not indicate the number of lanes in use by the PCIe. This
+                                                                 field sets the maximum number of lanes in the PCIe core that could be used. As per the
+                                                                 PCIe specification, the PCIe core can negotiate a smaller link width, so all of x8, x4,
+                                                                 x2, and x1 are supported when
+                                                                 LME = 0xF, for example. */
+        uint32_t cle : 2; /**< [ 23: 22](RAZ) Reserved. */
+        uint32_t beacon_en : 1; /**< [ 24: 24](R/W) Beacon enable. Internally reserved field, do not set. */
+        uint32_t clcrc_en : 1; /**< [ 25: 25](R/W) Corrupt LCRC enable. Internally reserved field, do not set. */
+        uint32_t ex_synch : 1; /**< [ 26: 26](R/W) Extended synch. Internally reserved field, do not set. */
+        uint32_t xlr_en : 1; /**< [ 27: 27](R/W) Transmit lane reversible enable. Internally reserved field, do not set. */
+        uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg452 bdk_pciercx_cfg452_t;
+
+/* Compute the config-space address of PCIERC(a)_CFG452 for the running model.
+   Calls __bdk_csr_fatal() when the index is out of range for every model. */
+static inline uint64_t BDK_PCIERCX_CFG452(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG452(unsigned long a)
+{
+    const uint64_t base = 0x20000000710ll;  /* CFG452 offset in RC config space */
+    const uint64_t step = 0x100000000ll;    /* per-port address stride */
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + step * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + step * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG452", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG452(a) bdk_pciercx_cfg452_t
+#define bustype_BDK_PCIERCX_CFG452(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG452(a) "PCIERCX_CFG452"
+#define busnum_BDK_PCIERCX_CFG452(a) (a)
+#define arguments_BDK_PCIERCX_CFG452(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg453
+ *
+ * PCIe RC Lane Skew Register
+ * This register contains the four hundred fifty-fourth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg453
+{
+    uint32_t u;
+    /* NOTE(review): no model-specific variants exist for this register
+     * (see the commented-out "cn" alias below); the two #if arms declare
+     * the same fields in mirrored order so bitfield packing matches the
+     * hardware word on either host byte order. */
+    struct bdk_pciercx_cfg453_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dlld : 1; /**< [ 31: 31](R/W) Disable lane-to-lane deskew. Disables the internal lane-to-lane deskew logic. */
+        uint32_t reserved_26_30 : 5;
+        uint32_t ack_nak : 1; /**< [ 25: 25](R/W) ACK/NAK disable. Prevents the PCI Express bus from sending Ack and Nak DLLPs. */
+        uint32_t fcd : 1; /**< [ 24: 24](R/W) Flow control disable. Prevents the PCI Express bus from sending FC DLLPs. */
+        uint32_t ilst : 24; /**< [ 23: 0](R/W) Insert lane skew for transmit (not supported for *16). Causes skew between lanes for test
+                                                                 purposes. There are three bits per lane. The value is in units of one symbol time. For
+                                                                 example, the value 0x2 for a lane forces a skew of two symbol times for that lane. The
+                                                                 maximum skew value for any lane is 5 symbol times. */
+#else /* Word 0 - Little Endian */
+        uint32_t ilst : 24; /**< [ 23: 0](R/W) Insert lane skew for transmit (not supported for *16). Causes skew between lanes for test
+                                                                 purposes. There are three bits per lane. The value is in units of one symbol time. For
+                                                                 example, the value 0x2 for a lane forces a skew of two symbol times for that lane. The
+                                                                 maximum skew value for any lane is 5 symbol times. */
+        uint32_t fcd : 1; /**< [ 24: 24](R/W) Flow control disable. Prevents the PCI Express bus from sending FC DLLPs. */
+        uint32_t ack_nak : 1; /**< [ 25: 25](R/W) ACK/NAK disable. Prevents the PCI Express bus from sending Ack and Nak DLLPs. */
+        uint32_t reserved_26_30 : 5;
+        uint32_t dlld : 1; /**< [ 31: 31](R/W) Disable lane-to-lane deskew. Disables the internal lane-to-lane deskew logic. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg453_s cn; */
+};
+typedef union bdk_pciercx_cfg453 bdk_pciercx_cfg453_t;
+
+/* Physical address of PCIERC(a)_CFG453 (per-port offset 0x714, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG453(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG453(unsigned long a)
+{
+    const uint64_t base = 0x20000000714ll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG453", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG453(a) bdk_pciercx_cfg453_t
+#define bustype_BDK_PCIERCX_CFG453(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG453(a) "PCIERCX_CFG453"
+#define busnum_BDK_PCIERCX_CFG453(a) (a)
+#define arguments_BDK_PCIERCX_CFG453(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg454
+ *
+ * PCIe RC Symbol Number Register
+ * This register contains the four hundred fifty-fifth 32-bits of PCIe type 1 configuration space.
+ */
+/* Layout differs per SoC: [cn81xx] (also used for CN88XX) defines TMFCWT in
+ * bits [28:24]; [cn83xx] replaces it with UPDFT and adds FLMSF in [30:29].
+ * The generic [s] view mirrors the CN83XX layout. */
+union bdk_pciercx_cfg454
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg454_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t flmsf : 2; /**< [ 30: 29](R/W) Fast Link Timer Scaling Factor. Sets the scaling factor of
+ LTSSM timer when PCIERC()_CFG452[FLM] is set.
+ 0x0: Scaling Factor is 1024 (1ms is 1us)
+ 0x1: Scaling Factor is 256 (1ms is 4us)
+ 0x2: Scaling Factor is 64 (1ms is 16us)
+ 0x3: Scaling Factor is 16 (1ms is 64us) */
+ uint32_t reserved_24_28 : 5;
+ uint32_t tmanlt : 5; /**< [ 23: 19](R/W) Timer modifier for ACK/NAK latency timer. Increases the timer value for the ACK/NAK
+ latency timer, in increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< [ 18: 14](R/W/H) Timer modifier for replay timer. Increases the timer value for the replay timer, in
+ increments of 64 clock cycles. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t mfuncn : 8; /**< [ 7: 0](R/W) Max number of functions supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t mfuncn : 8; /**< [ 7: 0](R/W) Max number of functions supported. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t tmrt : 5; /**< [ 18: 14](R/W/H) Timer modifier for replay timer. Increases the timer value for the replay timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmanlt : 5; /**< [ 23: 19](R/W) Timer modifier for ACK/NAK latency timer. Increases the timer value for the ACK/NAK
+ latency timer, in increments of 64 clock cycles. */
+ uint32_t reserved_24_28 : 5;
+ uint32_t flmsf : 2; /**< [ 30: 29](R/W) Fast Link Timer Scaling Factor. Sets the scaling factor of
+ LTSSM timer when PCIERC()_CFG452[FLM] is set.
+ 0x0: Scaling Factor is 1024 (1ms is 1us)
+ 0x1: Scaling Factor is 256 (1ms is 4us)
+ 0x2: Scaling Factor is 64 (1ms is 16us)
+ 0x3: Scaling Factor is 16 (1ms is 64us) */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg454_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t tmfcwt : 5; /**< [ 28: 24](R/W) Used to be 'timer modifier for flow control watchdog timer.' This field is no longer used
+ and has moved to the queue status register -- PCIERC()_CFG463. This field remains to
+ prevent software from breaking. */
+ uint32_t tmanlt : 5; /**< [ 23: 19](R/W) Timer modifier for ACK/NAK latency timer. Increases the timer value for the ACK/NAK
+ latency timer, in increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< [ 18: 14](R/W/H) Timer modifier for replay timer. Increases the timer value for the replay timer, in
+ increments of 64 clock cycles. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t mfuncn : 8; /**< [ 7: 0](R/W) Max number of functions supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t mfuncn : 8; /**< [ 7: 0](R/W) Max number of functions supported. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t tmrt : 5; /**< [ 18: 14](R/W/H) Timer modifier for replay timer. Increases the timer value for the replay timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmanlt : 5; /**< [ 23: 19](R/W) Timer modifier for ACK/NAK latency timer. Increases the timer value for the ACK/NAK
+ latency timer, in increments of 64 clock cycles. */
+ uint32_t tmfcwt : 5; /**< [ 28: 24](R/W) Used to be 'timer modifier for flow control watchdog timer.' This field is no longer used
+ and has moved to the queue status register -- PCIERC()_CFG463. This field remains to
+ prevent software from breaking. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pciercx_cfg454_cn81xx cn88xx; */
+ struct bdk_pciercx_cfg454_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t flmsf : 2; /**< [ 30: 29](R/W) Fast Link Timer Scaling Factor. Sets the scaling factor of
+ LTSSM timer when PCIERC()_CFG452[FLM] is set.
+ 0x0: Scaling Factor is 1024 (1ms is 1us)
+ 0x1: Scaling Factor is 256 (1ms is 4us)
+ 0x2: Scaling Factor is 64 (1ms is 16us)
+ 0x3: Scaling Factor is 16 (1ms is 64us) */
+ uint32_t updft : 5; /**< [ 28: 24](R/W) Update Frequency Timer. This is an internally reserved field, do not use. */
+ uint32_t tmanlt : 5; /**< [ 23: 19](R/W) Timer modifier for ACK/NAK latency timer. Increases the timer value for the ACK/NAK
+ latency timer, in increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< [ 18: 14](R/W/H) Timer modifier for replay timer. Increases the timer value for the replay timer, in
+ increments of 64 clock cycles. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t mfuncn : 8; /**< [ 7: 0](R/W) Max number of functions supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t mfuncn : 8; /**< [ 7: 0](R/W) Max number of functions supported. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t tmrt : 5; /**< [ 18: 14](R/W/H) Timer modifier for replay timer. Increases the timer value for the replay timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmanlt : 5; /**< [ 23: 19](R/W) Timer modifier for ACK/NAK latency timer. Increases the timer value for the ACK/NAK
+ latency timer, in increments of 64 clock cycles. */
+ uint32_t updft : 5; /**< [ 28: 24](R/W) Update Frequency Timer. This is an internally reserved field, do not use. */
+ uint32_t flmsf : 2; /**< [ 30: 29](R/W) Fast Link Timer Scaling Factor. Sets the scaling factor of
+ LTSSM timer when PCIERC()_CFG452[FLM] is set.
+ 0x0: Scaling Factor is 1024 (1ms is 1us)
+ 0x1: Scaling Factor is 256 (1ms is 4us)
+ 0x2: Scaling Factor is 64 (1ms is 16us)
+ 0x3: Scaling Factor is 16 (1ms is 64us) */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg454 bdk_pciercx_cfg454_t;
+
+/* Physical address of PCIERC(a)_CFG454 (per-port offset 0x718, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG454(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG454(unsigned long a)
+{
+    const uint64_t base = 0x20000000718ll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG454", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG454(a) bdk_pciercx_cfg454_t
+#define bustype_BDK_PCIERCX_CFG454(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG454(a) "PCIERCX_CFG454"
+#define busnum_BDK_PCIERCX_CFG454(a) (a)
+#define arguments_BDK_PCIERCX_CFG454(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg455
+ *
+ * PCIe RC Symbol Timer/Filter Mask Register 1
+ * This register contains the four hundred fifty-sixth 32-bits of PCIe type 1 configuration space.
+ */
+/* The [cn83xx] variant has the same bit positions as [s]; it differs only
+ * in the expanded SKPIV field description (SKP interval semantics, fixed
+ * 370-block interval at Gen3). CN81XX/CN88XX use layout [s] directly. */
+union bdk_pciercx_cfg455
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg455_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t m_cfg0_filt : 1; /**< [ 31: 31](R/W) Mask filtering of received configuration requests (RC mode only). */
+ uint32_t m_io_filt : 1; /**< [ 30: 30](R/W) Mask filtering of received I/O requests (RC mode only). */
+ uint32_t msg_ctrl : 1; /**< [ 29: 29](R/W) Message control. The application must not change this field. */
+ uint32_t m_cpl_ecrc_filt : 1; /**< [ 28: 28](R/W) Mask ECRC error filtering for completions. */
+ uint32_t m_ecrc_filt : 1; /**< [ 27: 27](R/W) Mask ECRC error filtering. */
+ uint32_t m_cpl_len_err : 1; /**< [ 26: 26](R/W) Mask length mismatch error for received completions. */
+ uint32_t m_cpl_attr_err : 1; /**< [ 25: 25](R/W) Mask attributes mismatch error for received completions. */
+ uint32_t m_cpl_tc_err : 1; /**< [ 24: 24](R/W) Mask traffic class mismatch error for received completions. */
+ uint32_t m_cpl_fun_err : 1; /**< [ 23: 23](R/W) Mask function mismatch error for received completions. */
+ uint32_t m_cpl_rid_err : 1; /**< [ 22: 22](R/W) Mask requester ID mismatch error for received completions. */
+ uint32_t m_cpl_tag_err : 1; /**< [ 21: 21](R/W) Mask tag error rules for received completions. */
+ uint32_t m_lk_filt : 1; /**< [ 20: 20](R/W) Mask locked request filtering. */
+ uint32_t m_cfg1_filt : 1; /**< [ 19: 19](R/W) Mask type 1 configuration request filtering. */
+ uint32_t m_bar_match : 1; /**< [ 18: 18](R/W) Mask BAR match filtering. */
+ uint32_t m_pois_filt : 1; /**< [ 17: 17](R/W) Mask poisoned TLP filtering. */
+ uint32_t m_fun : 1; /**< [ 16: 16](R/W) Mask function. */
+ uint32_t dfcwt : 1; /**< [ 15: 15](R/W) Disable FC watchdog timer. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t skpiv : 11; /**< [ 10: 0](R/W) SKP interval value. */
+#else /* Word 0 - Little Endian */
+ uint32_t skpiv : 11; /**< [ 10: 0](R/W) SKP interval value. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t dfcwt : 1; /**< [ 15: 15](R/W) Disable FC watchdog timer. */
+ uint32_t m_fun : 1; /**< [ 16: 16](R/W) Mask function. */
+ uint32_t m_pois_filt : 1; /**< [ 17: 17](R/W) Mask poisoned TLP filtering. */
+ uint32_t m_bar_match : 1; /**< [ 18: 18](R/W) Mask BAR match filtering. */
+ uint32_t m_cfg1_filt : 1; /**< [ 19: 19](R/W) Mask type 1 configuration request filtering. */
+ uint32_t m_lk_filt : 1; /**< [ 20: 20](R/W) Mask locked request filtering. */
+ uint32_t m_cpl_tag_err : 1; /**< [ 21: 21](R/W) Mask tag error rules for received completions. */
+ uint32_t m_cpl_rid_err : 1; /**< [ 22: 22](R/W) Mask requester ID mismatch error for received completions. */
+ uint32_t m_cpl_fun_err : 1; /**< [ 23: 23](R/W) Mask function mismatch error for received completions. */
+ uint32_t m_cpl_tc_err : 1; /**< [ 24: 24](R/W) Mask traffic class mismatch error for received completions. */
+ uint32_t m_cpl_attr_err : 1; /**< [ 25: 25](R/W) Mask attributes mismatch error for received completions. */
+ uint32_t m_cpl_len_err : 1; /**< [ 26: 26](R/W) Mask length mismatch error for received completions. */
+ uint32_t m_ecrc_filt : 1; /**< [ 27: 27](R/W) Mask ECRC error filtering. */
+ uint32_t m_cpl_ecrc_filt : 1; /**< [ 28: 28](R/W) Mask ECRC error filtering for completions. */
+ uint32_t msg_ctrl : 1; /**< [ 29: 29](R/W) Message control. The application must not change this field. */
+ uint32_t m_io_filt : 1; /**< [ 30: 30](R/W) Mask filtering of received I/O requests (RC mode only). */
+ uint32_t m_cfg0_filt : 1; /**< [ 31: 31](R/W) Mask filtering of received configuration requests (RC mode only). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg455_s cn81xx; */
+ /* struct bdk_pciercx_cfg455_s cn88xx; */
+ struct bdk_pciercx_cfg455_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t m_cfg0_filt : 1; /**< [ 31: 31](R/W) Mask filtering of received configuration requests (RC mode only). */
+ uint32_t m_io_filt : 1; /**< [ 30: 30](R/W) Mask filtering of received I/O requests (RC mode only). */
+ uint32_t msg_ctrl : 1; /**< [ 29: 29](R/W) Message control. The application must not change this field. */
+ uint32_t m_cpl_ecrc_filt : 1; /**< [ 28: 28](R/W) Mask ECRC error filtering for completions. */
+ uint32_t m_ecrc_filt : 1; /**< [ 27: 27](R/W) Mask ECRC error filtering. */
+ uint32_t m_cpl_len_err : 1; /**< [ 26: 26](R/W) Mask length mismatch error for received completions. */
+ uint32_t m_cpl_attr_err : 1; /**< [ 25: 25](R/W) Mask attributes mismatch error for received completions. */
+ uint32_t m_cpl_tc_err : 1; /**< [ 24: 24](R/W) Mask traffic class mismatch error for received completions. */
+ uint32_t m_cpl_fun_err : 1; /**< [ 23: 23](R/W) Mask function mismatch error for received completions. */
+ uint32_t m_cpl_rid_err : 1; /**< [ 22: 22](R/W) Mask requester ID mismatch error for received completions. */
+ uint32_t m_cpl_tag_err : 1; /**< [ 21: 21](R/W) Mask tag error rules for received completions. */
+ uint32_t m_lk_filt : 1; /**< [ 20: 20](R/W) Mask locked request filtering. */
+ uint32_t m_cfg1_filt : 1; /**< [ 19: 19](R/W) Mask type 1 configuration request filtering. */
+ uint32_t m_bar_match : 1; /**< [ 18: 18](R/W) Mask BAR match filtering. */
+ uint32_t m_pois_filt : 1; /**< [ 17: 17](R/W) Mask poisoned TLP filtering. */
+ uint32_t m_fun : 1; /**< [ 16: 16](R/W) Mask function. */
+ uint32_t dfcwt : 1; /**< [ 15: 15](R/W) Disable FC watchdog timer. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t skpiv : 11; /**< [ 10: 0](R/W) SKP interval value. The number of symbol times to wait
+ between transmitting SKP ordered sets. Note that the
+ controller actually waits the number of symbol times in this
+ register plus 1 between transmitting SKP ordered sets.
+
+ This value is not used at Gen3 speed; the skip interval
+ is hardcoded to 370 blocks. */
+#else /* Word 0 - Little Endian */
+ uint32_t skpiv : 11; /**< [ 10: 0](R/W) SKP interval value. The number of symbol times to wait
+ between transmitting SKP ordered sets. Note that the
+ controller actually waits the number of symbol times in this
+ register plus 1 between transmitting SKP ordered sets.
+
+ This value is not used at Gen3 speed; the skip interval
+ is hardcoded to 370 blocks. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t dfcwt : 1; /**< [ 15: 15](R/W) Disable FC watchdog timer. */
+ uint32_t m_fun : 1; /**< [ 16: 16](R/W) Mask function. */
+ uint32_t m_pois_filt : 1; /**< [ 17: 17](R/W) Mask poisoned TLP filtering. */
+ uint32_t m_bar_match : 1; /**< [ 18: 18](R/W) Mask BAR match filtering. */
+ uint32_t m_cfg1_filt : 1; /**< [ 19: 19](R/W) Mask type 1 configuration request filtering. */
+ uint32_t m_lk_filt : 1; /**< [ 20: 20](R/W) Mask locked request filtering. */
+ uint32_t m_cpl_tag_err : 1; /**< [ 21: 21](R/W) Mask tag error rules for received completions. */
+ uint32_t m_cpl_rid_err : 1; /**< [ 22: 22](R/W) Mask requester ID mismatch error for received completions. */
+ uint32_t m_cpl_fun_err : 1; /**< [ 23: 23](R/W) Mask function mismatch error for received completions. */
+ uint32_t m_cpl_tc_err : 1; /**< [ 24: 24](R/W) Mask traffic class mismatch error for received completions. */
+ uint32_t m_cpl_attr_err : 1; /**< [ 25: 25](R/W) Mask attributes mismatch error for received completions. */
+ uint32_t m_cpl_len_err : 1; /**< [ 26: 26](R/W) Mask length mismatch error for received completions. */
+ uint32_t m_ecrc_filt : 1; /**< [ 27: 27](R/W) Mask ECRC error filtering. */
+ uint32_t m_cpl_ecrc_filt : 1; /**< [ 28: 28](R/W) Mask ECRC error filtering for completions. */
+ uint32_t msg_ctrl : 1; /**< [ 29: 29](R/W) Message control. The application must not change this field. */
+ uint32_t m_io_filt : 1; /**< [ 30: 30](R/W) Mask filtering of received I/O requests (RC mode only). */
+ uint32_t m_cfg0_filt : 1; /**< [ 31: 31](R/W) Mask filtering of received configuration requests (RC mode only). */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg455 bdk_pciercx_cfg455_t;
+
+/* Physical address of PCIERC(a)_CFG455 (per-port offset 0x71c, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG455(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG455(unsigned long a)
+{
+    const uint64_t base = 0x2000000071cll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG455", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG455(a) bdk_pciercx_cfg455_t
+#define bustype_BDK_PCIERCX_CFG455(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG455(a) "PCIERCX_CFG455"
+#define busnum_BDK_PCIERCX_CFG455(a) (a)
+#define arguments_BDK_PCIERCX_CFG455(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg456
+ *
+ * PCIe RC Filter Mask Register 2
+ * This register contains the four hundred fifty-seventh 32-bits of PCIe type 1 configuration space.
+ */
+/* [cn81xx] (also CN88XX) defines only mask bits [3:0]; CN83XX uses the
+ * generic [s] layout, which extends the mask set through bit [7]. */
+union bdk_pciercx_cfg456
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg456_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t m_prs : 1; /**< [ 7: 7](R/W) Mask PRS messages dropped silently. */
+ uint32_t m_unmask_td : 1; /**< [ 6: 6](R/W) Not Supported. */
+ uint32_t m_unmask_ur_pois : 1; /**< [ 5: 5](R/W) Not Supported. */
+ uint32_t m_ln_vend1_drop : 1; /**< [ 4: 4](R/W) Mask LN messages dropped silently. */
+ uint32_t m_handle_flush : 1; /**< [ 3: 3](R/W) Mask core filter to handle flush request. */
+ uint32_t m_dabort_4ucpl : 1; /**< [ 2: 2](R/W) Mask DLLP abort for unexpected CPL. */
+ uint32_t m_vend1_drp : 1; /**< [ 1: 1](R/W) Mask vendor MSG type 1 dropped silently. */
+ uint32_t m_vend0_drp : 1; /**< [ 0: 0](R/W) Mask vendor MSG type 0 dropped with UR error reporting. */
+#else /* Word 0 - Little Endian */
+ uint32_t m_vend0_drp : 1; /**< [ 0: 0](R/W) Mask vendor MSG type 0 dropped with UR error reporting. */
+ uint32_t m_vend1_drp : 1; /**< [ 1: 1](R/W) Mask vendor MSG type 1 dropped silently. */
+ uint32_t m_dabort_4ucpl : 1; /**< [ 2: 2](R/W) Mask DLLP abort for unexpected CPL. */
+ uint32_t m_handle_flush : 1; /**< [ 3: 3](R/W) Mask core filter to handle flush request. */
+ uint32_t m_ln_vend1_drop : 1; /**< [ 4: 4](R/W) Mask LN messages dropped silently. */
+ uint32_t m_unmask_ur_pois : 1; /**< [ 5: 5](R/W) Not Supported. */
+ uint32_t m_unmask_td : 1; /**< [ 6: 6](R/W) Not Supported. */
+ uint32_t m_prs : 1; /**< [ 7: 7](R/W) Mask PRS messages dropped silently. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg456_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_4_31 : 28;
+ uint32_t m_handle_flush : 1; /**< [ 3: 3](R/W) Mask core filter to handle flush request. */
+ uint32_t m_dabort_4ucpl : 1; /**< [ 2: 2](R/W) Mask DLLP abort for unexpected CPL. */
+ uint32_t m_vend1_drp : 1; /**< [ 1: 1](R/W) Mask vendor MSG type 1 dropped silently. */
+ uint32_t m_vend0_drp : 1; /**< [ 0: 0](R/W) Mask vendor MSG type 0 dropped with UR error reporting. */
+#else /* Word 0 - Little Endian */
+ uint32_t m_vend0_drp : 1; /**< [ 0: 0](R/W) Mask vendor MSG type 0 dropped with UR error reporting. */
+ uint32_t m_vend1_drp : 1; /**< [ 1: 1](R/W) Mask vendor MSG type 1 dropped silently. */
+ uint32_t m_dabort_4ucpl : 1; /**< [ 2: 2](R/W) Mask DLLP abort for unexpected CPL. */
+ uint32_t m_handle_flush : 1; /**< [ 3: 3](R/W) Mask core filter to handle flush request. */
+ uint32_t reserved_4_31 : 28;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_pciercx_cfg456_cn81xx cn88xx; */
+ /* struct bdk_pciercx_cfg456_s cn83xx; */
+};
+typedef union bdk_pciercx_cfg456 bdk_pciercx_cfg456_t;
+
+/* Physical address of PCIERC(a)_CFG456 (per-port offset 0x720, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG456(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG456(unsigned long a)
+{
+    const uint64_t base = 0x20000000720ll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG456", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG456(a) bdk_pciercx_cfg456_t
+#define bustype_BDK_PCIERCX_CFG456(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG456(a) "PCIERCX_CFG456"
+#define busnum_BDK_PCIERCX_CFG456(a) (a)
+#define arguments_BDK_PCIERCX_CFG456(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg458
+ *
+ * PCIe RC Debug Register 0
+ * This register contains the four hundred fifty-ninth 32-bits of PCIe type 1 configuration space.
+ */
+/* Lower half of the 64-bit debug info value; the upper half is in
+ * PCIERC()_CFG459. Single read-only field, identical on all models. */
+union bdk_pciercx_cfg458
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg458_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dbg_info_l32 : 32; /**< [ 31: 0](RO/H) Debug info lower 32 bits. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbg_info_l32 : 32; /**< [ 31: 0](RO/H) Debug info lower 32 bits. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg458_s cn; */
+};
+typedef union bdk_pciercx_cfg458 bdk_pciercx_cfg458_t;
+
+/* Physical address of PCIERC(a)_CFG458 (per-port offset 0x728, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG458(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG458(unsigned long a)
+{
+    const uint64_t base = 0x20000000728ll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG458", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG458(a) bdk_pciercx_cfg458_t
+#define bustype_BDK_PCIERCX_CFG458(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG458(a) "PCIERCX_CFG458"
+#define busnum_BDK_PCIERCX_CFG458(a) (a)
+#define arguments_BDK_PCIERCX_CFG458(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg459
+ *
+ * PCIe RC Debug Register 1
+ * This register contains the four hundred sixtieth 32-bits of PCIe type 1 configuration space.
+ */
+/* Upper half of the 64-bit debug info value; the lower half is in
+ * PCIERC()_CFG458. Single read-only field, identical on all models. */
+union bdk_pciercx_cfg459
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg459_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dbg_info_u32 : 32; /**< [ 31: 0](RO/H) Debug info upper 32 bits. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbg_info_u32 : 32; /**< [ 31: 0](RO/H) Debug info upper 32 bits. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg459_s cn; */
+};
+typedef union bdk_pciercx_cfg459 bdk_pciercx_cfg459_t;
+
+/* Physical address of PCIERC(a)_CFG459 (per-port offset 0x72c, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG459(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG459(unsigned long a)
+{
+    const uint64_t base = 0x2000000072cll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG459", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG459(a) bdk_pciercx_cfg459_t
+#define bustype_BDK_PCIERCX_CFG459(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG459(a) "PCIERCX_CFG459"
+#define busnum_BDK_PCIERCX_CFG459(a) (a)
+#define arguments_BDK_PCIERCX_CFG459(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg460
+ *
+ * PCIe RC Transmit Posted FC Credit Status Register
+ * This register contains the four hundred sixty-first 32-bits of PCIe type 1 configuration space.
+ */
+/* First of three credit-status registers (posted here, nonposted in
+ * CFG461, completion in CFG462); all three share the 12/8-bit data/header
+ * credit split. Identical on all models. */
+union bdk_pciercx_cfg460
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg460_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_20_31 : 12;
+ uint32_t tphfcc : 8; /**< [ 19: 12](RO/H) Transmit posted header FC credits. The posted header credits advertised by the receiver at
+ the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t tpdfcc : 12; /**< [ 11: 0](RO/H) Transmit posted data FC credits. The posted data credits advertised by the receiver at the
+ other end of the link, updated with each UpdateFC DLLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t tpdfcc : 12; /**< [ 11: 0](RO/H) Transmit posted data FC credits. The posted data credits advertised by the receiver at the
+ other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t tphfcc : 8; /**< [ 19: 12](RO/H) Transmit posted header FC credits. The posted header credits advertised by the receiver at
+ the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t reserved_20_31 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg460_s cn; */
+};
+typedef union bdk_pciercx_cfg460 bdk_pciercx_cfg460_t;
+
+/* Physical address of PCIERC(a)_CFG460 (per-port offset 0x730, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG460(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG460(unsigned long a)
+{
+    const uint64_t base = 0x20000000730ll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG460", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG460(a) bdk_pciercx_cfg460_t
+#define bustype_BDK_PCIERCX_CFG460(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG460(a) "PCIERCX_CFG460"
+#define busnum_BDK_PCIERCX_CFG460(a) (a)
+#define arguments_BDK_PCIERCX_CFG460(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg461
+ *
+ * PCIe RC Transmit Nonposted FC Credit Status Register
+ * This register contains the four hundred sixty-second 32-bits of PCIe type 1 configuration space.
+ */
+/* Note: field names TCHFCC/TCDFCC are reused by the completion-credit
+ * register CFG462; here they refer to NONPOSTED credits, per the field
+ * descriptions below. Identical on all models. */
+union bdk_pciercx_cfg461
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg461_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_20_31 : 12;
+ uint32_t tchfcc : 8; /**< [ 19: 12](RO/H) Transmit nonposted header FC credits. The nonposted header credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t tcdfcc : 12; /**< [ 11: 0](RO/H) Transmit nonposted data FC credits. The nonposted data credits advertised by the receiver
+ at the other end of the link, updated with each UpdateFC DLLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t tcdfcc : 12; /**< [ 11: 0](RO/H) Transmit nonposted data FC credits. The nonposted data credits advertised by the receiver
+ at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t tchfcc : 8; /**< [ 19: 12](RO/H) Transmit nonposted header FC credits. The nonposted header credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t reserved_20_31 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg461_s cn; */
+};
+typedef union bdk_pciercx_cfg461 bdk_pciercx_cfg461_t;
+
+/* Physical address of PCIERC(a)_CFG461 (per-port offset 0x734, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG461(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG461(unsigned long a)
+{
+    const uint64_t base = 0x20000000734ll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG461", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG461(a) bdk_pciercx_cfg461_t
+#define bustype_BDK_PCIERCX_CFG461(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG461(a) "PCIERCX_CFG461"
+#define busnum_BDK_PCIERCX_CFG461(a) (a)
+#define arguments_BDK_PCIERCX_CFG461(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg462
+ *
+ * PCIe RC Transmit Completion FC Credit Status Register
+ * This register contains the four hundred sixty-third 32-bits of PCIe type 1 configuration space.
+ */
+/* Completion-credit counterpart of CFG460 (posted) and CFG461 (nonposted);
+ * same 12/8-bit data/header credit split. Identical on all models. */
+union bdk_pciercx_cfg462
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg462_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_20_31 : 12;
+ uint32_t tchfcc : 8; /**< [ 19: 12](RO/H) Transmit completion header FC credits. The completion header credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t tcdfcc : 12; /**< [ 11: 0](RO/H) Transmit completion data FC credits. The completion data credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t tcdfcc : 12; /**< [ 11: 0](RO/H) Transmit completion data FC credits. The completion data credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t tchfcc : 8; /**< [ 19: 12](RO/H) Transmit completion header FC credits. The completion header credits advertised by the
+ receiver at the other end of the link, updated with each UpdateFC DLLP. */
+ uint32_t reserved_20_31 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg462_s cn; */
+};
+typedef union bdk_pciercx_cfg462 bdk_pciercx_cfg462_t;
+
+/* Physical address of PCIERC(a)_CFG462 (per-port offset 0x738, one
+ * 0x100000000 window per port). Valid port index: a<=2 on CN81XX,
+ * a<=3 on CN83XX, a<=5 on CN88XX; any other combination reports a
+ * fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG462(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG462(unsigned long a)
+{
+    const uint64_t base = 0x20000000738ll;
+    const uint64_t stride = 0x100000000ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 2)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 3)
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 5)
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG462", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG462(a) bdk_pciercx_cfg462_t
+#define bustype_BDK_PCIERCX_CFG462(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG462(a) "PCIERCX_CFG462"
+#define busnum_BDK_PCIERCX_CFG462(a) (a)
+#define arguments_BDK_PCIERCX_CFG462(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg463
+ *
+ * PCIe RC Queue Status Register
+ * This register contains the four hundred sixty-fourth 32-bits of PCIe type 1 configuration space.
+ */
+/* NOTE(review): the FCLTOE/FCLTOV descriptions below cross-reference
+ * PCIERC()_CFG453, yet both fields are defined in THIS register (CFG463).
+ * This looks like a doc-generator artifact -- confirm against the HRM
+ * before relying on the CFG453 reference. */
+union bdk_pciercx_cfg463
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg463_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t fcltoe : 1; /**< [ 31: 31](R/W) FC latency timer override enable. When this bit is set, the value in
+ PCIERC()_CFG453[FCLTOV] will override the FC latency timer value that the core
+ calculates according to the PCIe specification. */
+ uint32_t reserved_29_30 : 2;
+ uint32_t fcltov : 13; /**< [ 28: 16](R/W) FC latency timer override value. When you set PCIERC()_CFG453[FCLTOE], the value in
+ this field will override the FC latency timer value that the core calculates according to
+ the PCIe specification. */
+ uint32_t reserved_3_15 : 13;
+ uint32_t rqne : 1; /**< [ 2: 2](RO/H) Received queue not empty. Indicates there is data in one or more of the receive buffers. */
+ uint32_t trbne : 1; /**< [ 1: 1](RO/H) Transmit retry buffer not empty. Indicates that there is data in the transmit retry buffer. */
+ uint32_t rtlpfccnr : 1; /**< [ 0: 0](RO/H) Received TLP FC credits not returned. Indicates that the PCI Express bus has sent a TLP
+ but has not yet received an UpdateFC DLLP indicating that the credits for that TLP have
+ been restored by the receiver at the other end of the link. */
+#else /* Word 0 - Little Endian */
+ uint32_t rtlpfccnr : 1; /**< [ 0: 0](RO/H) Received TLP FC credits not returned. Indicates that the PCI Express bus has sent a TLP
+ but has not yet received an UpdateFC DLLP indicating that the credits for that TLP have
+ been restored by the receiver at the other end of the link. */
+ uint32_t trbne : 1; /**< [ 1: 1](RO/H) Transmit retry buffer not empty. Indicates that there is data in the transmit retry buffer. */
+ uint32_t rqne : 1; /**< [ 2: 2](RO/H) Received queue not empty. Indicates there is data in one or more of the receive buffers. */
+ uint32_t reserved_3_15 : 13;
+ uint32_t fcltov : 13; /**< [ 28: 16](R/W) FC latency timer override value. When you set PCIERC()_CFG453[FCLTOE], the value in
+ this field will override the FC latency timer value that the core calculates according to
+ the PCIe specification. */
+ uint32_t reserved_29_30 : 2;
+ uint32_t fcltoe : 1; /**< [ 31: 31](R/W) FC latency timer override enable. When this bit is set, the value in
+ PCIERC()_CFG453[FCLTOV] will override the FC latency timer value that the core
+ calculates according to the PCIe specification. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg463_s cn; */
+};
+typedef union bdk_pciercx_cfg463 bdk_pciercx_cfg463_t;
+
+static inline uint64_t BDK_PCIERCX_CFG463(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG463(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x2000000073cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000073cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x2000000073cll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG463", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG463(a) bdk_pciercx_cfg463_t
+#define bustype_BDK_PCIERCX_CFG463(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG463(a) "PCIERCX_CFG463"
+#define busnum_BDK_PCIERCX_CFG463(a) (a)
+#define arguments_BDK_PCIERCX_CFG463(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg464
+ *
+ * PCIe RC VC Transmit Arbitration Register 1
+ * This register contains the four hundred sixty-fifth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg464
+{
+ /* Whole-register view. */
+ uint32_t u;
+ /* Common layout; the two endian branches describe the same Word 0 fields. */
+ struct bdk_pciercx_cfg464_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wrr_vc3 : 8; /**< [ 31: 24](RO/H) WRR weight for VC3. */
+ uint32_t wrr_vc2 : 8; /**< [ 23: 16](RO/H) WRR weight for VC2. */
+ uint32_t wrr_vc1 : 8; /**< [ 15: 8](RO/H) WRR weight for VC1. */
+ uint32_t wrr_vc0 : 8; /**< [ 7: 0](RO/H) WRR weight for VC0. */
+#else /* Word 0 - Little Endian */
+ uint32_t wrr_vc0 : 8; /**< [ 7: 0](RO/H) WRR weight for VC0. */
+ uint32_t wrr_vc1 : 8; /**< [ 15: 8](RO/H) WRR weight for VC1. */
+ uint32_t wrr_vc2 : 8; /**< [ 23: 16](RO/H) WRR weight for VC2. */
+ uint32_t wrr_vc3 : 8; /**< [ 31: 24](RO/H) WRR weight for VC3. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg464_s cn81xx; */
+ /* struct bdk_pciercx_cfg464_s cn88xx; */
+ /* CN83XX variant: identical bit layout; fields documented as RO rather than RO/H. */
+ struct bdk_pciercx_cfg464_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wrr_vc3 : 8; /**< [ 31: 24](RO) WRR weight for VC3. */
+ uint32_t wrr_vc2 : 8; /**< [ 23: 16](RO) WRR weight for VC2. */
+ uint32_t wrr_vc1 : 8; /**< [ 15: 8](RO) WRR weight for VC1. */
+ uint32_t wrr_vc0 : 8; /**< [ 7: 0](RO) WRR weight for VC0. */
+#else /* Word 0 - Little Endian */
+ uint32_t wrr_vc0 : 8; /**< [ 7: 0](RO) WRR weight for VC0. */
+ uint32_t wrr_vc1 : 8; /**< [ 15: 8](RO) WRR weight for VC1. */
+ uint32_t wrr_vc2 : 8; /**< [ 23: 16](RO) WRR weight for VC2. */
+ uint32_t wrr_vc3 : 8; /**< [ 31: 24](RO) WRR weight for VC3. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg464 bdk_pciercx_cfg464_t;
+
+/* NOTE(review): returns the CSR address of PCIERC(a)_CFG464 for the SoC model
+ * detected at run time; (a) appears to select the root-complex instance
+ * (CN81XX 0..2, CN83XX 0..3, CN88XX 0..5).  Out-of-range or unknown-model
+ * lookups fall through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG464(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG464(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000740ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000740ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000740ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG464", 1, a, 0, 0, 0);
+}
+
+/* Metadata used by the generic BDK CSR accessor macros. */
+#define typedef_BDK_PCIERCX_CFG464(a) bdk_pciercx_cfg464_t
+#define bustype_BDK_PCIERCX_CFG464(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG464(a) "PCIERCX_CFG464"
+#define busnum_BDK_PCIERCX_CFG464(a) (a)
+#define arguments_BDK_PCIERCX_CFG464(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg465
+ *
+ * PCIe RC VC Transmit Arbitration Register 2
+ * This register contains the four hundred sixty-sixth 32-bits of configuration space.
+ */
+union bdk_pciercx_cfg465
+{
+ /* Whole-register view. */
+ uint32_t u;
+ /* Common layout; the two endian branches describe the same Word 0 fields. */
+ struct bdk_pciercx_cfg465_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wrr_vc7 : 8; /**< [ 31: 24](RO/H) WRR weight for VC7. */
+ uint32_t wrr_vc6 : 8; /**< [ 23: 16](RO/H) WRR weight for VC6. */
+ uint32_t wrr_vc5 : 8; /**< [ 15: 8](RO/H) WRR weight for VC5. */
+ uint32_t wrr_vc4 : 8; /**< [ 7: 0](RO/H) WRR weight for VC4. */
+#else /* Word 0 - Little Endian */
+ uint32_t wrr_vc4 : 8; /**< [ 7: 0](RO/H) WRR weight for VC4. */
+ uint32_t wrr_vc5 : 8; /**< [ 15: 8](RO/H) WRR weight for VC5. */
+ uint32_t wrr_vc6 : 8; /**< [ 23: 16](RO/H) WRR weight for VC6. */
+ uint32_t wrr_vc7 : 8; /**< [ 31: 24](RO/H) WRR weight for VC7. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg465_s cn81xx; */
+ /* struct bdk_pciercx_cfg465_s cn88xx; */
+ /* CN83XX variant: identical bit layout; fields documented as RO rather than RO/H. */
+ struct bdk_pciercx_cfg465_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wrr_vc7 : 8; /**< [ 31: 24](RO) WRR weight for VC7. */
+ uint32_t wrr_vc6 : 8; /**< [ 23: 16](RO) WRR weight for VC6. */
+ uint32_t wrr_vc5 : 8; /**< [ 15: 8](RO) WRR weight for VC5. */
+ uint32_t wrr_vc4 : 8; /**< [ 7: 0](RO) WRR weight for VC4. */
+#else /* Word 0 - Little Endian */
+ uint32_t wrr_vc4 : 8; /**< [ 7: 0](RO) WRR weight for VC4. */
+ uint32_t wrr_vc5 : 8; /**< [ 15: 8](RO) WRR weight for VC5. */
+ uint32_t wrr_vc6 : 8; /**< [ 23: 16](RO) WRR weight for VC6. */
+ uint32_t wrr_vc7 : 8; /**< [ 31: 24](RO) WRR weight for VC7. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg465 bdk_pciercx_cfg465_t;
+
+/* NOTE(review): returns the CSR address of PCIERC(a)_CFG465 for the SoC model
+ * detected at run time; (a) appears to select the root-complex instance
+ * (CN81XX 0..2, CN83XX 0..3, CN88XX 0..5).  Out-of-range or unknown-model
+ * lookups fall through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG465(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG465(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000744ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000744ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000744ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG465", 1, a, 0, 0, 0);
+}
+
+/* Metadata used by the generic BDK CSR accessor macros. */
+#define typedef_BDK_PCIERCX_CFG465(a) bdk_pciercx_cfg465_t
+#define bustype_BDK_PCIERCX_CFG465(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG465(a) "PCIERCX_CFG465"
+#define busnum_BDK_PCIERCX_CFG465(a) (a)
+#define arguments_BDK_PCIERCX_CFG465(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg466
+ *
+ * PCIe RC VC0 Posted Receive Queue Control Register
+ * This register contains the four hundred sixty-seventh 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg466
+{
+ /* Whole-register view. */
+ uint32_t u;
+ /* Bit-field view; the two endian branches describe the same Word 0 layout. */
+ struct bdk_pciercx_cfg466_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rx_queue_order : 1; /**< [ 31: 31](R/W) VC ordering for receive queues. Determines the VC ordering rule for the receive queues,
+ used only in the segmented-buffer configuration, writable through PEM()_CFG_WR:
+ 0 = Round robin.
+ 1 = Strict ordering, higher numbered VCs have higher priority.
+
+ However, the application must not change this field. */
+ uint32_t type_ordering : 1; /**< [ 30: 30](RO/WRSL) TLP type ordering for VC0. Determines the TLP type ordering rule for VC0 receive queues,
+ used only in the segmented-buffer configuration, writable through
+ PEM()_CFG_WR:
+ 0 = Strict ordering for received TLPs: Posted, then completion, then NonPosted.
+ 1 = Ordering of received TLPs follows the rules in PCI Express Base Specification.
+
+ The application must not change this field. */
+ uint32_t reserved_24_29 : 6;
+ uint32_t queue_mode : 3; /**< [ 23: 21](RO/WRSL) VC0 posted TLP queue mode. The operating mode of the posted receive queue for VC0, used
+ only in the segmented-buffer configuration, writable through PEM()_CFG_WR. However,
+ the application must not change this field.
+ Only one bit can be set at a time:
+
+ _ Bit 23 = Bypass.
+
+ _ Bit 22 = Cut-through.
+
+ _ Bit 21 = Store-and-forward. */
+ uint32_t reserved_20 : 1;
+ uint32_t header_credits : 8; /**< [ 19: 12](RO/WRSL) VC0 posted header credits. The number of initial posted header credits for VC0, used for
+ all receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< [ 11: 0](RO/WRSL) VC0 posted data credits. The number of initial posted data credits for VC0, used for all
+ receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t data_credits : 12; /**< [ 11: 0](RO/WRSL) VC0 posted data credits. The number of initial posted data credits for VC0, used for all
+ receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t header_credits : 8; /**< [ 19: 12](RO/WRSL) VC0 posted header credits. The number of initial posted header credits for VC0, used for
+ all receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_20 : 1;
+ uint32_t queue_mode : 3; /**< [ 23: 21](RO/WRSL) VC0 posted TLP queue mode. The operating mode of the posted receive queue for VC0, used
+ only in the segmented-buffer configuration, writable through PEM()_CFG_WR. However,
+ the application must not change this field.
+ Only one bit can be set at a time:
+
+ _ Bit 23 = Bypass.
+
+ _ Bit 22 = Cut-through.
+
+ _ Bit 21 = Store-and-forward. */
+ uint32_t reserved_24_29 : 6;
+ uint32_t type_ordering : 1; /**< [ 30: 30](RO/WRSL) TLP type ordering for VC0. Determines the TLP type ordering rule for VC0 receive queues,
+ used only in the segmented-buffer configuration, writable through
+ PEM()_CFG_WR:
+ 0 = Strict ordering for received TLPs: Posted, then completion, then NonPosted.
+ 1 = Ordering of received TLPs follows the rules in PCI Express Base Specification.
+
+ The application must not change this field. */
+ uint32_t rx_queue_order : 1; /**< [ 31: 31](R/W) VC ordering for receive queues. Determines the VC ordering rule for the receive queues,
+ used only in the segmented-buffer configuration, writable through PEM()_CFG_WR:
+ 0 = Round robin.
+ 1 = Strict ordering, higher numbered VCs have higher priority.
+
+ However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg466_s cn; */
+};
+typedef union bdk_pciercx_cfg466 bdk_pciercx_cfg466_t;
+
+/* NOTE(review): returns the CSR address of PCIERC(a)_CFG466 for the SoC model
+ * detected at run time; (a) appears to select the root-complex instance
+ * (CN81XX 0..2, CN83XX 0..3, CN88XX 0..5).  Out-of-range or unknown-model
+ * lookups fall through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG466(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG466(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000748ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000748ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000748ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG466", 1, a, 0, 0, 0);
+}
+
+/* Metadata used by the generic BDK CSR accessor macros. */
+#define typedef_BDK_PCIERCX_CFG466(a) bdk_pciercx_cfg466_t
+#define bustype_BDK_PCIERCX_CFG466(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG466(a) "PCIERCX_CFG466"
+#define busnum_BDK_PCIERCX_CFG466(a) (a)
+#define arguments_BDK_PCIERCX_CFG466(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg467
+ *
+ * PCIe RC VC0 Nonposted Receive Queue Control Register
+ * This register contains the four hundred sixty-eighth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg467
+{
+ /* Whole-register view. */
+ uint32_t u;
+ /* Bit-field view; the two endian branches describe the same Word 0 layout. */
+ struct bdk_pciercx_cfg467_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t queue_mode : 3; /**< [ 23: 21](RO/WRSL) VC0 nonposted TLP queue mode. The operating mode of the nonposted receive queue for VC0,
+ used only in the segmented-buffer configuration, writable through PEM()_CFG_WR.
+ Only one bit can be set at a time:
+
+ _ Bit 23 = Bypass.
+
+ _ Bit 22 = Cut-through.
+
+ _ Bit 21 = Store-and-forward.
+
+ The application must not change this field. */
+ uint32_t reserved_20 : 1;
+ uint32_t header_credits : 8; /**< [ 19: 12](RO/WRSL) VC0 nonposted header credits. The number of initial nonposted header credits for VC0, used
+ for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< [ 11: 0](RO/WRSL) VC0 nonposted data credits. The number of initial nonposted data credits for VC0, used for
+ all receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t data_credits : 12; /**< [ 11: 0](RO/WRSL) VC0 nonposted data credits. The number of initial nonposted data credits for VC0, used for
+ all receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t header_credits : 8; /**< [ 19: 12](RO/WRSL) VC0 nonposted header credits. The number of initial nonposted header credits for VC0, used
+ for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_20 : 1;
+ uint32_t queue_mode : 3; /**< [ 23: 21](RO/WRSL) VC0 nonposted TLP queue mode. The operating mode of the nonposted receive queue for VC0,
+ used only in the segmented-buffer configuration, writable through PEM()_CFG_WR.
+ Only one bit can be set at a time:
+
+ _ Bit 23 = Bypass.
+
+ _ Bit 22 = Cut-through.
+
+ _ Bit 21 = Store-and-forward.
+
+ The application must not change this field. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg467_s cn; */
+};
+typedef union bdk_pciercx_cfg467 bdk_pciercx_cfg467_t;
+
+/* NOTE(review): returns the CSR address of PCIERC(a)_CFG467 for the SoC model
+ * detected at run time; (a) appears to select the root-complex instance
+ * (CN81XX 0..2, CN83XX 0..3, CN88XX 0..5).  Out-of-range or unknown-model
+ * lookups fall through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG467(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG467(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x2000000074cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000074cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x2000000074cll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG467", 1, a, 0, 0, 0);
+}
+
+/* Metadata used by the generic BDK CSR accessor macros. */
+#define typedef_BDK_PCIERCX_CFG467(a) bdk_pciercx_cfg467_t
+#define bustype_BDK_PCIERCX_CFG467(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG467(a) "PCIERCX_CFG467"
+#define busnum_BDK_PCIERCX_CFG467(a) (a)
+#define arguments_BDK_PCIERCX_CFG467(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg468
+ *
+ * PCIe RC VC0 Completion Receive Queue Control Register
+ * This register contains the four hundred sixty-ninth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg468
+{
+ /* Whole-register view. */
+ uint32_t u;
+ /* Bit-field view; the two endian branches describe the same Word 0 layout. */
+ struct bdk_pciercx_cfg468_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t queue_mode : 3; /**< [ 23: 21](RO/WRSL) VC0 completion TLP queue mode. The operating mode of the completion receive queue for VC0,
+ used only in the segmented-buffer configuration, writable through
+ PEM()_CFG_WR.
+ Only one bit can be set at a time:
+
+ _ Bit 23 = Bypass.
+
+ _ Bit 22 = Cut-through.
+
+ _ Bit 21 = Store-and-forward.
+
+ The application must not change this field. */
+ uint32_t reserved_20 : 1;
+ uint32_t header_credits : 8; /**< [ 19: 12](RO/WRSL) VC0 completion header credits. The number of initial completion header credits for VC0,
+ used for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t data_credits : 12; /**< [ 11: 0](RO/WRSL) VC0 completion data credits. The number of initial completion data credits for VC0, used
+ for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t data_credits : 12; /**< [ 11: 0](RO/WRSL) VC0 completion data credits. The number of initial completion data credits for VC0, used
+ for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t header_credits : 8; /**< [ 19: 12](RO/WRSL) VC0 completion header credits. The number of initial completion header credits for VC0,
+ used for all receive queue buffer configurations. This field is writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_20 : 1;
+ uint32_t queue_mode : 3; /**< [ 23: 21](RO/WRSL) VC0 completion TLP queue mode. The operating mode of the completion receive queue for VC0,
+ used only in the segmented-buffer configuration, writable through
+ PEM()_CFG_WR.
+ Only one bit can be set at a time:
+
+ _ Bit 23 = Bypass.
+
+ _ Bit 22 = Cut-through.
+
+ _ Bit 21 = Store-and-forward.
+
+ The application must not change this field. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_cfg468_s cn; */
+};
+typedef union bdk_pciercx_cfg468 bdk_pciercx_cfg468_t;
+
+/* NOTE(review): returns the CSR address of PCIERC(a)_CFG468 for the SoC model
+ * detected at run time; (a) appears to select the root-complex instance
+ * (CN81XX 0..2, CN83XX 0..3, CN88XX 0..5).  Out-of-range or unknown-model
+ * lookups fall through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_CFG468(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG468(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000750ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000750ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000750ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG468", 1, a, 0, 0, 0);
+}
+
+/* Metadata used by the generic BDK CSR accessor macros. */
+#define typedef_BDK_PCIERCX_CFG468(a) bdk_pciercx_cfg468_t
+#define bustype_BDK_PCIERCX_CFG468(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG468(a) "PCIERCX_CFG468"
+#define busnum_BDK_PCIERCX_CFG468(a) (a)
+#define arguments_BDK_PCIERCX_CFG468(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg515
+ *
+ * PCIe RC Gen2 Port Logic Register
+ * This register contains the five hundred sixteenth 32-bits of PCIe type 1 configuration space.
+ */
+union bdk_pciercx_cfg515
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg515_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_21_31 : 11;
+ uint32_t s_d_e : 1; /**< [ 20: 20](R/W) SEL_DE_EMPHASIS. Used to set the deemphasis level for upstream ports. */
+ uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to 1). */
+ uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to 1,
+ indicates full swing. When set to 0, indicates low swing. */
+ uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of 1 initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+ uint32_t reserved_8_16 : 9;
+ uint32_t n_fts : 8; /**< [ 7: 0](R/W) N_FTS. Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+#else /* Word 0 - Little Endian */
+ uint32_t n_fts : 8; /**< [ 7: 0](R/W) N_FTS. Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+ uint32_t reserved_8_16 : 9;
+ uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of 1 initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+ uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to 1,
+ indicates full swing. When set to 0, indicates low swing. */
+ uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to 1). */
+ uint32_t s_d_e : 1; /**< [ 20: 20](R/W) SEL_DE_EMPHASIS. Used to set the deemphasis level for upstream ports. */
+ uint32_t reserved_21_31 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg515_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t reserved_21 : 1;
+ uint32_t s_d_e : 1; /**< [ 20: 20](R/W) SEL_DE_EMPHASIS. Used to set the deemphasis level for upstream ports. */
+ uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to 1). */
+ uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to 1,
+ indicates full swing. When set to 0, indicates low swing. */
+ uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of 1 initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+ uint32_t alfpce : 1; /**< [ 16: 16](R/W) Auto lane flip control enable. When set to 1, the core will try to flip the lanes
+ autonomously in Detect LTSSM state when lane0 is not detected. */
+ uint32_t pdl : 3; /**< [ 15: 13](R/W) Predetermined lane for Auto Flip. This field defines which physical lane is connected
+ to logical Lane0 by the flip operation performed in Detect. 0x0 = connect logical Lane0
+ to physical lane0 or CX_NL or CX_NL/2-1 or CX_NL/4-1 or CX_NL/8-1, depending on which lane
+ is detected, 0x1 = logical Lane0 to physical lane 1, 0x2 = logical Lane0 to physical lane
+ 3,
+ 0x3 = logical Lane0 to physical Lane 7, 0x4 = logical Lane0 to physical lane 15. */
+ uint32_t le : 5; /**< [ 12: 8](R/W) Lane enable. Indicates the number of lanes to check for exit from electrical idle in
+ Polling.Active and Polling.Compliance. 0x1 = x1, 0x2 = x2, etc. Used to limit the maximum
+ link width to ignore broken lanes that detect a receiver, but will not exit electrical
+ idle and would otherwise prevent a valid link from being configured. */
+ uint32_t n_fts : 8; /**< [ 7: 0](R/W) N_FTS. Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+#else /* Word 0 - Little Endian */
+ uint32_t n_fts : 8; /**< [ 7: 0](R/W) N_FTS. Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+ uint32_t le : 5; /**< [ 12: 8](R/W) Lane enable. Indicates the number of lanes to check for exit from electrical idle in
+ Polling.Active and Polling.Compliance. 0x1 = x1, 0x2 = x2, etc. Used to limit the maximum
+ link width to ignore broken lanes that detect a receiver, but will not exit electrical
+ idle and would otherwise prevent a valid link from being configured. */
+ uint32_t pdl : 3; /**< [ 15: 13](R/W) Predetermined lane for Auto Flip. This field defines which physical lane is connected
+ to logical Lane0 by the flip operation performed in Detect. 0x0 = connect logical Lane0
+ to physical lane0 or CX_NL or CX_NL/2-1 or CX_NL/4-1 or CX_NL/8-1, depending on which lane
+ is detected, 0x1 = logical Lane0 to physical lane 1, 0x2 = logical Lane0 to physical lane
+ 3,
+ 0x3 = logical Lane0 to physical Lane 7, 0x4 = logical Lane0 to physical lane 15. */
+ uint32_t alfpce : 1; /**< [ 16: 16](R/W) Auto lane flip control enable. When set to 1, the core will try to flip the lanes
+ autonomously in Detect LTSSM state when lane0 is not detected. */
+ uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of 1 initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+ uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to 1,
+ indicates full swing. When set to 0, indicates low swing. */
+ uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to 1). */
+ uint32_t s_d_e : 1; /**< [ 20: 20](R/W) SEL_DE_EMPHASIS. Used to set the deemphasis level for upstream ports. */
+ uint32_t reserved_21 : 1;
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_pciercx_cfg515_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t reserved_21 : 1;
+ uint32_t s_d_e : 1; /**< [ 20: 20](R/W) SEL_DE_EMPHASIS. Used to set the deemphasis level for upstream ports. */
+ uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to 1). */
+ uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to 1,
+ indicates full swing. When set to 0, indicates low swing. */
+ uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of 1 initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+ uint32_t le : 9; /**< [ 16: 8](R/W) Lane enable. Indicates the number of lanes to check for exit from electrical idle in
+ Polling.Active and Polling.Compliance. 0x1 = x1, 0x2 = x2, etc. Used to limit the maximum
+ link width to ignore broken lanes that detect a receiver, but will not exit electrical
+ idle and would otherwise prevent a valid link from being configured. */
+ uint32_t n_fts : 8; /**< [ 7: 0](R/W) N_FTS. Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+#else /* Word 0 - Little Endian */
+ uint32_t n_fts : 8; /**< [ 7: 0](R/W) N_FTS. Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+ uint32_t le : 9; /**< [ 16: 8](R/W) Lane enable. Indicates the number of lanes to check for exit from electrical idle in
+ Polling.Active and Polling.Compliance. 0x1 = x1, 0x2 = x2, etc. Used to limit the maximum
+ link width to ignore broken lanes that detect a receiver, but will not exit electrical
+ idle and would otherwise prevent a valid link from being configured. */
+ uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of 1 initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+ uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to 1,
+ indicates full swing. When set to 0, indicates low swing. */
+ uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to 1). */
+ uint32_t s_d_e : 1; /**< [ 20: 20](R/W) SEL_DE_EMPHASIS. Used to set the deemphasis level for upstream ports. */
+ uint32_t reserved_21 : 1;
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_pciercx_cfg515_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_22_31 : 10;
+ uint32_t reserved_21 : 1;
+ uint32_t s_d_e : 1; /**< [ 20: 20](R/W) SEL_DE_EMPHASIS. Used to set the deemphasis level for upstream ports.
+ 1 = -3.5 dB.
+ 0 = -6 dB. */
+ uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to 1). */
+ uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to 1,
+ indicates low swing. When set to 0, indicates full swing. */
+ uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of 1 initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+ uint32_t alaneflip : 1; /**< [ 16: 16](R/W) Enable auto flipping of the lanes. */
+ uint32_t pdetlane : 3; /**< [ 15: 13](R/W) Predetermined lane for auto flip. This field defines which
+ physical lane is connected to logical Lane0 by the flip
+ operation performed in detect.
+ 0x0 = Reserved.
+ 0x1 = Connect logical Lane0 to physical lane 1.
+ 0x2 = Connect logical Lane0 to physical lane 3.
+ 0x3 = Connect logical Lane0 to physical lane 7.
+ 0x4 = Connect logical Lane0 to physical lane 15.
+ 0x5 - 0x7 = Reserved. */
+ uint32_t nlanes : 5; /**< [ 12: 8](R/W) Predetermined number of lanes. Defines the number of
+ lanes which are connected and not bad. Used to limit the
+ effective link width to ignore "broken" or "unused" lanes that
+ detect a receiver. Indicates the number of lanes to check for
+ exit from electrical idle in Polling.Active and L2.Idle.
+ 0x1 = 1 lane.
+ 0x2 = 2 lanes.
+ 0x3 = 3 lanes.
+ ...
+ 0x8 = 8 lanes.
+ 0x9-0x1F = Reserved.
+
+ When there are unused lanes in the system, then this value must reflect the
+ number of lanes. PCIEEP()_CFG452[LME] must also be changed likewise. */
+ uint32_t n_fts : 8; /**< [ 7: 0](R/W) N_FTS. Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+#else /* Word 0 - Little Endian */
+ uint32_t n_fts : 8; /**< [ 7: 0](R/W) N_FTS. Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+ uint32_t nlanes : 5; /**< [ 12: 8](R/W) Predetermined number of lanes. Defines the number of
+ lanes which are connected and not bad. Used to limit the
+ effective link width to ignore "broken" or "unused" lanes that
+ detect a receiver. Indicates the number of lanes to check for
+ exit from electrical idle in Polling.Active and L2.Idle.
+ 0x1 = 1 lane.
+ 0x2 = 2 lanes.
+ 0x3 = 3 lanes.
+ ...
+ 0x8 = 8 lanes.
+ 0x9-0x1F = Reserved.
+
+ When there are unused lanes in the system, then this value must reflect the
+ number of lanes. PCIEEP()_CFG452[LME] must also be changed likewise. */
+ uint32_t pdetlane : 3; /**< [ 15: 13](R/W) Predetermined lane for auto flip. This field defines which
+ physical lane is connected to logical Lane0 by the flip
+ operation performed in detect.
+ 0x0 = Reserved.
+ 0x1 = Connect logical Lane0 to physical lane 1.
+ 0x2 = Connect logical Lane0 to physical lane 3.
+ 0x3 = Connect logical Lane0 to physical lane 7.
+ 0x4 = Connect logical Lane0 to physical lane 15.
+ 0x5 - 0x7 = Reserved. */
+ uint32_t alaneflip : 1; /**< [ 16: 16](R/W) Enable auto flipping of the lanes. */
+ uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of 1 initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+ uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to 1,
+ indicates low swing. When set to 0, indicates full swing. */
+ uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to 1, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to 1). */
+ uint32_t s_d_e : 1; /**< [ 20: 20](R/W) SEL_DE_EMPHASIS. Used to set the deemphasis level for upstream ports.
+ 1 = -3.5 dB.
+ 0 = -6 dB. */
+ uint32_t reserved_21 : 1;
+ uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_pciercx_cfg515 bdk_pciercx_cfg515_t;
+
+static inline uint64_t BDK_PCIERCX_CFG515(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG515(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x2000000080cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x2000000080cll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x2000000080cll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG515", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG515(a) bdk_pciercx_cfg515_t
+#define bustype_BDK_PCIERCX_CFG515(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG515(a) "PCIERCX_CFG515"
+#define busnum_BDK_PCIERCX_CFG515(a) (a)
+#define arguments_BDK_PCIERCX_CFG515(a) (a),-1,-1,-1
+
/**
 * Register (PCICONFIGRC) pcierc#_cfg516
 *
 * PCIe RC PHY Status Register
 * This register contains the five hundred seventeenth 32-bits of PCIe type 1 configuration space.
 */
union bdk_pciercx_cfg516
{
    uint32_t u;
    struct bdk_pciercx_cfg516_s
    {
        /* The word is a single 32-bit field, so both endian branches are
           identical; the #if only preserves the generated-file shape. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t phy_stat              : 32; /**< [ 31:  0](RO/H) PHY status. */
#else /* Word 0 - Little Endian */
        uint32_t phy_stat              : 32; /**< [ 31:  0](RO/H) PHY status. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_pciercx_cfg516_s cn; */
};
typedef union bdk_pciercx_cfg516 bdk_pciercx_cfg516_t;
+
+static inline uint64_t BDK_PCIERCX_CFG516(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG516(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000810ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000810ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000810ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG516", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG516(a) bdk_pciercx_cfg516_t
+#define bustype_BDK_PCIERCX_CFG516(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG516(a) "PCIERCX_CFG516"
+#define busnum_BDK_PCIERCX_CFG516(a) (a)
+#define arguments_BDK_PCIERCX_CFG516(a) (a),-1,-1,-1
+
/**
 * Register (PCICONFIGRC) pcierc#_cfg517
 *
 * PCIe RC PHY Control Register
 * This register contains the five hundred eighteenth 32-bits of PCIe type 1 configuration space.
 */
union bdk_pciercx_cfg517
{
    uint32_t u;
    struct bdk_pciercx_cfg517_s
    {
        /* The word is a single 32-bit field, so both endian branches are
           identical; the #if only preserves the generated-file shape. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t phy_ctrl              : 32; /**< [ 31:  0](R/W) PHY control. */
#else /* Word 0 - Little Endian */
        uint32_t phy_ctrl              : 32; /**< [ 31:  0](R/W) PHY control. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_pciercx_cfg517_s cn; */
};
typedef union bdk_pciercx_cfg517 bdk_pciercx_cfg517_t;
+
+static inline uint64_t BDK_PCIERCX_CFG517(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG517(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000814ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000814ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000814ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG517", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG517(a) bdk_pciercx_cfg517_t
+#define bustype_BDK_PCIERCX_CFG517(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG517(a) "PCIERCX_CFG517"
+#define busnum_BDK_PCIERCX_CFG517(a) (a)
+#define arguments_BDK_PCIERCX_CFG517(a) (a),-1,-1,-1
+
/**
 * Register (PCICONFIGRC) pcierc#_cfg548
 *
 * PCIe RC Gen3 Control Register
 * This register contains the five hundred forty-ninth 32-bits of type 0 PCIe configuration space.
 */
union bdk_pciercx_cfg548
{
    uint32_t u;
    /* Generic layout; per-chip variants below reserve fields that are not
       implemented on that model.  Field order defines the hardware layout —
       do not reorder. */
    struct bdk_pciercx_cfg548_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_26_31        : 6;
        uint32_t rss                   : 2;  /**< [ 25: 24](RO) Data rate for shadow register. Hard-wired for Gen3. */
        uint32_t eiedd                 : 1;  /**< [ 23: 23](R/W) Eq InvalidRequest and RxEqEval Different Time Assertion Disable. Disable the assertion of
                                                                 Eq InvalidRequest and RxEqEval at different time. */
        uint32_t reserved_19_22        : 4;
        uint32_t dcbd                  : 1;  /**< [ 18: 18](R/W) Disable balance disable. Disable DC balance feature. */
        uint32_t dtdd                  : 1;  /**< [ 17: 17](R/W) DLLP transmission delay disable. Disable delay transmission of DLLPs before equalization. */
        uint32_t ed                    : 1;  /**< [ 16: 16](R/W) Equalization disable. Disable equalization feature. */
        uint32_t reserved_14_15        : 2;
        uint32_t rxeq_rgrdless_rsts    : 1;  /**< [ 13: 13](R/W) The controller as Gen3 EQ master asserts RxEqEval to instruct the
                                                                 PHY to do Rx adaptation and evaluation.
                                                                 0x0 = Asserts after 1 us and 2 TS1 received from remote partner.
                                                                 0x1 = Asserts after 500 ns regardless of TS's received or not. */
        uint32_t rxeq_ph01_en          : 1;  /**< [ 12: 12](R/W) Rx Equalization Phase 0/Phase 1 Hold Enable. */
        uint32_t erd                   : 1;  /**< [ 11: 11](R/W) Equalization redo disable. Disable requesting reset of EIEOS count during equalization. */
        uint32_t ecrd                  : 1;  /**< [ 10: 10](R/W) Equalization EIEOS count reset disable. Disable requesting reset of EIEOS count during
                                                                 equalization. */
        uint32_t ep2p3d                : 1;  /**< [  9:  9](R/W) Equalization phase 2 and phase 3 disable. This applies to downstream ports only. */
        uint32_t dsg3                  : 1;  /**< [  8:  8](R/W) Disable scrambler for Gen3 data rate. The Gen3 scrambler/descrambler within the core needs
                                                                 to be disabled when the scrambling function is implemented outside of the core (within the
                                                                 PHY). */
        uint32_t reserved_1_7          : 7;
        uint32_t grizdnc               : 1;  /**< [  0:  0](R/W) Gen3 receiver impedance ZRX-DC not compliant. */
#else /* Word 0 - Little Endian */
        uint32_t grizdnc               : 1;  /**< [  0:  0](R/W) Gen3 receiver impedance ZRX-DC not compliant. */
        uint32_t reserved_1_7          : 7;
        uint32_t dsg3                  : 1;  /**< [  8:  8](R/W) Disable scrambler for Gen3 data rate. The Gen3 scrambler/descrambler within the core needs
                                                                 to be disabled when the scrambling function is implemented outside of the core (within the
                                                                 PHY). */
        uint32_t ep2p3d                : 1;  /**< [  9:  9](R/W) Equalization phase 2 and phase 3 disable. This applies to downstream ports only. */
        uint32_t ecrd                  : 1;  /**< [ 10: 10](R/W) Equalization EIEOS count reset disable. Disable requesting reset of EIEOS count during
                                                                 equalization. */
        uint32_t erd                   : 1;  /**< [ 11: 11](R/W) Equalization redo disable. Disable requesting reset of EIEOS count during equalization. */
        uint32_t rxeq_ph01_en          : 1;  /**< [ 12: 12](R/W) Rx Equalization Phase 0/Phase 1 Hold Enable. */
        uint32_t rxeq_rgrdless_rsts    : 1;  /**< [ 13: 13](R/W) The controller as Gen3 EQ master asserts RxEqEval to instruct the
                                                                 PHY to do Rx adaptation and evaluation.
                                                                 0x0 = Asserts after 1 us and 2 TS1 received from remote partner.
                                                                 0x1 = Asserts after 500 ns regardless of TS's received or not. */
        uint32_t reserved_14_15        : 2;
        uint32_t ed                    : 1;  /**< [ 16: 16](R/W) Equalization disable. Disable equalization feature. */
        uint32_t dtdd                  : 1;  /**< [ 17: 17](R/W) DLLP transmission delay disable. Disable delay transmission of DLLPs before equalization. */
        uint32_t dcbd                  : 1;  /**< [ 18: 18](R/W) Disable balance disable. Disable DC balance feature. */
        uint32_t reserved_19_22        : 4;
        uint32_t eiedd                 : 1;  /**< [ 23: 23](R/W) Eq InvalidRequest and RxEqEval Different Time Assertion Disable. Disable the assertion of
                                                                 Eq InvalidRequest and RxEqEval at different time. */
        uint32_t rss                   : 2;  /**< [ 25: 24](RO) Data rate for shadow register. Hard-wired for Gen3. */
        uint32_t reserved_26_31        : 6;
#endif /* Word 0 - End */
    } s;
    /* CN81XX: bits [13:12] (the RXEQ_* controls) are reserved relative to
       the generic layout. */
    struct bdk_pciercx_cfg548_cn81xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_26_31        : 6;
        uint32_t rss                   : 2;  /**< [ 25: 24](RO) Data rate for shadow register. Hard-wired for Gen3. */
        uint32_t eiedd                 : 1;  /**< [ 23: 23](R/W) Eq InvalidRequest and RxEqEval Different Time Assertion Disable. Disable the assertion of
                                                                 Eq InvalidRequest and RxEqEval at different time. */
        uint32_t reserved_19_22        : 4;
        uint32_t dcbd                  : 1;  /**< [ 18: 18](R/W) Disable balance disable. Disable DC balance feature. */
        uint32_t dtdd                  : 1;  /**< [ 17: 17](R/W) DLLP transmission delay disable. Disable delay transmission of DLLPs before equalization. */
        uint32_t ed                    : 1;  /**< [ 16: 16](R/W) Equalization disable. Disable equalization feature. */
        uint32_t reserved_13_15        : 3;
        uint32_t reserved_12           : 1;
        uint32_t erd                   : 1;  /**< [ 11: 11](R/W) Equalization redo disable. Disable requesting reset of EIEOS count during equalization. */
        uint32_t ecrd                  : 1;  /**< [ 10: 10](R/W) Equalization EIEOS count reset disable. Disable requesting reset of EIEOS count during
                                                                 equalization. */
        uint32_t ep2p3d                : 1;  /**< [  9:  9](R/W) Equalization phase 2 and phase 3 disable. This applies to downstream ports only. */
        uint32_t dsg3                  : 1;  /**< [  8:  8](R/W) Disable scrambler for Gen3 data rate. The Gen3 scrambler/descrambler within the core needs
                                                                 to be disabled when the scrambling function is implemented outside of the core (within the
                                                                 PHY). */
        uint32_t reserved_1_7          : 7;
        uint32_t grizdnc               : 1;  /**< [  0:  0](R/W) Gen3 receiver impedance ZRX-DC not compliant. */
#else /* Word 0 - Little Endian */
        uint32_t grizdnc               : 1;  /**< [  0:  0](R/W) Gen3 receiver impedance ZRX-DC not compliant. */
        uint32_t reserved_1_7          : 7;
        uint32_t dsg3                  : 1;  /**< [  8:  8](R/W) Disable scrambler for Gen3 data rate. The Gen3 scrambler/descrambler within the core needs
                                                                 to be disabled when the scrambling function is implemented outside of the core (within the
                                                                 PHY). */
        uint32_t ep2p3d                : 1;  /**< [  9:  9](R/W) Equalization phase 2 and phase 3 disable. This applies to downstream ports only. */
        uint32_t ecrd                  : 1;  /**< [ 10: 10](R/W) Equalization EIEOS count reset disable. Disable requesting reset of EIEOS count during
                                                                 equalization. */
        uint32_t erd                   : 1;  /**< [ 11: 11](R/W) Equalization redo disable. Disable requesting reset of EIEOS count during equalization. */
        uint32_t reserved_12           : 1;
        uint32_t reserved_13_15        : 3;
        uint32_t ed                    : 1;  /**< [ 16: 16](R/W) Equalization disable. Disable equalization feature. */
        uint32_t dtdd                  : 1;  /**< [ 17: 17](R/W) DLLP transmission delay disable. Disable delay transmission of DLLPs before equalization. */
        uint32_t dcbd                  : 1;  /**< [ 18: 18](R/W) Disable balance disable. Disable DC balance feature. */
        uint32_t reserved_19_22        : 4;
        uint32_t eiedd                 : 1;  /**< [ 23: 23](R/W) Eq InvalidRequest and RxEqEval Different Time Assertion Disable. Disable the assertion of
                                                                 Eq InvalidRequest and RxEqEval at different time. */
        uint32_t rss                   : 2;  /**< [ 25: 24](RO) Data rate for shadow register. Hard-wired for Gen3. */
        uint32_t reserved_26_31        : 6;
#endif /* Word 0 - End */
    } cn81xx;
    /* CN88XX: bits [31:19] are reserved ([EIEDD] and [RSS] are not present),
       in addition to the reserved [13:12] of CN81XX. */
    struct bdk_pciercx_cfg548_cn88xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_19_31        : 13;
        uint32_t dcbd                  : 1;  /**< [ 18: 18](R/W) Disable balance disable. Disable DC balance feature. */
        uint32_t dtdd                  : 1;  /**< [ 17: 17](R/W) DLLP transmission delay disable. Disable delay transmission of DLLPs before equalization. */
        uint32_t ed                    : 1;  /**< [ 16: 16](R/W) Equalization disable. Disable equalization feature. */
        uint32_t reserved_13_15        : 3;
        uint32_t reserved_12           : 1;
        uint32_t erd                   : 1;  /**< [ 11: 11](R/W) Equalization redo disable. Disable requesting reset of EIEOS count during equalization. */
        uint32_t ecrd                  : 1;  /**< [ 10: 10](R/W) Equalization EIEOS count reset disable. Disable requesting reset of EIEOS count during
                                                                 equalization. */
        uint32_t ep2p3d                : 1;  /**< [  9:  9](R/W) Equalization phase 2 and phase 3 disable. This applies to downstream ports only. */
        uint32_t dsg3                  : 1;  /**< [  8:  8](R/W) Disable scrambler for Gen3 data rate. The Gen3 scrambler/descrambler within the core needs
                                                                 to be disabled when the scrambling function is implemented outside of the core (within the
                                                                 PHY). */
        uint32_t reserved_1_7          : 7;
        uint32_t grizdnc               : 1;  /**< [  0:  0](R/W) Gen3 receiver impedance ZRX-DC not compliant. */
#else /* Word 0 - Little Endian */
        uint32_t grizdnc               : 1;  /**< [  0:  0](R/W) Gen3 receiver impedance ZRX-DC not compliant. */
        uint32_t reserved_1_7          : 7;
        uint32_t dsg3                  : 1;  /**< [  8:  8](R/W) Disable scrambler for Gen3 data rate. The Gen3 scrambler/descrambler within the core needs
                                                                 to be disabled when the scrambling function is implemented outside of the core (within the
                                                                 PHY). */
        uint32_t ep2p3d                : 1;  /**< [  9:  9](R/W) Equalization phase 2 and phase 3 disable. This applies to downstream ports only. */
        uint32_t ecrd                  : 1;  /**< [ 10: 10](R/W) Equalization EIEOS count reset disable. Disable requesting reset of EIEOS count during
                                                                 equalization. */
        uint32_t erd                   : 1;  /**< [ 11: 11](R/W) Equalization redo disable. Disable requesting reset of EIEOS count during equalization. */
        uint32_t reserved_12           : 1;
        uint32_t reserved_13_15        : 3;
        uint32_t ed                    : 1;  /**< [ 16: 16](R/W) Equalization disable. Disable equalization feature. */
        uint32_t dtdd                  : 1;  /**< [ 17: 17](R/W) DLLP transmission delay disable. Disable delay transmission of DLLPs before equalization. */
        uint32_t dcbd                  : 1;  /**< [ 18: 18](R/W) Disable balance disable. Disable DC balance feature. */
        uint32_t reserved_19_31        : 13;
#endif /* Word 0 - End */
    } cn88xx;
    /* struct bdk_pciercx_cfg548_s cn83xx; */
};
typedef union bdk_pciercx_cfg548 bdk_pciercx_cfg548_t;
+
+static inline uint64_t BDK_PCIERCX_CFG548(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG548(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=2))
+ return 0x20000000890ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=3))
+ return 0x20000000890ll + 0x100000000ll * ((a) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=5))
+ return 0x20000000890ll + 0x100000000ll * ((a) & 0x7);
+ __bdk_csr_fatal("PCIERCX_CFG548", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG548(a) bdk_pciercx_cfg548_t
+#define bustype_BDK_PCIERCX_CFG548(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG548(a) "PCIERCX_CFG548"
+#define busnum_BDK_PCIERCX_CFG548(a) (a)
+#define arguments_BDK_PCIERCX_CFG548(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg554
+ *
+ * PCIe RC Gen3 EQ Control Register
+ * This register contains the five hundred fifty-fifth 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg554
+{
+ uint32_t u;
+ struct bdk_pciercx_cfg554_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t scefpm : 1; /**< [ 26: 26](R/W) Request core to send back-to-back EIEOS in Recovery.RcvrLock state until
+ presets to coefficient mapping is complete. */
+ uint32_t reserved_25 : 1;
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part
+ of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0b0000000000000000 = No preset req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxxx1 = Preset 0 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxx1x = Preset 1 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxx1xx = Preset 2 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxx1xxx = Preset 3 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxx1xxxx = Preset 4 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxx1xxxxx = Preset 5 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxx1xxxxxx = Preset 6 req/evaluated in EQ master phase.
+
+ _ 0b00000xxx1xxxxxxx = Preset 7 req/evaluated in EQ master phase.
+
+ _ 0b00000xx1xxxxxxxx = Preset 8 req/evaluated in EQ master phase.
+
+ _ 0b00000x1xxxxxxxxx = Preset 9 req/evaluated in EQ master phase.
+
+ _ 0b000001xxxxxxxxxx = Preset 10 req/evaluated in EQ master phase.
+
+ _ All other encodings = Reserved. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change (not supported).
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change (not supported).
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part
+ of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0b0000000000000000 = No preset req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxxx1 = Preset 0 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxx1x = Preset 1 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxx1xx = Preset 2 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxx1xxx = Preset 3 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxx1xxxx = Preset 4 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxx1xxxxx = Preset 5 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxx1xxxxxx = Preset 6 req/evaluated in EQ master phase.
+
+ _ 0b00000xxx1xxxxxxx = Preset 7 req/evaluated in EQ master phase.
+
+ _ 0b00000xx1xxxxxxxx = Preset 8 req/evaluated in EQ master phase.
+
+ _ 0b00000x1xxxxxxxxx = Preset 9 req/evaluated in EQ master phase.
+
+ _ 0b000001xxxxxxxxxx = Preset 10 req/evaluated in EQ master phase.
+
+ _ All other encodings = Reserved. */
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t reserved_25 : 1;
+ uint32_t scefpm : 1; /**< [ 26: 26](R/W) Request core to send back-to-back EIEOS in Recovery.RcvrLock state until
+ presets to coefficient mapping is complete. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_cfg554_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t reserved_25 : 1;
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0000000000000000: No preset req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxxxx1: Preset 0 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxxx1x: Preset 1 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxx1xx: Preset 2 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxx1xxx: Preset 3 req/evaluated in EQ master phase
+
+ _ 00000xxxxxx1xxxx: Preset 4 req/evaluated in EQ master phase
+
+ _ 00000xxxxx1xxxxx: Preset 5 req/evaluated in EQ master phase
+
+ _ 00000xxxx1xxxxxx: Preset 6 req/evaluated in EQ master phase
+
+ _ 00000xxx1xxxxxxx: Preset 7 req/evaluated in EQ master phase
+
+ _ 00000xx1xxxxxxxx: Preset 8 req/evaluated in EQ master phase
+
+ _ 00000x1xxxxxxxxx: Preset 9 req/evaluated in EQ master phase
+
+ _ 000001xxxxxxxxxx: Preset 10 req/evaluated in EQ master phase
+
+ _ All other encodings: Reserved */
+ uint32_t reserved_6_7 : 2;
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change (not supported).
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change (not supported).
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0000000000000000: No preset req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxxxx1: Preset 0 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxxx1x: Preset 1 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxx1xx: Preset 2 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxx1xxx: Preset 3 req/evaluated in EQ master phase
+
+ _ 00000xxxxxx1xxxx: Preset 4 req/evaluated in EQ master phase
+
+ _ 00000xxxxx1xxxxx: Preset 5 req/evaluated in EQ master phase
+
+ _ 00000xxxx1xxxxxx: Preset 6 req/evaluated in EQ master phase
+
+ _ 00000xxx1xxxxxxx: Preset 7 req/evaluated in EQ master phase
+
+ _ 00000xx1xxxxxxxx: Preset 8 req/evaluated in EQ master phase
+
+ _ 00000x1xxxxxxxxx: Preset 9 req/evaluated in EQ master phase
+
+ _ 000001xxxxxxxxxx: Preset 10 req/evaluated in EQ master phase
+
+ _ All other encodings: Reserved */
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t reserved_25 : 1;
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_pciercx_cfg554_s cn81xx; */
+ struct bdk_pciercx_cfg554_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t scefpm : 1; /**< [ 26: 26](R/W) Request core to send back-to-back EIEOS in Recovery.RcvrLock state until
+ presets to coefficient mapping is complete. */
+ uint32_t reserved_25 : 1;
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0b0000000000000000 = No preset req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxxx1 = Preset 0 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxx1x = Preset 1 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxx1xx = Preset 2 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxx1xxx = Preset 3 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxx1xxxx = Preset 4 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxx1xxxxx = Preset 5 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxx1xxxxxx = Preset 6 req/evaluated in EQ master phase.
+
+ _ 0b00000xxx1xxxxxxx = Preset 7 req/evaluated in EQ master phase.
+
+ _ 0b00000xx1xxxxxxxx = Preset 8 req/evaluated in EQ master phase.
+
+ _ 0b00000x1xxxxxxxxx = Preset 9 req/evaluated in EQ master phase.
+
+ _ 0b000001xxxxxxxxxx = Preset 10 req/evaluated in EQ master phase.
+
+ _ All other encodings = Reserved. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change (not supported).
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change (not supported).
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0b0000000000000000 = No preset req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxxx1 = Preset 0 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxx1x = Preset 1 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxx1xx = Preset 2 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxx1xxx = Preset 3 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxx1xxxx = Preset 4 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxx1xxxxx = Preset 5 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxx1xxxxxx = Preset 6 req/evaluated in EQ master phase.
+
+ _ 0b00000xxx1xxxxxxx = Preset 7 req/evaluated in EQ master phase.
+
+ _ 0b00000xx1xxxxxxxx = Preset 8 req/evaluated in EQ master phase.
+
+ _ 0b00000x1xxxxxxxxx = Preset 9 req/evaluated in EQ master phase.
+
+ _ 0b000001xxxxxxxxxx = Preset 10 req/evaluated in EQ master phase.
+
+ _ All other encodings = Reserved. */
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t reserved_25 : 1;
+ uint32_t scefpm : 1; /**< [ 26: 26](R/W) Request core to send back-to-back EIEOS in Recovery.RcvrLock state until
+ presets to coefficient mapping is complete. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_pciercx_cfg554_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t reserved_25 : 1;
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part
+ of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0000000000000000: No preset req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxxxx1: Preset 0 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxxx1x: Preset 1 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxx1xx: Preset 2 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxx1xxx: Preset 3 req/evaluated in EQ master phase
+
+ _ 00000xxxxxx1xxxx: Preset 4 req/evaluated in EQ master phase
+
+ _ 00000xxxxx1xxxxx: Preset 5 req/evaluated in EQ master phase
+
+ _ 00000xxxx1xxxxxx: Preset 6 req/evaluated in EQ master phase
+
+ _ 00000xxx1xxxxxxx: Preset 7 req/evaluated in EQ master phase
+
+ _ 00000xx1xxxxxxxx: Preset 8 req/evaluated in EQ master phase
+
+ _ 00000x1xxxxxxxxx: Preset 9 req/evaluated in EQ master phase
+
+ _ 000001xxxxxxxxxx: Preset 10 req/evaluated in EQ master phase
+
+ _ All other encodings: Reserved */
+ uint32_t reserved_6_7 : 2;
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change (not supported).
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change (not supported).
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovry.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part
+ of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0000000000000000: No preset req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxxxx1: Preset 0 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxxx1x: Preset 1 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxxx1xx: Preset 2 req/evaluated in EQ master phase
+
+ _ 00000xxxxxxx1xxx: Preset 3 req/evaluated in EQ master phase
+
+ _ 00000xxxxxx1xxxx: Preset 4 req/evaluated in EQ master phase
+
+ _ 00000xxxxx1xxxxx: Preset 5 req/evaluated in EQ master phase
+
+ _ 00000xxxx1xxxxxx: Preset 6 req/evaluated in EQ master phase
+
+ _ 00000xxx1xxxxxxx: Preset 7 req/evaluated in EQ master phase
+
+ _ 00000xx1xxxxxxxx: Preset 8 req/evaluated in EQ master phase
+
+ _ 00000x1xxxxxxxxx: Preset 9 req/evaluated in EQ master phase
+
+ _ 000001xxxxxxxxxx: Preset 10 req/evaluated in EQ master phase
+
+ _ All other encodings: Reserved */
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t reserved_25 : 1;
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_pciercx_cfg554 bdk_pciercx_cfg554_t;
+
+static inline uint64_t BDK_PCIERCX_CFG554(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG554(unsigned long a)
+{
+    /* Per-controller config space is spaced 0x100000000 (4 GB) apart;
+       CFG554 lives at byte offset 0x8a8 within it. */
+    const uint64_t base = 0x200000008a8ll;
+    const uint64_t stride = 0x100000000ll;
+    /* CN81XX (controllers 0-2) and CN83XX (0-3) share the same decode. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2)) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3)))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG554", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG554(a) bdk_pciercx_cfg554_t
+#define bustype_BDK_PCIERCX_CFG554(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG554(a) "PCIERCX_CFG554"
+#define busnum_BDK_PCIERCX_CFG554(a) (a)
+#define arguments_BDK_PCIERCX_CFG554(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg558
+ *
+ * PCIe RC Gen3 PIPE Loopback Register
+ * This register contains the five hundred fifty-ninth 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg558
+{
+    uint32_t u;
+    /* Common layout: only the pipe-loopback enable bit is shared by all models. */
+    struct bdk_pciercx_cfg558_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ple                   : 1;  /**< [ 31: 31](R/W) Pipe loopback enable. */
+        uint32_t reserved_0_30         : 31;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_30         : 31;
+        uint32_t ple                   : 1;  /**< [ 31: 31](R/W) Pipe loopback enable. */
+#endif /* Word 0 - End */
+    } s;
+    /* CN81XX (and CN88XX, aliased below) expose a read-only RX status field in [30:0]. */
+    struct bdk_pciercx_cfg558_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ple                   : 1;  /**< [ 31: 31](R/W) Pipe loopback enable. */
+        uint32_t rxstatus              : 31; /**< [ 30:  0](RO/H) Reserved. */
+#else /* Word 0 - Little Endian */
+        uint32_t rxstatus              : 31; /**< [ 30:  0](RO/H) Reserved. */
+        uint32_t ple                   : 1;  /**< [ 31: 31](R/W) Pipe loopback enable. */
+#endif /* Word 0 - End */
+    } cn81xx;
+    /* struct bdk_pciercx_cfg558_cn81xx cn88xx; */
+    /* CN83XX instead carries a per-lane loopback rxvalid enable in [15:0]. */
+    struct bdk_pciercx_cfg558_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ple                   : 1;  /**< [ 31: 31](R/W) Pipe loopback enable. */
+        uint32_t reserved_16_30        : 15;
+        uint32_t lpbk_rxvalid          : 16; /**< [ 15:  0](R/W) Loopback rxvalid (lane enable - 1 bit per lane) */
+#else /* Word 0 - Little Endian */
+        uint32_t lpbk_rxvalid          : 16; /**< [ 15:  0](R/W) Loopback rxvalid (lane enable - 1 bit per lane) */
+        uint32_t reserved_16_30        : 15;
+        uint32_t ple                   : 1;  /**< [ 31: 31](R/W) Pipe loopback enable. */
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg558 bdk_pciercx_cfg558_t;
+
+static inline uint64_t BDK_PCIERCX_CFG558(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG558(unsigned long a)
+{
+    /* CFG558 sits at byte offset 0x8b8 of each controller's 4 GB-spaced config space. */
+    const uint64_t base = 0x200000008b8ll;
+    const uint64_t stride = 0x100000000ll;
+    /* CN81XX (controllers 0-2) and CN83XX (0-3) share the same decode. */
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2)) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3)))
+        return base + stride * (a & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 5))
+        return base + stride * (a & 0x7);
+    __bdk_csr_fatal("PCIERCX_CFG558", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG558(a) bdk_pciercx_cfg558_t
+#define bustype_BDK_PCIERCX_CFG558(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG558(a) "PCIERCX_CFG558"
+#define busnum_BDK_PCIERCX_CFG558(a) (a)
+#define arguments_BDK_PCIERCX_CFG558(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg559
+ *
+ * PCIe RC Miscellaneous Control 1 Register
+ * This register contains the five hundred sixtieth 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg559
+{
+    uint32_t u;
+    struct bdk_pciercx_cfg559_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_4_31         : 28;
+        uint32_t simp_replay_timer     : 1;  /**< [  3:  3](R/W) Not Supported. */
+        uint32_t ur_c4_mask_4_trgt1    : 1;  /**< [  2:  2](R/W) This field only applies to request TLPs (with UR filtering status) that are
+                                                                 chosen to forward to the application (when [DEFAULT_TARGET] is set).
+
+                                                                 When set, the core suppresses error logging, error message generation, and CPL
+                                                                 generation (for non-posted requests). */
+        uint32_t def_target            : 1;  /**< [  1:  1](R/W) Default target that a received IO or MEM request with UR/CA/CRS
+                                                                 is sent to by the controller.
+                                                                 0x0 = The controller drops all incoming I/O or Mem (after
+                                                                 corresponding error reporting). A completion with
+                                                                 UR status will be generated for non-posted requests.
+                                                                 0x1 = The controller forwards all incoming I/O or MEM
+                                                                 requests with UR/CA/CRS status to your application. */
+        uint32_t dbi_ro_wr_en          : 1;  /**< [  0:  0](R/W) Write to RO registers using DBI. This bit should not be cleared in normal operation. */
+#else /* Word 0 - Little Endian */
+        uint32_t dbi_ro_wr_en          : 1;  /**< [  0:  0](R/W) Write to RO registers using DBI. This bit should not be cleared in normal operation. */
+        uint32_t def_target            : 1;  /**< [  1:  1](R/W) Default target that a received IO or MEM request with UR/CA/CRS
+                                                                 is sent to by the controller.
+                                                                 0x0 = The controller drops all incoming I/O or Mem (after
+                                                                 corresponding error reporting). A completion with
+                                                                 UR status will be generated for non-posted requests.
+                                                                 0x1 = The controller forwards all incoming I/O or MEM
+                                                                 requests with UR/CA/CRS status to your application. */
+        uint32_t ur_c4_mask_4_trgt1    : 1;  /**< [  2:  2](R/W) This field only applies to request TLPs (with UR filtering status) that are
+                                                                 chosen to forward to the application (when [DEFAULT_TARGET] is set).
+
+                                                                 When set, the core suppresses error logging, error message generation, and CPL
+                                                                 generation (for non-posted requests). */
+        uint32_t simp_replay_timer     : 1;  /**< [  3:  3](R/W) Not Supported. */
+        uint32_t reserved_4_31         : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* CN81XX implements only the DBI read-only-write-enable bit. */
+    struct bdk_pciercx_cfg559_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_1_31         : 31;
+        uint32_t dbi_ro_wr_en          : 1;  /**< [  0:  0](R/W) Write to RO registers using DBI. This bit should not be cleared in normal operation. */
+#else /* Word 0 - Little Endian */
+        uint32_t dbi_ro_wr_en          : 1;  /**< [  0:  0](R/W) Write to RO registers using DBI. This bit should not be cleared in normal operation. */
+        uint32_t reserved_1_31         : 31;
+#endif /* Word 0 - End */
+    } cn81xx;
+    struct bdk_pciercx_cfg559_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_4_31         : 28;
+        uint32_t simp_replay_timer     : 1;  /**< [  3:  3](R/W) Not Supported. */
+        uint32_t ur_c4_mask_4_trgt1    : 1;  /**< [  2:  2](R/W) This field only applies to request TLPs (with UR filtering status) that are
+                                                                 chosen to forward to the application (when [DEFAULT_TARGET] is set).
+
+                                                                 When set, the core suppresses error logging, error message generation, and CPL
+                                                                 generation (for non-posted requests). */
+        uint32_t def_target            : 1;  /**< [  1:  1](R/W) Default target that a received IO or MEM request with UR/CA/CRS
+                                                                 is sent to by the controller.
+                                                                 0x0 = The controller drops all incoming I/O or Mem (after
+                                                                 corresponding error reporting). A completion with
+                                                                 UR status will be generated for non-posted requests.
+                                                                 0x1 = The controller forwards all incoming I/O or MEM
+                                                                 requests with UR/CA/CRS status to your application. */
+        uint32_t dbi_ro_wr_en          : 1;  /**< [  0:  0](R/W) Write to RO registers using DBI. When you set this bit, then some
+                                                                 RO bits are writable from the DBI. */
+#else /* Word 0 - Little Endian */
+        uint32_t dbi_ro_wr_en          : 1;  /**< [  0:  0](R/W) Write to RO registers using DBI. When you set this bit, then some
+                                                                 RO bits are writable from the DBI. */
+        uint32_t def_target            : 1;  /**< [  1:  1](R/W) Default target that a received IO or MEM request with UR/CA/CRS
+                                                                 is sent to by the controller.
+                                                                 0x0 = The controller drops all incoming I/O or Mem (after
+                                                                 corresponding error reporting). A completion with
+                                                                 UR status will be generated for non-posted requests.
+                                                                 0x1 = The controller forwards all incoming I/O or MEM
+                                                                 requests with UR/CA/CRS status to your application. */
+        uint32_t ur_c4_mask_4_trgt1    : 1;  /**< [  2:  2](R/W) This field only applies to request TLPs (with UR filtering status) that are
+                                                                 chosen to forward to the application (when [DEFAULT_TARGET] is set).
+
+                                                                 When set, the core suppresses error logging, error message generation, and CPL
+                                                                 generation (for non-posted requests). */
+        uint32_t simp_replay_timer     : 1;  /**< [  3:  3](R/W) Not Supported. */
+        uint32_t reserved_4_31         : 28;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_pciercx_cfg559 bdk_pciercx_cfg559_t;
+
+static inline uint64_t BDK_PCIERCX_CFG559(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG559(unsigned long a)
+{
+    /* CFG559 sits at byte offset 0x8bc; only CN81XX (controllers 0-2)
+       and CN83XX (0-3) implement it. */
+    const uint64_t base = 0x200000008bcll;
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2)) ||
+        (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3)))
+        return base + 0x100000000ll * (a & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG559", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG559(a) bdk_pciercx_cfg559_t
+#define bustype_BDK_PCIERCX_CFG559(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG559(a) "PCIERCX_CFG559"
+#define busnum_BDK_PCIERCX_CFG559(a) (a)
+#define arguments_BDK_PCIERCX_CFG559(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg560
+ *
+ * PCIe RC UpConfigure Multi-lane Control Register
+ * This register contains the five hundred sixty-first 32-bits of PCIe type 0 configuration space.
+ */
+union bdk_pciercx_cfg560
+{
+    uint32_t u;
+    /* Only one layout: the address function below decodes this register for CN83XX only. */
+    struct bdk_pciercx_cfg560_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t upc_supp              : 1;  /**< [  7:  7](R/W) Upconfigure support.
+                                                                 The core sends this value to the link upconfigure capability in TS2 ordered
+                                                                 sets in Configuration.Complete state. */
+        uint32_t dir_lnk_wdth_chg      : 1;  /**< [  6:  6](R/W/H) Directed link width change.
+                                                                 The core always moves to configuration state through recovery state
+                                                                 when this bit is set.
+
+                                                                 If PCIERC()_CFG144[LTSSM_VAR] is set and PCIERC()_CFG040[HASD]
+                                                                 is 0, the core starts upconfigure or autonomous width
+                                                                 downsizing (to the TRGT_LNK_WDTH value) in the configuration
+                                                                 state.
+
+                                                                 If TRGT_LNK_WDTH value is 0x0, the core does not
+                                                                 start upconfigure or autonomous width downsizing in the
+                                                                 configuration state.
+
+                                                                 The core self-clears this field when the core accepts this
+                                                                 request. */
+        uint32_t trgt_lnk_wdth         : 6;  /**< [  5:  0](R/W/H) Target link width.
+                                                                 0x0 = Core does not start upconfigure or autonomous width downsizing in configuration
+                                                                 state.
+                                                                 0x1 = x1.
+                                                                 0x2 = x2.
+                                                                 0x4 = x4.
+                                                                 0x8 = x8.
+                                                                 0x10 = x16 (Not Supported).
+                                                                 0x20 = x32 (Not Supported). */
+#else /* Word 0 - Little Endian */
+        uint32_t trgt_lnk_wdth         : 6;  /**< [  5:  0](R/W/H) Target link width.
+                                                                 0x0 = Core does not start upconfigure or autonomous width downsizing in configuration
+                                                                 state.
+                                                                 0x1 = x1.
+                                                                 0x2 = x2.
+                                                                 0x4 = x4.
+                                                                 0x8 = x8.
+                                                                 0x10 = x16 (Not Supported).
+                                                                 0x20 = x32 (Not Supported). */
+        uint32_t dir_lnk_wdth_chg      : 1;  /**< [  6:  6](R/W/H) Directed link width change.
+                                                                 The core always moves to configuration state through recovery state
+                                                                 when this bit is set.
+
+                                                                 If PCIERC()_CFG144[LTSSM_VAR] is set and PCIERC()_CFG040[HASD]
+                                                                 is 0, the core starts upconfigure or autonomous width
+                                                                 downsizing (to the TRGT_LNK_WDTH value) in the configuration
+                                                                 state.
+
+                                                                 If TRGT_LNK_WDTH value is 0x0, the core does not
+                                                                 start upconfigure or autonomous width downsizing in the
+                                                                 configuration state.
+
+                                                                 The core self-clears this field when the core accepts this
+                                                                 request. */
+        uint32_t upc_supp              : 1;  /**< [  7:  7](R/W) Upconfigure support.
+                                                                 The core sends this value to the link upconfigure capability in TS2 ordered
+                                                                 sets in Configuration.Complete state. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg560_s cn; */
+};
+typedef union bdk_pciercx_cfg560 bdk_pciercx_cfg560_t;
+
+static inline uint64_t BDK_PCIERCX_CFG560(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG560(unsigned long a)
+{
+    /* Only CN83XX implements this register (controllers 0-3, 4 GB apart). */
+    const uint64_t base = 0x200000008c0ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 3))
+        return base + 0x100000000ll * (a & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG560", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG560(a) bdk_pciercx_cfg560_t
+#define bustype_BDK_PCIERCX_CFG560(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG560(a) "PCIERCX_CFG560"
+#define busnum_BDK_PCIERCX_CFG560(a) (a)
+#define arguments_BDK_PCIERCX_CFG560(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg568
+ *
+ * PCIe RC ACE Cache Coherency Control 1 Register
+ * This register contains the five hundred sixty-ninth 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg568
+{
+    uint32_t u;
+    /* All fields are documented as reserved; the names suggest a memory-type
+       boundary (low address word + type select) -- confirm against the HRM before use. */
+    struct bdk_pciercx_cfg568_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t cfg_memtype_boundary_l_addr : 30;/**< [ 31:  2](R/W) Reserved. */
+        uint32_t reserved_1            : 1;
+        uint32_t cfg_memtype_value     : 1;  /**< [  0:  0](R/W) Reserved. */
+#else /* Word 0 - Little Endian */
+        uint32_t cfg_memtype_value     : 1;  /**< [  0:  0](R/W) Reserved. */
+        uint32_t reserved_1            : 1;
+        uint32_t cfg_memtype_boundary_l_addr : 30;/**< [ 31:  2](R/W) Reserved. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg568_s cn; */
+};
+typedef union bdk_pciercx_cfg568 bdk_pciercx_cfg568_t;
+
+static inline uint64_t BDK_PCIERCX_CFG568(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG568(unsigned long a)
+{
+    /* Only CN81XX implements this register (controllers 0-2, 4 GB apart). */
+    const uint64_t base = 0x200000008e0ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+        return base + 0x100000000ll * (a & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG568", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG568(a) bdk_pciercx_cfg568_t
+#define bustype_BDK_PCIERCX_CFG568(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG568(a) "PCIERCX_CFG568"
+#define busnum_BDK_PCIERCX_CFG568(a) (a)
+#define arguments_BDK_PCIERCX_CFG568(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cfg569
+ *
+ * PCIe RC ACE Cache Coherency Control 2 Register
+ * This register contains the five hundred seventieth 32-bits of type 0 PCIe configuration space.
+ */
+union bdk_pciercx_cfg569
+{
+    uint32_t u;
+    /* Single 32-bit field; companion to CFG568's low boundary address
+       (this word carries the high address bits -- field documented as reserved). */
+    struct bdk_pciercx_cfg569_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t cfg_memtype_boundary_h_addr : 32;/**< [ 31:  0](R/W) Reserved. */
+#else /* Word 0 - Little Endian */
+        uint32_t cfg_memtype_boundary_h_addr : 32;/**< [ 31:  0](R/W) Reserved. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cfg569_s cn; */
+};
+typedef union bdk_pciercx_cfg569 bdk_pciercx_cfg569_t;
+
+static inline uint64_t BDK_PCIERCX_CFG569(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CFG569(unsigned long a)
+{
+    /* Only CN81XX implements this register (controllers 0-2, 4 GB apart). */
+    const uint64_t base = 0x200000008e4ll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 2))
+        return base + 0x100000000ll * (a & 0x3);
+    __bdk_csr_fatal("PCIERCX_CFG569", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CFG569(a) bdk_pciercx_cfg569_t
+#define bustype_BDK_PCIERCX_CFG569(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CFG569(a) "PCIERCX_CFG569"
+#define busnum_BDK_PCIERCX_CFG569(a) (a)
+#define arguments_BDK_PCIERCX_CFG569(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_clk_gating_ctl
+ *
+ * PCIe RC RADM Clock Gating Enable Control Register
+ */
+union bdk_pciercx_clk_gating_ctl
+{
+    uint32_t u;
+    /* Single enable bit; remaining bits reserved. */
+    struct bdk_pciercx_clk_gating_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_1_31         : 31;
+        uint32_t radm_clk_gating_en    : 1;  /**< [  0:  0](R/W) Enable RADM clock gating feature when there is no
+                                                                 receive traffic, receive queues and pre/post-queue pipelines
+                                                                 are empty, RADM completion LUT is empty, and there
+                                                                 are no FLR actions pending.
+                                                                 0x0 = Disable.
+                                                                 0x1 = Enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t radm_clk_gating_en    : 1;  /**< [  0:  0](R/W) Enable RADM clock gating feature when there is no
+                                                                 receive traffic, receive queues and pre/post-queue pipelines
+                                                                 are empty, RADM completion LUT is empty, and there
+                                                                 are no FLR actions pending.
+                                                                 0x0 = Disable.
+                                                                 0x1 = Enable. */
+        uint32_t reserved_1_31         : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_clk_gating_ctl_s cn; */
+};
+typedef union bdk_pciercx_clk_gating_ctl bdk_pciercx_clk_gating_ctl_t;
+
+static inline uint64_t BDK_PCIERCX_CLK_GATING_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CLK_GATING_CTL(unsigned long a)
+{
+    /* CN9XXX only; plain config offset 0x88c, controllers 0-3 spaced 4 GB apart. */
+    const uint64_t base = 0x88cll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return base + 0x100000000ll * (a & 0x3);
+    __bdk_csr_fatal("PCIERCX_CLK_GATING_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CLK_GATING_CTL(a) bdk_pciercx_clk_gating_ctl_t
+#define bustype_BDK_PCIERCX_CLK_GATING_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CLK_GATING_CTL(a) "PCIERCX_CLK_GATING_CTL"
+#define busnum_BDK_PCIERCX_CLK_GATING_CTL(a) (a)
+#define arguments_BDK_PCIERCX_CLK_GATING_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_clsize
+ *
+ * PCIe RC BIST, Header Type, Master Latency Timer, Cache Line Size Register
+ */
+union bdk_pciercx_clsize
+{
+    uint32_t u;
+    /* Standard PCI header word at offset 0xC: BIST / header type / latency timer /
+       cache line size. */
+    struct bdk_pciercx_clsize_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t bist                  : 8;  /**< [ 31: 24](RO) The BIST register functions are not supported. All 8 bits of the BIST register
+                                                                 are hardwired to zero. */
+        uint32_t mfd                   : 1;  /**< [ 23: 23](RO) Multi function device. */
+        uint32_t chf                   : 7;  /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1. */
+        uint32_t lt                    : 8;  /**< [ 15:  8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+        uint32_t cls                   : 8;  /**< [  7:  0](R/W) Cache line size. The cache line size register is R/W for legacy compatibility purposes and
+                                                                 is not applicable to PCI Express device functionality. Writing to the cache line size
+                                                                 register does not impact functionality of the PCI Express bus. */
+#else /* Word 0 - Little Endian */
+        uint32_t cls                   : 8;  /**< [  7:  0](R/W) Cache line size. The cache line size register is R/W for legacy compatibility purposes and
+                                                                 is not applicable to PCI Express device functionality. Writing to the cache line size
+                                                                 register does not impact functionality of the PCI Express bus. */
+        uint32_t lt                    : 8;  /**< [ 15:  8](RO) Master latency timer. Not applicable for PCI Express, hardwired to 0x0. */
+        uint32_t chf                   : 7;  /**< [ 22: 16](RO) Configuration header format. Hardwired to 0x1. */
+        uint32_t mfd                   : 1;  /**< [ 23: 23](RO) Multi function device. */
+        uint32_t bist                  : 8;  /**< [ 31: 24](RO) The BIST register functions are not supported. All 8 bits of the BIST register
+                                                                 are hardwired to zero. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_clsize_s cn; */
+};
+typedef union bdk_pciercx_clsize bdk_pciercx_clsize_t;
+
+static inline uint64_t BDK_PCIERCX_CLSIZE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CLSIZE(unsigned long a)
+{
+    /* CN9XXX only; standard PCI header offset 0xC, controllers 0-3 spaced 4 GB apart. */
+    const uint64_t base = 0xcll;
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return base + 0x100000000ll * (a & 0x3);
+    __bdk_csr_fatal("PCIERCX_CLSIZE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CLSIZE(a) bdk_pciercx_clsize_t
+#define bustype_BDK_PCIERCX_CLSIZE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CLSIZE(a) "PCIERCX_CLSIZE"
+#define busnum_BDK_PCIERCX_CLSIZE(a) (a)
+#define arguments_BDK_PCIERCX_CLSIZE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cmd
+ *
+ * PCIe RC Command/Status Register
+ */
+union bdk_pciercx_cmd
+{
+    uint32_t u;
+    /* Standard PCI header word at offset 0x4: command register in bits [15:0],
+       status register in bits [31:16]. */
+    struct bdk_pciercx_cmd_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dpe                   : 1;  /**< [ 31: 31](R/W1C/H) Detected parity error. */
+        uint32_t sse                   : 1;  /**< [ 30: 30](R/W1C/H) Signaled system error. */
+        uint32_t rma                   : 1;  /**< [ 29: 29](R/W1C/H) Received master abort. */
+        uint32_t rta                   : 1;  /**< [ 28: 28](R/W1C/H) Received target abort. */
+        uint32_t sta                   : 1;  /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+        uint32_t devt                  : 2;  /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to 0x0. */
+        uint32_t mdpe                  : 1;  /**< [ 24: 24](R/W1C/H) Master data parity error. */
+        uint32_t fbb                   : 1;  /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to zero. */
+        uint32_t reserved_22           : 1;
+        uint32_t m66                   : 1;  /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to zero. */
+        uint32_t cl                    : 1;  /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. Hardwired to one. */
+        uint32_t i_stat                : 1;  /**< [ 19: 19](RO) INTx status. */
+        uint32_t reserved_11_18        : 8;
+        uint32_t i_dis                 : 1;  /**< [ 10: 10](R/W) INTx assertion disable. */
+        uint32_t fbbe                  : 1;  /**< [  9:  9](RO) Fast back-to-back transaction enable. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t see                   : 1;  /**< [  8:  8](R/W) SERR# enable. */
+        uint32_t ids_wcc               : 1;  /**< [  7:  7](RO) IDSEL stepping/wait cycle control. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t per                   : 1;  /**< [  6:  6](R/W) Parity error response. */
+        uint32_t vps                   : 1;  /**< [  5:  5](RO) VGA palette snoop. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t mwice                 : 1;  /**< [  4:  4](RO) Memory write and invalidate. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t scse                  : 1;  /**< [  3:  3](RO) Special cycle enable. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t me                    : 1;  /**< [  2:  2](R/W) Bus master enable. */
+        uint32_t msae                  : 1;  /**< [  1:  1](R/W) Memory space access enable. */
+        uint32_t isae                  : 1;  /**< [  0:  0](R/W) I/O space access enable.
+                                                                 There are no I/O BARs supported. */
+#else /* Word 0 - Little Endian */
+        uint32_t isae                  : 1;  /**< [  0:  0](R/W) I/O space access enable.
+                                                                 There are no I/O BARs supported. */
+        uint32_t msae                  : 1;  /**< [  1:  1](R/W) Memory space access enable. */
+        uint32_t me                    : 1;  /**< [  2:  2](R/W) Bus master enable. */
+        uint32_t scse                  : 1;  /**< [  3:  3](RO) Special cycle enable. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t mwice                 : 1;  /**< [  4:  4](RO) Memory write and invalidate. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t vps                   : 1;  /**< [  5:  5](RO) VGA palette snoop. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t per                   : 1;  /**< [  6:  6](R/W) Parity error response. */
+        uint32_t ids_wcc               : 1;  /**< [  7:  7](RO) IDSEL stepping/wait cycle control. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t see                   : 1;  /**< [  8:  8](R/W) SERR# enable. */
+        uint32_t fbbe                  : 1;  /**< [  9:  9](RO) Fast back-to-back transaction enable. Not applicable for PCI Express. Must be hardwired to zero. */
+        uint32_t i_dis                 : 1;  /**< [ 10: 10](R/W) INTx assertion disable. */
+        uint32_t reserved_11_18        : 8;
+        uint32_t i_stat                : 1;  /**< [ 19: 19](RO) INTx status. */
+        uint32_t cl                    : 1;  /**< [ 20: 20](RO) Capabilities list. Indicates presence of an extended capability item. Hardwired to one. */
+        uint32_t m66                   : 1;  /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to zero. */
+        uint32_t reserved_22           : 1;
+        uint32_t fbb                   : 1;  /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to zero. */
+        uint32_t mdpe                  : 1;  /**< [ 24: 24](R/W1C/H) Master data parity error. */
+        uint32_t devt                  : 2;  /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to 0x0. */
+        uint32_t sta                   : 1;  /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+        uint32_t rta                   : 1;  /**< [ 28: 28](R/W1C/H) Received target abort. */
+        uint32_t rma                   : 1;  /**< [ 29: 29](R/W1C/H) Received master abort. */
+        uint32_t sse                   : 1;  /**< [ 30: 30](R/W1C/H) Signaled system error. */
+        uint32_t dpe                   : 1;  /**< [ 31: 31](R/W1C/H) Detected parity error. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cmd_s cn; */
+};
+typedef union bdk_pciercx_cmd bdk_pciercx_cmd_t;
+
+/* Compute the config-space address of PCIERC(a)_CMD (offset 0x4).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_CMD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_CMD(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 4ll;
+    __bdk_csr_fatal("PCIERCX_CMD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_CMD(a) bdk_pciercx_cmd_t
+#define bustype_BDK_PCIERCX_CMD(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_CMD(a) "PCIERCX_CMD"
+#define busnum_BDK_PCIERCX_CMD(a) (a)
+#define arguments_BDK_PCIERCX_CMD(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cor_err_msk
+ *
+ * PCIe RC Correctable Error Mask Register
+ * (Offset 0x114 within each RC's config window; see BDK_PCIERCX_COR_ERR_MSK().)
+ */
+union bdk_pciercx_cor_err_msk
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_cor_err_msk_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31 : 16;
+        uint32_t chlom : 1; /**< [ 15: 15](R/W) Corrected header log overflow error mask. */
+        uint32_t ciem : 1; /**< [ 14: 14](R/W) Corrected internal error mask. */
+        uint32_t anfem : 1; /**< [ 13: 13](R/W) Advisory nonfatal error mask. */
+        uint32_t rttm : 1; /**< [ 12: 12](R/W) Replay timer timeout mask. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rnrm : 1; /**< [ 8: 8](R/W) REPLAY_NUM rollover mask. */
+        uint32_t bdllpm : 1; /**< [ 7: 7](R/W) Bad DLLP mask. */
+        uint32_t btlpm : 1; /**< [ 6: 6](R/W) Bad TLP mask. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t rem : 1; /**< [ 0: 0](R/W) Receiver error mask. */
+#else /* Word 0 - Little Endian */
+        uint32_t rem : 1; /**< [ 0: 0](R/W) Receiver error mask. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t btlpm : 1; /**< [ 6: 6](R/W) Bad TLP mask. */
+        uint32_t bdllpm : 1; /**< [ 7: 7](R/W) Bad DLLP mask. */
+        uint32_t rnrm : 1; /**< [ 8: 8](R/W) REPLAY_NUM rollover mask. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rttm : 1; /**< [ 12: 12](R/W) Replay timer timeout mask. */
+        uint32_t anfem : 1; /**< [ 13: 13](R/W) Advisory nonfatal error mask. */
+        uint32_t ciem : 1; /**< [ 14: 14](R/W) Corrected internal error mask. */
+        uint32_t chlom : 1; /**< [ 15: 15](R/W) Corrected header log overflow error mask. */
+        uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cor_err_msk_s cn; */
+};
+typedef union bdk_pciercx_cor_err_msk bdk_pciercx_cor_err_msk_t;
+
+/* Compute the config-space address of PCIERC(a)_COR_ERR_MSK (offset 0x114).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_COR_ERR_MSK(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_COR_ERR_MSK(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 0x114ll;
+    __bdk_csr_fatal("PCIERCX_COR_ERR_MSK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_COR_ERR_MSK(a) bdk_pciercx_cor_err_msk_t
+#define bustype_BDK_PCIERCX_COR_ERR_MSK(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_COR_ERR_MSK(a) "PCIERCX_COR_ERR_MSK"
+#define busnum_BDK_PCIERCX_COR_ERR_MSK(a) (a)
+#define arguments_BDK_PCIERCX_COR_ERR_MSK(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_cor_err_stat
+ *
+ * PCIe RC Correctable Error Status Register
+ * (Offset 0x110 within each RC's config window; see BDK_PCIERCX_COR_ERR_STAT().)
+ */
+union bdk_pciercx_cor_err_stat
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_cor_err_stat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31 : 16;
+        uint32_t chlo : 1; /**< [ 15: 15](R/W1C/H) Corrected header log overflow status. */
+        uint32_t cies : 1; /**< [ 14: 14](R/W1C/H) Corrected internal error status. */
+        uint32_t anfes : 1; /**< [ 13: 13](R/W1C/H) Advisory nonfatal error status. */
+        uint32_t rtts : 1; /**< [ 12: 12](R/W1C/H) Replay timer timeout status. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rnrs : 1; /**< [ 8: 8](R/W1C/H) REPLAY_NUM rollover status. */
+        uint32_t bdllps : 1; /**< [ 7: 7](R/W1C/H) Bad DLLP status. */
+        uint32_t btlps : 1; /**< [ 6: 6](R/W1C/H) Bad TLP status. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t res : 1; /**< [ 0: 0](R/W1C/H) Receiver error status. */
+#else /* Word 0 - Little Endian */
+        uint32_t res : 1; /**< [ 0: 0](R/W1C/H) Receiver error status. */
+        uint32_t reserved_1_5 : 5;
+        uint32_t btlps : 1; /**< [ 6: 6](R/W1C/H) Bad TLP status. */
+        uint32_t bdllps : 1; /**< [ 7: 7](R/W1C/H) Bad DLLP status. */
+        uint32_t rnrs : 1; /**< [ 8: 8](R/W1C/H) REPLAY_NUM rollover status. */
+        uint32_t reserved_9_11 : 3;
+        uint32_t rtts : 1; /**< [ 12: 12](R/W1C/H) Replay timer timeout status. */
+        uint32_t anfes : 1; /**< [ 13: 13](R/W1C/H) Advisory nonfatal error status. */
+        uint32_t cies : 1; /**< [ 14: 14](R/W1C/H) Corrected internal error status. */
+        uint32_t chlo : 1; /**< [ 15: 15](R/W1C/H) Corrected header log overflow status. */
+        uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_cor_err_stat_s cn; */
+};
+typedef union bdk_pciercx_cor_err_stat bdk_pciercx_cor_err_stat_t;
+
+/* Compute the config-space address of PCIERC(a)_COR_ERR_STAT (offset 0x110).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_COR_ERR_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_COR_ERR_STAT(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 0x110ll;
+    __bdk_csr_fatal("PCIERCX_COR_ERR_STAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_COR_ERR_STAT(a) bdk_pciercx_cor_err_stat_t
+#define bustype_BDK_PCIERCX_COR_ERR_STAT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_COR_ERR_STAT(a) "PCIERCX_COR_ERR_STAT"
+#define busnum_BDK_PCIERCX_COR_ERR_STAT(a) (a)
+#define arguments_BDK_PCIERCX_COR_ERR_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dbg0
+ *
+ * PCIe RC Debug Register 0
+ * (Offset 0x728 within each RC's config window; see BDK_PCIERCX_DBG0().)
+ */
+union bdk_pciercx_dbg0
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_dbg0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dbg_info_l32 : 32; /**< [ 31: 0](RO/H) Debug info lower 32 bits. */
+#else /* Word 0 - Little Endian */
+        uint32_t dbg_info_l32 : 32; /**< [ 31: 0](RO/H) Debug info lower 32 bits. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_dbg0_s cn; */
+};
+typedef union bdk_pciercx_dbg0 bdk_pciercx_dbg0_t;
+
+/* Compute the config-space address of PCIERC(a)_DBG0 (offset 0x728).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_DBG0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DBG0(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 0x728ll;
+    __bdk_csr_fatal("PCIERCX_DBG0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_DBG0(a) bdk_pciercx_dbg0_t
+#define bustype_BDK_PCIERCX_DBG0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DBG0(a) "PCIERCX_DBG0"
+#define busnum_BDK_PCIERCX_DBG0(a) (a)
+#define arguments_BDK_PCIERCX_DBG0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dbg1
+ *
+ * PCIe RC Debug Register 1
+ * (Offset 0x72c within each RC's config window; see BDK_PCIERCX_DBG1().)
+ */
+union bdk_pciercx_dbg1
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_dbg1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dbg_info_u32 : 32; /**< [ 31: 0](RO/H) Debug info upper 32 bits. */
+#else /* Word 0 - Little Endian */
+        uint32_t dbg_info_u32 : 32; /**< [ 31: 0](RO/H) Debug info upper 32 bits. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_dbg1_s cn; */
+};
+typedef union bdk_pciercx_dbg1 bdk_pciercx_dbg1_t;
+
+/* Compute the config-space address of PCIERC(a)_DBG1 (offset 0x72c).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_DBG1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DBG1(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 0x72cll;
+    __bdk_csr_fatal("PCIERCX_DBG1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_DBG1(a) bdk_pciercx_dbg1_t
+#define bustype_BDK_PCIERCX_DBG1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DBG1(a) "PCIERCX_DBG1"
+#define busnum_BDK_PCIERCX_DBG1(a) (a)
+#define arguments_BDK_PCIERCX_DBG1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dev_cap
+ *
+ * PCIe RC Device Capabilities Register
+ * (Offset 0x74 within each RC's config window; see BDK_PCIERCX_DEV_CAP().)
+ * The [cn] variant differs from [s] only in splitting reserved_12_14 into
+ * three single reserved bits; the bit layout is otherwise identical.
+ */
+union bdk_pciercx_dev_cap
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_dev_cap_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_29_31 : 3;
+        uint32_t flr_cap : 1; /**< [ 28: 28](RO) Function level reset capability. This bit applies to endpoints only. */
+        uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+        uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+        uint32_t reserved_16_17 : 2;
+        uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t reserved_12_14 : 3;
+        uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+                                                                 endpoint devices. */
+        uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+                                                                 endpoint devices. */
+        uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+                                                                 PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+                                                                 PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+                                                                 must not write any value other than 0x0 to this field. */
+        uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+                                                                 PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+                                                                 must not write any value other than 0x0 to this field. */
+        uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+                                                                 PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+                                                                 endpoint devices. */
+        uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+                                                                 endpoint devices. */
+        uint32_t reserved_12_14 : 3;
+        uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t reserved_16_17 : 2;
+        uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+        uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+        uint32_t flr_cap : 1; /**< [ 28: 28](RO) Function level reset capability. This bit applies to endpoints only. */
+        uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_pciercx_dev_cap_cn
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_29_31 : 3;
+        uint32_t flr_cap : 1; /**< [ 28: 28](RO) Function level reset capability. This bit applies to endpoints only. */
+        uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+        uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+        uint32_t reserved_16_17 : 2;
+        uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t reserved_14 : 1;
+        uint32_t reserved_13 : 1;
+        uint32_t reserved_12 : 1;
+        uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+                                                                 endpoint devices. */
+        uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+                                                                 endpoint devices. */
+        uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+                                                                 PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+                                                                 PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+                                                                 must not write any value other than 0x0 to this field. */
+        uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t mpss : 3; /**< [ 2: 0](RO/WRSL) Max_Payload_Size supported, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t pfs : 2; /**< [ 4: 3](RO/WRSL) Phantom function supported. This field is writable through
+                                                                 PEM()_CFG_WR. However, phantom function is not supported. Therefore, the application
+                                                                 must not write any value other than 0x0 to this field. */
+        uint32_t etfs : 1; /**< [ 5: 5](RO/WRSL) Extended tag field supported. This bit is writable through
+                                                                 PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t el0al : 3; /**< [ 8: 6](RO) Endpoint L0s acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+                                                                 endpoint devices. */
+        uint32_t el1al : 3; /**< [ 11: 9](RO) Endpoint L1 acceptable latency, writable through PEM()_CFG_WR. Must be 0x0 for non-
+                                                                 endpoint devices. */
+        uint32_t reserved_12 : 1;
+        uint32_t reserved_13 : 1;
+        uint32_t reserved_14 : 1;
+        uint32_t rber : 1; /**< [ 15: 15](RO/WRSL) Role-based error reporting, writable through PEM()_CFG_WR. However, the application
+                                                                 must not change this field. */
+        uint32_t reserved_16_17 : 2;
+        uint32_t csplv : 8; /**< [ 25: 18](RO) Captured slot power limit value. Not applicable for RC port, upstream port only. */
+        uint32_t cspls : 2; /**< [ 27: 26](RO) Captured slot power limit scale. Not applicable for RC port, upstream port only */
+        uint32_t flr_cap : 1; /**< [ 28: 28](RO) Function level reset capability. This bit applies to endpoints only. */
+        uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+    } cn;
+};
+typedef union bdk_pciercx_dev_cap bdk_pciercx_dev_cap_t;
+
+/* Compute the config-space address of PCIERC(a)_DEV_CAP (offset 0x74).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_DEV_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DEV_CAP(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 0x74ll;
+    __bdk_csr_fatal("PCIERCX_DEV_CAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_DEV_CAP(a) bdk_pciercx_dev_cap_t
+#define bustype_BDK_PCIERCX_DEV_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DEV_CAP(a) "PCIERCX_DEV_CAP"
+#define busnum_BDK_PCIERCX_DEV_CAP(a) (a)
+#define arguments_BDK_PCIERCX_DEV_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dev_cap2
+ *
+ * PCIe RC Device Capabilities 2 Register
+ * (Offset 0x94 within each RC's config window; see BDK_PCIERCX_DEV_CAP2().)
+ */
+union bdk_pciercx_dev_cap2
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_dev_cap2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_24_31 : 8;
+        uint32_t meetp : 2; /**< [ 23: 22](RO/WRSL) Max end-end TLP prefixes.
+                                                                 0x1 = 1.
+                                                                 0x2 = 2.
+                                                                 0x3 = 3.
+                                                                 0x0 = 4. */
+        uint32_t eetps : 1; /**< [ 21: 21](RO/WRSL) End-end TLP prefix supported. */
+        uint32_t effs : 1; /**< [ 20: 20](RO/WRSL) Extended fmt field supported. */
+        uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported. */
+        uint32_t tag10b_req_supp : 1; /**< [ 17: 17](RO/H) 10-bit tag requestor supported.
+
+                                                                 Reset values:
+                                                                 _ UPEM: 0x1.
+                                                                 _ BPEM: 0x0. */
+        uint32_t tag10b_cpl_supp : 1; /**< [ 16: 16](RO) 10-bit tag completer supported. */
+        uint32_t ln_sys_cls : 2; /**< [ 15: 14](RO) LN System CLS (not supported). */
+        uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported. */
+        uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported. */
+        uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+                                                                 permitted in the relaxed ordering model. */
+        uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported.
+                                                                 Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+                                                                 unsupported request. */
+        uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported.
+                                                                 Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+                                                                 unsupported request. */
+        uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported.
+                                                                 Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+                                                                 unsupported request. */
+        uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+        uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+        uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+        uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+#else /* Word 0 - Little Endian */
+        uint32_t ctrs : 4; /**< [ 3: 0](RO/H) Completion timeout ranges supported. */
+        uint32_t ctds : 1; /**< [ 4: 4](RO) Completion timeout disable supported. */
+        uint32_t ari_fw : 1; /**< [ 5: 5](RO) Alternate routing ID forwarding supported. */
+        uint32_t atom_ops : 1; /**< [ 6: 6](RO) AtomicOp routing supported. */
+        uint32_t atom32s : 1; /**< [ 7: 7](RO) 32-bit AtomicOp supported.
+                                                                 Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+                                                                 unsupported request. */
+        uint32_t atom64s : 1; /**< [ 8: 8](RO) 64-bit AtomicOp supported.
+                                                                 Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+                                                                 unsupported request. */
+        uint32_t atom128s : 1; /**< [ 9: 9](RO) 128-bit AtomicOp supported.
+                                                                 Note that inbound AtomicOps targeting BAR0 are not supported and are dropped as an
+                                                                 unsupported request. */
+        uint32_t noroprpr : 1; /**< [ 10: 10](RO/H) No RO-enabled PR-PR passing. When set, the routing element never carries out the passing
+                                                                 permitted in the relaxed ordering model. */
+        uint32_t ltrs : 1; /**< [ 11: 11](RO) Latency tolerance reporting (LTR) mechanism supported. */
+        uint32_t tph : 2; /**< [ 13: 12](RO) TPH completer supported. */
+        uint32_t ln_sys_cls : 2; /**< [ 15: 14](RO) LN System CLS (not supported). */
+        uint32_t tag10b_cpl_supp : 1; /**< [ 16: 16](RO) 10-bit tag completer supported. */
+        uint32_t tag10b_req_supp : 1; /**< [ 17: 17](RO/H) 10-bit tag requestor supported.
+
+                                                                 Reset values:
+                                                                 _ UPEM: 0x1.
+                                                                 _ BPEM: 0x0. */
+        uint32_t obffs : 2; /**< [ 19: 18](RO) Optimized buffer flush fill (OBFF) supported. */
+        uint32_t effs : 1; /**< [ 20: 20](RO/WRSL) Extended fmt field supported. */
+        uint32_t eetps : 1; /**< [ 21: 21](RO/WRSL) End-end TLP prefix supported. */
+        uint32_t meetp : 2; /**< [ 23: 22](RO/WRSL) Max end-end TLP prefixes.
+                                                                 0x1 = 1.
+                                                                 0x2 = 2.
+                                                                 0x3 = 3.
+                                                                 0x0 = 4. */
+        uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_dev_cap2_s cn; */
+};
+typedef union bdk_pciercx_dev_cap2 bdk_pciercx_dev_cap2_t;
+
+/* Compute the config-space address of PCIERC(a)_DEV_CAP2 (offset 0x94).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_DEV_CAP2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DEV_CAP2(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 0x94ll;
+    __bdk_csr_fatal("PCIERCX_DEV_CAP2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_DEV_CAP2(a) bdk_pciercx_dev_cap2_t
+#define bustype_BDK_PCIERCX_DEV_CAP2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DEV_CAP2(a) "PCIERCX_DEV_CAP2"
+#define busnum_BDK_PCIERCX_DEV_CAP2(a) (a)
+#define arguments_BDK_PCIERCX_DEV_CAP2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dev_ctl
+ *
+ * PCIe RC Device Control/Device Status Register
+ * (Offset 0x78 within each RC's config window; see BDK_PCIERCX_DEV_CTL().
+ * Bits [15:0] are device control, bits [22:16] are device status.)
+ */
+union bdk_pciercx_dev_ctl
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_dev_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_22_31 : 10;
+        uint32_t tp : 1; /**< [ 21: 21](RO) Transaction pending. Hardwired to zero. */
+        uint32_t ap_d : 1; /**< [ 20: 20](RO) AUX power detected. Set to one if AUX power detected. */
+        uint32_t ur_d : 1; /**< [ 19: 19](R/W1C/H) Unsupported request detected. Errors are logged in this register regardless of whether or
+                                                                 not error reporting is enabled in the device control register. [UR_D] occurs when PEM receives
+                                                                 something unsupported. Unsupported requests are nonfatal errors, so [UR_D] should cause
+                                                                 [NFE_D]. Receiving a vendor-defined message should cause an unsupported request. */
+        uint32_t fe_d : 1; /**< [ 18: 18](R/W1C/H) Fatal error detected. Errors are logged in this register regardless of whether or not
+                                                                 error reporting is enabled in the device control register. This field is set if we receive
+                                                                 any of the errors in PCIERC_UCOR_ERR_MSK that has a severity set to fatal. Malformed
+                                                                 TLPs generally fit into this category. */
+        uint32_t nfe_d : 1; /**< [ 17: 17](R/W1C/H) Nonfatal error detected. Errors are logged in this register regardless of whether or not
+                                                                 error reporting is enabled in the device control register. This field is set if we receive
+                                                                 any of the errors in PCIERC_UCOR_ERR_MSK that has a severity set to Nonfatal and does
+                                                                 not meet advisory nonfatal criteria, which most poisoned TLPs should. */
+        uint32_t ce_d : 1; /**< [ 16: 16](R/W1C/H) Correctable error detected. Errors are logged in this register regardless of whether or
+                                                                 not error reporting is enabled in the device control register. This field is set if we
+                                                                 receive any of the errors in PCIERC_COR_ERR_STAT, for example, a replay timer timeout.
+                                                                 Also, it can be set if we get any of the errors in PCIERC_UCOR_ERR_MSK that has a
+                                                                 severity set to nonfatal and meets the advisory nonfatal criteria, which most ECRC errors should. */
+        uint32_t reserved_15 : 1;
+        uint32_t mrrs : 3; /**< [ 14: 12](R/W) Max read request size.
+                                                                 0x0 = 128 bytes.
+                                                                 0x1 = 256 bytes.
+                                                                 0x2 = 512 bytes.
+                                                                 0x3 = 1024 bytes.
+                                                                 0x4 = 2048 bytes.
+                                                                 0x5 = 4096 bytes. */
+        uint32_t ns_en : 1; /**< [ 11: 11](R/W) Enable no snoop. */
+        uint32_t ap_en : 1; /**< [ 10: 10](RO) AUX power PM enable (not supported). */
+        uint32_t pf_en : 1; /**< [ 9: 9](R/W/H) Phantom function enable. This bit should never be set; CNXXXX requests never uses phantom
+                                                                 functions. */
+        uint32_t etf_en : 1; /**< [ 8: 8](R/W) Extended tag field enable. Set this bit to enable extended tags. */
+        uint32_t mps : 3; /**< [ 7: 5](R/W) Max payload size. Legal values:
+                                                                 0x0 = 128 bytes.
+                                                                 0x1 = 256 bytes.
+                                                                 0x2 = 512 bytes.
+                                                                 0x3 = 1024 bytes.
+                                                                 Larger sizes are not supported by CNXXXX.
+
+                                                                 DPI_SLI_PRT()_CFG[MPS] must be set to the same value as this field for proper
+                                                                 functionality. */
+        uint32_t ro_en : 1; /**< [ 4: 4](R/W) Enable relaxed ordering. */
+        uint32_t ur_en : 1; /**< [ 3: 3](R/W) Unsupported request reporting enable. */
+        uint32_t fe_en : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+        uint32_t nfe_en : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+        uint32_t ce_en : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t ce_en : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+        uint32_t nfe_en : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+        uint32_t fe_en : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+        uint32_t ur_en : 1; /**< [ 3: 3](R/W) Unsupported request reporting enable. */
+        uint32_t ro_en : 1; /**< [ 4: 4](R/W) Enable relaxed ordering. */
+        uint32_t mps : 3; /**< [ 7: 5](R/W) Max payload size. Legal values:
+                                                                 0x0 = 128 bytes.
+                                                                 0x1 = 256 bytes.
+                                                                 0x2 = 512 bytes.
+                                                                 0x3 = 1024 bytes.
+                                                                 Larger sizes are not supported by CNXXXX.
+
+                                                                 DPI_SLI_PRT()_CFG[MPS] must be set to the same value as this field for proper
+                                                                 functionality. */
+        uint32_t etf_en : 1; /**< [ 8: 8](R/W) Extended tag field enable. Set this bit to enable extended tags. */
+        uint32_t pf_en : 1; /**< [ 9: 9](R/W/H) Phantom function enable. This bit should never be set; CNXXXX requests never uses phantom
+                                                                 functions. */
+        uint32_t ap_en : 1; /**< [ 10: 10](RO) AUX power PM enable (not supported). */
+        uint32_t ns_en : 1; /**< [ 11: 11](R/W) Enable no snoop. */
+        uint32_t mrrs : 3; /**< [ 14: 12](R/W) Max read request size.
+                                                                 0x0 = 128 bytes.
+                                                                 0x1 = 256 bytes.
+                                                                 0x2 = 512 bytes.
+                                                                 0x3 = 1024 bytes.
+                                                                 0x4 = 2048 bytes.
+                                                                 0x5 = 4096 bytes. */
+        uint32_t reserved_15 : 1;
+        uint32_t ce_d : 1; /**< [ 16: 16](R/W1C/H) Correctable error detected. Errors are logged in this register regardless of whether or
+                                                                 not error reporting is enabled in the device control register. This field is set if we
+                                                                 receive any of the errors in PCIERC_COR_ERR_STAT, for example, a replay timer timeout.
+                                                                 Also, it can be set if we get any of the errors in PCIERC_UCOR_ERR_MSK that has a
+                                                                 severity set to nonfatal and meets the advisory nonfatal criteria, which most ECRC errors should. */
+        uint32_t nfe_d : 1; /**< [ 17: 17](R/W1C/H) Nonfatal error detected. Errors are logged in this register regardless of whether or not
+                                                                 error reporting is enabled in the device control register. This field is set if we receive
+                                                                 any of the errors in PCIERC_UCOR_ERR_MSK that has a severity set to Nonfatal and does
+                                                                 not meet advisory nonfatal criteria, which most poisoned TLPs should. */
+        uint32_t fe_d : 1; /**< [ 18: 18](R/W1C/H) Fatal error detected. Errors are logged in this register regardless of whether or not
+                                                                 error reporting is enabled in the device control register. This field is set if we receive
+                                                                 any of the errors in PCIERC_UCOR_ERR_MSK that has a severity set to fatal. Malformed
+                                                                 TLPs generally fit into this category. */
+        uint32_t ur_d : 1; /**< [ 19: 19](R/W1C/H) Unsupported request detected. Errors are logged in this register regardless of whether or
+                                                                 not error reporting is enabled in the device control register. [UR_D] occurs when PEM receives
+                                                                 something unsupported. Unsupported requests are nonfatal errors, so [UR_D] should cause
+                                                                 [NFE_D]. Receiving a vendor-defined message should cause an unsupported request. */
+        uint32_t ap_d : 1; /**< [ 20: 20](RO) AUX power detected. Set to one if AUX power detected. */
+        uint32_t tp : 1; /**< [ 21: 21](RO) Transaction pending. Hardwired to zero. */
+        uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_dev_ctl_s cn; */
+};
+typedef union bdk_pciercx_dev_ctl bdk_pciercx_dev_ctl_t;
+
+/* Compute the config-space address of PCIERC(a)_DEV_CTL (offset 0x78).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_DEV_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DEV_CTL(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 0x78ll;
+    __bdk_csr_fatal("PCIERCX_DEV_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_DEV_CTL(a) bdk_pciercx_dev_ctl_t
+#define bustype_BDK_PCIERCX_DEV_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DEV_CTL(a) "PCIERCX_DEV_CTL"
+#define busnum_BDK_PCIERCX_DEV_CTL(a) (a)
+#define arguments_BDK_PCIERCX_DEV_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dev_ctl2
+ *
+ * PCIe RC Device Control 2 Register/Device Status 2 Register
+ * (Offset 0x98 within each RC's config window; see BDK_PCIERCX_DEV_CTL2().)
+ */
+union bdk_pciercx_dev_ctl2
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_dev_ctl2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31 : 16;
+        uint32_t eetpb : 1; /**< [ 15: 15](R/W) End-end TLP prefix blocking.
+                                                                 Writeable when PCIERC_DEV_CAP2[EETPS] is set. */
+        uint32_t obffe : 2; /**< [ 14: 13](R/W) Optimized buffer flush fill (OBFF) enabled. */
+        uint32_t tag10b_req_en : 1; /**< [ 12: 12](R/W/H) 10-bit tag requester enabled.
+
+                                                                 For UPEM, this bit is R/W and can be set to enable 10-bit
+                                                                 tag requester enable.
+
+                                                                 For BPEM, this bit is RO and cannot be written. */
+        uint32_t reserved_11 : 1;
+        uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+        uint32_t id0_cp : 1; /**< [ 9: 9](R/W) ID based ordering completion enable (not supported). */
+        uint32_t id0_rq : 1; /**< [ 8: 8](R/W) ID based ordering request enable. */
+        uint32_t atom_op_eb : 1; /**< [ 7: 7](R/W) AtomicOp egress blocking. */
+        uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+        uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+        uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+        uint32_t ctv : 4; /**< [ 3: 0](R/W/H) Completion timeout value.
+                                                                 0x0 = Default range: 16 ms to 55 ms.
+                                                                 0x1 = 50 us to 100 us.
+                                                                 0x2 = 1 ms to 10 ms.
+                                                                 0x3 = 16 ms to 55 ms.
+                                                                 0x6 = 65 ms to 210 ms.
+                                                                 0x9 = 260 ms to 900 ms.
+                                                                 0xA = 1 s to 3.5 s.
+                                                                 0xD = 4 s to 13 s.
+                                                                 0xE = 17 s to 64 s.
+
+                                                                 Values not defined are reserved. */
+#else /* Word 0 - Little Endian */
+        uint32_t ctv : 4; /**< [ 3: 0](R/W/H) Completion timeout value.
+                                                                 0x0 = Default range: 16 ms to 55 ms.
+                                                                 0x1 = 50 us to 100 us.
+                                                                 0x2 = 1 ms to 10 ms.
+                                                                 0x3 = 16 ms to 55 ms.
+                                                                 0x6 = 65 ms to 210 ms.
+                                                                 0x9 = 260 ms to 900 ms.
+                                                                 0xA = 1 s to 3.5 s.
+                                                                 0xD = 4 s to 13 s.
+                                                                 0xE = 17 s to 64 s.
+
+                                                                 Values not defined are reserved. */
+        uint32_t ctd : 1; /**< [ 4: 4](R/W) Completion timeout disable. */
+        uint32_t ari : 1; /**< [ 5: 5](R/W) Alternate routing ID forwarding supported. */
+        uint32_t atom_op : 1; /**< [ 6: 6](R/W) AtomicOp requester enable. */
+        uint32_t atom_op_eb : 1; /**< [ 7: 7](R/W) AtomicOp egress blocking. */
+        uint32_t id0_rq : 1; /**< [ 8: 8](R/W) ID based ordering request enable. */
+        uint32_t id0_cp : 1; /**< [ 9: 9](R/W) ID based ordering completion enable (not supported). */
+        uint32_t ltre : 1; /**< [ 10: 10](RO) Latency tolerance reporting (LTR) mechanism enable. (not supported). */
+        uint32_t reserved_11 : 1;
+        uint32_t tag10b_req_en : 1; /**< [ 12: 12](R/W/H) 10-bit tag requester enabled.
+
+                                                                 For UPEM, this bit is R/W and can be set to enable 10-bit
+                                                                 tag requester enable.
+
+                                                                 For BPEM, this bit is RO and cannot be written. */
+        uint32_t obffe : 2; /**< [ 14: 13](R/W) Optimized buffer flush fill (OBFF) enabled. */
+        uint32_t eetpb : 1; /**< [ 15: 15](R/W) End-end TLP prefix blocking.
+                                                                 Writeable when PCIERC_DEV_CAP2[EETPS] is set. */
+        uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_dev_ctl2_s cn; */
+};
+typedef union bdk_pciercx_dev_ctl2 bdk_pciercx_dev_ctl2_t;
+
+/* Compute the config-space address of PCIERC(a)_DEV_CTL2 (offset 0x98).
+ * Only CN9XXX with index a in 0..3 is valid; any other combination is
+ * reported as a fatal CSR-usage error. */
+static inline uint64_t BDK_PCIERCX_DEV_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DEV_CTL2(unsigned long a)
+{
+    uint64_t window = 0x100000000ll * (a & 0x3); /* 4 GB config window per RC instance */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return window + 0x98ll;
+    __bdk_csr_fatal("PCIERCX_DEV_CTL2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_DEV_CTL2(a) bdk_pciercx_dev_ctl2_t
+#define bustype_BDK_PCIERCX_DEV_CTL2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DEV_CTL2(a) "PCIERCX_DEV_CTL2"
+#define busnum_BDK_PCIERCX_DEV_CTL2(a) (a)
+#define arguments_BDK_PCIERCX_DEV_CTL2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dl_feature_cap
+ *
+ * PCIe RC Data Link Feature Capabilities Register
+ * (Offset 0x454 within each RC's config window; see BDK_PCIERCX_DL_FEATURE_CAP().)
+ */
+union bdk_pciercx_dl_feature_cap
+{
+    uint32_t u; /**< Whole register as a raw 32-bit word. */
+    struct bdk_pciercx_dl_feature_cap_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dl_fex_en : 1; /**< [ 31: 31](RO/WRSL) Data link feature exchange enable. */
+        uint32_t reserved_23_30 : 8;
+        uint32_t lfdlfs : 22; /**< [ 22: 1](RO/WRSL) Local future data link feature supported. */
+        uint32_t lsfcs : 1; /**< [ 0: 0](RO/WRSL) Local scaled flow control supported, */
+#else /* Word 0 - Little Endian */
+        uint32_t lsfcs : 1; /**< [ 0: 0](RO/WRSL) Local scaled flow control supported, */
+        uint32_t lfdlfs : 22; /**< [ 22: 1](RO/WRSL) Local future data link feature supported. */
+        uint32_t reserved_23_30 : 8;
+        uint32_t dl_fex_en : 1; /**< [ 31: 31](RO/WRSL) Data link feature exchange enable. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_dl_feature_cap_s cn; */
+};
+typedef union bdk_pciercx_dl_feature_cap bdk_pciercx_dl_feature_cap_t;
+
+static inline uint64_t BDK_PCIERCX_DL_FEATURE_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DL_FEATURE_CAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x454ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_DL_FEATURE_CAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_DL_FEATURE_CAP(a) bdk_pciercx_dl_feature_cap_t
+#define bustype_BDK_PCIERCX_DL_FEATURE_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DL_FEATURE_CAP(a) "PCIERCX_DL_FEATURE_CAP"
+#define busnum_BDK_PCIERCX_DL_FEATURE_CAP(a) (a)
+#define arguments_BDK_PCIERCX_DL_FEATURE_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dl_feature_ext_hdr
+ *
+ * PCIe RC Data Link Feature Extended Capability Header Register
+ */
+union bdk_pciercx_dl_feature_ext_hdr
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_dl_feature_ext_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_dl_feature_ext_hdr_s cn; */
+};
+typedef union bdk_pciercx_dl_feature_ext_hdr bdk_pciercx_dl_feature_ext_hdr_t;
+
+/* Address of PCIERC(a)_DL_FEATURE_EXT_HDR: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_DL_FEATURE_EXT_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DL_FEATURE_EXT_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x450ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_DL_FEATURE_EXT_HDR", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_DL_FEATURE_EXT_HDR(a) bdk_pciercx_dl_feature_ext_hdr_t
+#define bustype_BDK_PCIERCX_DL_FEATURE_EXT_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DL_FEATURE_EXT_HDR(a) "PCIERCX_DL_FEATURE_EXT_HDR"
+#define busnum_BDK_PCIERCX_DL_FEATURE_EXT_HDR(a) (a)
+#define arguments_BDK_PCIERCX_DL_FEATURE_EXT_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_dl_feature_status
+ *
+ * PCIe RC Data Link Feature Status Register
+ */
+union bdk_pciercx_dl_feature_status
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_dl_feature_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dlfsv : 1; /**< [ 31: 31](RO) Remote data link feature supported valid. */
+ uint32_t reserved_23_30 : 8;
+ uint32_t rdlfs : 23; /**< [ 22: 0](RO/H) Features Currently defined are: Bit 0 - Remote Scaled Flow Control Supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t rdlfs : 23; /**< [ 22: 0](RO/H) Features Currently defined are: Bit 0 - Remote Scaled Flow Control Supported. */
+ uint32_t reserved_23_30 : 8;
+ uint32_t dlfsv : 1; /**< [ 31: 31](RO) Remote data link feature supported valid. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_dl_feature_status_s cn; */
+};
+typedef union bdk_pciercx_dl_feature_status bdk_pciercx_dl_feature_status_t;
+
+/* Address of PCIERC(a)_DL_FEATURE_STATUS: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_DL_FEATURE_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_DL_FEATURE_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x458ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_DL_FEATURE_STATUS", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_DL_FEATURE_STATUS(a) bdk_pciercx_dl_feature_status_t
+#define bustype_BDK_PCIERCX_DL_FEATURE_STATUS(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_DL_FEATURE_STATUS(a) "PCIERCX_DL_FEATURE_STATUS"
+#define busnum_BDK_PCIERCX_DL_FEATURE_STATUS(a) (a)
+#define arguments_BDK_PCIERCX_DL_FEATURE_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_e_cap_list
+ *
+ * PCIe RC PCIe Capabilities/PCIe Capabilities List Register
+ */
+union bdk_pciercx_e_cap_list
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_e_cap_list_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t imn : 5; /**< [ 29: 25](RO/WRSL) Interrupt message number. Updated by hardware, writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t si : 1; /**< [ 24: 24](RO/WRSL) Slot implemented. This bit is writable through PEM()_CFG_WR. */
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device port type. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCI Express capability version. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCI Express capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieid : 8; /**< [ 7: 0](RO) PCI Express capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t pciecv : 4; /**< [ 19: 16](RO) PCI Express capability version. */
+ uint32_t dpt : 4; /**< [ 23: 20](RO) Device port type. */
+ uint32_t si : 1; /**< [ 24: 24](RO/WRSL) Slot implemented. This bit is writable through PEM()_CFG_WR. */
+ uint32_t imn : 5; /**< [ 29: 25](RO/WRSL) Interrupt message number. Updated by hardware, writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_e_cap_list_s cn; */
+};
+typedef union bdk_pciercx_e_cap_list bdk_pciercx_e_cap_list_t;
+
+/* Address of PCIERC(a)_E_CAP_LIST: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_E_CAP_LIST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_E_CAP_LIST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x70ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_E_CAP_LIST", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_E_CAP_LIST(a) bdk_pciercx_e_cap_list_t
+#define bustype_BDK_PCIERCX_E_CAP_LIST(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_E_CAP_LIST(a) "PCIERCX_E_CAP_LIST"
+#define busnum_BDK_PCIERCX_E_CAP_LIST(a) (a)
+#define arguments_BDK_PCIERCX_E_CAP_LIST(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ea_cap_hdr
+ *
+ * PCIe RC Enhanced Allocation Capability ID Register
+ */
+union bdk_pciercx_ea_cap_hdr
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_ea_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ea_rsvd : 10; /**< [ 31: 22](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t num_entries : 6; /**< [ 21: 16](RO/WRSL) Number of entries following the first DW of the capability.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer.
+ Writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t eacid : 8; /**< [ 7: 0](RO/WRSL) Enhanced allocation capability ID.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+#else /* Word 0 - Little Endian */
+ uint32_t eacid : 8; /**< [ 7: 0](RO/WRSL) Enhanced allocation capability ID.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer.
+ Writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t num_entries : 6; /**< [ 21: 16](RO/WRSL) Number of entries following the first DW of the capability.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ea_rsvd : 10; /**< [ 31: 22](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ea_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_ea_cap_hdr bdk_pciercx_ea_cap_hdr_t;
+
+/* Address of PCIERC(a)_EA_CAP_HDR: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EA_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EA_CAP_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x50ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EA_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EA_CAP_HDR(a) bdk_pciercx_ea_cap_hdr_t
+#define bustype_BDK_PCIERCX_EA_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EA_CAP_HDR(a) "PCIERCX_EA_CAP_HDR"
+#define busnum_BDK_PCIERCX_EA_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_EA_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ea_entry0
+ *
+ * PCIe RC Enhanced Allocation Capability Second DW Register
+ */
+union bdk_pciercx_ea_entry0
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_ea_entry0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ea_rsvd : 16; /**< [ 31: 16](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t fixed_subnum : 8; /**< [ 15: 8](RO/WRSL) Fixed subordinate bus number.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t fixed_secnum : 8; /**< [ 7: 0](RO/WRSL) Fixed secondary bus number.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+#else /* Word 0 - Little Endian */
+ uint32_t fixed_secnum : 8; /**< [ 7: 0](RO/WRSL) Fixed secondary bus number.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t fixed_subnum : 8; /**< [ 15: 8](RO/WRSL) Fixed subordinate bus number.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ea_rsvd : 16; /**< [ 31: 16](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ea_entry0_s cn; */
+};
+typedef union bdk_pciercx_ea_entry0 bdk_pciercx_ea_entry0_t;
+
+/* Address of PCIERC(a)_EA_ENTRY0: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EA_ENTRY0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EA_ENTRY0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x54ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EA_ENTRY0", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EA_ENTRY0(a) bdk_pciercx_ea_entry0_t
+#define bustype_BDK_PCIERCX_EA_ENTRY0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EA_ENTRY0(a) "PCIERCX_EA_ENTRY0"
+#define busnum_BDK_PCIERCX_EA_ENTRY0(a) (a)
+#define arguments_BDK_PCIERCX_EA_ENTRY0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ea_entry1
+ *
+ * PCIe RC Enhanced Allocation Entry 0 First DW Register
+ */
+union bdk_pciercx_ea_entry1
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_ea_entry1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ena : 1; /**< [ 31: 31](RO/WRSL) Enable for this entry. This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t wr : 1; /**< [ 30: 30](RO/WRSL) Writable. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t ea_rsvd_1 : 6; /**< [ 29: 24](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t sprop : 8; /**< [ 23: 16](RO/WRSL) Secondary properties.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t pprop : 8; /**< [ 15: 8](RO/WRSL) Primary properties.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t bei : 4; /**< [ 7: 4](RO/WRSL) Bar equivalent indicator.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ea_rsvd_0 : 1; /**< [ 3: 3](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t esize : 3; /**< [ 2: 0](RO/WRSL) Entry size - the number of DW following the initial DW in this entry.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+#else /* Word 0 - Little Endian */
+ uint32_t esize : 3; /**< [ 2: 0](RO/WRSL) Entry size - the number of DW following the initial DW in this entry.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ea_rsvd_0 : 1; /**< [ 3: 3](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t bei : 4; /**< [ 7: 4](RO/WRSL) Bar equivalent indicator.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t pprop : 8; /**< [ 15: 8](RO/WRSL) Primary properties.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t sprop : 8; /**< [ 23: 16](RO/WRSL) Secondary properties.
+ This field is writable through PEM()_CFG_WR. However, the application must not change this
+ field. */
+ uint32_t ea_rsvd_1 : 6; /**< [ 29: 24](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t wr : 1; /**< [ 30: 30](RO/WRSL) Writable. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t ena : 1; /**< [ 31: 31](RO/WRSL) Enable for this entry. This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ea_entry1_s cn; */
+};
+typedef union bdk_pciercx_ea_entry1 bdk_pciercx_ea_entry1_t;
+
+/* Address of PCIERC(a)_EA_ENTRY1: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EA_ENTRY1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EA_ENTRY1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x58ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EA_ENTRY1", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EA_ENTRY1(a) bdk_pciercx_ea_entry1_t
+#define bustype_BDK_PCIERCX_EA_ENTRY1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EA_ENTRY1(a) "PCIERCX_EA_ENTRY1"
+#define busnum_BDK_PCIERCX_EA_ENTRY1(a) (a)
+#define arguments_BDK_PCIERCX_EA_ENTRY1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ea_entry2
+ *
+ * PCIe RC Enhanced Allocation Entry 0 Lower Base Register
+ */
+union bdk_pciercx_ea_entry2
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_ea_entry2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lbase : 30; /**< [ 31: 2](RO/WRSL) Lower base. This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t size : 1; /**< [ 1: 1](RO/WRSL) Size - 64-bit (1), 32-bit (0). This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t ea_rsvd : 1; /**< [ 0: 0](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t ea_rsvd : 1; /**< [ 0: 0](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t size : 1; /**< [ 1: 1](RO/WRSL) Size - 64-bit (1), 32-bit (0). This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t lbase : 30; /**< [ 31: 2](RO/WRSL) Lower base. This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ea_entry2_s cn; */
+};
+typedef union bdk_pciercx_ea_entry2 bdk_pciercx_ea_entry2_t;
+
+/* Address of PCIERC(a)_EA_ENTRY2: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EA_ENTRY2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EA_ENTRY2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x5cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EA_ENTRY2", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EA_ENTRY2(a) bdk_pciercx_ea_entry2_t
+#define bustype_BDK_PCIERCX_EA_ENTRY2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EA_ENTRY2(a) "PCIERCX_EA_ENTRY2"
+#define busnum_BDK_PCIERCX_EA_ENTRY2(a) (a)
+#define arguments_BDK_PCIERCX_EA_ENTRY2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ea_entry3
+ *
+ * PCIe RC Enhanced Allocation Entry 0 Max Offset Register
+ */
+union bdk_pciercx_ea_entry3
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_ea_entry3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t moffs : 30; /**< [ 31: 2](RO/WRSL) Lower base. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+
+ Internal:
+ This is the offset to cover PEMRC BAR4 0xfffff & 0xffffc \>\>2 */
+ uint32_t size : 1; /**< [ 1: 1](RO/WRSL) Size - 64-bit (1), 32-bit (0). This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t ea_rsvd : 1; /**< [ 0: 0](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t ea_rsvd : 1; /**< [ 0: 0](RO/WRSL) Reserved. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field. */
+ uint32_t size : 1; /**< [ 1: 1](RO/WRSL) Size - 64-bit (1), 32-bit (0). This field is writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t moffs : 30; /**< [ 31: 2](RO/WRSL) Lower base. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+
+ Internal:
+ This is the offset to cover PEMRC BAR4 0xfffff & 0xffffc \>\>2 */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ea_entry3_s cn; */
+};
+typedef union bdk_pciercx_ea_entry3 bdk_pciercx_ea_entry3_t;
+
+/* Address of PCIERC(a)_EA_ENTRY3: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EA_ENTRY3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EA_ENTRY3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x60ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EA_ENTRY3", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EA_ENTRY3(a) bdk_pciercx_ea_entry3_t
+#define bustype_BDK_PCIERCX_EA_ENTRY3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EA_ENTRY3(a) "PCIERCX_EA_ENTRY3"
+#define busnum_BDK_PCIERCX_EA_ENTRY3(a) (a)
+#define arguments_BDK_PCIERCX_EA_ENTRY3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ea_entry4
+ *
+ * PCIe RC Enhanced Allocation Entry 0 Upper Base Register
+ */
+union bdk_pciercx_ea_entry4
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_ea_entry4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ubase : 32; /**< [ 31: 0](RO/WRSL) Upper base. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+
+ Internal:
+ This is the upper 32 bits of PEM_BAR_E::PEM()_PF_BAR0 */
+#else /* Word 0 - Little Endian */
+ uint32_t ubase : 32; /**< [ 31: 0](RO/WRSL) Upper base. This field is writable through PEM()_CFG_WR. However, the application must
+ not change this field.
+
+ Internal:
+ This is the upper 32 bits of PEM_BAR_E::PEM()_PF_BAR0 */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ea_entry4_s cn; */
+};
+typedef union bdk_pciercx_ea_entry4 bdk_pciercx_ea_entry4_t;
+
+/* Address of PCIERC(a)_EA_ENTRY4: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EA_ENTRY4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EA_ENTRY4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x64ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EA_ENTRY4", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EA_ENTRY4(a) bdk_pciercx_ea_entry4_t
+#define bustype_BDK_PCIERCX_EA_ENTRY4(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EA_ENTRY4(a) "PCIERCX_EA_ENTRY4"
+#define busnum_BDK_PCIERCX_EA_ENTRY4(a) (a)
+#define arguments_BDK_PCIERCX_EA_ENTRY4(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ebar
+ *
+ * PCIe RC Expansion ROM Base Address Register
+ */
+union bdk_pciercx_ebar
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_ebar_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/WRSL) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t unused : 32; /**< [ 31: 0](RO/WRSL) Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ebar_s cn; */
+};
+typedef union bdk_pciercx_ebar bdk_pciercx_ebar_t;
+
+/* Address of PCIERC(a)_EBAR: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EBAR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EBAR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x38ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EBAR", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EBAR(a) bdk_pciercx_ebar_t
+#define bustype_BDK_PCIERCX_EBAR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EBAR(a) "PCIERCX_EBAR"
+#define busnum_BDK_PCIERCX_EBAR(a) (a)
+#define arguments_BDK_PCIERCX_EBAR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_eq_ctl01
+ *
+ * PCIe RC Equalization Control Lane 0/1 Register
+ */
+union bdk_pciercx_eq_ctl01
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_eq_ctl01_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l1urph : 3; /**< [ 30: 28](RO/WRSL) Lane 1 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l1utp : 4; /**< [ 27: 24](RO/WRSL) Lane 1 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l1drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 1 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l1dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 1 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l0urph : 3; /**< [ 14: 12](RO/WRSL) Lane 0 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l0utp : 4; /**< [ 11: 8](RO/WRSL) Lane 0 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l0drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 0 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l0dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 0 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+ uint32_t l0dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 0 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l0drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 0 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l0utp : 4; /**< [ 11: 8](RO/WRSL) Lane 0 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l0urph : 3; /**< [ 14: 12](RO/WRSL) Lane 0 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l1dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 1 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l1drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 1 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l1utp : 4; /**< [ 27: 24](RO/WRSL) Lane 1 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l1urph : 3; /**< [ 30: 28](RO/WRSL) Lane 1 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_eq_ctl01_s cn; */
+};
+typedef union bdk_pciercx_eq_ctl01 bdk_pciercx_eq_ctl01_t;
+
+/* Address of PCIERC(a)_EQ_CTL01: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EQ_CTL01(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EQ_CTL01(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x184ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EQ_CTL01", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EQ_CTL01(a) bdk_pciercx_eq_ctl01_t
+#define bustype_BDK_PCIERCX_EQ_CTL01(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EQ_CTL01(a) "PCIERCX_EQ_CTL01"
+#define busnum_BDK_PCIERCX_EQ_CTL01(a) (a)
+#define arguments_BDK_PCIERCX_EQ_CTL01(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_eq_ctl1011
+ *
+ * PCIe RC Equalization Control Lane 10/11 Register
+ */
+union bdk_pciercx_eq_ctl1011
+{
+ uint32_t u; /* Raw 32-bit register value; aliases the bitfield view below. */
+ struct bdk_pciercx_eq_ctl1011_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l11urph : 3; /**< [ 30: 28](RO/WRSL) Lane 11 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l11utp : 4; /**< [ 27: 24](RO/WRSL) Lane 11 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l11drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 11 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l11dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 11 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l10urph : 3; /**< [ 14: 12](RO/WRSL) Lane 10 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l10utp : 4; /**< [ 11: 8](RO/WRSL) Lane 10 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l10drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 10 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l10dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 10 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+ uint32_t l10dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 10 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l10drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 10 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l10utp : 4; /**< [ 11: 8](RO/WRSL) Lane 10 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l10urph : 3; /**< [ 14: 12](RO/WRSL) Lane 10 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l11dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 11 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l11drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 11 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l11utp : 4; /**< [ 27: 24](RO/WRSL) Lane 11 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l11urph : 3; /**< [ 30: 28](RO/WRSL) Lane 11 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_eq_ctl1011_s cn; */
+};
+typedef union bdk_pciercx_eq_ctl1011 bdk_pciercx_eq_ctl1011_t;
+
+/* Address of PCIERC(a)_EQ_CTL1011: CN9XXX only, a <= 3; else __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_EQ_CTL1011(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EQ_CTL1011(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x198ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_EQ_CTL1011", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_PCIERCX_EQ_CTL1011(a) bdk_pciercx_eq_ctl1011_t
+#define bustype_BDK_PCIERCX_EQ_CTL1011(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EQ_CTL1011(a) "PCIERCX_EQ_CTL1011"
+#define busnum_BDK_PCIERCX_EQ_CTL1011(a) (a)
+#define arguments_BDK_PCIERCX_EQ_CTL1011(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_eq_ctl1213
+ *
+ * PCIe RC Equalization Control Lane 12/13 Register
+ */
+union bdk_pciercx_eq_ctl1213
+{
+ uint32_t u;
+ struct bdk_pciercx_eq_ctl1213_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t l13urph : 3; /**< [ 30: 28](RO/WRSL) Lane 13 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l13utp : 4; /**< [ 27: 24](RO/WRSL) Lane 13 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l13drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 13 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l13dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 13 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l12urph : 3; /**< [ 14: 12](RO/WRSL) Lane 12 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l12utp : 4; /**< [ 11: 8](RO/WRSL) Lane 12 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l12drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 12 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t l12dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 12 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+ uint32_t l12dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 12 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l12drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 12 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_7 : 1;
+ uint32_t l12utp : 4; /**< [ 11: 8](RO/WRSL) Lane 12 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l12urph : 3; /**< [ 14: 12](RO/WRSL) Lane 12 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_15 : 1;
+ uint32_t l13dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 13 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l13drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 13 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_23 : 1;
+ uint32_t l13utp : 4; /**< [ 27: 24](RO/WRSL) Lane 13 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+ uint32_t l13urph : 3; /**< [ 30: 28](RO/WRSL) Lane 13 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_eq_ctl1213_s cn; */
+};
+typedef union bdk_pciercx_eq_ctl1213 bdk_pciercx_eq_ctl1213_t;
+
+static inline uint64_t BDK_PCIERCX_EQ_CTL1213(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EQ_CTL1213(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x19cll + 0x100000000ll * ((a) & 0x3);
+    /* Invalid model/index: report a fatal CSR error (presumably does not return). */
+    __bdk_csr_fatal("PCIERCX_EQ_CTL1213", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_EQ_CTL1213(a) bdk_pciercx_eq_ctl1213_t
+#define bustype_BDK_PCIERCX_EQ_CTL1213(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EQ_CTL1213(a) "PCIERCX_EQ_CTL1213"
+#define busnum_BDK_PCIERCX_EQ_CTL1213(a) (a)
+#define arguments_BDK_PCIERCX_EQ_CTL1213(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_eq_ctl1415
+ *
+ * PCIe RC Equalization Control Lane 14/15 Register
+ */
+union bdk_pciercx_eq_ctl1415
+{
+    uint32_t u;
+    struct bdk_pciercx_eq_ctl1415_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t l15urph : 3; /**< [ 30: 28](RO/WRSL) Lane 15 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l15utp : 4; /**< [ 27: 24](RO/WRSL) Lane 15 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l15drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 15 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l15dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 15 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l14urph : 3; /**< [ 14: 12](RO/WRSL) Lane 14 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l14utp : 4; /**< [ 11: 8](RO/WRSL) Lane 14 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l14drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 14 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l14dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 14 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+        uint32_t l14dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 14 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l14drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 14 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l14utp : 4; /**< [ 11: 8](RO/WRSL) Lane 14 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l14urph : 3; /**< [ 14: 12](RO/WRSL) Lane 14 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l15dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 15 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l15drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 15 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l15utp : 4; /**< [ 27: 24](RO/WRSL) Lane 15 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l15urph : 3; /**< [ 30: 28](RO/WRSL) Lane 15 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_eq_ctl1415_s cn; */
+};
+typedef union bdk_pciercx_eq_ctl1415 bdk_pciercx_eq_ctl1415_t;
+
+static inline uint64_t BDK_PCIERCX_EQ_CTL1415(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EQ_CTL1415(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1a0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_EQ_CTL1415", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_EQ_CTL1415(a) bdk_pciercx_eq_ctl1415_t
+#define bustype_BDK_PCIERCX_EQ_CTL1415(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EQ_CTL1415(a) "PCIERCX_EQ_CTL1415"
+#define busnum_BDK_PCIERCX_EQ_CTL1415(a) (a)
+#define arguments_BDK_PCIERCX_EQ_CTL1415(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_eq_ctl23
+ *
+ * PCIe RC Equalization Control Lane 2/3 Register
+ */
+union bdk_pciercx_eq_ctl23
+{
+    uint32_t u;
+    struct bdk_pciercx_eq_ctl23_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t l3urph : 3; /**< [ 30: 28](RO/WRSL) Lane 3 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l3utp : 4; /**< [ 27: 24](RO/WRSL) Lane 3 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l3drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 3 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l3dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 3 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l2urph : 3; /**< [ 14: 12](RO/WRSL) Lane 2 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l2utp : 4; /**< [ 11: 8](RO/WRSL) Lane 2 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l2drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 2 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l2dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 2 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+        uint32_t l2dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 2 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l2drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 2 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l2utp : 4; /**< [ 11: 8](RO/WRSL) Lane 2 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l2urph : 3; /**< [ 14: 12](RO/WRSL) Lane 2 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l3dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 3 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l3drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 3 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l3utp : 4; /**< [ 27: 24](RO/WRSL) Lane 3 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l3urph : 3; /**< [ 30: 28](RO/WRSL) Lane 3 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_eq_ctl23_s cn; */
+};
+typedef union bdk_pciercx_eq_ctl23 bdk_pciercx_eq_ctl23_t;
+
+static inline uint64_t BDK_PCIERCX_EQ_CTL23(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EQ_CTL23(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x188ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_EQ_CTL23", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_EQ_CTL23(a) bdk_pciercx_eq_ctl23_t
+#define bustype_BDK_PCIERCX_EQ_CTL23(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EQ_CTL23(a) "PCIERCX_EQ_CTL23"
+#define busnum_BDK_PCIERCX_EQ_CTL23(a) (a)
+#define arguments_BDK_PCIERCX_EQ_CTL23(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_eq_ctl45
+ *
+ * PCIe RC Equalization Control Lane 4/5 Register
+ */
+union bdk_pciercx_eq_ctl45
+{
+    uint32_t u;
+    struct bdk_pciercx_eq_ctl45_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t l5urph : 3; /**< [ 30: 28](RO/WRSL) Lane 5 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l5utp : 4; /**< [ 27: 24](RO/WRSL) Lane 5 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l5drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 5 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l5dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 5 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l4urph : 3; /**< [ 14: 12](RO/WRSL) Lane 4 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l4utp : 4; /**< [ 11: 8](RO/WRSL) Lane 4 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l4drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 4 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l4dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 4 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+        uint32_t l4dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 4 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l4drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 4 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l4utp : 4; /**< [ 11: 8](RO/WRSL) Lane 4 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l4urph : 3; /**< [ 14: 12](RO/WRSL) Lane 4 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l5dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 5 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l5drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 5 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l5utp : 4; /**< [ 27: 24](RO/WRSL) Lane 5 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l5urph : 3; /**< [ 30: 28](RO/WRSL) Lane 5 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_eq_ctl45_s cn; */
+};
+typedef union bdk_pciercx_eq_ctl45 bdk_pciercx_eq_ctl45_t;
+
+static inline uint64_t BDK_PCIERCX_EQ_CTL45(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EQ_CTL45(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x18cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_EQ_CTL45", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_EQ_CTL45(a) bdk_pciercx_eq_ctl45_t
+#define bustype_BDK_PCIERCX_EQ_CTL45(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EQ_CTL45(a) "PCIERCX_EQ_CTL45"
+#define busnum_BDK_PCIERCX_EQ_CTL45(a) (a)
+#define arguments_BDK_PCIERCX_EQ_CTL45(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_eq_ctl67
+ *
+ * PCIe RC Equalization Control Lane 6/7 Register
+ */
+union bdk_pciercx_eq_ctl67
+{
+    uint32_t u;
+    struct bdk_pciercx_eq_ctl67_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t l7urph : 3; /**< [ 30: 28](RO/WRSL) Lane 7 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l7utp : 4; /**< [ 27: 24](RO/WRSL) Lane 7 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l7drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 7 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l7dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 7 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l6urph : 3; /**< [ 14: 12](RO/WRSL) Lane 6 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l6utp : 4; /**< [ 11: 8](RO/WRSL) Lane 6 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l6drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 6 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l6dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 6 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+        uint32_t l6dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 6 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l6drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 6 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l6utp : 4; /**< [ 11: 8](RO/WRSL) Lane 6 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l6urph : 3; /**< [ 14: 12](RO/WRSL) Lane 6 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l7dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 7 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l7drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 7 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l7utp : 4; /**< [ 27: 24](RO/WRSL) Lane 7 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l7urph : 3; /**< [ 30: 28](RO/WRSL) Lane 7 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_eq_ctl67_s cn; */
+};
+typedef union bdk_pciercx_eq_ctl67 bdk_pciercx_eq_ctl67_t;
+
+static inline uint64_t BDK_PCIERCX_EQ_CTL67(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EQ_CTL67(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x190ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_EQ_CTL67", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_EQ_CTL67(a) bdk_pciercx_eq_ctl67_t
+#define bustype_BDK_PCIERCX_EQ_CTL67(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EQ_CTL67(a) "PCIERCX_EQ_CTL67"
+#define busnum_BDK_PCIERCX_EQ_CTL67(a) (a)
+#define arguments_BDK_PCIERCX_EQ_CTL67(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_eq_ctl89
+ *
+ * PCIe RC Equalization Control Lane 8/9 Register
+ */
+union bdk_pciercx_eq_ctl89
+{
+    uint32_t u;
+    struct bdk_pciercx_eq_ctl89_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t l9urph : 3; /**< [ 30: 28](RO/WRSL) Lane 9 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l9utp : 4; /**< [ 27: 24](RO/WRSL) Lane 9 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l9drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 9 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l9dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 9 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l8urph : 3; /**< [ 14: 12](RO/WRSL) Lane 8 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l8utp : 4; /**< [ 11: 8](RO/WRSL) Lane 8 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l8drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 8 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t l8dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 8 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+        uint32_t l8dtp : 4; /**< [ 3: 0](RO/WRSL/H) Lane 8 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l8drph : 3; /**< [ 6: 4](RO/WRSL/H) Lane 8 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_7 : 1;
+        uint32_t l8utp : 4; /**< [ 11: 8](RO/WRSL) Lane 8 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l8urph : 3; /**< [ 14: 12](RO/WRSL) Lane 8 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_15 : 1;
+        uint32_t l9dtp : 4; /**< [ 19: 16](RO/WRSL/H) Lane 9 downstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l9drph : 3; /**< [ 22: 20](RO/WRSL/H) Lane 9 downstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_23 : 1;
+        uint32_t l9utp : 4; /**< [ 27: 24](RO/WRSL) Lane 9 upstream component transmitter preset. Writable through PEM()_CFG_WR. */
+        uint32_t l9urph : 3; /**< [ 30: 28](RO/WRSL) Lane 9 upstream component receiver preset hint. Writable through PEM()_CFG_WR. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_eq_ctl89_s cn; */
+};
+typedef union bdk_pciercx_eq_ctl89 bdk_pciercx_eq_ctl89_t;
+
+static inline uint64_t BDK_PCIERCX_EQ_CTL89(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EQ_CTL89(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x194ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_EQ_CTL89", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_EQ_CTL89(a) bdk_pciercx_eq_ctl89_t
+#define bustype_BDK_PCIERCX_EQ_CTL89(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EQ_CTL89(a) "PCIERCX_EQ_CTL89"
+#define busnum_BDK_PCIERCX_EQ_CTL89(a) (a)
+#define arguments_BDK_PCIERCX_EQ_CTL89(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_err_source
+ *
+ * PCIe RC Error Source Identification Register
+ */
+union bdk_pciercx_err_source
+{
+    uint32_t u;
+    struct bdk_pciercx_err_source_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t efnfsi : 16; /**< [ 31: 16](RO/H) ERR_FATAL/NONFATAL source identification. */
+        uint32_t ecsi : 16; /**< [ 15: 0](RO/H) ERR_COR source identification. */
+#else /* Word 0 - Little Endian */
+        uint32_t ecsi : 16; /**< [ 15: 0](RO/H) ERR_COR source identification. */
+        uint32_t efnfsi : 16; /**< [ 31: 16](RO/H) ERR_FATAL/NONFATAL source identification. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_err_source_s cn; */
+};
+typedef union bdk_pciercx_err_source bdk_pciercx_err_source_t;
+
+static inline uint64_t BDK_PCIERCX_ERR_SOURCE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ERR_SOURCE(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x134ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_ERR_SOURCE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_ERR_SOURCE(a) bdk_pciercx_err_source_t
+#define bustype_BDK_PCIERCX_ERR_SOURCE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ERR_SOURCE(a) "PCIERCX_ERR_SOURCE"
+#define busnum_BDK_PCIERCX_ERR_SOURCE(a) (a)
+#define arguments_BDK_PCIERCX_ERR_SOURCE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ext_cap
+ *
+ * PCIe RC PCI Express Extended Capability Header Register
+ */
+union bdk_pciercx_ext_cap
+{
+    uint32_t u;
+    struct bdk_pciercx_ext_cap_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ext_cap_s cn; */
+};
+typedef union bdk_pciercx_ext_cap bdk_pciercx_ext_cap_t;
+
+static inline uint64_t BDK_PCIERCX_EXT_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_EXT_CAP(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x100ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_EXT_CAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_EXT_CAP(a) bdk_pciercx_ext_cap_t
+#define bustype_BDK_PCIERCX_EXT_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_EXT_CAP(a) "PCIERCX_EXT_CAP"
+#define busnum_BDK_PCIERCX_EXT_CAP(a) (a)
+#define arguments_BDK_PCIERCX_EXT_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_filt_msk2
+ *
+ * PCIe RC Filter Mask Register 2
+ */
+union bdk_pciercx_filt_msk2
+{
+    uint32_t u;
+    struct bdk_pciercx_filt_msk2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31 : 24;
+        uint32_t m_prs : 1; /**< [ 7: 7](R/W) Mask PRS messages dropped silently. */
+        uint32_t m_unmask_td : 1; /**< [ 6: 6](R/W) Disable unmask TD bit. */
+        uint32_t m_unmask_ur_pois : 1; /**< [ 5: 5](R/W) Disable unmask UR Poison with TRGT0 destination. */
+        uint32_t m_ln_vend1_drop : 1; /**< [ 4: 4](R/W) Mask LN messages dropped silently. */
+        uint32_t m_handle_flush : 1; /**< [ 3: 3](R/W) Mask core filter to handle flush request. */
+        uint32_t m_dabort_4ucpl : 1; /**< [ 2: 2](R/W) Mask DLLP abort for unexpected CPL. */
+        uint32_t m_vend1_drp : 1; /**< [ 1: 1](R/W) Mask vendor MSG type 1 dropped silently. */
+        uint32_t m_vend0_drp : 1; /**< [ 0: 0](R/W) Mask vendor MSG type 0 dropped with UR error reporting. */
+#else /* Word 0 - Little Endian */
+        uint32_t m_vend0_drp : 1; /**< [ 0: 0](R/W) Mask vendor MSG type 0 dropped with UR error reporting. */
+        uint32_t m_vend1_drp : 1; /**< [ 1: 1](R/W) Mask vendor MSG type 1 dropped silently. */
+        uint32_t m_dabort_4ucpl : 1; /**< [ 2: 2](R/W) Mask DLLP abort for unexpected CPL. */
+        uint32_t m_handle_flush : 1; /**< [ 3: 3](R/W) Mask core filter to handle flush request. */
+        uint32_t m_ln_vend1_drop : 1; /**< [ 4: 4](R/W) Mask LN messages dropped silently. */
+        uint32_t m_unmask_ur_pois : 1; /**< [ 5: 5](R/W) Disable unmask UR Poison with TRGT0 destination. */
+        uint32_t m_unmask_td : 1; /**< [ 6: 6](R/W) Disable unmask TD bit. */
+        uint32_t m_prs : 1; /**< [ 7: 7](R/W) Mask PRS messages dropped silently. */
+        uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_filt_msk2_s cn; */
+};
+typedef union bdk_pciercx_filt_msk2 bdk_pciercx_filt_msk2_t;
+
+static inline uint64_t BDK_PCIERCX_FILT_MSK2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_FILT_MSK2(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x720ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_FILT_MSK2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_FILT_MSK2(a) bdk_pciercx_filt_msk2_t
+#define bustype_BDK_PCIERCX_FILT_MSK2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_FILT_MSK2(a) "PCIERCX_FILT_MSK2"
+#define busnum_BDK_PCIERCX_FILT_MSK2(a) (a)
+#define arguments_BDK_PCIERCX_FILT_MSK2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_gen2_port
+ *
+ * PCIe RC Gen2 Port Logic Register
+ */
+union bdk_pciercx_gen2_port
+{
+    uint32_t u;
+    struct bdk_pciercx_gen2_port_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_22_31 : 10;
+        uint32_t gen1_ei_inf : 1; /**< [ 21: 21](R/W) Electrical idle inference mode at Gen1 Rate. Programmable mode to determine
+ inferred electrical idle (EI) in Recovery.Speed or Loopback.Active (as slave)
+ state at Gen1 speed by looking for a one value on RxElecIdle instead of looking
+ for a zero on RxValid. If the PHY fails to deassert the RxValid signal in
+ Recovery.Speed or Loopback.Active (because of corrupted EIOS for example),
+ then EI cannot be inferred successfully in the controller by just detecting the
+ condition RxValid=0.
+ 0 = Use RxElecIdle signal to infer electrical idle.
+ 1 = Use RxValid signal to infer electrical idle. */
+        uint32_t s_d_e : 1; /**< [ 20: 20](R/W) Set the deemphasis level for upstream ports.
+ 0 = -6 dB.
+ 1 = -3.5 dB. */
+        uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to one, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to one). */
+        uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to one,
+ indicates low swing. When set to 0, indicates full swing. */
+        uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of one initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+        uint32_t alaneflip : 1; /**< [ 16: 16](R/W) Enable auto flipping of the lanes. */
+        uint32_t pdetlane : 3; /**< [ 15: 13](R/W) Predetermined lane for auto flip. This field defines which
+ physical lane is connected to logical Lane0 by the flip
+ operation performed in detect.
+ 0x0 = Reserved.
+ 0x1 = Connect logical Lane0 to physical lane 1.
+ 0x2 = Connect logical Lane0 to physical lane 3.
+ 0x3 = Connect logical Lane0 to physical lane 7.
+ 0x4 = Connect logical Lane0 to physical lane 15.
+ 0x5 - 0x7 = Reserved. */
+        uint32_t nlanes : 5; /**< [ 12: 8](R/W) Predetermined number of lanes. Defines the number of
+ lanes which are connected and not bad. Used to limit the
+ effective link width to ignore "broken" or "unused" lanes that
+ detect a receiver. Indicates the number of lanes to check for
+ exit from electrical idle in Polling.Active and L2.Idle.
+
+ 0x1 = 1 lane.
+ 0x2 = 2 lanes.
+ 0x3 = 3 lanes.
+ _ ...
+ 0x10 = 16 lanes.
+ 0x11-0x1F = Reserved.
+
+ When you have unused lanes in your system, then you must
+ change the value in this register to reflect the number of
+ lanes. You must also change PCIERC_PORT_CTL[LME]. */
+        uint32_t n_fts : 8; /**< [ 7: 0](R/W) Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+#else /* Word 0 - Little Endian */
+        uint32_t n_fts : 8; /**< [ 7: 0](R/W) Sets the number of fast training sequences (N_FTS) that the core advertises as its
+ N_FTS during GEN2 Link training. This value is used to inform the link partner about the
+ PHY's ability to recover synchronization after a low power state.
+
+ Do not set [N_FTS] to zero; doing so can cause the LTSSM to go into the recovery
+ state when exiting from L0s. */
+        uint32_t nlanes : 5; /**< [ 12: 8](R/W) Predetermined number of lanes. Defines the number of
+ lanes which are connected and not bad. Used to limit the
+ effective link width to ignore "broken" or "unused" lanes that
+ detect a receiver. Indicates the number of lanes to check for
+ exit from electrical idle in Polling.Active and L2.Idle.
+
+ 0x1 = 1 lane.
+ 0x2 = 2 lanes.
+ 0x3 = 3 lanes.
+ _ ...
+ 0x10 = 16 lanes.
+ 0x11-0x1F = Reserved.
+
+ When you have unused lanes in your system, then you must
+ change the value in this register to reflect the number of
+ lanes. You must also change PCIERC_PORT_CTL[LME]. */
+        uint32_t pdetlane : 3; /**< [ 15: 13](R/W) Predetermined lane for auto flip. This field defines which
+ physical lane is connected to logical Lane0 by the flip
+ operation performed in detect.
+ 0x0 = Reserved.
+ 0x1 = Connect logical Lane0 to physical lane 1.
+ 0x2 = Connect logical Lane0 to physical lane 3.
+ 0x3 = Connect logical Lane0 to physical lane 7.
+ 0x4 = Connect logical Lane0 to physical lane 15.
+ 0x5 - 0x7 = Reserved. */
+        uint32_t alaneflip : 1; /**< [ 16: 16](R/W) Enable auto flipping of the lanes. */
+        uint32_t dsc : 1; /**< [ 17: 17](R/W/H) Directed speed change. A write of one initiates a speed change.
+ When the speed change occurs, the controller will clear the contents of this field. */
+        uint32_t cpyts : 1; /**< [ 18: 18](R/W) Config PHY TX swing. Indicates the voltage level that the PHY should drive. When set to one,
+ indicates low swing. When set to 0, indicates full swing. */
+        uint32_t ctcrb : 1; /**< [ 19: 19](R/W) Config TX compliance receive bit. When set to one, signals LTSSM to transmit TS ordered sets
+ with the compliance receive bit assert (equal to one). */
+        uint32_t s_d_e : 1; /**< [ 20: 20](R/W) Set the deemphasis level for upstream ports.
+ 0 = -6 dB.
+ 1 = -3.5 dB. */
+        uint32_t gen1_ei_inf : 1; /**< [ 21: 21](R/W) Electrical idle inference mode at Gen1 Rate. Programmable mode to determine
+ inferred electrical idle (EI) in Recovery.Speed or Loopback.Active (as slave)
+ state at Gen1 speed by looking for a one value on RxElecIdle instead of looking
+ for a zero on RxValid. If the PHY fails to deassert the RxValid signal in
+ Recovery.Speed or Loopback.Active (because of corrupted EIOS for example),
+ then EI cannot be inferred successfully in the controller by just detecting the
+ condition RxValid=0.
+ 0 = Use RxElecIdle signal to infer electrical idle.
+ 1 = Use RxValid signal to infer electrical idle. */
+        uint32_t reserved_22_31 : 10;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_gen2_port_s cn; */
+};
+typedef union bdk_pciercx_gen2_port bdk_pciercx_gen2_port_t;
+
+static inline uint64_t BDK_PCIERCX_GEN2_PORT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_GEN2_PORT(unsigned long a)
+{
+    /* 'a' selects the PEM/root-complex instance; only a = 0..3 on CN9XXX is valid. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x80cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_GEN2_PORT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_GEN2_PORT(a) bdk_pciercx_gen2_port_t
+#define bustype_BDK_PCIERCX_GEN2_PORT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_GEN2_PORT(a) "PCIERCX_GEN2_PORT"
+#define busnum_BDK_PCIERCX_GEN2_PORT(a) (a)
+#define arguments_BDK_PCIERCX_GEN2_PORT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_gen3_eq_ctl
+ *
+ * PCIe RC Gen3 EQ Control Register
+ */
+union bdk_pciercx_gen3_eq_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_gen3_eq_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t scefpm : 1; /**< [ 26: 26](R/W) Request core to send back-to-back EIEOS in Recovery.RcvrLock state until
+ presets to coefficient mapping is complete. */
+ uint32_t eq_pset_req : 1; /**< [ 25: 25](R/W/H) Reserved. */
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0b0000000000000000 = No preset req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxxx1 = Preset 0 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxx1x = Preset 1 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxx1xx = Preset 2 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxx1xxx = Preset 3 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxx1xxxx = Preset 4 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxx1xxxxx = Preset 5 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxx1xxxxxx = Preset 6 req/evaluated in EQ master phase.
+
+ _ 0b00000xxx1xxxxxxx = Preset 7 req/evaluated in EQ master phase.
+
+ _ 0b00000xx1xxxxxxxx = Preset 8 req/evaluated in EQ master phase.
+
+ _ 0b00000x1xxxxxxxxx = Preset 9 req/evaluated in EQ master phase.
+
+ _ 0b000001xxxxxxxxxx = Preset 10 req/evaluated in EQ master phase.
+
+ _ All other encodings = Reserved. */
+ uint32_t reserved_7 : 1;
+ uint32_t eq_redo_en : 1; /**< [ 6: 6](R/W) Support EQ redo and lower rate change. */
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovery.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovery.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change.
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t fm : 4; /**< [ 3: 0](R/W) Feedback mode.
+ 0 = Direction of change.
+ 1 = Figure of merit.
+ 2-15 = Reserved. */
+ uint32_t bt : 1; /**< [ 4: 4](R/W) Behavior after 24 ms timeout (when optimal settings are not found).
+
+ For a USP: determine the next LTSSM state from Phase2:
+ 0 = Recovery.Speed.
+ 1 = Recovery.Equalization.Phase3.
+
+ For a DSP: determine the next LTSSM state from Phase3:
+ 0 = Recovery.Speed.
+ 1 = Recovery.Equalization.RcrLock.
+
+ When optimal settings are not found:
+ * Equalization phase 3 successful status bit is not set in the link status register.
+ * Equalization phase 3 complete status bit is set in the link status register. */
+ uint32_t p23td : 1; /**< [ 5: 5](R/W) Phase2_3 2 ms timeout disable. Determine behavior in Phase2 for USP (Phase3 if DSP) when
+ the PHY does not respond within 2 ms to the assertion of RxEqEval:
+ 0 = Abort the current evaluation; stop any attempt to modify the remote transmitter
+ settings. Phase2 will be terminated by the 24 ms timeout.
+ 1 = Ignore the 2 ms timeout and continue as normal. This is used to support PHYs that
+ require more than 2 ms to respond to the assertion of RxEqEval. */
+ uint32_t eq_redo_en : 1; /**< [ 6: 6](R/W) Support EQ redo and lower rate change. */
+ uint32_t reserved_7 : 1;
+ uint32_t prv : 16; /**< [ 23: 8](R/W) Preset request vector. Requesting of presets during the initial part of the EQ master
+ phase. Encoding scheme as follows:
+
+ Bit [15:0] = 0x0: No preset is requested and evaluated in the EQ master phase.
+
+ Bit [i] = 1: Preset=i is requested and evaluated in the EQ master phase.
+
+ _ 0b0000000000000000 = No preset req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxxx1 = Preset 0 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxxx1x = Preset 1 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxxx1xx = Preset 2 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxxx1xxx = Preset 3 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxxx1xxxx = Preset 4 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxxx1xxxxx = Preset 5 req/evaluated in EQ master phase.
+
+ _ 0b00000xxxx1xxxxxx = Preset 6 req/evaluated in EQ master phase.
+
+ _ 0b00000xxx1xxxxxxx = Preset 7 req/evaluated in EQ master phase.
+
+ _ 0b00000xx1xxxxxxxx = Preset 8 req/evaluated in EQ master phase.
+
+ _ 0b00000x1xxxxxxxxx = Preset 9 req/evaluated in EQ master phase.
+
+ _ 0b000001xxxxxxxxxx = Preset 10 req/evaluated in EQ master phase.
+
+ _ All other encodings = Reserved. */
+ uint32_t iif : 1; /**< [ 24: 24](R/W) Include initial FOM. Include, or not, the FOM feedback from the initial preset evaluation
+ performed in the EQ master, when finding the highest FOM among all preset evaluations. */
+ uint32_t eq_pset_req : 1; /**< [ 25: 25](R/W/H) Reserved. */
+ uint32_t scefpm : 1; /**< [ 26: 26](R/W) Request core to send back-to-back EIEOS in Recovery.RcvrLock state until
+ presets to coefficient mapping is complete. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_gen3_eq_ctl_s cn; */
+};
+typedef union bdk_pciercx_gen3_eq_ctl bdk_pciercx_gen3_eq_ctl_t;
+
+static inline uint64_t BDK_PCIERCX_GEN3_EQ_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_GEN3_EQ_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8a8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_GEN3_EQ_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_GEN3_EQ_CTL(a) bdk_pciercx_gen3_eq_ctl_t
+#define bustype_BDK_PCIERCX_GEN3_EQ_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_GEN3_EQ_CTL(a) "PCIERCX_GEN3_EQ_CTL"
+#define busnum_BDK_PCIERCX_GEN3_EQ_CTL(a) (a)
+#define arguments_BDK_PCIERCX_GEN3_EQ_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_gen3_fb_mode_dir_chg
+ *
+ * PCIe RC Gen3 EQ Direction Change Feedback Mode Control Register
+ */
+union bdk_pciercx_gen3_fb_mode_dir_chg
+{
+ uint32_t u;
+ struct bdk_pciercx_gen3_fb_mode_dir_chg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_18_31 : 14;
+ uint32_t max_post_cur_delta : 4; /**< [ 17: 14](R/W) Convergence window aperture for C+1. Postcursor coefficients maximum delta
+ within the convergence window depth. */
+ uint32_t max_pre_cur_delta : 4; /**< [ 13: 10](R/W) Convergence window aperture for C-1. Precursor coefficients maximum delta
+ within the convergence window depth. */
+ uint32_t n_evals : 5; /**< [ 9: 5](R/W) Convergence window depth. Number of consecutive evaluations
+ considered in phase 2/3 when determining if optimal coefficients
+ have been found.
+
+ When 0x0, the EQ master phase is performed without sending any
+ requests to the remote partner in phase 2 for USP and
+ phase 3 for DSP. Therefore, the remote partner will not
+ change its transmitter coefficients and will move to the next
+ state.
+
+ Legal values: 0x0, 0x1, and 0x2. */
+ uint32_t min_phase23 : 5; /**< [ 4: 0](R/W) Minimum time (in ms) to remain in EQ master phase. The
+ LTSSM stays in EQ master phase for at least this amount of
+ time, before starting to check for convergence of the
+ coefficients.
+
+ Legal values: 0..24. */
+#else /* Word 0 - Little Endian */
+ uint32_t min_phase23 : 5; /**< [ 4: 0](R/W) Minimum time (in ms) to remain in EQ master phase. The
+ LTSSM stays in EQ master phase for at least this amount of
+ time, before starting to check for convergence of the
+ coefficients.
+
+ Legal values: 0..24. */
+ uint32_t n_evals : 5; /**< [ 9: 5](R/W) Convergence window depth. Number of consecutive evaluations
+ considered in phase 2/3 when determining if optimal coefficients
+ have been found.
+
+ When 0x0, the EQ master phase is performed without sending any
+ requests to the remote partner in phase 2 for USP and
+ phase 3 for DSP. Therefore, the remote partner will not
+ change its transmitter coefficients and will move to the next
+ state.
+
+ Legal values: 0x0, 0x1, and 0x2. */
+ uint32_t max_pre_cur_delta : 4; /**< [ 13: 10](R/W) Convergence window aperture for C-1. Precursor coefficients maximum delta
+ within the convergence window depth. */
+ uint32_t max_post_cur_delta : 4; /**< [ 17: 14](R/W) Convergence window aperture for C+1. Postcursor coefficients maximum delta
+ within the convergence window depth. */
+ uint32_t reserved_18_31 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_gen3_fb_mode_dir_chg_s cn; */
+};
+typedef union bdk_pciercx_gen3_fb_mode_dir_chg bdk_pciercx_gen3_fb_mode_dir_chg_t;
+
+static inline uint64_t BDK_PCIERCX_GEN3_FB_MODE_DIR_CHG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_GEN3_FB_MODE_DIR_CHG(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8acll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_GEN3_FB_MODE_DIR_CHG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_GEN3_FB_MODE_DIR_CHG(a) bdk_pciercx_gen3_fb_mode_dir_chg_t
+#define bustype_BDK_PCIERCX_GEN3_FB_MODE_DIR_CHG(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_GEN3_FB_MODE_DIR_CHG(a) "PCIERCX_GEN3_FB_MODE_DIR_CHG"
+#define busnum_BDK_PCIERCX_GEN3_FB_MODE_DIR_CHG(a) (a)
+#define arguments_BDK_PCIERCX_GEN3_FB_MODE_DIR_CHG(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_gen3_pipe_lb
+ *
+ * PCIe RC Gen3 PIPE Loopback Register
+ */
+union bdk_pciercx_gen3_pipe_lb
+{
+ uint32_t u;
+ struct bdk_pciercx_gen3_pipe_lb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ple : 1; /**< [ 31: 31](R/W) PIPE loopback enable. */
+ uint32_t reserved_27_30 : 4;
+ uint32_t rx_stat : 3; /**< [ 26: 24](RO) Reserved. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t rxstat_ln : 6; /**< [ 21: 16](R/W) Reserved. */
+ uint32_t lpbk_rxvalid : 16; /**< [ 15: 0](R/W) Loopback rxvalid (lane enable - 1 bit per lane). */
+#else /* Word 0 - Little Endian */
+ uint32_t lpbk_rxvalid : 16; /**< [ 15: 0](R/W) Loopback rxvalid (lane enable - 1 bit per lane). */
+ uint32_t rxstat_ln : 6; /**< [ 21: 16](R/W) Reserved. */
+ uint32_t reserved_22_23 : 2;
+ uint32_t rx_stat : 3; /**< [ 26: 24](RO) Reserved. */
+ uint32_t reserved_27_30 : 4;
+ uint32_t ple : 1; /**< [ 31: 31](R/W) PIPE loopback enable. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_gen3_pipe_lb_s cn; */
+};
+typedef union bdk_pciercx_gen3_pipe_lb bdk_pciercx_gen3_pipe_lb_t;
+
+static inline uint64_t BDK_PCIERCX_GEN3_PIPE_LB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_GEN3_PIPE_LB(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8b8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_GEN3_PIPE_LB", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_GEN3_PIPE_LB(a) bdk_pciercx_gen3_pipe_lb_t
+#define bustype_BDK_PCIERCX_GEN3_PIPE_LB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_GEN3_PIPE_LB(a) "PCIERCX_GEN3_PIPE_LB"
+#define busnum_BDK_PCIERCX_GEN3_PIPE_LB(a) (a)
+#define arguments_BDK_PCIERCX_GEN3_PIPE_LB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_gen4_lane_margining_1
+ *
+ * PCIe RC Gen4 Lane Margining Register 1
+ */
+union bdk_pciercx_gen4_lane_margining_1
+{
+ uint32_t u;
+ struct bdk_pciercx_gen4_lane_margining_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t mvo : 6; /**< [ 29: 24](R/W) Max voltage offset for lane margining at the receiver. */
+ uint32_t reserved_23 : 1;
+ uint32_t nvs : 7; /**< [ 22: 16](R/W) Num voltage steps for lane margining at the receiver. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t mto : 6; /**< [ 13: 8](R/W) Max timing offset for lane margining at the receiver. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t nts : 6; /**< [ 5: 0](R/W) Num timing steps for lane margining at the receiver. */
+#else /* Word 0 - Little Endian */
+ uint32_t nts : 6; /**< [ 5: 0](R/W) Num timing steps for lane margining at the receiver. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t mto : 6; /**< [ 13: 8](R/W) Max timing offset for lane margining at the receiver. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t nvs : 7; /**< [ 22: 16](R/W) Num voltage steps for lane margining at the receiver. */
+ uint32_t reserved_23 : 1;
+ uint32_t mvo : 6; /**< [ 29: 24](R/W) Max voltage offset for lane margining at the receiver. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_gen4_lane_margining_1_s cn; */
+};
+typedef union bdk_pciercx_gen4_lane_margining_1 bdk_pciercx_gen4_lane_margining_1_t;
+
+static inline uint64_t BDK_PCIERCX_GEN4_LANE_MARGINING_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_GEN4_LANE_MARGINING_1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0xb80ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_GEN4_LANE_MARGINING_1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_GEN4_LANE_MARGINING_1(a) bdk_pciercx_gen4_lane_margining_1_t
+#define bustype_BDK_PCIERCX_GEN4_LANE_MARGINING_1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_GEN4_LANE_MARGINING_1(a) "PCIERCX_GEN4_LANE_MARGINING_1"
+#define busnum_BDK_PCIERCX_GEN4_LANE_MARGINING_1(a) (a)
+#define arguments_BDK_PCIERCX_GEN4_LANE_MARGINING_1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_gen4_lane_margining_2
+ *
+ * PCIe RC Gen4 Lane Margining Register 2
+ */
+union bdk_pciercx_gen4_lane_margining_2
+{
+ uint32_t u;
+ struct bdk_pciercx_gen4_lane_margining_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t ies : 1; /**< [ 28: 28](R/W) Independent error sampler for lane margining at the receiver. */
+ uint32_t srm : 1; /**< [ 27: 27](R/W) Sample reporting method for lane margining at the receiver. */
+ uint32_t ilrt : 1; /**< [ 26: 26](R/W) Independent left right timing for lane margining at the receiver. */
+ uint32_t iudv : 1; /**< [ 25: 25](R/W) Independent up down voltage for lane margining at the receiver. */
+ uint32_t volt_sup : 1; /**< [ 24: 24](R/W) Voltage supported for lane margining at the receiver. */
+ uint32_t reserved_21_23 : 3;
+ uint32_t max_lanes : 5; /**< [ 20: 16](R/W) Max lanes for lane margining at the receiver. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t srt : 6; /**< [ 13: 8](R/W) Sample rate timing for lane margining at the receiver. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t srv : 6; /**< [ 5: 0](R/W) Sample rate voltage for lane margining at the receiver. */
+#else /* Word 0 - Little Endian */
+ uint32_t srv : 6; /**< [ 5: 0](R/W) Sample rate voltage for lane margining at the receiver. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t srt : 6; /**< [ 13: 8](R/W) Sample rate timing for lane margining at the receiver. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t max_lanes : 5; /**< [ 20: 16](R/W) Max lanes for lane margining at the receiver. */
+ uint32_t reserved_21_23 : 3;
+ uint32_t volt_sup : 1; /**< [ 24: 24](R/W) Voltage supported for lane margining at the receiver. */
+ uint32_t iudv : 1; /**< [ 25: 25](R/W) Independent up down voltage for lane margining at the receiver. */
+ uint32_t ilrt : 1; /**< [ 26: 26](R/W) Independent left right timing for lane margining at the receiver. */
+ uint32_t srm : 1; /**< [ 27: 27](R/W) Sample reporting method for lane margining at the receiver. */
+ uint32_t ies : 1; /**< [ 28: 28](R/W) Independent error sampler for lane margining at the receiver. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_gen4_lane_margining_2_s cn; */
+};
+typedef union bdk_pciercx_gen4_lane_margining_2 bdk_pciercx_gen4_lane_margining_2_t;
+
+static inline uint64_t BDK_PCIERCX_GEN4_LANE_MARGINING_2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_GEN4_LANE_MARGINING_2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0xb84ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_GEN4_LANE_MARGINING_2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_GEN4_LANE_MARGINING_2(a) bdk_pciercx_gen4_lane_margining_2_t
+#define bustype_BDK_PCIERCX_GEN4_LANE_MARGINING_2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_GEN4_LANE_MARGINING_2(a) "PCIERCX_GEN4_LANE_MARGINING_2"
+#define busnum_BDK_PCIERCX_GEN4_LANE_MARGINING_2(a) (a)
+#define arguments_BDK_PCIERCX_GEN4_LANE_MARGINING_2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_hdr_log1
+ *
+ * PCIe RC Header Log Register 1
+ * The header log registers collect the header for the TLP corresponding to a detected error.
+ */
+union bdk_pciercx_hdr_log1
+{
+ uint32_t u;
+ struct bdk_pciercx_hdr_log1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dword1 : 32; /**< [ 31: 0](RO/H) Header log register (first DWORD of the logged TLP header). */
+#else /* Word 0 - Little Endian */
+ uint32_t dword1 : 32; /**< [ 31: 0](RO/H) Header log register (first DWORD of the logged TLP header). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_hdr_log1_s cn; */
+};
+typedef union bdk_pciercx_hdr_log1 bdk_pciercx_hdr_log1_t;
+
+static inline uint64_t BDK_PCIERCX_HDR_LOG1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_HDR_LOG1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x11cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_HDR_LOG1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_HDR_LOG1(a) bdk_pciercx_hdr_log1_t
+#define bustype_BDK_PCIERCX_HDR_LOG1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_HDR_LOG1(a) "PCIERCX_HDR_LOG1"
+#define busnum_BDK_PCIERCX_HDR_LOG1(a) (a)
+#define arguments_BDK_PCIERCX_HDR_LOG1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_hdr_log2
+ *
+ * PCIe RC Header Log Register 2
+ * The header log registers collect the header for the TLP corresponding to a detected error.
+ */
+union bdk_pciercx_hdr_log2
+{
+ uint32_t u;
+ struct bdk_pciercx_hdr_log2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dword2 : 32; /**< [ 31: 0](RO/H) Header log register (second DWORD of the logged TLP header). */
+#else /* Word 0 - Little Endian */
+ uint32_t dword2 : 32; /**< [ 31: 0](RO/H) Header log register (second DWORD of the logged TLP header). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_hdr_log2_s cn; */
+};
+typedef union bdk_pciercx_hdr_log2 bdk_pciercx_hdr_log2_t;
+
+static inline uint64_t BDK_PCIERCX_HDR_LOG2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_HDR_LOG2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x120ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_HDR_LOG2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_HDR_LOG2(a) bdk_pciercx_hdr_log2_t
+#define bustype_BDK_PCIERCX_HDR_LOG2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_HDR_LOG2(a) "PCIERCX_HDR_LOG2"
+#define busnum_BDK_PCIERCX_HDR_LOG2(a) (a)
+#define arguments_BDK_PCIERCX_HDR_LOG2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_hdr_log3
+ *
+ * PCIe RC Header Log Register 3
+ * The header log registers collect the header for the TLP corresponding to a detected error.
+ */
+union bdk_pciercx_hdr_log3
+{
+ uint32_t u;
+ struct bdk_pciercx_hdr_log3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dword3 : 32; /**< [ 31: 0](RO/H) Header log register (third DWORD of the logged TLP header). */
+#else /* Word 0 - Little Endian */
+ uint32_t dword3 : 32; /**< [ 31: 0](RO/H) Header log register (third DWORD of the logged TLP header). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_hdr_log3_s cn; */
+};
+typedef union bdk_pciercx_hdr_log3 bdk_pciercx_hdr_log3_t;
+
+static inline uint64_t BDK_PCIERCX_HDR_LOG3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_HDR_LOG3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x124ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_HDR_LOG3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_HDR_LOG3(a) bdk_pciercx_hdr_log3_t
+#define bustype_BDK_PCIERCX_HDR_LOG3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_HDR_LOG3(a) "PCIERCX_HDR_LOG3"
+#define busnum_BDK_PCIERCX_HDR_LOG3(a) (a)
+#define arguments_BDK_PCIERCX_HDR_LOG3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_hdr_log4
+ *
+ * PCIe RC Header Log Register 4
+ * The header log registers collect the header for the TLP corresponding to a detected error.
+ */
+union bdk_pciercx_hdr_log4
+{
+ uint32_t u;
+ struct bdk_pciercx_hdr_log4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dword4 : 32; /**< [ 31: 0](RO/H) Header log register (fourth DWORD of the logged TLP header). */
+#else /* Word 0 - Little Endian */
+ uint32_t dword4 : 32; /**< [ 31: 0](RO/H) Header log register (fourth DWORD of the logged TLP header). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_hdr_log4_s cn; */
+};
+typedef union bdk_pciercx_hdr_log4 bdk_pciercx_hdr_log4_t;
+
+static inline uint64_t BDK_PCIERCX_HDR_LOG4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_HDR_LOG4(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x128ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_HDR_LOG4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_HDR_LOG4(a) bdk_pciercx_hdr_log4_t
+#define bustype_BDK_PCIERCX_HDR_LOG4(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_HDR_LOG4(a) "PCIERCX_HDR_LOG4"
+#define busnum_BDK_PCIERCX_HDR_LOG4(a) (a)
+#define arguments_BDK_PCIERCX_HDR_LOG4(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_id
+ *
+ * PCIe RC Device ID and Vendor ID Register (PCI config space offset 0x0)
+ */
+union bdk_pciercx_id
+{
+ uint32_t u;
+ struct bdk_pciercx_id_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t devid : 16; /**< [ 31: 16](RO/WRSL) Device ID for PCIERC, writable through PEM()_CFG_WR.
+ Firmware must configure this field prior to starting the link.
+ _ \<15:8\> is typically set to the appropriate chip number, from the
+ FUS_FUSE_NUM_E::CHIP_TYPE() fuses, and as enumerated by PCC_PROD_E::CNXXXX.
+ _ \<7:0\> is typically set to PCC_DEV_IDL_E::PCIERC. */
+ uint32_t vendid : 16; /**< [ 15: 0](RO/WRSL) Vendor ID, writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t vendid : 16; /**< [ 15: 0](RO/WRSL) Vendor ID, writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t devid : 16; /**< [ 31: 16](RO/WRSL) Device ID for PCIERC, writable through PEM()_CFG_WR.
+ Firmware must configure this field prior to starting the link.
+ _ \<15:8\> is typically set to the appropriate chip number, from the
+ FUS_FUSE_NUM_E::CHIP_TYPE() fuses, and as enumerated by PCC_PROD_E::CNXXXX.
+ _ \<7:0\> is typically set to PCC_DEV_IDL_E::PCIERC. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_id_s cn; */
+};
+typedef union bdk_pciercx_id bdk_pciercx_id_t;
+
+static inline uint64_t BDK_PCIERCX_ID(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ID(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_ID", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_ID(a) bdk_pciercx_id_t
+#define bustype_BDK_PCIERCX_ID(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ID(a) "PCIERCX_ID"
+#define busnum_BDK_PCIERCX_ID(a) (a)
+#define arguments_BDK_PCIERCX_ID(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_int
+ *
+ * PCIe RC Interrupt Line Register/Interrupt Pin/Bridge Control Register (PCI config space offset 0x3c)
+ */
+union bdk_pciercx_int
+{
+ uint32_t u;
+ struct bdk_pciercx_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t dtsees : 1; /**< [ 27: 27](RO) Discard timer SERR enable status. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t dts : 1; /**< [ 26: 26](RO) Discard timer status. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t sdt : 1; /**< [ 25: 25](RO) Secondary discard timer. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t pdt : 1; /**< [ 24: 24](RO) Primary discard timer. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t fbbe : 1; /**< [ 23: 23](RO) Fast back-to-back transactions enable. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t sbrst : 1; /**< [ 22: 22](R/W) Secondary bus reset. Hot reset. Causes TS1s with the hot reset bit to be sent to the link
+ partner. When set, software should wait 2 ms before clearing. The link partner normally
+ responds by sending TS1s with the hot reset bit set, which will cause a link down event.
+ Refer to 'PCIe Link-Down Reset in RC Mode' section. */
+ uint32_t mam : 1; /**< [ 21: 21](RO) Master abort mode. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t vga16d : 1; /**< [ 20: 20](RO) VGA 16-bit decode. */
+ uint32_t vgae : 1; /**< [ 19: 19](RO) VGA enable. */
+ uint32_t isae : 1; /**< [ 18: 18](R/W) ISA enable. */
+ uint32_t see : 1; /**< [ 17: 17](R/W) SERR enable. */
+ uint32_t pere : 1; /**< [ 16: 16](R/W) Parity error response enable. */
+ uint32_t inta : 8; /**< [ 15: 8](RO/WRSL) Interrupt pin. Identifies the legacy interrupt message that the device (or device
+ function) uses. The interrupt pin register is writable through PEM()_CFG_WR. */
+ uint32_t il : 8; /**< [ 7: 0](R/W) Interrupt line. */
+#else /* Word 0 - Little Endian */
+ uint32_t il : 8; /**< [ 7: 0](R/W) Interrupt line. */
+ uint32_t inta : 8; /**< [ 15: 8](RO/WRSL) Interrupt pin. Identifies the legacy interrupt message that the device (or device
+ function) uses. The interrupt pin register is writable through PEM()_CFG_WR. */
+ uint32_t pere : 1; /**< [ 16: 16](R/W) Parity error response enable. */
+ uint32_t see : 1; /**< [ 17: 17](R/W) SERR enable. */
+ uint32_t isae : 1; /**< [ 18: 18](R/W) ISA enable. */
+ uint32_t vgae : 1; /**< [ 19: 19](RO) VGA enable. */
+ uint32_t vga16d : 1; /**< [ 20: 20](RO) VGA 16-bit decode. */
+ uint32_t mam : 1; /**< [ 21: 21](RO) Master abort mode. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t sbrst : 1; /**< [ 22: 22](R/W) Secondary bus reset. Hot reset. Causes TS1s with the hot reset bit to be sent to the link
+ partner. When set, software should wait 2 ms before clearing. The link partner normally
+ responds by sending TS1s with the hot reset bit set, which will cause a link down event.
+ Refer to 'PCIe Link-Down Reset in RC Mode' section. */
+ uint32_t fbbe : 1; /**< [ 23: 23](RO) Fast back-to-back transactions enable. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t pdt : 1; /**< [ 24: 24](RO) Primary discard timer. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t sdt : 1; /**< [ 25: 25](RO) Secondary discard timer. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t dts : 1; /**< [ 26: 26](RO) Discard timer status. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t dtsees : 1; /**< [ 27: 27](RO) Discard timer SERR enable status. Not applicable to PCI Express, hardwired to zero. */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_int_s cn; */
+};
+typedef union bdk_pciercx_int bdk_pciercx_int_t;
+
+static inline uint64_t BDK_PCIERCX_INT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_INT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_INT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_INT(a) bdk_pciercx_int_t
+#define bustype_BDK_PCIERCX_INT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_INT(a) "PCIERCX_INT"
+#define busnum_BDK_PCIERCX_INT(a) (a)
+#define arguments_BDK_PCIERCX_INT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_iobasel
+ *
+ * PCIe RC I/O Base and I/O Limit/Secondary Status Register
+ */
+union bdk_pciercx_iobasel
+{
+ uint32_t u;
+ struct bdk_pciercx_iobasel_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dpe : 1; /**< [ 31: 31](R/W1C/H) Detected parity error. */
+ uint32_t sse : 1; /**< [ 30: 30](R/W1C/H) Signaled system error. */
+ uint32_t rma : 1; /**< [ 29: 29](R/W1C/H) Received master abort. */
+ uint32_t rta : 1; /**< [ 28: 28](R/W1C/H) Received target abort. */
+ uint32_t sta : 1; /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+ uint32_t devt : 2; /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to zero. */
+ uint32_t mdpe : 1; /**< [ 24: 24](R/W1C/H) Master data parity error. */
+ uint32_t fbb : 1; /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to zero. */
+ uint32_t reserved_22 : 1;
+ uint32_t m66 : 1; /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to zero. */
+ uint32_t reserved_16_20 : 5;
+ uint32_t lio_limi : 4; /**< [ 15: 12](R/W) I/O space limit. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t io32b : 1; /**< [ 8: 8](RO/H) 32-bit I/O space.
+ This is a read-only copy of [IO32A]. */
+ uint32_t lio_base : 4; /**< [ 7: 4](R/W) I/O space base. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t io32a : 1; /**< [ 0: 0](RO/WRSL) 32-bit I/O space.
+ 0 = 16-bit I/O addressing.
+ 1 = 32-bit I/O addressing.
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to [IO32B]. */
+#else /* Word 0 - Little Endian */
+ uint32_t io32a : 1; /**< [ 0: 0](RO/WRSL) 32-bit I/O space.
+ 0 = 16-bit I/O addressing.
+ 1 = 32-bit I/O addressing.
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to [IO32B]. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t lio_base : 4; /**< [ 7: 4](R/W) I/O space base. */
+ uint32_t io32b : 1; /**< [ 8: 8](RO/H) 32-bit I/O space.
+ This is a read-only copy of [IO32A]. */
+ uint32_t reserved_9_11 : 3;
+ uint32_t lio_limi : 4; /**< [ 15: 12](R/W) I/O space limit. */
+ uint32_t reserved_16_20 : 5;
+ uint32_t m66 : 1; /**< [ 21: 21](RO) 66 MHz capable. Not applicable for PCI Express. Hardwired to zero. */
+ uint32_t reserved_22 : 1;
+ uint32_t fbb : 1; /**< [ 23: 23](RO) Fast back-to-back capable. Not applicable for PCI Express. Hardwired to zero. */
+ uint32_t mdpe : 1; /**< [ 24: 24](R/W1C/H) Master data parity error. */
+ uint32_t devt : 2; /**< [ 26: 25](RO) DEVSEL timing. Not applicable for PCI Express. Hardwired to zero. */
+ uint32_t sta : 1; /**< [ 27: 27](R/W1C/H) Signaled target abort. */
+ uint32_t rta : 1; /**< [ 28: 28](R/W1C/H) Received target abort. */
+ uint32_t rma : 1; /**< [ 29: 29](R/W1C/H) Received master abort. */
+ uint32_t sse : 1; /**< [ 30: 30](R/W1C/H) Signaled system error. */
+ uint32_t dpe : 1; /**< [ 31: 31](R/W1C/H) Detected parity error. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_iobasel_s cn; */
+};
+typedef union bdk_pciercx_iobasel bdk_pciercx_iobasel_t;
+
+static inline uint64_t BDK_PCIERCX_IOBASEL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_IOBASEL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_IOBASEL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_IOBASEL(a) bdk_pciercx_iobasel_t
+#define bustype_BDK_PCIERCX_IOBASEL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_IOBASEL(a) "PCIERCX_IOBASEL"
+#define busnum_BDK_PCIERCX_IOBASEL(a) (a)
+#define arguments_BDK_PCIERCX_IOBASEL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_iobaseu
+ *
+ * PCIe RC I/O Base and Limit Upper 16 Bits Register (PCI config space offset 0x30)
+ */
+union bdk_pciercx_iobaseu
+{
+ uint32_t u;
+ struct bdk_pciercx_iobaseu_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t uio_limit : 16; /**< [ 31: 16](R/W) Upper 16 bits of I/O limit (if 32-bit I/O decoding is supported for devices on the secondary side). */
+ uint32_t uio_base : 16; /**< [ 15: 0](R/W) Upper 16 bits of I/O base (if 32-bit I/O decoding is supported for devices on the secondary side). */
+#else /* Word 0 - Little Endian */
+ uint32_t uio_base : 16; /**< [ 15: 0](R/W) Upper 16 bits of I/O base (if 32-bit I/O decoding is supported for devices on the secondary side). */
+ uint32_t uio_limit : 16; /**< [ 31: 16](R/W) Upper 16 bits of I/O limit (if 32-bit I/O decoding is supported for devices on the secondary side). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_iobaseu_s cn; */
+};
+typedef union bdk_pciercx_iobaseu bdk_pciercx_iobaseu_t;
+
+static inline uint64_t BDK_PCIERCX_IOBASEU(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_IOBASEU(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x30ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_IOBASEU", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_IOBASEU(a) bdk_pciercx_iobaseu_t
+#define bustype_BDK_PCIERCX_IOBASEU(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_IOBASEU(a) "PCIERCX_IOBASEU"
+#define busnum_BDK_PCIERCX_IOBASEU(a) (a)
+#define arguments_BDK_PCIERCX_IOBASEU(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_l1_substates
+ *
+ * PCIe RC L1 Substates Timing Register
+ */
+union bdk_pciercx_l1_substates
+{
+ uint32_t u;
+ struct bdk_pciercx_l1_substates_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t l1sub_t_pclkack : 2; /**< [ 7: 6](R/W) Max delay (in 1 us units) between a MAC request to remove
+ the clock on mac_phy_pclkreq_n and a PHY response on
+ phy_mac_pclkack_n. If the PHY does not respond within this
+ time the request is aborted. */
+ uint32_t l1sub_t_l1_2 : 4; /**< [ 5: 2](R/W) Duration (in us) of L1.2. */
+ uint32_t l1sub_t_power_off : 2; /**< [ 1: 0](R/W) Duration (in us) of L1.2 entry. */
+#else /* Word 0 - Little Endian */
+ uint32_t l1sub_t_power_off : 2; /**< [ 1: 0](R/W) Duration (in us) of L1.2 entry. */
+ uint32_t l1sub_t_l1_2 : 4; /**< [ 5: 2](R/W) Duration (in us) of L1.2. */
+ uint32_t l1sub_t_pclkack : 2; /**< [ 7: 6](R/W) Max delay (in 1 us units) between a MAC request to remove
+ the clock on mac_phy_pclkreq_n and a PHY response on
+ phy_mac_pclkack_n. If the PHY does not respond within this
+ time the request is aborted. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_l1_substates_s cn; */
+};
+typedef union bdk_pciercx_l1_substates bdk_pciercx_l1_substates_t;
+
+/* Address helper: config-space offset 0xb44 with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_L1_SUBSTATES(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_L1_SUBSTATES(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xb44ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_L1_SUBSTATES", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_L1_SUBSTATES(a) bdk_pciercx_l1_substates_t
+#define bustype_BDK_PCIERCX_L1_SUBSTATES(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_L1_SUBSTATES(a) "PCIERCX_L1_SUBSTATES"
+#define busnum_BDK_PCIERCX_L1_SUBSTATES(a) (a)
+#define arguments_BDK_PCIERCX_L1_SUBSTATES(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_l1sub_cap
+ *
+ * PCIe RC L1 PM Substates Capability Register
+ */
+union bdk_pciercx_l1sub_cap
+{
+ uint32_t u;
+ struct bdk_pciercx_l1sub_cap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t pwron_val : 5; /**< [ 23: 19](RO/WRSL) Port T power on value.
+ Along with [PWRON_SCALE] sets the time (in us) that this
+ Port requires the port on the opposite side of the Link to
+ wait in L.1.2.Exit after sampling PCI_CLKREQ_L asserted before
+ actively driving the interface. */
+ uint32_t reserved_18 : 1;
+ uint32_t pwron_scale : 2; /**< [ 17: 16](RO/WRSL) Port T power on scale.
+ 0x0 = 2 us.
+ 0x1 = 10 us.
+ 0x2 = 100 us.
+ 0x3 = Reserved. */
+ uint32_t com_md_supp : 8; /**< [ 15: 8](RO/WRSL) Port common mode restore time.
+ Time (in us) required for this Port to reestablish
+ common mode. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t l1_pmsub_sup : 1; /**< [ 4: 4](RO/WRSL) L1 PM substates ECN supported. */
+ uint32_t l1_1_aspm_sup : 1; /**< [ 3: 3](RO/WRSL) ASPM L11 supported. */
+ uint32_t l1_2_aspm_sup : 1; /**< [ 2: 2](RO/WRSL) ASPM L12 supported. */
+ uint32_t l1_1_pcipm_sup : 1; /**< [ 1: 1](RO/WRSL) PCI-PM L11 supported. */
+ uint32_t l1_2_pcipm_sup : 1; /**< [ 0: 0](RO/WRSL) PCI-PM L12 supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t l1_2_pcipm_sup : 1; /**< [ 0: 0](RO/WRSL) PCI-PM L12 supported. */
+ uint32_t l1_1_pcipm_sup : 1; /**< [ 1: 1](RO/WRSL) PCI-PM L11 supported. */
+ uint32_t l1_2_aspm_sup : 1; /**< [ 2: 2](RO/WRSL) ASPM L12 supported. */
+ uint32_t l1_1_aspm_sup : 1; /**< [ 3: 3](RO/WRSL) ASPM L11 supported. */
+ uint32_t l1_pmsub_sup : 1; /**< [ 4: 4](RO/WRSL) L1 PM substates ECN supported. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t com_md_supp : 8; /**< [ 15: 8](RO/WRSL) Port common mode restore time.
+ Time (in us) required for this Port to reestablish
+ common mode. */
+ uint32_t pwron_scale : 2; /**< [ 17: 16](RO/WRSL) Port T power on scale.
+ 0x0 = 2 us.
+ 0x1 = 10 us.
+ 0x2 = 100 us.
+ 0x3 = Reserved. */
+ uint32_t reserved_18 : 1;
+ uint32_t pwron_val : 5; /**< [ 23: 19](RO/WRSL) Port T power on value.
+ Along with [PWRON_SCALE] sets the time (in us) that this
+ Port requires the port on the opposite side of the Link to
+ wait in L.1.2.Exit after sampling PCI_CLKREQ_L asserted before
+ actively driving the interface. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_l1sub_cap_s cn; */
+};
+typedef union bdk_pciercx_l1sub_cap bdk_pciercx_l1sub_cap_t;
+
+/* Address helper: config-space offset 0x304 with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_L1SUB_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_L1SUB_CAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x304ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_L1SUB_CAP", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_L1SUB_CAP(a) bdk_pciercx_l1sub_cap_t
+#define bustype_BDK_PCIERCX_L1SUB_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_L1SUB_CAP(a) "PCIERCX_L1SUB_CAP"
+#define busnum_BDK_PCIERCX_L1SUB_CAP(a) (a)
+#define arguments_BDK_PCIERCX_L1SUB_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_l1sub_cap_hdr
+ *
+ * PCIe RC L1 Substates Capability Header Register
+ */
+union bdk_pciercx_l1sub_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_l1sub_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_l1sub_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_l1sub_cap_hdr bdk_pciercx_l1sub_cap_hdr_t;
+
+/* Address helper: config-space offset 0x300 with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_L1SUB_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_L1SUB_CAP_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x300ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_L1SUB_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_L1SUB_CAP_HDR(a) bdk_pciercx_l1sub_cap_hdr_t
+#define bustype_BDK_PCIERCX_L1SUB_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_L1SUB_CAP_HDR(a) "PCIERCX_L1SUB_CAP_HDR"
+#define busnum_BDK_PCIERCX_L1SUB_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_L1SUB_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_l1sub_ctl1
+ *
+ * PCIe RC L1 Substates Control 1 Register
+ */
+union bdk_pciercx_l1sub_ctl1
+{
+ uint32_t u;
+ struct bdk_pciercx_l1sub_ctl1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t l1_2_th_sca : 3; /**< [ 31: 29](R/W) LTR L12 threshold scale.
+ 0x0 = 1 ns.
+ 0x1 = 32 ns.
+ 0x2 = 1024 ns.
+ 0x3 = 32,768 ns.
+ 0x4 = 1,048,575 ns.
+ 0x5 = 33,554,432 ns.
+ 0x6-7 = Reserved. */
+ uint32_t reserved_26_28 : 3;
+ uint32_t l1_2_th_val : 10; /**< [ 25: 16](R/W) LTR L12 threshold value.
+ Along with [L1_2_TH_SCA], this field indicates the LTR threshold
+ use to determine if entry into L1 results in L1.1 (if enabled) or
+ L1.2 (if enabled). */
+ uint32_t t_com_mode : 8; /**< [ 15: 8](RO/WRSL) Common mode restore time.
+ The value (in us), which must be used by the downstream port
+ for timing the reestablishment of common mode. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t l1_1_aspm_en : 1; /**< [ 3: 3](R/W) ASPM L11 enable. */
+ uint32_t l1_2_aspm_en : 1; /**< [ 2: 2](R/W) ASPM L12 enable. */
+ uint32_t l1_1_pcipm_en : 1; /**< [ 1: 1](R/W) PCI-PM L11 enable. */
+ uint32_t l1_2_pcipm_en : 1; /**< [ 0: 0](R/W) PCI-PM L12 enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t l1_2_pcipm_en : 1; /**< [ 0: 0](R/W) PCI-PM L12 enable. */
+ uint32_t l1_1_pcipm_en : 1; /**< [ 1: 1](R/W) PCI-PM L11 enable. */
+ uint32_t l1_2_aspm_en : 1; /**< [ 2: 2](R/W) ASPM L12 enable. */
+ uint32_t l1_1_aspm_en : 1; /**< [ 3: 3](R/W) ASPM L11 enable. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t t_com_mode : 8; /**< [ 15: 8](RO/WRSL) Common mode restore time.
+ The value (in us), which must be used by the downstream port
+ for timing the reestablishment of common mode. */
+ uint32_t l1_2_th_val : 10; /**< [ 25: 16](R/W) LTR L12 threshold value.
+ Along with [L1_2_TH_SCA], this field indicates the LTR threshold
+ use to determine if entry into L1 results in L1.1 (if enabled) or
+ L1.2 (if enabled). */
+ uint32_t reserved_26_28 : 3;
+ uint32_t l1_2_th_sca : 3; /**< [ 31: 29](R/W) LTR L12 threshold scale.
+ 0x0 = 1 ns.
+ 0x1 = 32 ns.
+ 0x2 = 1024 ns.
+ 0x3 = 32,768 ns.
+ 0x4 = 1,048,575 ns.
+ 0x5 = 33,554,432 ns.
+ 0x6-7 = Reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_l1sub_ctl1_s cn; */
+};
+typedef union bdk_pciercx_l1sub_ctl1 bdk_pciercx_l1sub_ctl1_t;
+
+/* Address helper: config-space offset 0x308 with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_L1SUB_CTL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_L1SUB_CTL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x308ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_L1SUB_CTL1", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_L1SUB_CTL1(a) bdk_pciercx_l1sub_ctl1_t
+#define bustype_BDK_PCIERCX_L1SUB_CTL1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_L1SUB_CTL1(a) "PCIERCX_L1SUB_CTL1"
+#define busnum_BDK_PCIERCX_L1SUB_CTL1(a) (a)
+#define arguments_BDK_PCIERCX_L1SUB_CTL1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_l1sub_ctl2
+ *
+ * PCIe RC L1 Substates Control 2 Register
+ */
+union bdk_pciercx_l1sub_ctl2
+{
+ uint32_t u;
+ struct bdk_pciercx_l1sub_ctl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t t_pwr_on_val : 5; /**< [ 7: 3](R/W) T power on value.
+ Along with the [T_PWR_ON_SCA], sets the minimum amount of time (in us)
+ that the Port must wait in L.1.2.Exit after sampling PCI_CLKREQ_L asserted
+ before actively driving the interface. */
+ uint32_t reserved_2 : 1;
+ uint32_t t_pwr_on_sca : 2; /**< [ 1: 0](R/W) T power on scale.
+ 0x0 = 2 us.
+ 0x1 = 10 us.
+ 0x2 = 100 us.
+ 0x3 = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t t_pwr_on_sca : 2; /**< [ 1: 0](R/W) T power on scale.
+ 0x0 = 2 us.
+ 0x1 = 10 us.
+ 0x2 = 100 us.
+ 0x3 = Reserved. */
+ uint32_t reserved_2 : 1;
+ uint32_t t_pwr_on_val : 5; /**< [ 7: 3](R/W) T power on value.
+ Along with the [T_PWR_ON_SCA], sets the minimum amount of time (in us)
+ that the Port must wait in L.1.2.Exit after sampling PCI_CLKREQ_L asserted
+ before actively driving the interface. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_l1sub_ctl2_s cn; */
+};
+typedef union bdk_pciercx_l1sub_ctl2 bdk_pciercx_l1sub_ctl2_t;
+
+/* Address helper: config-space offset 0x30c with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_L1SUB_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_L1SUB_CTL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x30cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_L1SUB_CTL2", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_L1SUB_CTL2(a) bdk_pciercx_l1sub_ctl2_t
+#define bustype_BDK_PCIERCX_L1SUB_CTL2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_L1SUB_CTL2(a) "PCIERCX_L1SUB_CTL2"
+#define busnum_BDK_PCIERCX_L1SUB_CTL2(a) (a)
+#define arguments_BDK_PCIERCX_L1SUB_CTL2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_lane_skew
+ *
+ * PCIe RC Lane Skew Register
+ */
+union bdk_pciercx_lane_skew
+{
+ uint32_t u;
+ struct bdk_pciercx_lane_skew_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dlld : 1; /**< [ 31: 31](R/W) Disable lane-to-lane deskew. Disables the internal lane-to-lane deskew logic. */
+ uint32_t inuml : 4; /**< [ 30: 27](R/W) Implemented number of lanes (minus one). */
+ uint32_t lane_skew : 1; /**< [ 26: 26](R/W) Reserved. */
+ uint32_t ack_nak : 1; /**< [ 25: 25](R/W) ACK/NAK disable. Prevents the PCI Express bus from sending Ack and Nak DLLPs. */
+ uint32_t fcd : 1; /**< [ 24: 24](R/W) Flow control disable. Prevents the PCI Express bus from sending FC DLLPs. */
+ uint32_t ilst : 24; /**< [ 23: 0](R/W) Insert lane skew for transmit (not supported for *16). Causes skew between lanes for test
+ purposes. There are three bits per lane. The value is in units of one symbol time. For
+ example, the value 0x2 for a lane forces a skew of two symbol times for that lane. The
+ maximum skew value for any lane is five symbol times. */
+#else /* Word 0 - Little Endian */
+ uint32_t ilst : 24; /**< [ 23: 0](R/W) Insert lane skew for transmit (not supported for *16). Causes skew between lanes for test
+ purposes. There are three bits per lane. The value is in units of one symbol time. For
+ example, the value 0x2 for a lane forces a skew of two symbol times for that lane. The
+ maximum skew value for any lane is five symbol times. */
+ uint32_t fcd : 1; /**< [ 24: 24](R/W) Flow control disable. Prevents the PCI Express bus from sending FC DLLPs. */
+ uint32_t ack_nak : 1; /**< [ 25: 25](R/W) ACK/NAK disable. Prevents the PCI Express bus from sending Ack and Nak DLLPs. */
+ uint32_t lane_skew : 1; /**< [ 26: 26](R/W) Reserved. */
+ uint32_t inuml : 4; /**< [ 30: 27](R/W) Implemented number of lanes (minus one). */
+ uint32_t dlld : 1; /**< [ 31: 31](R/W) Disable lane-to-lane deskew. Disables the internal lane-to-lane deskew logic. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_lane_skew_s cn; */
+};
+typedef union bdk_pciercx_lane_skew bdk_pciercx_lane_skew_t;
+
+/* Address helper: config-space offset 0x714 with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_LANE_SKEW(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_LANE_SKEW(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x714ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_LANE_SKEW", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_LANE_SKEW(a) bdk_pciercx_lane_skew_t
+#define bustype_BDK_PCIERCX_LANE_SKEW(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_LANE_SKEW(a) "PCIERCX_LANE_SKEW"
+#define busnum_BDK_PCIERCX_LANE_SKEW(a) (a)
+#define arguments_BDK_PCIERCX_LANE_SKEW(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_link_cap
+ *
+ * PCIe RC Link Capabilities Register
+ */
+union bdk_pciercx_link_cap
+{
+ uint32_t u;
+ struct bdk_pciercx_link_cap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pnum : 8; /**< [ 31: 24](RO/WRSL) Port number, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t reserved_23 : 1;
+ uint32_t aspm : 1; /**< [ 22: 22](RO/WRSL) ASPM optionality compliance. */
+ uint32_t lbnc : 1; /**< [ 21: 21](RO/WRSL) Link bandwidth notification capability. */
+ uint32_t dllarc : 1; /**< [ 20: 20](RO) Data link layer active reporting capable. Set to one for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t sderc : 1; /**< [ 19: 19](RO/WRSL) Surprise down error reporting capable. Set to one for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t cpm : 1; /**< [ 18: 18](RO) Clock power management. Set to 0 for root complex devices. */
+ uint32_t l1el : 3; /**< [ 17: 15](RO/WRSL) L1 exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t l0el : 3; /**< [ 14: 12](RO/WRSL) L0s exit latency. The default value is the value that software
+ specifies during hardware configuration, writable through PEM()_CFG_WR. */
+ uint32_t aslpms : 2; /**< [ 11: 10](RO/WRSL) Active state link PM support. Only L1 is supported (L0s not supported).
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t mlw : 6; /**< [ 9: 4](RO/WRSL/H) Maximum link width. Legal encodings are 0x1 (l lane), 0x2 (2 lanes), 0x4 (4 lanes),
+ 0x8 (8 lanes), and 0x10 (16 lanes). Some encodings may not be legal for all PEMs.
+ This field is writable through PEM()_CFG_WR. */
+ uint32_t mls : 4; /**< [ 3: 0](RO/WRSL) Maximum link speed.
+
+ 0x1 = 2.5 GHz supported.
+ 0x2 = 5.0 GHz and 2.5 GHz supported.
+ 0x3 = 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+ 0x4 = 16.0 GHz, 8.0 Ghz, 5.0 GHz, and 2.5 GHz supported.
+
+ This field is writable through PEM()_CFG_WR. */
+#else /* Word 0 - Little Endian */
+ uint32_t mls : 4; /**< [ 3: 0](RO/WRSL) Maximum link speed.
+
+ 0x1 = 2.5 GHz supported.
+ 0x2 = 5.0 GHz and 2.5 GHz supported.
+ 0x3 = 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+ 0x4 = 16.0 GHz, 8.0 Ghz, 5.0 GHz, and 2.5 GHz supported.
+
+ This field is writable through PEM()_CFG_WR. */
+ uint32_t mlw : 6; /**< [ 9: 4](RO/WRSL/H) Maximum link width. Legal encodings are 0x1 (l lane), 0x2 (2 lanes), 0x4 (4 lanes),
+ 0x8 (8 lanes), and 0x10 (16 lanes). Some encodings may not be legal for all PEMs.
+ This field is writable through PEM()_CFG_WR. */
+ uint32_t aslpms : 2; /**< [ 11: 10](RO/WRSL) Active state link PM support. Only L1 is supported (L0s not supported).
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t l0el : 3; /**< [ 14: 12](RO/WRSL) L0s exit latency. The default value is the value that software
+ specifies during hardware configuration, writable through PEM()_CFG_WR. */
+ uint32_t l1el : 3; /**< [ 17: 15](RO/WRSL) L1 exit latency. The default value is the value that software specifies during hardware
+ configuration, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t cpm : 1; /**< [ 18: 18](RO) Clock power management. Set to 0 for root complex devices. */
+ uint32_t sderc : 1; /**< [ 19: 19](RO/WRSL) Surprise down error reporting capable. Set to one for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t dllarc : 1; /**< [ 20: 20](RO) Data link layer active reporting capable. Set to one for root complex devices and 0 for
+ endpoint devices. */
+ uint32_t lbnc : 1; /**< [ 21: 21](RO/WRSL) Link bandwidth notification capability. */
+ uint32_t aspm : 1; /**< [ 22: 22](RO/WRSL) ASPM optionality compliance. */
+ uint32_t reserved_23 : 1;
+ uint32_t pnum : 8; /**< [ 31: 24](RO/WRSL) Port number, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_link_cap_s cn; */
+};
+typedef union bdk_pciercx_link_cap bdk_pciercx_link_cap_t;
+
+/* Address helper: config-space offset 0x7c with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_LINK_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_LINK_CAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x7cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_LINK_CAP", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_LINK_CAP(a) bdk_pciercx_link_cap_t
+#define bustype_BDK_PCIERCX_LINK_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_LINK_CAP(a) "PCIERCX_LINK_CAP"
+#define busnum_BDK_PCIERCX_LINK_CAP(a) (a)
+#define arguments_BDK_PCIERCX_LINK_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_link_cap2
+ *
+ * PCIe RC Link Capabilities 2 Register
+ */
+union bdk_pciercx_link_cap2
+{
+ uint32_t u;
+ struct bdk_pciercx_link_cap2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_25_31 : 7;
+ uint32_t trtds : 1; /**< [ 24: 24](RO/WRSL) Two retimers presence detect supported. */
+ uint32_t rtds : 1; /**< [ 23: 23](RO/WRSL) Retimer presence detect supported. */
+ uint32_t reserved_9_22 : 14;
+ uint32_t cls : 1; /**< [ 8: 8](RO) Crosslink supported. */
+ uint32_t slsv : 7; /**< [ 7: 1](RO/WRSL) Supported link speeds vector. Indicates the supported link speeds of the associated port.
+ For each bit, a value of 1 b indicates that the corresponding link speed is supported;
+ otherwise, the link speed is not supported. Bit definitions are:
+
+ _ Bit \<1\> = 2.5 GT/s.
+ _ Bit \<2\> = 5.0 GT/s.
+ _ Bit \<3\> = 8.0 GT/s.
+ _ Bit \<4\> = 16.0 GT/s
+
+ _ Bits \<7:5\> are reserved. */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t slsv : 7; /**< [ 7: 1](RO/WRSL) Supported link speeds vector. Indicates the supported link speeds of the associated port.
+ For each bit, a value of 1 b indicates that the corresponding link speed is supported;
+ otherwise, the link speed is not supported. Bit definitions are:
+
+ _ Bit \<1\> = 2.5 GT/s.
+ _ Bit \<2\> = 5.0 GT/s.
+ _ Bit \<3\> = 8.0 GT/s.
+ _ Bit \<4\> = 16.0 GT/s
+
+ _ Bits \<7:5\> are reserved. */
+ uint32_t cls : 1; /**< [ 8: 8](RO) Crosslink supported. */
+ uint32_t reserved_9_22 : 14;
+ uint32_t rtds : 1; /**< [ 23: 23](RO/WRSL) Retimer presence detect supported. */
+ uint32_t trtds : 1; /**< [ 24: 24](RO/WRSL) Two retimers presence detect supported. */
+ uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_link_cap2_s cn; */
+};
+typedef union bdk_pciercx_link_cap2 bdk_pciercx_link_cap2_t;
+
+/* Address helper: config-space offset 0x9c with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_LINK_CAP2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_LINK_CAP2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x9cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_LINK_CAP2", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_LINK_CAP2(a) bdk_pciercx_link_cap2_t
+#define bustype_BDK_PCIERCX_LINK_CAP2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_LINK_CAP2(a) "PCIERCX_LINK_CAP2"
+#define busnum_BDK_PCIERCX_LINK_CAP2(a) (a)
+#define arguments_BDK_PCIERCX_LINK_CAP2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_link_ctl
+ *
+ * PCIe RC Link Control/Link Status Register
+ */
+union bdk_pciercx_link_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_link_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lab : 1; /**< [ 31: 31](R/W1C/H) Link autonomous bandwidth status. This bit is set to indicate that hardware has
+ autonomously changed link speed or width, without the port transitioning through DL_Down
+ status, for reasons other than to attempt to correct unreliable link operation. */
+ uint32_t lbm : 1; /**< [ 30: 30](R/W1C/H) Link bandwidth management status. This bit is set to indicate either of the following has
+ occurred without the port transitioning through DL_Down status:
+
+ * A link retraining has completed following a write of 1b to the retrain link bit.
+
+ * Hardware has changed the Link speed or width to attempt to correct unreliable link
+ operation, either through a LTSSM timeout of higher level process. This bit must be set if
+ the physical layer reports a speed or width change was initiated by the downstream
+ component that was not indicated as an autonomous change. */
+ uint32_t dlla : 1; /**< [ 29: 29](RO/H) Data link layer active. */
+ uint32_t scc : 1; /**< [ 28: 28](RO/WRSL) Slot clock configuration. Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default value is the value
+ selected during hardware configuration, writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t lt : 1; /**< [ 27: 27](RO/H) Link training. */
+ uint32_t reserved_26 : 1;
+ uint32_t nlw : 6; /**< [ 25: 20](RO/H) Negotiated link width. Set automatically by hardware after link initialization. Value is
+ undefined when link is not up. */
+ uint32_t ls : 4; /**< [ 19: 16](RO/H) Current link speed. The encoded value specifies a bit location in the supported link
+ speeds vector (in the link capabilities 2 register) that corresponds to the current link
+ speed.
+ 0x1 = Supported link speeds vector field bit 0.
+ 0x2 = Supported link speeds vector field bit 1.
+ 0x3 = Supported link speeds vector field bit 2.
+ 0x4 = Supported link speeds vector field bit 3. */
+ uint32_t drs_ctl : 2; /**< [ 15: 14](RO) DRS signaling control. */
+ uint32_t reserved_12_13 : 2;
+ uint32_t lab_int_enb : 1; /**< [ 11: 11](R/W) Link autonomous bandwidth interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link autonomous bandwidth status bit has been set. */
+ uint32_t lbm_int_enb : 1; /**< [ 10: 10](R/W) Link bandwidth management interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link bandwidth management status bit has been set. */
+ uint32_t hawd : 1; /**< [ 9: 9](R/W) Hardware autonomous width disable. */
+ uint32_t ecpm : 1; /**< [ 8: 8](R/W/H) Enable clock power management. Hardwired to 0 if clock power management is disabled in the
+ link capabilities register. */
+ uint32_t es : 1; /**< [ 7: 7](R/W) Extended synch. */
+ uint32_t ccc : 1; /**< [ 6: 6](R/W) Common clock configuration. */
+ uint32_t rl : 1; /**< [ 5: 5](R/W/H) Retrain link.
+ As per the PCIe specification this bit always reads as zero. */
+ uint32_t ld : 1; /**< [ 4: 4](R/W) Link disable. */
+ uint32_t rcb : 1; /**< [ 3: 3](RO/WRSL) Read completion boundary (RCB), writable through PEM()_CFG_WR.
+ However, the application must not change this field because an RCB of 64
+ bytes is not supported. */
+ uint32_t reserved_2 : 1;
+ uint32_t aslpc : 2; /**< [ 1: 0](R/W) Active state link PM control. */
+#else /* Word 0 - Little Endian */
+ uint32_t aslpc : 2; /**< [ 1: 0](R/W) Active state link PM control. */
+ uint32_t reserved_2 : 1;
+ uint32_t rcb : 1; /**< [ 3: 3](RO/WRSL) Read completion boundary (RCB), writable through PEM()_CFG_WR.
+ However, the application must not change this field because an RCB of 64
+ bytes is not supported. */
+ uint32_t ld : 1; /**< [ 4: 4](R/W) Link disable. */
+ uint32_t rl : 1; /**< [ 5: 5](R/W/H) Retrain link.
+ As per the PCIe specification this bit always reads as zero. */
+ uint32_t ccc : 1; /**< [ 6: 6](R/W) Common clock configuration. */
+ uint32_t es : 1; /**< [ 7: 7](R/W) Extended synch. */
+ uint32_t ecpm : 1; /**< [ 8: 8](R/W/H) Enable clock power management. Hardwired to 0 if clock power management is disabled in the
+ link capabilities register. */
+ uint32_t hawd : 1; /**< [ 9: 9](R/W) Hardware autonomous width disable. */
+ uint32_t lbm_int_enb : 1; /**< [ 10: 10](R/W) Link bandwidth management interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link bandwidth management status bit has been set. */
+ uint32_t lab_int_enb : 1; /**< [ 11: 11](R/W) Link autonomous bandwidth interrupt enable. When set, enables the generation of an
+ interrupt to indicate that the link autonomous bandwidth status bit has been set. */
+ uint32_t reserved_12_13 : 2;
+ uint32_t drs_ctl : 2; /**< [ 15: 14](RO) DRS signaling control. */
+ uint32_t ls : 4; /**< [ 19: 16](RO/H) Current link speed. The encoded value specifies a bit location in the supported link
+ speeds vector (in the link capabilities 2 register) that corresponds to the current link
+ speed.
+ 0x1 = Supported link speeds vector field bit 0.
+ 0x2 = Supported link speeds vector field bit 1.
+ 0x3 = Supported link speeds vector field bit 2.
+ 0x4 = Supported link speeds vector field bit 3. */
+ uint32_t nlw : 6; /**< [ 25: 20](RO/H) Negotiated link width. Set automatically by hardware after link initialization. Value is
+ undefined when link is not up. */
+ uint32_t reserved_26 : 1;
+ uint32_t lt : 1; /**< [ 27: 27](RO/H) Link training. */
+ uint32_t scc : 1; /**< [ 28: 28](RO/WRSL) Slot clock configuration. Indicates that the component uses the same physical reference
+ clock that the platform provides on the connector. The default value is the value
+ selected during hardware configuration, writable through PEM()_CFG_WR. However, the
+ application must not change this field. */
+ uint32_t dlla : 1; /**< [ 29: 29](RO/H) Data link layer active. */
+ uint32_t lbm : 1; /**< [ 30: 30](R/W1C/H) Link bandwidth management status. This bit is set to indicate either of the following has
+ occurred without the port transitioning through DL_Down status:
+
+ * A link retraining has completed following a write of 1b to the retrain link bit.
+
+ * Hardware has changed the Link speed or width to attempt to correct unreliable link
+ operation, either through a LTSSM timeout of higher level process. This bit must be set if
+ the physical layer reports a speed or width change was initiated by the downstream
+ component that was not indicated as an autonomous change. */
+ uint32_t lab : 1; /**< [ 31: 31](R/W1C/H) Link autonomous bandwidth status. This bit is set to indicate that hardware has
+ autonomously changed link speed or width, without the port transitioning through DL_Down
+ status, for reasons other than to attempt to correct unreliable link operation. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_link_ctl_s cn; */
+};
+typedef union bdk_pciercx_link_ctl bdk_pciercx_link_ctl_t;
+
+/* Address helper: config-space offset 0x80 with the controller index (a)
+   in bits <33:32>; valid only for CN9XXX, a <= 3, else fatal. */
+static inline uint64_t BDK_PCIERCX_LINK_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_LINK_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x80ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_LINK_CTL", 1, a, 0, 0, 0);
+}
+
+/* Generic CSR-accessor metadata for this register. */
+#define typedef_BDK_PCIERCX_LINK_CTL(a) bdk_pciercx_link_ctl_t
+#define bustype_BDK_PCIERCX_LINK_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_LINK_CTL(a) "PCIERCX_LINK_CTL"
+#define busnum_BDK_PCIERCX_LINK_CTL(a) (a)
+#define arguments_BDK_PCIERCX_LINK_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_link_ctl2
+ *
+ * PCIe RC Link Control 2 Register/Link Status 2 Register
+ */
+/* NOTE(review): auto-generated layout. The two #if branches below are
+   deliberate big-/little-endian mirrors of the same 32-bit word; if this
+   file is ever regenerated, keep field order and widths in sync across
+   both branches -- never hand-edit one branch only. */
+union bdk_pciercx_link_ctl2
+{
+ uint32_t u;
+ struct bdk_pciercx_link_ctl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t drs_mr : 1; /**< [ 31: 31](R/W1C) DRS message received. */
+ uint32_t dcp : 3; /**< [ 30: 28](RO) Downstream component presence. */
+ uint32_t reserved_26_27 : 2;
+ uint32_t crossl : 2; /**< [ 25: 24](RO) Crosslink resolution (not supported). */
+ uint32_t trtd : 1; /**< [ 23: 23](RO) Two retimers presence detected. */
+ uint32_t rtd : 1; /**< [ 22: 22](RO) Retimer presence detected. */
+ uint32_t ler : 1; /**< [ 21: 21](R/W1C/H) Link equalization request 8.0 GT/s. */
+ uint32_t ep3s : 1; /**< [ 20: 20](RO/H) Equalization 8.0 GT/s phase 3 successful. */
+ uint32_t ep2s : 1; /**< [ 19: 19](RO/H) Equalization 8.0 GT/s phase 2 successful. */
+ uint32_t ep1s : 1; /**< [ 18: 18](RO/H) Equalization 8.0 GT/s phase 1 successful. */
+ uint32_t eqc : 1; /**< [ 17: 17](RO/H) Equalization 8.0 GT/s complete. */
+ uint32_t cdl : 1; /**< [ 16: 16](RO/H) Current deemphasis level. When the link is operating at 5 GT/s speed, this bit reflects
+ the level of deemphasis.
+ 0 = -6 dB.
+ 1 = -3.5 dB.
+
+ The value in this bit is undefined when the link is operating at 2.5 GT/s speed. */
+ uint32_t cde : 4; /**< [ 15: 12](R/W) Compliance deemphasis. This bit sets the deemphasis level in Polling.Compliance state if
+ the entry occurred due to the TX compliance receive bit being one.
+ 0x0 = -6 dB.
+ 0x1 = -3.5 dB.
+
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t csos : 1; /**< [ 11: 11](R/W) Compliance SOS. When set to one, the LTSSM is required to send SKP ordered sets periodically
+ in between the (modified) compliance patterns.
+
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t emc : 1; /**< [ 10: 10](R/W) Enter modified compliance. When this bit is set to one, the device transmits a modified
+ compliance pattern if the LTSSM enters Polling.Compliance state. */
+ uint32_t tm : 3; /**< [ 9: 7](R/W/H) Transmit margin. This field controls the value of the non-deemphasized voltage level at
+ the transmitter pins:
+ 0x0 = 800-1200 mV for full swing 400-600 mV for half-swing.
+ 0x1-0x2 = Values must be monotonic with a nonzero slope.
+ 0x3 = 200-400 mV for full-swing and 100-200 mV for half-swing.
+ 0x4-0x7 = Reserved.
+
+ This field is reset to 0x0 on entry to the LTSSM Polling.Compliance substate. When
+ operating in 5.0 GT/s mode with full swing, the deemphasis ratio must be maintained within
+ +/- 1 dB from the specification-defined operational value either -3.5 or -6 dB. */
+ uint32_t sde : 1; /**< [ 6: 6](RO/WRSL) Selectable deemphasis. When the link is operating at 5.0 GT/s speed, selects the level of
+ deemphasis on the downstream device. Must be set prior to link training.
+ 0 = -6 dB.
+ 1 = -3.5 dB.
+
+ When the link is operating at 2.5 GT/s speed, the setting of this bit has no effect.
+
+ PCIERC_GEN2_PORT[S_D_E] can be used to change the deemphasis on the upstream ports. */
+ uint32_t hasd : 1; /**< [ 5: 5](R/W) Hardware autonomous speed disable. When asserted, the application must disable hardware
+ from changing the link speed for device-specific reasons other than attempting to correct
+ unreliable link operation by reducing link speed. Initial transition to the highest
+ supported common link speed is not blocked by this signal. */
+ uint32_t ec : 1; /**< [ 4: 4](R/W) Enter compliance. Software is permitted to force a link to enter compliance mode at the
+ speed indicated in the target link speed field by setting this bit to one in both components
+ on a link and then initiating a hot reset on the link. */
+ uint32_t tls : 4; /**< [ 3: 0](R/W) Target link speed. For downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by the upstream component in its
+ training sequences:
+
+ 0x1 = 2.5 Gb/s target link speed.
+ 0x2 = 5 Gb/s target link speed.
+ 0x3 = 8 Gb/s target link speed.
+ 0x4 = 16 Gb/s target link speed.
+
+ All other encodings are reserved.
+
+ If a value is written to this field that does not correspond to a speed included in the
+ supported link speeds field, the result is undefined. For both upstream and downstream
+ ports, this field is used to set the target compliance mode speed when software is using
+ the enter compliance bit to force a link into compliance mode.
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+#else /* Word 0 - Little Endian */
+ uint32_t tls : 4; /**< [ 3: 0](R/W) Target link speed. For downstream ports, this field sets an upper limit on link
+ operational speed by restricting the values advertised by the upstream component in its
+ training sequences:
+
+ 0x1 = 2.5 Gb/s target link speed.
+ 0x2 = 5 Gb/s target link speed.
+ 0x3 = 8 Gb/s target link speed.
+ 0x4 = 16 Gb/s target link speed.
+
+ All other encodings are reserved.
+
+ If a value is written to this field that does not correspond to a speed included in the
+ supported link speeds field, the result is undefined. For both upstream and downstream
+ ports, this field is used to set the target compliance mode speed when software is using
+ the enter compliance bit to force a link into compliance mode.
+
+ _ MD is 0x0, reset to 0x1: 2.5 GHz supported.
+
+ _ MD is 0x1, reset to 0x2: 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x2, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported.
+
+ _ MD is 0x3, reset to 0x3: 8.0 GHz, 5.0 GHz and 2.5 GHz supported (RC Mode). */
+ uint32_t ec : 1; /**< [ 4: 4](R/W) Enter compliance. Software is permitted to force a link to enter compliance mode at the
+ speed indicated in the target link speed field by setting this bit to one in both components
+ on a link and then initiating a hot reset on the link. */
+ uint32_t hasd : 1; /**< [ 5: 5](R/W) Hardware autonomous speed disable. When asserted, the application must disable hardware
+ from changing the link speed for device-specific reasons other than attempting to correct
+ unreliable link operation by reducing link speed. Initial transition to the highest
+ supported common link speed is not blocked by this signal. */
+ uint32_t sde : 1; /**< [ 6: 6](RO/WRSL) Selectable deemphasis. When the link is operating at 5.0 GT/s speed, selects the level of
+ deemphasis on the downstream device. Must be set prior to link training.
+ 0 = -6 dB.
+ 1 = -3.5 dB.
+
+ When the link is operating at 2.5 GT/s speed, the setting of this bit has no effect.
+
+ PCIERC_GEN2_PORT[S_D_E] can be used to change the deemphasis on the upstream ports. */
+ uint32_t tm : 3; /**< [ 9: 7](R/W/H) Transmit margin. This field controls the value of the non-deemphasized voltage level at
+ the transmitter pins:
+ 0x0 = 800-1200 mV for full swing 400-600 mV for half-swing.
+ 0x1-0x2 = Values must be monotonic with a nonzero slope.
+ 0x3 = 200-400 mV for full-swing and 100-200 mV for half-swing.
+ 0x4-0x7 = Reserved.
+
+ This field is reset to 0x0 on entry to the LTSSM Polling.Compliance substate. When
+ operating in 5.0 GT/s mode with full swing, the deemphasis ratio must be maintained within
+ +/- 1 dB from the specification-defined operational value either -3.5 or -6 dB. */
+ uint32_t emc : 1; /**< [ 10: 10](R/W) Enter modified compliance. When this bit is set to one, the device transmits a modified
+ compliance pattern if the LTSSM enters Polling.Compliance state. */
+ uint32_t csos : 1; /**< [ 11: 11](R/W) Compliance SOS. When set to one, the LTSSM is required to send SKP ordered sets periodically
+ in between the (modified) compliance patterns.
+
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t cde : 4; /**< [ 15: 12](R/W) Compliance deemphasis. This bit sets the deemphasis level in Polling.Compliance state if
+ the entry occurred due to the TX compliance receive bit being one.
+ 0x0 = -6 dB.
+ 0x1 = -3.5 dB.
+
+ When the link is operating at 2.5 GT/s, the setting of this bit has no effect. */
+ uint32_t cdl : 1; /**< [ 16: 16](RO/H) Current deemphasis level. When the link is operating at 5 GT/s speed, this bit reflects
+ the level of deemphasis.
+ 0 = -6 dB.
+ 1 = -3.5 dB.
+
+ The value in this bit is undefined when the link is operating at 2.5 GT/s speed. */
+ uint32_t eqc : 1; /**< [ 17: 17](RO/H) Equalization 8.0 GT/s complete. */
+ uint32_t ep1s : 1; /**< [ 18: 18](RO/H) Equalization 8.0 GT/s phase 1 successful. */
+ uint32_t ep2s : 1; /**< [ 19: 19](RO/H) Equalization 8.0 GT/s phase 2 successful. */
+ uint32_t ep3s : 1; /**< [ 20: 20](RO/H) Equalization 8.0 GT/s phase 3 successful. */
+ uint32_t ler : 1; /**< [ 21: 21](R/W1C/H) Link equalization request 8.0 GT/s. */
+ uint32_t rtd : 1; /**< [ 22: 22](RO) Retimer presence detected. */
+ uint32_t trtd : 1; /**< [ 23: 23](RO) Two retimers presence detected. */
+ uint32_t crossl : 2; /**< [ 25: 24](RO) Crosslink resolution (not supported). */
+ uint32_t reserved_26_27 : 2;
+ uint32_t dcp : 3; /**< [ 30: 28](RO) Downstream component presence. */
+ uint32_t drs_mr : 1; /**< [ 31: 31](R/W1C) DRS message received. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_link_ctl2_s cn; */
+};
+typedef union bdk_pciercx_link_ctl2 bdk_pciercx_link_ctl2_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_LINK_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_LINK_CTL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xa0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_LINK_CTL2", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_LINK_CTL2(a) bdk_pciercx_link_ctl2_t
+#define bustype_BDK_PCIERCX_LINK_CTL2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_LINK_CTL2(a) "PCIERCX_LINK_CTL2"
+#define busnum_BDK_PCIERCX_LINK_CTL2(a) (a)
+#define arguments_BDK_PCIERCX_LINK_CTL2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_link_ctl3
+ *
+ * PCIe RC Link Control 3 Register
+ */
+/* NOTE(review): auto-generated layout; the #if branches are deliberate
+   endian mirrors of the same word -- keep them in sync, never hand-edit
+   one branch only. */
+union bdk_pciercx_link_ctl3
+{
+ uint32_t u;
+ struct bdk_pciercx_link_ctl3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t ler : 1; /**< [ 1: 1](RO/WRSL) Link equalization request interrupt enable. */
+ uint32_t pe : 1; /**< [ 0: 0](RO/WRSL) Perform equalization. */
+#else /* Word 0 - Little Endian */
+ uint32_t pe : 1; /**< [ 0: 0](RO/WRSL) Perform equalization. */
+ uint32_t ler : 1; /**< [ 1: 1](RO/WRSL) Link equalization request interrupt enable. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_link_ctl3_s cn; */
+};
+typedef union bdk_pciercx_link_ctl3 bdk_pciercx_link_ctl3_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_LINK_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_LINK_CTL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x17cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_LINK_CTL3", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_LINK_CTL3(a) bdk_pciercx_link_ctl3_t
+#define bustype_BDK_PCIERCX_LINK_CTL3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_LINK_CTL3(a) "PCIERCX_LINK_CTL3"
+#define busnum_BDK_PCIERCX_LINK_CTL3(a) (a)
+#define arguments_BDK_PCIERCX_LINK_CTL3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_link_err_status
+ *
+ * Lane Error Status Register
+ */
+/* NOTE(review): auto-generated layout; the #if branches are deliberate
+   endian mirrors of the same word -- keep them in sync. */
+union bdk_pciercx_link_err_status
+{
+ uint32_t u;
+ struct bdk_pciercx_link_err_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t les : 16; /**< [ 15: 0](R/W1C) Lane error status bits. */
+#else /* Word 0 - Little Endian */
+ uint32_t les : 16; /**< [ 15: 0](R/W1C) Lane error status bits. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_link_err_status_s cn; */
+};
+typedef union bdk_pciercx_link_err_status bdk_pciercx_link_err_status_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_LINK_ERR_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_LINK_ERR_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x180ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_LINK_ERR_STATUS", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_LINK_ERR_STATUS(a) bdk_pciercx_link_err_status_t
+#define bustype_BDK_PCIERCX_LINK_ERR_STATUS(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_LINK_ERR_STATUS(a) "PCIERCX_LINK_ERR_STATUS"
+#define busnum_BDK_PCIERCX_LINK_ERR_STATUS(a) (a)
+#define arguments_BDK_PCIERCX_LINK_ERR_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_margin_ext_cap_hdr
+ *
+ * PCIe RC Margining Extended Capability Header Register
+ */
+/* NOTE(review): auto-generated layout; the #if branches are deliberate
+   endian mirrors of the same word -- keep them in sync. */
+union bdk_pciercx_margin_ext_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_margin_ext_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCIE Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCIE Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_margin_ext_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_margin_ext_cap_hdr bdk_pciercx_margin_ext_cap_hdr_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_MARGIN_EXT_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MARGIN_EXT_CAP_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1d8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MARGIN_EXT_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_MARGIN_EXT_CAP_HDR(a) bdk_pciercx_margin_ext_cap_hdr_t
+#define bustype_BDK_PCIERCX_MARGIN_EXT_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MARGIN_EXT_CAP_HDR(a) "PCIERCX_MARGIN_EXT_CAP_HDR"
+#define busnum_BDK_PCIERCX_MARGIN_EXT_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_MARGIN_EXT_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mem
+ *
+ * PCIe RC Memory Base and Memory Limit Register
+ */
+/* NOTE(review): auto-generated layout; the #if branches are deliberate
+   endian mirrors of the same word -- keep them in sync. */
+union bdk_pciercx_mem
+{
+ uint32_t u;
+ struct bdk_pciercx_mem_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ml_addr : 12; /**< [ 31: 20](R/W) Memory limit address. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t mb_addr : 12; /**< [ 15: 4](R/W) Memory base address. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t mb_addr : 12; /**< [ 15: 4](R/W) Memory base address. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t ml_addr : 12; /**< [ 31: 20](R/W) Memory limit address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mem_s cn; */
+};
+typedef union bdk_pciercx_mem bdk_pciercx_mem_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_MEM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MEM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x20ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MEM", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_MEM(a) bdk_pciercx_mem_t
+#define bustype_BDK_PCIERCX_MEM(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MEM(a) "PCIERCX_MEM"
+#define busnum_BDK_PCIERCX_MEM(a) (a)
+#define arguments_BDK_PCIERCX_MEM(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_misc_ctl1
+ *
+ * PCIe RC Miscellaneous Control 1 Register
+ */
+/* NOTE(review): auto-generated layout; the #if branches are deliberate
+   endian mirrors of the same word -- keep them in sync. */
+union bdk_pciercx_misc_ctl1
+{
+ uint32_t u;
+ struct bdk_pciercx_misc_ctl1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_6_31 : 26;
+ uint32_t ari_devn : 1; /**< [ 5: 5](R/W) When ARI is enabled, enables use of the device ID. */
+ uint32_t dis_auto_ltr_clr : 1; /**< [ 4: 4](R/W) Disable the autonomous generation of LTR clear message in upstream port.
+ 0 = Allow the autonomous generation of LTR clear message.
+ 1 = Disable the autonomous generation of LTR clear message. */
+ uint32_t simp_replay_timer : 1; /**< [ 3: 3](R/W) Enables Simplified Replay Timer (Gen4). Simplified replay timer values are:
+
+ A value from 24,000 to 31,000 symbol times when extended synch is 0.
+ A value from 80,000 to 100,000 symbol times when extended synch is 1. */
+ uint32_t ur_c4_mask_4_trgt1 : 1; /**< [ 2: 2](R/W) This field only applies to request TLPs (with UR filtering status) that are
+ chosen to forward to the application (when [DEF_TARGET] is set).
+
+ When set, the core suppresses error logging, error message generation, and CPL
+ generation (for non-posted requests). */
+ uint32_t def_target : 1; /**< [ 1: 1](R/W) Default target a received IO or MEM request with UR/CA/CRS
+ is sent to be the controller.
+ 0x0 = The controller drops all incoming I/O or Mem (after
+ corresponding error reporting). A completion with
+ UR status will be generated for non-posted requests.
+ 0x1 = The controller forwards all incoming I/O or MEM
+ requests with UR/CA/CRS status to your application. */
+ uint32_t dbi_ro_wr_en : 1; /**< [ 0: 0](R/W) Write to RO registers using DBI. When you set this bit, then some
+ RO bits are writable from the DBI. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbi_ro_wr_en : 1; /**< [ 0: 0](R/W) Write to RO registers using DBI. When you set this bit, then some
+ RO bits are writable from the DBI. */
+ uint32_t def_target : 1; /**< [ 1: 1](R/W) Default target a received IO or MEM request with UR/CA/CRS
+ is sent to be the controller.
+ 0x0 = The controller drops all incoming I/O or Mem (after
+ corresponding error reporting). A completion with
+ UR status will be generated for non-posted requests.
+ 0x1 = The controller forwards all incoming I/O or MEM
+ requests with UR/CA/CRS status to your application. */
+ uint32_t ur_c4_mask_4_trgt1 : 1; /**< [ 2: 2](R/W) This field only applies to request TLPs (with UR filtering status) that are
+ chosen to forward to the application (when [DEF_TARGET] is set).
+
+ When set, the core suppresses error logging, error message generation, and CPL
+ generation (for non-posted requests). */
+ uint32_t simp_replay_timer : 1; /**< [ 3: 3](R/W) Enables Simplified Replay Timer (Gen4). Simplified replay timer values are:
+
+ A value from 24,000 to 31,000 symbol times when extended synch is 0.
+ A value from 80,000 to 100,000 symbol times when extended synch is 1. */
+ uint32_t dis_auto_ltr_clr : 1; /**< [ 4: 4](R/W) Disable the autonomous generation of LTR clear message in upstream port.
+ 0 = Allow the autonomous generation of LTR clear message.
+ 1 = Disable the autonomous generation of LTR clear message. */
+ uint32_t ari_devn : 1; /**< [ 5: 5](R/W) When ARI is enabled, enables use of the device ID. */
+ uint32_t reserved_6_31 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_misc_ctl1_s cn; */
+};
+typedef union bdk_pciercx_misc_ctl1 bdk_pciercx_misc_ctl1_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_MISC_CTL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MISC_CTL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8bcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MISC_CTL1", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_MISC_CTL1(a) bdk_pciercx_misc_ctl1_t
+#define bustype_BDK_PCIERCX_MISC_CTL1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MISC_CTL1(a) "PCIERCX_MISC_CTL1"
+#define busnum_BDK_PCIERCX_MISC_CTL1(a) (a)
+#define arguments_BDK_PCIERCX_MISC_CTL1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat0
+ *
+ * PCIe RC Margining Lane Control and Status Register 0
+ */
+/* NOTE(review): auto-generated layout (one instance per lane); the #if
+   branches are deliberate endian mirrors of the same word -- keep them in
+   sync. */
+union bdk_pciercx_mrg_lane_ctl_stat0
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat0_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat0 bdk_pciercx_mrg_lane_ctl_stat0_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1e0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT0", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT0(a) bdk_pciercx_mrg_lane_ctl_stat0_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT0(a) "PCIERCX_MRG_LANE_CTL_STAT0"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT0(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat1
+ *
+ * PCIe RC Margining Lane Control and Status Register 1
+ */
+/* NOTE(review): auto-generated layout (one instance per lane); the #if
+   branches are deliberate endian mirrors of the same word -- keep them in
+   sync. */
+union bdk_pciercx_mrg_lane_ctl_stat1
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat1_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat1 bdk_pciercx_mrg_lane_ctl_stat1_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1e4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT1", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT1(a) bdk_pciercx_mrg_lane_ctl_stat1_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT1(a) "PCIERCX_MRG_LANE_CTL_STAT1"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT1(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat10
+ *
+ * PCIe RC Margining Lane Control and Status Register 10
+ */
+/* NOTE(review): auto-generated layout (one instance per lane); the #if
+   branches are deliberate endian mirrors of the same word -- keep them in
+   sync. */
+union bdk_pciercx_mrg_lane_ctl_stat10
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat10_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat10_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat10 bdk_pciercx_mrg_lane_ctl_stat10_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT10(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT10(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x208ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT10", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT10(a) bdk_pciercx_mrg_lane_ctl_stat10_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT10(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT10(a) "PCIERCX_MRG_LANE_CTL_STAT10"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT10(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT10(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat11
+ *
+ * PCIe RC Margining Lane Control and Status Register 11
+ */
+/* NOTE(review): auto-generated layout (one instance per lane); the #if
+   branches are deliberate endian mirrors of the same word -- keep them in
+   sync. */
+union bdk_pciercx_mrg_lane_ctl_stat11
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat11_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat11_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat11 bdk_pciercx_mrg_lane_ctl_stat11_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT11(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT11(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x20cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT11", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT11(a) bdk_pciercx_mrg_lane_ctl_stat11_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT11(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT11(a) "PCIERCX_MRG_LANE_CTL_STAT11"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT11(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT11(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat12
+ *
+ * PCIe RC Margining Lane Control and Status Register 12
+ */
+/* NOTE(review): auto-generated layout (one instance per lane); the #if
+   branches are deliberate endian mirrors of the same word -- keep them in
+   sync. */
+union bdk_pciercx_mrg_lane_ctl_stat12
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat12_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat12 bdk_pciercx_mrg_lane_ctl_stat12_t;
+
+/* NOTE(review): valid only for CN9XXX with a <= 3; otherwise falls through
+   to __bdk_csr_fatal() (presumably noreturn -- confirm its attribute). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT12(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT12(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x210ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT12", 1, a, 0, 0, 0);
+}
+
+/* Companion macros consumed by the BDK_CSR_* access framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT12(a) bdk_pciercx_mrg_lane_ctl_stat12_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT12(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT12(a) "PCIERCX_MRG_LANE_CTL_STAT12"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT12(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT12(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat13
+ *
+ * PCIe RC Margining Lane Control and Status Register 13
+ */
+union bdk_pciercx_mrg_lane_ctl_stat13
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat13_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat13_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat13 bdk_pciercx_mrg_lane_ctl_stat13_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT13(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT13(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x214ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT13", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT13(a) bdk_pciercx_mrg_lane_ctl_stat13_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT13(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT13(a) "PCIERCX_MRG_LANE_CTL_STAT13"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT13(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT13(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat14
+ *
+ * PCIe RC Margining Lane Control and Status Register 14
+ */
+union bdk_pciercx_mrg_lane_ctl_stat14
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat14_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat14_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat14 bdk_pciercx_mrg_lane_ctl_stat14_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT14(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT14(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x218ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT14", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT14(a) bdk_pciercx_mrg_lane_ctl_stat14_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT14(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT14(a) "PCIERCX_MRG_LANE_CTL_STAT14"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT14(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT14(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat15
+ *
+ * PCIe RC Margining Lane Control and Status Register 15
+ */
+union bdk_pciercx_mrg_lane_ctl_stat15
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat15_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat15_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat15 bdk_pciercx_mrg_lane_ctl_stat15_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT15(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT15(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x21cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT15", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT15(a) bdk_pciercx_mrg_lane_ctl_stat15_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT15(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT15(a) "PCIERCX_MRG_LANE_CTL_STAT15"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT15(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT15(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat2
+ *
+ * PCIe RC Margining Lane Control and Status Register 2
+ */
+union bdk_pciercx_mrg_lane_ctl_stat2
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat2_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat2 bdk_pciercx_mrg_lane_ctl_stat2_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1e8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT2", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT2(a) bdk_pciercx_mrg_lane_ctl_stat2_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT2(a) "PCIERCX_MRG_LANE_CTL_STAT2"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT2(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat3
+ *
+ * PCIe RC Margining Lane Control and Status Register 3
+ */
+union bdk_pciercx_mrg_lane_ctl_stat3
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat3_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat3 bdk_pciercx_mrg_lane_ctl_stat3_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1ecll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT3", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT3(a) bdk_pciercx_mrg_lane_ctl_stat3_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT3(a) "PCIERCX_MRG_LANE_CTL_STAT3"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT3(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat4
+ *
+ * PCIe RC Margining Lane Control and Status Register 4
+ */
+union bdk_pciercx_mrg_lane_ctl_stat4
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat4_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat4 bdk_pciercx_mrg_lane_ctl_stat4_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1f0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT4", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT4(a) bdk_pciercx_mrg_lane_ctl_stat4_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT4(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT4(a) "PCIERCX_MRG_LANE_CTL_STAT4"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT4(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT4(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat5
+ *
+ * PCIe RC Margining Lane Control and Status Register 5
+ */
+union bdk_pciercx_mrg_lane_ctl_stat5
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat5_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat5 bdk_pciercx_mrg_lane_ctl_stat5_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT5(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1f4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT5", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT5(a) bdk_pciercx_mrg_lane_ctl_stat5_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT5(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT5(a) "PCIERCX_MRG_LANE_CTL_STAT5"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT5(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT5(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat6
+ *
+ * PCIe RC Margining Lane Control and Status Register 6
+ */
+union bdk_pciercx_mrg_lane_ctl_stat6
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat6_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat6_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat6 bdk_pciercx_mrg_lane_ctl_stat6_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT6(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT6(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1f8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT6", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT6(a) bdk_pciercx_mrg_lane_ctl_stat6_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT6(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT6(a) "PCIERCX_MRG_LANE_CTL_STAT6"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT6(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT6(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat7
+ *
+ * PCIe RC Margining Lane Control and Status Register 7
+ */
+union bdk_pciercx_mrg_lane_ctl_stat7
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat7_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat7 bdk_pciercx_mrg_lane_ctl_stat7_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT7(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT7(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1fcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT7", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT7(a) bdk_pciercx_mrg_lane_ctl_stat7_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT7(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT7(a) "PCIERCX_MRG_LANE_CTL_STAT7"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT7(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT7(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat8
+ *
+ * PCIe RC Margining Lane Control and Status Register 8
+ */
+union bdk_pciercx_mrg_lane_ctl_stat8
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat8_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat8_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat8 bdk_pciercx_mrg_lane_ctl_stat8_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT8(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT8(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x200ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT8", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT8(a) bdk_pciercx_mrg_lane_ctl_stat8_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT8(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT8(a) "PCIERCX_MRG_LANE_CTL_STAT8"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT8(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT8(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_lane_ctl_stat9
+ *
+ * PCIe RC Margining Lane Control and Status Register 9
+ */
+union bdk_pciercx_mrg_lane_ctl_stat9
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_lane_ctl_stat9_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+#else /* Word 0 - Little Endian */
+ uint32_t rnum : 3; /**< [ 2: 0](R/W) Receiver number for this lane. */
+ uint32_t mt : 3; /**< [ 5: 3](R/W) Margin type for this lane. */
+ uint32_t um : 1; /**< [ 6: 6](R/W) Usage model for this lane. */
+ uint32_t reserved_7 : 1;
+ uint32_t mpl : 8; /**< [ 15: 8](R/W) Margin payload for this lane. */
+ uint32_t rnum_stat : 3; /**< [ 18: 16](RO/H) Receiver number (status) for this lane. */
+ uint32_t mt_stat : 3; /**< [ 21: 19](RO/H) Margin type (status) for this lane. */
+ uint32_t um_stat : 1; /**< [ 22: 22](RO/H) Usage model (status) for this lane. */
+ uint32_t reserved_23 : 1;
+ uint32_t pl_stat : 8; /**< [ 31: 24](RO/H) Margin payload (status) for this lane. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_lane_ctl_stat9_s cn; */
+};
+typedef union bdk_pciercx_mrg_lane_ctl_stat9 bdk_pciercx_mrg_lane_ctl_stat9_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT9(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_LANE_CTL_STAT9(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x204ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_LANE_CTL_STAT9", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_LANE_CTL_STAT9(a) bdk_pciercx_mrg_lane_ctl_stat9_t
+#define bustype_BDK_PCIERCX_MRG_LANE_CTL_STAT9(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_LANE_CTL_STAT9(a) "PCIERCX_MRG_LANE_CTL_STAT9"
+#define busnum_BDK_PCIERCX_MRG_LANE_CTL_STAT9(a) (a)
+#define arguments_BDK_PCIERCX_MRG_LANE_CTL_STAT9(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_mrg_port_cap_stat
+ *
+ * PCIe RC Margining Port Capabilities and Status Register
+ */
+union bdk_pciercx_mrg_port_cap_stat
+{
+ uint32_t u;
+ struct bdk_pciercx_mrg_port_cap_stat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_18_31 : 14;
+ uint32_t m_swrdy : 1; /**< [ 17: 17](RO/H) Margining software ready. */
+ uint32_t m_rdy : 1; /**< [ 16: 16](RO/H) Margining ready. */
+ uint32_t reserved_1_15 : 15;
+ uint32_t m_drv : 1; /**< [ 0: 0](RO/WRSL) Margining uses driver software. */
+#else /* Word 0 - Little Endian */
+ uint32_t m_drv : 1; /**< [ 0: 0](RO/WRSL) Margining uses driver software. */
+ uint32_t reserved_1_15 : 15;
+ uint32_t m_rdy : 1; /**< [ 16: 16](RO/H) Margining ready. */
+ uint32_t m_swrdy : 1; /**< [ 17: 17](RO/H) Margining software ready. */
+ uint32_t reserved_18_31 : 14;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_mrg_port_cap_stat_s cn; */
+};
+typedef union bdk_pciercx_mrg_port_cap_stat bdk_pciercx_mrg_port_cap_stat_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MRG_PORT_CAP_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MRG_PORT_CAP_STAT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x1dcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MRG_PORT_CAP_STAT", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MRG_PORT_CAP_STAT(a) bdk_pciercx_mrg_port_cap_stat_t
+#define bustype_BDK_PCIERCX_MRG_PORT_CAP_STAT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MRG_PORT_CAP_STAT(a) "PCIERCX_MRG_PORT_CAP_STAT"
+#define busnum_BDK_PCIERCX_MRG_PORT_CAP_STAT(a) (a)
+#define arguments_BDK_PCIERCX_MRG_PORT_CAP_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_msix_cap_cntrl
+ *
+ * PCIe RC PCI Express MSI-X Capability ID/MSI-X Next Item Pointer/MSI-X Control Register
+ */
+union bdk_pciercx_msix_cap_cntrl
+{
+ uint32_t u;
+ struct bdk_pciercx_msix_cap_cntrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t msixen : 1; /**< [ 31: 31](R/W) MSI-X enable. If MSI-X is enabled, MSI and INTx must be disabled. */
+ uint32_t funm : 1; /**< [ 30: 30](R/W) Function mask.
+ 0 = Each vectors mask bit determines whether the vector is masked or not.
+ 1 = All vectors associated with the function are masked, regardless of their respective
+ per-vector mask bits. */
+ uint32_t reserved_27_29 : 3;
+ uint32_t msixts : 11; /**< [ 26: 16](RO/WRSL) MSI-X table size encoded as (table size - 1). */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+ uint32_t msixcid : 8; /**< [ 7: 0](RO/H) MSI-X capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t msixcid : 8; /**< [ 7: 0](RO/H) MSI-X capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+ uint32_t msixts : 11; /**< [ 26: 16](RO/WRSL) MSI-X table size encoded as (table size - 1). */
+ uint32_t reserved_27_29 : 3;
+ uint32_t funm : 1; /**< [ 30: 30](R/W) Function mask.
+ 0 = Each vectors mask bit determines whether the vector is masked or not.
+ 1 = All vectors associated with the function are masked, regardless of their respective
+ per-vector mask bits. */
+ uint32_t msixen : 1; /**< [ 31: 31](R/W) MSI-X enable. If MSI-X is enabled, MSI and INTx must be disabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_msix_cap_cntrl_s cn; */
+};
+typedef union bdk_pciercx_msix_cap_cntrl bdk_pciercx_msix_cap_cntrl_t;
+
+/* Compute the PCICONFIGRC address of this register for PCIe RC controller [a].
+ Valid only on CN9XXX with a <= 3; any other input falls through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MSIX_CAP_CNTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MSIX_CAP_CNTRL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xb0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_MSIX_CAP_CNTRL", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macro framework. */
+#define typedef_BDK_PCIERCX_MSIX_CAP_CNTRL(a) bdk_pciercx_msix_cap_cntrl_t
+#define bustype_BDK_PCIERCX_MSIX_CAP_CNTRL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MSIX_CAP_CNTRL(a) "PCIERCX_MSIX_CAP_CNTRL"
+#define busnum_BDK_PCIERCX_MSIX_CAP_CNTRL(a) (a)
+#define arguments_BDK_PCIERCX_MSIX_CAP_CNTRL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_msix_pba
+ *
+ * PCIe RC PCI Express MSI-X PBA Offset and BIR Register
+ */
+union bdk_pciercx_msix_pba
+{
+    uint32_t u;
+    struct bdk_pciercx_msix_pba_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixpoffs             : 29; /**< [ 31:  3](RO/WRSL/H) MSI-X table offset register. Base address of the MSI-X PBA, as an offset from the base
+                                                                 address of the BAR indicated by the table PBA bits. Writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field. */
+        uint32_t msixpbir              : 3;  /**< [  2:  0](RO/WRSL) MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                                                 pending bit array into memory space.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t msixpbir              : 3;  /**< [  2:  0](RO/WRSL) MSI-X PBA BAR indicator register (BIR). Indicates which BAR is used to map the MSI-X
+                                                                 pending bit array into memory space.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t msixpoffs             : 29; /**< [ 31:  3](RO/WRSL/H) MSI-X table offset register. Base address of the MSI-X PBA, as an offset from the base
+                                                                 address of the BAR indicated by the table PBA bits. Writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_msix_pba_s cn; */
+};
+typedef union bdk_pciercx_msix_pba bdk_pciercx_msix_pba_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MSIX_PBA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MSIX_PBA(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0xb8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_MSIX_PBA", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_MSIX_PBA(a) bdk_pciercx_msix_pba_t
+#define bustype_BDK_PCIERCX_MSIX_PBA(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MSIX_PBA(a) "PCIERCX_MSIX_PBA"
+#define busnum_BDK_PCIERCX_MSIX_PBA(a) (a)
+#define arguments_BDK_PCIERCX_MSIX_PBA(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_msix_table
+ *
+ * PCIe RC PCI Express MSI-X Table Offset and BIR Register
+ */
+union bdk_pciercx_msix_table
+{
+    uint32_t u;
+    struct bdk_pciercx_msix_table_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t msixtoffs             : 29; /**< [ 31:  3](RO/WRSL) MSI-X table offset register. Base address of the MSI-X table, as an offset from the base
+                                                                 address of the BAR indicated by the table BIR bits. Writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field. */
+        uint32_t msixtbir              : 3;  /**< [  2:  0](RO/WRSL) MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the
+                                                                 MSI-X table into memory space. Writable through PEM()_CFG_WR. However, the
+                                                                 application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t msixtbir              : 3;  /**< [  2:  0](RO/WRSL) MSI-X table BAR indicator register (BIR). Indicates which BAR is used to map the
+                                                                 MSI-X table into memory space. Writable through PEM()_CFG_WR. However, the
+                                                                 application must not change this field. */
+        uint32_t msixtoffs             : 29; /**< [ 31:  3](RO/WRSL) MSI-X table offset register. Base address of the MSI-X table, as an offset from the base
+                                                                 address of the BAR indicated by the table BIR bits. Writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_msix_table_s cn; */
+};
+typedef union bdk_pciercx_msix_table bdk_pciercx_msix_table_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_MSIX_TABLE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_MSIX_TABLE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0xb4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_MSIX_TABLE", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_MSIX_TABLE(a) bdk_pciercx_msix_table_t
+#define bustype_BDK_PCIERCX_MSIX_TABLE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_MSIX_TABLE(a) "PCIERCX_MSIX_TABLE"
+#define busnum_BDK_PCIERCX_MSIX_TABLE(a) (a)
+#define arguments_BDK_PCIERCX_MSIX_TABLE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_np_rcv_credit
+ *
+ * PCIe RC VC0 Nonposted Receive Queue Control Register
+ */
+union bdk_pciercx_np_rcv_credit
+{
+    uint32_t u;
+    struct bdk_pciercx_np_rcv_credit_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31        : 4;
+        uint32_t data_sc               : 2;  /**< [ 27: 26](R/W) VC0 scale non-posted data credits. */
+        uint32_t hdr_sc                : 2;  /**< [ 25: 24](R/W) VC0 scale non-posted header credits. */
+        uint32_t queue_mode            : 3;  /**< [ 23: 21](RO/WRSL) VC0 nonposted TLP queue mode. The operating mode of the nonposted receive queue for VC0,
+                                                                 used only in the segmented-buffer configuration, writable through PEM()_CFG_WR.
+                                                                 Only one bit can be set at a time:
+
+                                                                 _ Bit 23 = Bypass.
+
+                                                                 _ Bit 22 = Cut-through.
+
+                                                                 _ Bit 21 = Store-and-forward.
+
+                                                                 The application must not change this field. */
+        uint32_t reserved_20           : 1;
+        uint32_t header_credits        : 8;  /**< [ 19: 12](RO/WRSL) VC0 nonposted header credits. The number of initial nonposted header credits for VC0, used
+                                                                 for all receive queue buffer configurations. This field is writable through
+                                                                 PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t data_credits          : 12; /**< [ 11:  0](RO/WRSL) VC0 nonposted data credits. The number of initial nonposted data credits for VC0, used for
+                                                                 all receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t data_credits          : 12; /**< [ 11:  0](RO/WRSL) VC0 nonposted data credits. The number of initial nonposted data credits for VC0, used for
+                                                                 all receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field. */
+        uint32_t header_credits        : 8;  /**< [ 19: 12](RO/WRSL) VC0 nonposted header credits. The number of initial nonposted header credits for VC0, used
+                                                                 for all receive queue buffer configurations. This field is writable through
+                                                                 PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t reserved_20           : 1;
+        uint32_t queue_mode            : 3;  /**< [ 23: 21](RO/WRSL) VC0 nonposted TLP queue mode. The operating mode of the nonposted receive queue for VC0,
+                                                                 used only in the segmented-buffer configuration, writable through PEM()_CFG_WR.
+                                                                 Only one bit can be set at a time:
+
+                                                                 _ Bit 23 = Bypass.
+
+                                                                 _ Bit 22 = Cut-through.
+
+                                                                 _ Bit 21 = Store-and-forward.
+
+                                                                 The application must not change this field. */
+        uint32_t hdr_sc                : 2;  /**< [ 25: 24](R/W) VC0 scale non-posted header credits. */
+        uint32_t data_sc               : 2;  /**< [ 27: 26](R/W) VC0 scale non-posted data credits. */
+        uint32_t reserved_28_31        : 4;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_np_rcv_credit_s cn; */
+};
+typedef union bdk_pciercx_np_rcv_credit bdk_pciercx_np_rcv_credit_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_NP_RCV_CREDIT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_NP_RCV_CREDIT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x74cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_NP_RCV_CREDIT", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_NP_RCV_CREDIT(a) bdk_pciercx_np_rcv_credit_t
+#define bustype_BDK_PCIERCX_NP_RCV_CREDIT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_NP_RCV_CREDIT(a) "PCIERCX_NP_RCV_CREDIT"
+#define busnum_BDK_PCIERCX_NP_RCV_CREDIT(a) (a)
+#define arguments_BDK_PCIERCX_NP_RCV_CREDIT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_np_xmit_credit
+ *
+ * PCIe RC Transmit Nonposted FC Credit Status Register
+ */
+union bdk_pciercx_np_xmit_credit
+{
+    uint32_t u;
+    struct bdk_pciercx_np_xmit_credit_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_20_31        : 12;
+        uint32_t tchfcc                : 8;  /**< [ 19: 12](RO/H) Transmit nonposted header FC credits. The nonposted header credits advertised by the
+                                                                 receiver at the other end of the link, updated with each UpdateFC DLLP. */
+        uint32_t tcdfcc                : 12; /**< [ 11:  0](RO/H) Transmit nonposted data FC credits. The nonposted data credits advertised by the receiver
+                                                                 at the other end of the link, updated with each UpdateFC DLLP. */
+#else /* Word 0 - Little Endian */
+        uint32_t tcdfcc                : 12; /**< [ 11:  0](RO/H) Transmit nonposted data FC credits. The nonposted data credits advertised by the receiver
+                                                                 at the other end of the link, updated with each UpdateFC DLLP. */
+        uint32_t tchfcc                : 8;  /**< [ 19: 12](RO/H) Transmit nonposted header FC credits. The nonposted header credits advertised by the
+                                                                 receiver at the other end of the link, updated with each UpdateFC DLLP. */
+        uint32_t reserved_20_31        : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_np_xmit_credit_s cn; */
+};
+typedef union bdk_pciercx_np_xmit_credit bdk_pciercx_np_xmit_credit_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_NP_XMIT_CREDIT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_NP_XMIT_CREDIT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x734ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_NP_XMIT_CREDIT", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_NP_XMIT_CREDIT(a) bdk_pciercx_np_xmit_credit_t
+#define bustype_BDK_PCIERCX_NP_XMIT_CREDIT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_NP_XMIT_CREDIT(a) "PCIERCX_NP_XMIT_CREDIT"
+#define busnum_BDK_PCIERCX_NP_XMIT_CREDIT(a) (a)
+#define arguments_BDK_PCIERCX_NP_XMIT_CREDIT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_omsg_ptr
+ *
+ * PCIe RC Other Message Register
+ */
+union bdk_pciercx_omsg_ptr
+{
+    uint32_t u;
+    struct bdk_pciercx_omsg_ptr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t omr                   : 32; /**< [ 31:  0](R/W) Other message register. This register can be used for either of the following purposes:
+
+                                                                 * To send a specific PCI Express message, the application writes the payload of the
+                                                                 message into this register, then sets bit 0 of the port link control register to send the
+                                                                 message.
+
+                                                                 * To store a corruption pattern for corrupting the LCRC on all TLPs, the application
+                                                                 places a 32-bit corruption pattern into this register and enables this function by setting
+                                                                 bit 25 of the port link control register. When enabled, the transmit LCRC result is XORed
+                                                                 with this pattern before inserting it into the packet. */
+#else /* Word 0 - Little Endian */
+        uint32_t omr                   : 32; /**< [ 31:  0](R/W) Other message register. This register can be used for either of the following purposes:
+
+                                                                 * To send a specific PCI Express message, the application writes the payload of the
+                                                                 message into this register, then sets bit 0 of the port link control register to send the
+                                                                 message.
+
+                                                                 * To store a corruption pattern for corrupting the LCRC on all TLPs, the application
+                                                                 places a 32-bit corruption pattern into this register and enables this function by setting
+                                                                 bit 25 of the port link control register. When enabled, the transmit LCRC result is XORed
+                                                                 with this pattern before inserting it into the packet. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_omsg_ptr_s cn; */
+};
+typedef union bdk_pciercx_omsg_ptr bdk_pciercx_omsg_ptr_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_OMSG_PTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_OMSG_PTR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x704ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_OMSG_PTR", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_OMSG_PTR(a) bdk_pciercx_omsg_ptr_t
+#define bustype_BDK_PCIERCX_OMSG_PTR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_OMSG_PTR(a) "PCIERCX_OMSG_PTR"
+#define busnum_BDK_PCIERCX_OMSG_PTR(a) (a)
+#define arguments_BDK_PCIERCX_OMSG_PTR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ord_rule_ctrl
+ *
+ * PCIe RC Order Rule Control Register
+ */
+union bdk_pciercx_ord_rule_ctrl
+{
+    uint32_t u;
+    struct bdk_pciercx_ord_rule_ctrl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t cpl_pass_p            : 8;  /**< [ 15:  8](R/W) Completion passing posted ordering rule control.
+                                                                 Determines if a CPL can pass halted P queue.
+                                                                 0x0 = CPL can not pass P (recommended).
+                                                                 0x1 = CPL can pass P.
+                                                                 0x2-0xFF = Reserved. */
+        uint32_t np_pass_p             : 8;  /**< [  7:  0](R/W) Non-Posted passing posted ordering rule control.
+                                                                 Determines if a NP can pass halted P queue.
+                                                                 0x0 = NP can not pass P (recommended).
+                                                                 0x1 = NP can pass P.
+                                                                 0x2-0xFF = Reserved. */
+#else /* Word 0 - Little Endian */
+        uint32_t np_pass_p             : 8;  /**< [  7:  0](R/W) Non-Posted passing posted ordering rule control.
+                                                                 Determines if a NP can pass halted P queue.
+                                                                 0x0 = NP can not pass P (recommended).
+                                                                 0x1 = NP can pass P.
+                                                                 0x2-0xFF = Reserved. */
+        uint32_t cpl_pass_p            : 8;  /**< [ 15:  8](R/W) Completion passing posted ordering rule control.
+                                                                 Determines if a CPL can pass halted P queue.
+                                                                 0x0 = CPL can not pass P (recommended).
+                                                                 0x1 = CPL can pass P.
+                                                                 0x2-0xFF = Reserved. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ord_rule_ctrl_s cn; */
+};
+typedef union bdk_pciercx_ord_rule_ctrl bdk_pciercx_ord_rule_ctrl_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_ORD_RULE_CTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ORD_RULE_CTRL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8b4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_ORD_RULE_CTRL", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_ORD_RULE_CTRL(a) bdk_pciercx_ord_rule_ctrl_t
+#define bustype_BDK_PCIERCX_ORD_RULE_CTRL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ORD_RULE_CTRL(a) "PCIERCX_ORD_RULE_CTRL"
+#define busnum_BDK_PCIERCX_ORD_RULE_CTRL(a) (a)
+#define arguments_BDK_PCIERCX_ORD_RULE_CTRL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_p_rcv_credit
+ *
+ * PCIe RC VC0 Posted Receive Queue Control Register
+ */
+union bdk_pciercx_p_rcv_credit
+{
+    uint32_t u;
+    struct bdk_pciercx_p_rcv_credit_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t rx_queue_order        : 1;  /**< [ 31: 31](R/W) VC ordering for receive queues. Determines the VC ordering rule for the receive queues,
+                                                                 used only in the segmented-buffer configuration, writable through PEM()_CFG_WR:
+                                                                 0 = Round robin.
+                                                                 1 = Strict ordering, higher numbered VCs have higher priority.
+
+                                                                 However, the application must not change this field. */
+        uint32_t type_ordering         : 1;  /**< [ 30: 30](RO/WRSL) TLP type ordering for VC0. Determines the TLP type ordering rule for VC0 receive queues,
+                                                                 used only in the segmented-buffer configuration, writable through
+                                                                 PEM()_CFG_WR:
+                                                                 0 = Strict ordering for received TLPs: Posted, then completion, then NonPosted.
+                                                                 1 = Ordering of received TLPs follows the rules in PCI Express Base Specification.
+
+                                                                 The application must not change this field. */
+        uint32_t reserved_28_29        : 2;
+        uint32_t data_sc               : 2;  /**< [ 27: 26](R/W) VC0 scale posted data credits. */
+        uint32_t hdr_sc                : 2;  /**< [ 25: 24](R/W) VC0 scale posted header credits. */
+        uint32_t queue_mode            : 3;  /**< [ 23: 21](RO/WRSL) VC0 posted TLP queue mode. The operating mode of the posted receive queue for VC0, used
+                                                                 only in the segmented-buffer configuration, writable through PEM()_CFG_WR. However,
+                                                                 the application must not change this field.
+                                                                 Only one bit can be set at a time:
+
+                                                                 _ Bit 23 = Bypass.
+
+                                                                 _ Bit 22 = Cut-through.
+
+                                                                 _ Bit 21 = Store-and-forward. */
+        uint32_t reserved_20           : 1;
+        uint32_t header_credits        : 8;  /**< [ 19: 12](RO/WRSL/H) VC0 posted header credits. The number of initial posted header credits for VC0, used for
+                                                                 all receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field.
+
+                                                                 Reset values:
+                                                                 _ UPEM: 0x40.
+                                                                 _ BPEM: 0x20. */
+        uint32_t data_credits          : 12; /**< [ 11:  0](RO/WRSL/H) VC0 posted data credits. The number of initial posted data credits for VC0, used for all
+                                                                 receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field.
+
+                                                                 Reset values:
+                                                                 _ UPEM: 0x400.
+                                                                 _ BPEM: 0x200. */
+#else /* Word 0 - Little Endian */
+        uint32_t data_credits          : 12; /**< [ 11:  0](RO/WRSL/H) VC0 posted data credits. The number of initial posted data credits for VC0, used for all
+                                                                 receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field.
+
+                                                                 Reset values:
+                                                                 _ UPEM: 0x400.
+                                                                 _ BPEM: 0x200. */
+        uint32_t header_credits        : 8;  /**< [ 19: 12](RO/WRSL/H) VC0 posted header credits. The number of initial posted header credits for VC0, used for
+                                                                 all receive queue buffer configurations. This field is writable through PEM()_CFG_WR.
+                                                                 However, the application must not change this field.
+
+                                                                 Reset values:
+                                                                 _ UPEM: 0x40.
+                                                                 _ BPEM: 0x20. */
+        uint32_t reserved_20           : 1;
+        uint32_t queue_mode            : 3;  /**< [ 23: 21](RO/WRSL) VC0 posted TLP queue mode. The operating mode of the posted receive queue for VC0, used
+                                                                 only in the segmented-buffer configuration, writable through PEM()_CFG_WR. However,
+                                                                 the application must not change this field.
+                                                                 Only one bit can be set at a time:
+
+                                                                 _ Bit 23 = Bypass.
+
+                                                                 _ Bit 22 = Cut-through.
+
+                                                                 _ Bit 21 = Store-and-forward. */
+        uint32_t hdr_sc                : 2;  /**< [ 25: 24](R/W) VC0 scale posted header credits. */
+        uint32_t data_sc               : 2;  /**< [ 27: 26](R/W) VC0 scale posted data credits. */
+        uint32_t reserved_28_29        : 2;
+        uint32_t type_ordering         : 1;  /**< [ 30: 30](RO/WRSL) TLP type ordering for VC0. Determines the TLP type ordering rule for VC0 receive queues,
+                                                                 used only in the segmented-buffer configuration, writable through
+                                                                 PEM()_CFG_WR:
+                                                                 0 = Strict ordering for received TLPs: Posted, then completion, then NonPosted.
+                                                                 1 = Ordering of received TLPs follows the rules in PCI Express Base Specification.
+
+                                                                 The application must not change this field. */
+        uint32_t rx_queue_order        : 1;  /**< [ 31: 31](R/W) VC ordering for receive queues. Determines the VC ordering rule for the receive queues,
+                                                                 used only in the segmented-buffer configuration, writable through PEM()_CFG_WR:
+                                                                 0 = Round robin.
+                                                                 1 = Strict ordering, higher numbered VCs have higher priority.
+
+                                                                 However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_p_rcv_credit_s cn; */
+};
+typedef union bdk_pciercx_p_rcv_credit bdk_pciercx_p_rcv_credit_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_P_RCV_CREDIT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_P_RCV_CREDIT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x748ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_P_RCV_CREDIT", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_P_RCV_CREDIT(a) bdk_pciercx_p_rcv_credit_t
+#define bustype_BDK_PCIERCX_P_RCV_CREDIT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_P_RCV_CREDIT(a) "PCIERCX_P_RCV_CREDIT"
+#define busnum_BDK_PCIERCX_P_RCV_CREDIT(a) (a)
+#define arguments_BDK_PCIERCX_P_RCV_CREDIT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_p_xmit_credit
+ *
+ * PCIe RC Transmit Posted FC Credit Status Register
+ */
+union bdk_pciercx_p_xmit_credit
+{
+    uint32_t u;
+    struct bdk_pciercx_p_xmit_credit_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_20_31        : 12;
+        uint32_t tphfcc                : 8;  /**< [ 19: 12](RO/H) Transmit posted header FC credits. The posted header credits advertised by the receiver at
+                                                                 the other end of the link, updated with each UpdateFC DLLP. */
+        uint32_t tpdfcc                : 12; /**< [ 11:  0](RO/H) Transmit posted data FC credits. The posted data credits advertised by the receiver at the
+                                                                 other end of the link, updated with each UpdateFC DLLP. */
+#else /* Word 0 - Little Endian */
+        uint32_t tpdfcc                : 12; /**< [ 11:  0](RO/H) Transmit posted data FC credits. The posted data credits advertised by the receiver at the
+                                                                 other end of the link, updated with each UpdateFC DLLP. */
+        uint32_t tphfcc                : 8;  /**< [ 19: 12](RO/H) Transmit posted header FC credits. The posted header credits advertised by the receiver at
+                                                                 the other end of the link, updated with each UpdateFC DLLP. */
+        uint32_t reserved_20_31        : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_p_xmit_credit_s cn; */
+};
+typedef union bdk_pciercx_p_xmit_credit bdk_pciercx_p_xmit_credit_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_P_XMIT_CREDIT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_P_XMIT_CREDIT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x730ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_P_XMIT_CREDIT", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_P_XMIT_CREDIT(a) bdk_pciercx_p_xmit_credit_t
+#define bustype_BDK_PCIERCX_P_XMIT_CREDIT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_P_XMIT_CREDIT(a) "PCIERCX_P_XMIT_CREDIT"
+#define busnum_BDK_PCIERCX_P_XMIT_CREDIT(a) (a)
+#define arguments_BDK_PCIERCX_P_XMIT_CREDIT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pb_base
+ *
+ * PCIe RC Power Budgeting Extended Capability Header Register
+ */
+union bdk_pciercx_pb_base
+{
+    uint32_t u;
+    struct bdk_pciercx_pb_base_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t nco                   : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv                    : 4;  /**< [ 19: 16](RO/WRSL) Capability version.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pcieec                : 16; /**< [ 15:  0](RO/WRSL) PCI Express extended capability.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t pcieec                : 16; /**< [ 15:  0](RO/WRSL) PCI Express extended capability.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv                    : 4;  /**< [ 19: 16](RO/WRSL) Capability version.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t nco                   : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pb_base_s cn; */
+};
+typedef union bdk_pciercx_pb_base bdk_pciercx_pb_base_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PB_BASE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PB_BASE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x158ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PB_BASE", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_PB_BASE(a) bdk_pciercx_pb_base_t
+#define bustype_BDK_PCIERCX_PB_BASE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PB_BASE(a) "PCIERCX_PB_BASE"
+#define busnum_BDK_PCIERCX_PB_BASE(a) (a)
+#define arguments_BDK_PCIERCX_PB_BASE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pb_cap_hdr
+ *
+ * PCIe RC Power Budget Capability Header Register
+ */
+union bdk_pciercx_pb_cap_hdr
+{
+    uint32_t u;
+    struct bdk_pciercx_pb_cap_hdr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_1_31         : 31;
+        uint32_t sapb                  : 1;  /**< [  0:  0](RO/WRSL) System allocated PB. */
+#else /* Word 0 - Little Endian */
+        uint32_t sapb                  : 1;  /**< [  0:  0](RO/WRSL) System allocated PB. */
+        uint32_t reserved_1_31         : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pb_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_pb_cap_hdr bdk_pciercx_pb_cap_hdr_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PB_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PB_CAP_HDR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x164ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PB_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_PB_CAP_HDR(a) bdk_pciercx_pb_cap_hdr_t
+#define bustype_BDK_PCIERCX_PB_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PB_CAP_HDR(a) "PCIERCX_PB_CAP_HDR"
+#define busnum_BDK_PCIERCX_PB_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_PB_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pb_data
+ *
+ * PCIe RC Power Budgeting Data Register
+ */
+union bdk_pciercx_pb_data
+{
+    uint32_t u;
+    struct bdk_pciercx_pb_data_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_21_31        : 11;
+        uint32_t prs                   : 3;  /**< [ 20: 18](RO) Power rail state. */
+        uint32_t typ                   : 3;  /**< [ 17: 15](RO) Type of operating condition. */
+        uint32_t pms                   : 2;  /**< [ 14: 13](RO) PM state. */
+        uint32_t pmss                  : 3;  /**< [ 12: 10](RO) PM substate. */
+        uint32_t pds                   : 2;  /**< [  9:  8](RO) Data scale. */
+        uint32_t pbp                   : 8;  /**< [  7:  0](RO) Base power. */
+#else /* Word 0 - Little Endian */
+        uint32_t pbp                   : 8;  /**< [  7:  0](RO) Base power. */
+        uint32_t pds                   : 2;  /**< [  9:  8](RO) Data scale. */
+        uint32_t pmss                  : 3;  /**< [ 12: 10](RO) PM substate. */
+        uint32_t pms                   : 2;  /**< [ 14: 13](RO) PM state. */
+        uint32_t typ                   : 3;  /**< [ 17: 15](RO) Type of operating condition. */
+        uint32_t prs                   : 3;  /**< [ 20: 18](RO) Power rail state. */
+        uint32_t reserved_21_31        : 11;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pb_data_s cn; */
+};
+typedef union bdk_pciercx_pb_data bdk_pciercx_pb_data_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PB_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PB_DATA(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x160ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PB_DATA", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_PB_DATA(a) bdk_pciercx_pb_data_t
+#define bustype_BDK_PCIERCX_PB_DATA(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PB_DATA(a) "PCIERCX_PB_DATA"
+#define busnum_BDK_PCIERCX_PB_DATA(a) (a)
+#define arguments_BDK_PCIERCX_PB_DATA(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pb_data_select
+ *
+ * PCIe RC Power Budgeting Data Select Register
+ */
+union bdk_pciercx_pb_data_select
+{
+    uint32_t u;
+    struct bdk_pciercx_pb_data_select_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t dsel                  : 8;  /**< [  7:  0](R/W) Data select register. */
+#else /* Word 0 - Little Endian */
+        uint32_t dsel                  : 8;  /**< [  7:  0](R/W) Data select register. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pb_data_select_s cn; */
+};
+typedef union bdk_pciercx_pb_data_select bdk_pciercx_pb_data_select_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PB_DATA_SELECT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PB_DATA_SELECT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x15cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PB_DATA_SELECT", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_PB_DATA_SELECT(a) bdk_pciercx_pb_data_select_t
+#define bustype_BDK_PCIERCX_PB_DATA_SELECT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PB_DATA_SELECT(a) "PCIERCX_PB_DATA_SELECT"
+#define busnum_BDK_PCIERCX_PB_DATA_SELECT(a) (a)
+#define arguments_BDK_PCIERCX_PB_DATA_SELECT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_phy_ctl
+ *
+ * PCIe RC PHY Control Register
+ */
+union bdk_pciercx_phy_ctl
+{
+    uint32_t u;
+    struct bdk_pciercx_phy_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t phy_ctrl              : 32; /**< [ 31:  0](R/W) PHY control. Sideband control signaling (not supported). */
+#else /* Word 0 - Little Endian */
+        uint32_t phy_ctrl              : 32; /**< [ 31:  0](R/W) PHY control. Sideband control signaling (not supported). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_phy_ctl_s cn; */
+};
+typedef union bdk_pciercx_phy_ctl bdk_pciercx_phy_ctl_t;
+
+/* Address helper: returns the PCICONFIGRC CSR address for port (a).
+   Only CN9XXX with a <= 3 is valid; any other input triggers __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PHY_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PHY_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x814ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PHY_CTL", 1, a, 0, 0, 0);
+}
+
+/* Register metadata: C type, bus type, printable name, bus number and argument list for this CSR. */
+#define typedef_BDK_PCIERCX_PHY_CTL(a) bdk_pciercx_phy_ctl_t
+#define bustype_BDK_PCIERCX_PHY_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PHY_CTL(a) "PCIERCX_PHY_CTL"
+#define busnum_BDK_PCIERCX_PHY_CTL(a) (a)
+#define arguments_BDK_PCIERCX_PHY_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_phy_gen3_ctl
+ *
+ * PCIe RC Gen3 Control Register
+ */
+union bdk_pciercx_phy_gen3_ctl
+{
+    uint32_t u;
+    struct bdk_pciercx_phy_gen3_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31        : 6;
+        uint32_t rss                   : 2;  /**< [ 25: 24](R/W) Data rate for shadow register. */
+        uint32_t eiedd                 : 1;  /**< [ 23: 23](R/W) Eq InvalidRequest and RxEqEval different time assertion disable. Disable the assertion of
+                                                                 Eq InvalidRequest and RxEqEval at different time. */
+        uint32_t us8etd                : 1;  /**< [ 22: 22](R/W/H) Upstream port send 8GT/s EQ TS2 disable. The base spec defines that USP can
+                                                                 optionally send 8GT EQ TS2 and it means USP can set DSP TxPreset value in Gen4
+                                                                 Data Rate. If this register set to 0, USP sends 8GT EQ TS2. If this register
+                                                                 set to 1, USP does not send 8GT EQ TS2. This applies to upstream ports only.
+                                                                 No Function for downstream ports.
+                                                                 Note: When CX_GEN4_SPEED, this register is shadow register for Gen3 and Gen4 data
+                                                                 rate. If RATE_SHADOW_SEL==00b, this register is RSVD and cannot be written.
+                                                                 If RATE_SHADOW_SEL==01b, this register is for Gen4 data rate and can be written. */
+        uint32_t aed                   : 1;  /**< [ 21: 21](R/W) Autonomous equalization disable. When the controller is in L0 state at Gen3
+                                                                 data rate and equalization was completed successfully in Autonomous EQ Mechanism,
+                                                                 setting this bit in DSP will not direct the controller to Recovery state to
+                                                                 perform Gen4 equalization. Link stays in Gen3 rate and DSP sends DLLPs to USP.
+                                                                 If the bit is 0, DSP will block DLLPs and direct the link to perform Gen4 EQ
+                                                                 in Autonomous Mechanism.
+                                                                 Note: When CX_GEN4_SPEED, this register is shadow register for Gen3 and Gen4 data
+                                                                 rate. If RATE_SHADOW_SEL==00b, this register is RSVD and cannot be written.
+                                                                 If RATE_SHADOW_SEL==01b, this register is for Gen4 data rate and can be written. */
+        uint32_t reserved_19_20        : 2;
+        uint32_t dcbd                  : 1;  /**< [ 18: 18](R/W) Disable balance disable. Disable DC balance feature. */
+        uint32_t dtdd                  : 1;  /**< [ 17: 17](R/W) DLLP transmission delay disable. Disable delay transmission of DLLPs before equalization. */
+        uint32_t ed                    : 1;  /**< [ 16: 16](R/W) Equalization disable. Disable equalization feature. */
+        uint32_t reserved_14_15        : 2;
+        uint32_t rxeq_rgrdless_rsts    : 1;  /**< [ 13: 13](R/W) The controller as Gen3 EQ master asserts RxEqEval to instruct the
+                                                                 PHY to do Rx adaptation and evaluation.
+                                                                 0x0 = Asserts after 1 us and 2 TS1 received from remote partner.
+                                                                 0x1 = Asserts after 500 ns regardless of TS's received or not. */
+        uint32_t rxeq_ph01_en          : 1;  /**< [ 12: 12](R/W) Rx equalization phase 0/phase 1 hold enable. */
+        uint32_t erd                   : 1;  /**< [ 11: 11](R/W) Equalization redo disable. Disable requesting reset of EIEOS count during equalization. */
+        uint32_t ecrd                  : 1;  /**< [ 10: 10](R/W) Equalization EIEOS count reset disable. Disable requesting reset of EIEOS count during
+                                                                 equalization. */
+        uint32_t ep2p3d                : 1;  /**< [  9:  9](R/W) Equalization phase 2 and phase 3 disable. This applies to downstream ports only. */
+        uint32_t dsg3                  : 1;  /**< [  8:  8](R/W) Disable scrambler for Gen3 data rate. The Gen3 scrambler/descrambler within the core needs
+                                                                 to be disabled when the scrambling function is implemented outside of the core (within the
+                                                                 PHY). */
+        uint32_t reserved_1_7          : 7;
+        uint32_t grizdnc               : 1;  /**< [  0:  0](R/W) Gen3 receiver impedance ZRX-DC not compliant. */
+#else /* Word 0 - Little Endian */
+        uint32_t grizdnc               : 1;  /**< [  0:  0](R/W) Gen3 receiver impedance ZRX-DC not compliant. */
+        uint32_t reserved_1_7          : 7;
+        uint32_t dsg3                  : 1;  /**< [  8:  8](R/W) Disable scrambler for Gen3 data rate. The Gen3 scrambler/descrambler within the core needs
+                                                                 to be disabled when the scrambling function is implemented outside of the core (within the
+                                                                 PHY). */
+        uint32_t ep2p3d                : 1;  /**< [  9:  9](R/W) Equalization phase 2 and phase 3 disable. This applies to downstream ports only. */
+        uint32_t ecrd                  : 1;  /**< [ 10: 10](R/W) Equalization EIEOS count reset disable. Disable requesting reset of EIEOS count during
+                                                                 equalization. */
+        uint32_t erd                   : 1;  /**< [ 11: 11](R/W) Equalization redo disable. Disable requesting reset of EIEOS count during equalization. */
+        uint32_t rxeq_ph01_en          : 1;  /**< [ 12: 12](R/W) Rx equalization phase 0/phase 1 hold enable. */
+        uint32_t rxeq_rgrdless_rsts    : 1;  /**< [ 13: 13](R/W) The controller as Gen3 EQ master asserts RxEqEval to instruct the
+                                                                 PHY to do Rx adaptation and evaluation.
+                                                                 0x0 = Asserts after 1 us and 2 TS1 received from remote partner.
+                                                                 0x1 = Asserts after 500 ns regardless of TS's received or not. */
+        uint32_t reserved_14_15        : 2;
+        uint32_t ed                    : 1;  /**< [ 16: 16](R/W) Equalization disable. Disable equalization feature. */
+        uint32_t dtdd                  : 1;  /**< [ 17: 17](R/W) DLLP transmission delay disable. Disable delay transmission of DLLPs before equalization. */
+        uint32_t dcbd                  : 1;  /**< [ 18: 18](R/W) Disable balance disable. Disable DC balance feature. */
+        uint32_t reserved_19_20        : 2;
+        uint32_t aed                   : 1;  /**< [ 21: 21](R/W) Autonomous equalization disable. When the controller is in L0 state at Gen3
+                                                                 data rate and equalization was completed successfully in Autonomous EQ Mechanism,
+                                                                 setting this bit in DSP will not direct the controller to Recovery state to
+                                                                 perform Gen4 equalization. Link stays in Gen3 rate and DSP sends DLLPs to USP.
+                                                                 If the bit is 0, DSP will block DLLPs and direct the link to perform Gen4 EQ
+                                                                 in Autonomous Mechanism.
+                                                                 Note: When CX_GEN4_SPEED, this register is shadow register for Gen3 and Gen4 data
+                                                                 rate. If RATE_SHADOW_SEL==00b, this register is RSVD and cannot be written.
+                                                                 If RATE_SHADOW_SEL==01b, this register is for Gen4 data rate and can be written. */
+        uint32_t us8etd                : 1;  /**< [ 22: 22](R/W/H) Upstream port send 8GT/s EQ TS2 disable. The base spec defines that USP can
+                                                                 optionally send 8GT EQ TS2 and it means USP can set DSP TxPreset value in Gen4
+                                                                 Data Rate. If this register set to 0, USP sends 8GT EQ TS2. If this register
+                                                                 set to 1, USP does not send 8GT EQ TS2. This applies to upstream ports only.
+                                                                 No Function for downstream ports.
+                                                                 Note: When CX_GEN4_SPEED, this register is shadow register for Gen3 and Gen4 data
+                                                                 rate. If RATE_SHADOW_SEL==00b, this register is RSVD and cannot be written.
+                                                                 If RATE_SHADOW_SEL==01b, this register is for Gen4 data rate and can be written. */
+        uint32_t eiedd                 : 1;  /**< [ 23: 23](R/W) Eq InvalidRequest and RxEqEval different time assertion disable. Disable the assertion of
+                                                                 Eq InvalidRequest and RxEqEval at different time. */
+        uint32_t rss                   : 2;  /**< [ 25: 24](R/W) Data rate for shadow register. */
+        uint32_t reserved_26_31        : 6;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_phy_gen3_ctl_s cn; */
+};
+typedef union bdk_pciercx_phy_gen3_ctl bdk_pciercx_phy_gen3_ctl_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PHY_GEN3_CTL. Only RC index
+   a <= 3 on CN9XXX is valid; any other combination reports a fatal CSR
+   error via __bdk_csr_fatal() (presumably noreturn -- TODO confirm). */
+static inline uint64_t BDK_PCIERCX_PHY_GEN3_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PHY_GEN3_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x890ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PHY_GEN3_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PHY_GEN3_CTL(a) bdk_pciercx_phy_gen3_ctl_t
+#define bustype_BDK_PCIERCX_PHY_GEN3_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PHY_GEN3_CTL(a) "PCIERCX_PHY_GEN3_CTL"
+#define busnum_BDK_PCIERCX_PHY_GEN3_CTL(a) (a)
+#define arguments_BDK_PCIERCX_PHY_GEN3_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_phy_intop_ctl
+ *
+ * PCIe RC PHY Interoperability Control Register
+ */
+union bdk_pciercx_phy_intop_ctl
+{
+    uint32_t u;
+    struct bdk_pciercx_phy_intop_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_11_31        : 21;
+        uint32_t l1_clk_sel            : 1;  /**< [ 10: 10](R/W) L1 clock control bit.
+                                                                 0 = Controller requests aux_clk switch and core_clk gating in L1.
+                                                                 1 = Controller does not request aux_clk switch and core_clk gating in L1. */
+        uint32_t l1_nowait_p1          : 1;  /**< [  9:  9](RO) L1 entry control bit.
+                                                                 0 = Core waits for the PHY to acknowledge transition to P1 before entering L1.
+                                                                 1 = Core does not wait for PHY to acknowledge transition to P1 before entering L1. */
+        uint32_t l1sub_exit_mode       : 1;  /**< [  8:  8](R/W) L1 exit control using phy_mac_pclkack_n.
+                                                                 0 = Core waits for the PHY to assert phy_mac_pclkack_n before exiting L1.
+                                                                 1 = Core exits L1 without waiting for the PHY to assert phy_mac_pclkack_n. */
+        uint32_t reserved_7            : 1;
+        uint32_t rxstby_ctl            : 7;  /**< [  6:  0](R/W) Rxstandby control. Bits 0..5 determine if the controller asserts the RxStandby signal
+                                                                 (mac_phy_rxstandby) in the indicated condition. Bit 6 enables the controller
+                                                                 to perform the RxStandby/RxStandbyStatus handshake.
+                                                                 0x0 = Rx EIOS and subsequent T TX-IDLE-MIN.
+                                                                 0x1 = Rate Change.
+                                                                 0x2 = Inactive lane for upconfigure/downconfigure.
+                                                                 0x3 = PowerDown = P1orP2.
+                                                                 0x4 = RxL0s.Idle.
+                                                                 0x5 = EI Infer in L0.
+                                                                 0x6 = Execute RxStandby/RxStandbyStatus Handshake. */
+#else /* Word 0 - Little Endian */
+        uint32_t rxstby_ctl            : 7;  /**< [  6:  0](R/W) Rxstandby control. Bits 0..5 determine if the controller asserts the RxStandby signal
+                                                                 (mac_phy_rxstandby) in the indicated condition. Bit 6 enables the controller
+                                                                 to perform the RxStandby/RxStandbyStatus handshake.
+                                                                 0x0 = Rx EIOS and subsequent T TX-IDLE-MIN.
+                                                                 0x1 = Rate Change.
+                                                                 0x2 = Inactive lane for upconfigure/downconfigure.
+                                                                 0x3 = PowerDown = P1orP2.
+                                                                 0x4 = RxL0s.Idle.
+                                                                 0x5 = EI Infer in L0.
+                                                                 0x6 = Execute RxStandby/RxStandbyStatus Handshake. */
+        uint32_t reserved_7            : 1;
+        uint32_t l1sub_exit_mode       : 1;  /**< [  8:  8](R/W) L1 exit control using phy_mac_pclkack_n.
+                                                                 0 = Core waits for the PHY to assert phy_mac_pclkack_n before exiting L1.
+                                                                 1 = Core exits L1 without waiting for the PHY to assert phy_mac_pclkack_n. */
+        uint32_t l1_nowait_p1          : 1;  /**< [  9:  9](RO) L1 entry control bit.
+                                                                 0 = Core waits for the PHY to acknowledge transition to P1 before entering L1.
+                                                                 1 = Core does not wait for PHY to acknowledge transition to P1 before entering L1. */
+        uint32_t l1_clk_sel            : 1;  /**< [ 10: 10](R/W) L1 clock control bit.
+                                                                 0 = Controller requests aux_clk switch and core_clk gating in L1.
+                                                                 1 = Controller does not request aux_clk switch and core_clk gating in L1. */
+        uint32_t reserved_11_31        : 21;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_phy_intop_ctl_s cn; */
+};
+typedef union bdk_pciercx_phy_intop_ctl bdk_pciercx_phy_intop_ctl_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PHY_INTOP_CTL; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PHY_INTOP_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PHY_INTOP_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8c4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PHY_INTOP_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PHY_INTOP_CTL(a) bdk_pciercx_phy_intop_ctl_t
+#define bustype_BDK_PCIERCX_PHY_INTOP_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PHY_INTOP_CTL(a) "PCIERCX_PHY_INTOP_CTL"
+#define busnum_BDK_PCIERCX_PHY_INTOP_CTL(a) (a)
+#define arguments_BDK_PCIERCX_PHY_INTOP_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_phy_status
+ *
+ * PCIe RC PHY Status Register
+ */
+union bdk_pciercx_phy_status
+{
+    uint32_t u;
+    struct bdk_pciercx_phy_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t phy_stat              : 32; /**< [ 31:  0](RO/H) PHY status. Sideband control signaling (not supported). */
+#else /* Word 0 - Little Endian */
+        uint32_t phy_stat              : 32; /**< [ 31:  0](RO/H) PHY status. Sideband control signaling (not supported). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_phy_status_s cn; */
+};
+typedef union bdk_pciercx_phy_status bdk_pciercx_phy_status_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PHY_STATUS; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PHY_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PHY_STATUS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PHY_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PHY_STATUS(a) bdk_pciercx_phy_status_t
+#define bustype_BDK_PCIERCX_PHY_STATUS(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PHY_STATUS(a) "PCIERCX_PHY_STATUS"
+#define busnum_BDK_PCIERCX_PHY_STATUS(a) (a)
+#define arguments_BDK_PCIERCX_PHY_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pipe_rel
+ *
+ * PCIe RC Pipe Related Register
+ */
+union bdk_pciercx_pipe_rel
+{
+    uint32_t u;
+    struct bdk_pciercx_pipe_rel_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31         : 24;
+        uint32_t tx_msg_wbuf_depth     : 4;  /**< [  7:  4](RO/H) Tx message bus write buffer depth. */
+        uint32_t rx_msg_wbuf_depth     : 4;  /**< [  3:  0](RO/H) Rx message bus write buffer depth. */
+#else /* Word 0 - Little Endian */
+        uint32_t rx_msg_wbuf_depth     : 4;  /**< [  3:  0](RO/H) Rx message bus write buffer depth. */
+        uint32_t tx_msg_wbuf_depth     : 4;  /**< [  7:  4](RO/H) Tx message bus write buffer depth. */
+        uint32_t reserved_8_31         : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pipe_rel_s cn; */
+};
+typedef union bdk_pciercx_pipe_rel bdk_pciercx_pipe_rel_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PIPE_REL; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PIPE_REL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PIPE_REL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0xb90ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PIPE_REL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PIPE_REL(a) bdk_pciercx_pipe_rel_t
+#define bustype_BDK_PCIERCX_PIPE_REL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PIPE_REL(a) "PCIERCX_PIPE_REL"
+#define busnum_BDK_PCIERCX_PIPE_REL(a) (a)
+#define arguments_BDK_PCIERCX_PIPE_REL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_cap
+ *
+ * PCIe RC 16.0 GT/s Capabilities Register
+ */
+union bdk_pciercx_pl16g_cap
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_cap_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31         : 32; /* Entire word reserved on this part. */
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31         : 32; /* Entire word reserved on this part. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_cap_s cn; */
+};
+typedef union bdk_pciercx_pl16g_cap bdk_pciercx_pl16g_cap_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_CAP; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_CAP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1acll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_CAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_CAP(a) bdk_pciercx_pl16g_cap_t
+#define bustype_BDK_PCIERCX_PL16G_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_CAP(a) "PCIERCX_PL16G_CAP"
+#define busnum_BDK_PCIERCX_PL16G_CAP(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_ctl
+ *
+ * PCIe RC 16.0 GT/s Control Register
+ */
+union bdk_pciercx_pl16g_ctl
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31         : 32; /* Entire word reserved on this part. */
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31         : 32; /* Entire word reserved on this part. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_ctl_s cn; */
+};
+typedef union bdk_pciercx_pl16g_ctl bdk_pciercx_pl16g_ctl_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_CTL; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1b0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_CTL(a) bdk_pciercx_pl16g_ctl_t
+#define bustype_BDK_PCIERCX_PL16G_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_CTL(a) "PCIERCX_PL16G_CTL"
+#define busnum_BDK_PCIERCX_PL16G_CTL(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_eq_ctl0123
+ *
+ * PCIe RC 16.0 GT/s Lane Equalization Control for Lane 0-3 Register
+ */
+union bdk_pciercx_pl16g_eq_ctl0123
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_eq_ctl0123_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t l3utp                 : 4;  /**< [ 31: 28](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 3. */
+        uint32_t l3dtp                 : 4;  /**< [ 27: 24](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 3. */
+        uint32_t l2utp                 : 4;  /**< [ 23: 20](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 2. */
+        uint32_t l2dtp                 : 4;  /**< [ 19: 16](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 2. */
+        uint32_t l1utp                 : 4;  /**< [ 15: 12](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 1. */
+        uint32_t l1dtp                 : 4;  /**< [ 11:  8](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 1. */
+        uint32_t l0utp                 : 4;  /**< [  7:  4](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 0. */
+        uint32_t l0dtp                 : 4;  /**< [  3:  0](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 0. */
+#else /* Word 0 - Little Endian */
+        uint32_t l0dtp                 : 4;  /**< [  3:  0](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 0. */
+        uint32_t l0utp                 : 4;  /**< [  7:  4](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 0. */
+        uint32_t l1dtp                 : 4;  /**< [ 11:  8](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 1. */
+        uint32_t l1utp                 : 4;  /**< [ 15: 12](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 1. */
+        uint32_t l2dtp                 : 4;  /**< [ 19: 16](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 2. */
+        uint32_t l2utp                 : 4;  /**< [ 23: 20](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 2. */
+        uint32_t l3dtp                 : 4;  /**< [ 27: 24](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 3. */
+        uint32_t l3utp                 : 4;  /**< [ 31: 28](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 3. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_eq_ctl0123_s cn; */
+};
+typedef union bdk_pciercx_pl16g_eq_ctl0123 bdk_pciercx_pl16g_eq_ctl0123_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_EQ_CTL0123; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_EQ_CTL0123(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_EQ_CTL0123(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1c8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_EQ_CTL0123", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_EQ_CTL0123(a) bdk_pciercx_pl16g_eq_ctl0123_t
+#define bustype_BDK_PCIERCX_PL16G_EQ_CTL0123(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_EQ_CTL0123(a) "PCIERCX_PL16G_EQ_CTL0123"
+#define busnum_BDK_PCIERCX_PL16G_EQ_CTL0123(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_EQ_CTL0123(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_eq_ctl12131415
+ *
+ * PCIe RC 16.0 GT/s Lane Equalization Control for Lane 12-15 Register
+ */
+union bdk_pciercx_pl16g_eq_ctl12131415
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_eq_ctl12131415_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t l15utp                : 4;  /**< [ 31: 28](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 15. */
+        uint32_t l15dtp                : 4;  /**< [ 27: 24](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 15. */
+        uint32_t l14utp                : 4;  /**< [ 23: 20](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 14. */
+        uint32_t l14dtp                : 4;  /**< [ 19: 16](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 14. */
+        uint32_t l13utp                : 4;  /**< [ 15: 12](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 13. */
+        uint32_t l13dtp                : 4;  /**< [ 11:  8](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 13. */
+        uint32_t l12utp                : 4;  /**< [  7:  4](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 12. */
+        uint32_t l12dtp                : 4;  /**< [  3:  0](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 12. */
+#else /* Word 0 - Little Endian */
+        uint32_t l12dtp                : 4;  /**< [  3:  0](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 12. */
+        uint32_t l12utp                : 4;  /**< [  7:  4](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 12. */
+        uint32_t l13dtp                : 4;  /**< [ 11:  8](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 13. */
+        uint32_t l13utp                : 4;  /**< [ 15: 12](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 13. */
+        uint32_t l14dtp                : 4;  /**< [ 19: 16](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 14. */
+        uint32_t l14utp                : 4;  /**< [ 23: 20](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 14. */
+        uint32_t l15dtp                : 4;  /**< [ 27: 24](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 15. */
+        uint32_t l15utp                : 4;  /**< [ 31: 28](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 15. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_eq_ctl12131415_s cn; */
+};
+typedef union bdk_pciercx_pl16g_eq_ctl12131415 bdk_pciercx_pl16g_eq_ctl12131415_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_EQ_CTL12131415; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_EQ_CTL12131415(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_EQ_CTL12131415(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1d4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_EQ_CTL12131415", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_EQ_CTL12131415(a) bdk_pciercx_pl16g_eq_ctl12131415_t
+#define bustype_BDK_PCIERCX_PL16G_EQ_CTL12131415(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_EQ_CTL12131415(a) "PCIERCX_PL16G_EQ_CTL12131415"
+#define busnum_BDK_PCIERCX_PL16G_EQ_CTL12131415(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_EQ_CTL12131415(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_eq_ctl4567
+ *
+ * PCIe RC 16.0 GT/s Lane Equalization Control for Lane 4-7 Register
+ */
+union bdk_pciercx_pl16g_eq_ctl4567
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_eq_ctl4567_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t l7utp                 : 4;  /**< [ 31: 28](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 7. */
+        uint32_t l7dtp                 : 4;  /**< [ 27: 24](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 7. */
+        uint32_t l6utp                 : 4;  /**< [ 23: 20](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 6. */
+        uint32_t l6dtp                 : 4;  /**< [ 19: 16](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 6. */
+        uint32_t l5utp                 : 4;  /**< [ 15: 12](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 5. */
+        uint32_t l5dtp                 : 4;  /**< [ 11:  8](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 5. */
+        uint32_t l4utp                 : 4;  /**< [  7:  4](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 4. */
+        uint32_t l4dtp                 : 4;  /**< [  3:  0](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 4. */
+#else /* Word 0 - Little Endian */
+        uint32_t l4dtp                 : 4;  /**< [  3:  0](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 4. */
+        uint32_t l4utp                 : 4;  /**< [  7:  4](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 4. */
+        uint32_t l5dtp                 : 4;  /**< [ 11:  8](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 5. */
+        uint32_t l5utp                 : 4;  /**< [ 15: 12](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 5. */
+        uint32_t l6dtp                 : 4;  /**< [ 19: 16](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 6. */
+        uint32_t l6utp                 : 4;  /**< [ 23: 20](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 6. */
+        uint32_t l7dtp                 : 4;  /**< [ 27: 24](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 7. */
+        uint32_t l7utp                 : 4;  /**< [ 31: 28](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 7. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_eq_ctl4567_s cn; */
+};
+typedef union bdk_pciercx_pl16g_eq_ctl4567 bdk_pciercx_pl16g_eq_ctl4567_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_EQ_CTL4567; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_EQ_CTL4567(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_EQ_CTL4567(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1ccll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_EQ_CTL4567", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_EQ_CTL4567(a) bdk_pciercx_pl16g_eq_ctl4567_t
+#define bustype_BDK_PCIERCX_PL16G_EQ_CTL4567(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_EQ_CTL4567(a) "PCIERCX_PL16G_EQ_CTL4567"
+#define busnum_BDK_PCIERCX_PL16G_EQ_CTL4567(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_EQ_CTL4567(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_eq_ctl891011
+ *
+ * PCIe RC 16.0 GT/s Lane Equalization Control for Lane 8-11 Register
+ */
+union bdk_pciercx_pl16g_eq_ctl891011
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_eq_ctl891011_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t l11utp                : 4;  /**< [ 31: 28](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 11. */
+        uint32_t l11dtp                : 4;  /**< [ 27: 24](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 11. */
+        uint32_t l10utp                : 4;  /**< [ 23: 20](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 10. */
+        uint32_t l10dtp                : 4;  /**< [ 19: 16](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 10. */
+        uint32_t l9utp                 : 4;  /**< [ 15: 12](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 9. */
+        uint32_t l9dtp                 : 4;  /**< [ 11:  8](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 9. */
+        uint32_t l8utp                 : 4;  /**< [  7:  4](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 8. */
+        uint32_t l8dtp                 : 4;  /**< [  3:  0](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 8. */
+#else /* Word 0 - Little Endian */
+        uint32_t l8dtp                 : 4;  /**< [  3:  0](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 8. */
+        uint32_t l8utp                 : 4;  /**< [  7:  4](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 8. */
+        uint32_t l9dtp                 : 4;  /**< [ 11:  8](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 9. */
+        uint32_t l9utp                 : 4;  /**< [ 15: 12](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 9. */
+        uint32_t l10dtp                : 4;  /**< [ 19: 16](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 10. */
+        uint32_t l10utp                : 4;  /**< [ 23: 20](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 10. */
+        uint32_t l11dtp                : 4;  /**< [ 27: 24](RO/WRSL) Downstream port 16.0 GT/s transmitter preset 11. */
+        uint32_t l11utp                : 4;  /**< [ 31: 28](RO/WRSL) Upstream port 16.0 GT/s transmitter preset 11. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_eq_ctl891011_s cn; */
+};
+typedef union bdk_pciercx_pl16g_eq_ctl891011 bdk_pciercx_pl16g_eq_ctl891011_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_EQ_CTL891011; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_EQ_CTL891011(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_EQ_CTL891011(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1d0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_EQ_CTL891011", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_EQ_CTL891011(a) bdk_pciercx_pl16g_eq_ctl891011_t
+#define bustype_BDK_PCIERCX_PL16G_EQ_CTL891011(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_EQ_CTL891011(a) "PCIERCX_PL16G_EQ_CTL891011"
+#define busnum_BDK_PCIERCX_PL16G_EQ_CTL891011(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_EQ_CTL891011(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_ext_cap_hdr
+ *
+ * PCIe RC Physical Layer 16.0 GT/s Extended Capability Header Register
+ */
+union bdk_pciercx_pl16g_ext_cap_hdr
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_ext_cap_hdr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t nco                   : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv                    : 4;  /**< [ 19: 16](RO/WRSL) Capability version.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pcieec                : 16; /**< [ 15:  0](RO/WRSL) PCI Express extended capability.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t pcieec                : 16; /**< [ 15:  0](RO/WRSL) PCI Express extended capability.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv                    : 4;  /**< [ 19: 16](RO/WRSL) Capability version.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t nco                   : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+                                                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_ext_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_pl16g_ext_cap_hdr bdk_pciercx_pl16g_ext_cap_hdr_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_EXT_CAP_HDR; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_EXT_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_EXT_CAP_HDR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1a8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_EXT_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_EXT_CAP_HDR(a) bdk_pciercx_pl16g_ext_cap_hdr_t
+#define bustype_BDK_PCIERCX_PL16G_EXT_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_EXT_CAP_HDR(a) "PCIERCX_PL16G_EXT_CAP_HDR"
+#define busnum_BDK_PCIERCX_PL16G_EXT_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_EXT_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_fret_dpar_stat
+ *
+ * PCIe RC 16.0 GT/s First Retimer Data Parity Mismatch Status Register
+ */
+union bdk_pciercx_pl16g_fret_dpar_stat
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_fret_dpar_stat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t frt_dp_status         : 16; /**< [ 15:  0](R/W/H) First retimer data parity mismatch status. */
+#else /* Word 0 - Little Endian */
+        uint32_t frt_dp_status         : 16; /**< [ 15:  0](R/W/H) First retimer data parity mismatch status. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_fret_dpar_stat_s cn; */
+};
+typedef union bdk_pciercx_pl16g_fret_dpar_stat bdk_pciercx_pl16g_fret_dpar_stat_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_FRET_DPAR_STAT; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_FRET_DPAR_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_FRET_DPAR_STAT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1bcll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_FRET_DPAR_STAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_FRET_DPAR_STAT(a) bdk_pciercx_pl16g_fret_dpar_stat_t
+#define bustype_BDK_PCIERCX_PL16G_FRET_DPAR_STAT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_FRET_DPAR_STAT(a) "PCIERCX_PL16G_FRET_DPAR_STAT"
+#define busnum_BDK_PCIERCX_PL16G_FRET_DPAR_STAT(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_FRET_DPAR_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_lc_dpar_stat
+ *
+ * PCIe RC 16.0 GT/s Local Data Parity Mismatch Status Register
+ */
+union bdk_pciercx_pl16g_lc_dpar_stat
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_lc_dpar_stat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t ldp_status            : 16; /**< [ 15:  0](R/W/H) Local data parity mismatch status. */
+#else /* Word 0 - Little Endian */
+        uint32_t ldp_status            : 16; /**< [ 15:  0](R/W/H) Local data parity mismatch status. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_lc_dpar_stat_s cn; */
+};
+typedef union bdk_pciercx_pl16g_lc_dpar_stat bdk_pciercx_pl16g_lc_dpar_stat_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_LC_DPAR_STAT; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_LC_DPAR_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_LC_DPAR_STAT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1b8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_LC_DPAR_STAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_LC_DPAR_STAT(a) bdk_pciercx_pl16g_lc_dpar_stat_t
+#define bustype_BDK_PCIERCX_PL16G_LC_DPAR_STAT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_LC_DPAR_STAT(a) "PCIERCX_PL16G_LC_DPAR_STAT"
+#define busnum_BDK_PCIERCX_PL16G_LC_DPAR_STAT(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_LC_DPAR_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_sret_dpar_stat
+ *
+ * PCIe RC 16.0 GT/s Second Retimer Data Parity Mismatch Status Register
+ */
+union bdk_pciercx_pl16g_sret_dpar_stat
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_sret_dpar_stat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t srt_dp_status         : 16; /**< [ 15:  0](R/W/H) Second retimer data parity mismatch status. */
+#else /* Word 0 - Little Endian */
+        uint32_t srt_dp_status         : 16; /**< [ 15:  0](R/W/H) Second retimer data parity mismatch status. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_sret_dpar_stat_s cn; */
+};
+typedef union bdk_pciercx_pl16g_sret_dpar_stat bdk_pciercx_pl16g_sret_dpar_stat_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_SRET_DPAR_STAT; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_SRET_DPAR_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_SRET_DPAR_STAT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1c0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_SRET_DPAR_STAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_SRET_DPAR_STAT(a) bdk_pciercx_pl16g_sret_dpar_stat_t
+#define bustype_BDK_PCIERCX_PL16G_SRET_DPAR_STAT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_SRET_DPAR_STAT(a) "PCIERCX_PL16G_SRET_DPAR_STAT"
+#define busnum_BDK_PCIERCX_PL16G_SRET_DPAR_STAT(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_SRET_DPAR_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl16g_status
+ *
+ * PCIe RC 16.0 GT/s Status Register
+ */
+union bdk_pciercx_pl16g_status
+{
+    uint32_t u;
+    struct bdk_pciercx_pl16g_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_5_31         : 27;
+        uint32_t leq_req               : 1;  /**< [  4:  4](R/W/H) Link equalization request 16.0 GT/s */
+        uint32_t eq_cpl_p3             : 1;  /**< [  3:  3](RO/H) Equalization 16.0 GT/s phase 3 successful. */
+        uint32_t eq_cpl_p2             : 1;  /**< [  2:  2](RO/H) Equalization 16.0 GT/s phase 2 successful. */
+        uint32_t eq_cpl_p1             : 1;  /**< [  1:  1](RO/H) Equalization 16.0 GT/s phase 1 successful. */
+        uint32_t eq_cpl                : 1;  /**< [  0:  0](RO/H) Equalization 16.0 GT/s complete. */
+#else /* Word 0 - Little Endian */
+        uint32_t eq_cpl                : 1;  /**< [  0:  0](RO/H) Equalization 16.0 GT/s complete. */
+        uint32_t eq_cpl_p1             : 1;  /**< [  1:  1](RO/H) Equalization 16.0 GT/s phase 1 successful. */
+        uint32_t eq_cpl_p2             : 1;  /**< [  2:  2](RO/H) Equalization 16.0 GT/s phase 2 successful. */
+        uint32_t eq_cpl_p3             : 1;  /**< [  3:  3](RO/H) Equalization 16.0 GT/s phase 3 successful. */
+        uint32_t leq_req               : 1;  /**< [  4:  4](R/W/H) Link equalization request 16.0 GT/s */
+        uint32_t reserved_5_31         : 27;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_pl16g_status_s cn; */
+};
+typedef union bdk_pciercx_pl16g_status bdk_pciercx_pl16g_status_t;
+
+/* Return the PCICONFIGRC offset of PCIERC(a)_PL16G_STATUS; valid for
+   a <= 3 on CN9XXX, otherwise __bdk_csr_fatal() reports the bad index. */
+static inline uint64_t BDK_PCIERCX_PL16G_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL16G_STATUS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x1b4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_PL16G_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL16G_STATUS(a) bdk_pciercx_pl16g_status_t
+#define bustype_BDK_PCIERCX_PL16G_STATUS(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL16G_STATUS(a) "PCIERCX_PL16G_STATUS"
+#define busnum_BDK_PCIERCX_PL16G_STATUS(a) (a)
+#define arguments_BDK_PCIERCX_PL16G_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pl_ltr_latency
+ *
+ * PCIe RC LTR Latency Register
+ */
+union bdk_pciercx_pl_ltr_latency
+{
+ uint32_t u;
+ struct bdk_pciercx_pl_ltr_latency_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nslreq : 1; /**< [ 31: 31](R/W) No snoop latency requirement. */
+ uint32_t reserved_29_30 : 2;
+ uint32_t nsls : 3; /**< [ 28: 26](R/W) No snoop latency scale. */
+ uint32_t nslv : 10; /**< [ 25: 16](R/W) No snoop latency value. */
+ uint32_t slr : 1; /**< [ 15: 15](R/W) Snoop latency requirement. */
+ uint32_t reserved_13_14 : 2;
+ uint32_t sls : 3; /**< [ 12: 10](R/W) Snoop latency scale. */
+ uint32_t slv : 10; /**< [ 9: 0](R/W) Snoop latency value. */
+#else /* Word 0 - Little Endian */
+ uint32_t slv : 10; /**< [ 9: 0](R/W) Snoop latency value. */
+ uint32_t sls : 3; /**< [ 12: 10](R/W) Snoop latency scale. */
+ uint32_t reserved_13_14 : 2;
+ uint32_t slr : 1; /**< [ 15: 15](R/W) Snoop latency requirement. */
+ uint32_t nslv : 10; /**< [ 25: 16](R/W) No snoop latency value. */
+ uint32_t nsls : 3; /**< [ 28: 26](R/W) No snoop latency scale. */
+ uint32_t reserved_29_30 : 2;
+ uint32_t nslreq : 1; /**< [ 31: 31](R/W) No snoop latency requirement. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_pl_ltr_latency_s cn; */
+};
+typedef union bdk_pciercx_pl_ltr_latency bdk_pciercx_pl_ltr_latency_t;
+
+/* Config-space byte offset 0xb30 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PL_LTR_LATENCY(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PL_LTR_LATENCY(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xb30ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PL_LTR_LATENCY", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PL_LTR_LATENCY(a) bdk_pciercx_pl_ltr_latency_t
+#define bustype_BDK_PCIERCX_PL_LTR_LATENCY(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PL_LTR_LATENCY(a) "PCIERCX_PL_LTR_LATENCY"
+#define busnum_BDK_PCIERCX_PL_LTR_LATENCY(a) (a)
+#define arguments_BDK_PCIERCX_PL_LTR_LATENCY(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pm_cap_id
+ *
+ * PCIe RC Power Management Capability ID Register
+ */
+union bdk_pciercx_pm_cap_id
+{
+ uint32_t u;
+ struct bdk_pciercx_pm_cap_id_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pmes : 5; /**< [ 31: 27](RO/WRSL/H) PME_Support. A value of 0x0 for any bit indicates that the device (or function) is not
+ capable of generating PME messages while in that power state:
+
+ _ Bit 11: If set, PME Messages can be generated from D0.
+
+ _ Bit 12: If set, PME Messages can be generated from D1.
+
+ _ Bit 13: If set, PME Messages can be generated from D2.
+
+ _ Bit 14: If set, PME Messages can be generated from D3hot.
+
+ _ Bit 15: If set, PME Messages can be generated from D3cold.
+
+ This field is writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+ uint32_t d2s : 1; /**< [ 26: 26](RO/WRSL) D2 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t d1s : 1; /**< [ 25: 25](RO/WRSL) D1 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t auxc : 3; /**< [ 24: 22](RO/WRSL) AUX current, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t dsi : 1; /**< [ 21: 21](RO/WRSL) Device specific initialization (DSI), writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t reserved_20 : 1;
+ uint32_t pme_clock : 1; /**< [ 19: 19](RO) PME clock, hardwired to zero. */
+ uint32_t pmsv : 3; /**< [ 18: 16](RO/WRSL) Power management specification version, writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the PCIe capabilities list by default, writable
+ through PEM()_CFG_WR. For a root complex, should be changed by configuration software
+ to 0x50 (Enhanced Allocation). */
+ uint32_t pmcid : 8; /**< [ 7: 0](RO) Power management capability ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t pmcid : 8; /**< [ 7: 0](RO) Power management capability ID. */
+ uint32_t ncp : 8; /**< [ 15: 8](RO/WRSL) Next capability pointer. Points to the PCIe capabilities list by default, writable
+ through PEM()_CFG_WR. For a root complex, should be changed by configuration software
+ to 0x50 (Enhanced Allocation). */
+ uint32_t pmsv : 3; /**< [ 18: 16](RO/WRSL) Power management specification version, writable through
+ PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pme_clock : 1; /**< [ 19: 19](RO) PME clock, hardwired to zero. */
+ uint32_t reserved_20 : 1;
+ uint32_t dsi : 1; /**< [ 21: 21](RO/WRSL) Device specific initialization (DSI), writable through PEM()_CFG_WR.
+ However, the application must not change this field. */
+ uint32_t auxc : 3; /**< [ 24: 22](RO/WRSL) AUX current, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t d1s : 1; /**< [ 25: 25](RO/WRSL) D1 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t d2s : 1; /**< [ 26: 26](RO/WRSL) D2 support, writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pmes : 5; /**< [ 31: 27](RO/WRSL/H) PME_Support. A value of 0x0 for any bit indicates that the device (or function) is not
+ capable of generating PME messages while in that power state:
+
+ _ Bit 11: If set, PME Messages can be generated from D0.
+
+ _ Bit 12: If set, PME Messages can be generated from D1.
+
+ _ Bit 13: If set, PME Messages can be generated from D2.
+
+ _ Bit 14: If set, PME Messages can be generated from D3hot.
+
+ _ Bit 15: If set, PME Messages can be generated from D3cold.
+
+ This field is writable through PEM()_CFG_WR. However, the application must not
+ change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_pm_cap_id_s cn; */
+};
+typedef union bdk_pciercx_pm_cap_id bdk_pciercx_pm_cap_id_t;
+
+/* Config-space byte offset 0x40 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PM_CAP_ID(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PM_CAP_ID(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x40ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PM_CAP_ID", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PM_CAP_ID(a) bdk_pciercx_pm_cap_id_t
+#define bustype_BDK_PCIERCX_PM_CAP_ID(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PM_CAP_ID(a) "PCIERCX_PM_CAP_ID"
+#define busnum_BDK_PCIERCX_PM_CAP_ID(a) (a)
+#define arguments_BDK_PCIERCX_PM_CAP_ID(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pm_ctl
+ *
+ * PCIe RC Power Management Control and Status Register
+ */
+union bdk_pciercx_pm_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_pm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pmdia : 8; /**< [ 31: 24](RO) Data register for additional information (not supported). */
+ uint32_t bpccee : 1; /**< [ 23: 23](RO) Bus power/clock control enable, hardwired to zero. */
+ uint32_t bd3h : 1; /**< [ 22: 22](RO) B2/B3 support, hardwired to zero. */
+ uint32_t reserved_16_21 : 6;
+ uint32_t pmess : 1; /**< [ 15: 15](R/W1C/H) PME status. Indicates whether or not a previously enabled PME event occurred. */
+ uint32_t pmedsia : 2; /**< [ 14: 13](RO) Data scale (not supported). */
+ uint32_t pmds : 4; /**< [ 12: 9](RO) Data select (not supported). */
+ uint32_t pmeens : 1; /**< [ 8: 8](R/W) PME enable. A value of one indicates that the device is enabled to generate PME. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t nsr : 1; /**< [ 3: 3](RO/WRSL) No soft reset, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t reserved_2 : 1;
+ uint32_t ps : 2; /**< [ 1: 0](R/W/H) Power state. Controls the device power state:
+ 0x0 = D0.
+ 0x1 = D1.
+ 0x2 = D2.
+ 0x3 = D3.
+
+ The written value is ignored if the specific state is not supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t ps : 2; /**< [ 1: 0](R/W/H) Power state. Controls the device power state:
+ 0x0 = D0.
+ 0x1 = D1.
+ 0x2 = D2.
+ 0x3 = D3.
+
+ The written value is ignored if the specific state is not supported. */
+ uint32_t reserved_2 : 1;
+ uint32_t nsr : 1; /**< [ 3: 3](RO/WRSL) No soft reset, writable through PEM()_CFG_WR. However, the application must not change
+ this field. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t pmeens : 1; /**< [ 8: 8](R/W) PME enable. A value of one indicates that the device is enabled to generate PME. */
+ uint32_t pmds : 4; /**< [ 12: 9](RO) Data select (not supported). */
+ uint32_t pmedsia : 2; /**< [ 14: 13](RO) Data scale (not supported). */
+ uint32_t pmess : 1; /**< [ 15: 15](R/W1C/H) PME status. Indicates whether or not a previously enabled PME event occurred. */
+ uint32_t reserved_16_21 : 6;
+ uint32_t bd3h : 1; /**< [ 22: 22](RO) B2/B3 support, hardwired to zero. */
+ uint32_t bpccee : 1; /**< [ 23: 23](RO) Bus power/clock control enable, hardwired to zero. */
+ uint32_t pmdia : 8; /**< [ 31: 24](RO) Data register for additional information (not supported). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_pm_ctl_s cn; */
+};
+typedef union bdk_pciercx_pm_ctl bdk_pciercx_pm_ctl_t;
+
+/* Config-space byte offset 0x44 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PM_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PM_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x44ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PM_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PM_CTL(a) bdk_pciercx_pm_ctl_t
+#define bustype_BDK_PCIERCX_PM_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PM_CTL(a) "PCIERCX_PM_CTL"
+#define busnum_BDK_PCIERCX_PM_CTL(a) (a)
+#define arguments_BDK_PCIERCX_PM_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pmem
+ *
+ * PCIe RC Prefetchable Memory and Limit Register
+ */
+union bdk_pciercx_pmem
+{
+ uint32_t u;
+ struct bdk_pciercx_pmem_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t lmem_limit : 12; /**< [ 31: 20](R/W) Upper 12 bits of 32-bit prefetchable memory end address. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t mem64b : 1; /**< [ 16: 16](RO) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing. */
+ uint32_t lmem_base : 12; /**< [ 15: 4](R/W) Upper 12 bits of 32-bit prefetchable memory start address. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t mem64a : 1; /**< [ 0: 0](RO/WRSL) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing.
+
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to bit 16 of this register. */
+#else /* Word 0 - Little Endian */
+ uint32_t mem64a : 1; /**< [ 0: 0](RO/WRSL) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing.
+
+ This bit is writable through PEM()_CFG_WR. When the application writes to this bit
+ through PEM()_CFG_WR, the same value is written to bit 16 of this register. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t lmem_base : 12; /**< [ 15: 4](R/W) Upper 12 bits of 32-bit prefetchable memory start address. */
+ uint32_t mem64b : 1; /**< [ 16: 16](RO) 64-bit memory addressing:
+ 0 = 32-bit memory addressing.
+ 1 = 64-bit memory addressing. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t lmem_limit : 12; /**< [ 31: 20](R/W) Upper 12 bits of 32-bit prefetchable memory end address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_pmem_s cn; */
+};
+typedef union bdk_pciercx_pmem bdk_pciercx_pmem_t;
+
+/* Config-space byte offset 0x24 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PMEM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PMEM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x24ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PMEM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PMEM(a) bdk_pciercx_pmem_t
+#define bustype_BDK_PCIERCX_PMEM(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PMEM(a) "PCIERCX_PMEM"
+#define busnum_BDK_PCIERCX_PMEM(a) (a)
+#define arguments_BDK_PCIERCX_PMEM(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_port_ctl
+ *
+ * PCIe RC Port Link Control Register
+ */
+union bdk_pciercx_port_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_port_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t xlr_en : 1; /**< [ 27: 27](R/W) Transmit lane reversible enable. Internally reserved field, do not set. */
+ uint32_t ex_synch : 1; /**< [ 26: 26](R/W) Extended synch. Internally reserved field, do not set. */
+ uint32_t clcrc_en : 1; /**< [ 25: 25](R/W) Corrupt LCRC enable. Internally reserved field, do not set. */
+ uint32_t beacon_en : 1; /**< [ 24: 24](R/W) Beacon enable. Internally reserved field, do not set. */
+ uint32_t cle : 2; /**< [ 23: 22](RAZ) Reserved. */
+ uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+ 0x1 = x1.
+ 0x3 = x2.
+ 0x7 = x4.
+ 0xF = x8.
+ 0x1F = x16.
+ 0x3F = x32 (not supported).
+
+ This field indicates the maximum number of lanes supported by the PCIe port. The value can
+ be set less than 0x1F to limit the number of lanes that the PCIe will attempt to use. The
+ programming of this field needs to be done by software before enabling the link. See also
+ PCIERC_LINK_CAP[MLW].
+ The value of this field does not indicate the number of lanes in use by the PCIe. This
+ field sets the maximum number of lanes in the PCIe core that could be used. As per the
+ PCIe specification, the PCIe core can negotiate a smaller link width, so all of x16, x8,
+ x4, x2, and x1 are supported when
+ [LME] = 0x1F, for example. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+ uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes.
+ The scaling factor is configured by PCIERC_TIMER_CTL[FLMSF]. */
+ uint32_t ldis : 1; /**< [ 6: 6](R/W) Link disable. Internally reserved field, do not set. */
+ uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+ does not transmit InitFC DLLPs and does not establish a link. */
+ uint32_t reserved_4 : 1;
+ uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+ port only). */
+ uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+ sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+ loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+ mode, take the link through a reset sequence. */
+ uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+ uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a one to this bit, the PCI Express bus transmits
+ the message contained in the other message register. */
+#else /* Word 0 - Little Endian */
+ uint32_t omr : 1; /**< [ 0: 0](WO/H) Other message request. When software writes a one to this bit, the PCI Express bus transmits
+ the message contained in the other message register. */
+ uint32_t sd : 1; /**< [ 1: 1](R/W) Scramble disable. Setting this bit turns off data scrambling. */
+ uint32_t le : 1; /**< [ 2: 2](R/W) Loopback enable. Initiate loopback mode as a master. On a 0-\>1 transition, the PCIe core
+ sends TS ordered sets with the loopback bit set to cause the link partner to enter into
+ loopback mode as a slave. Normal transmission is not possible when LE=1. To exit loopback
+ mode, take the link through a reset sequence. */
+ uint32_t ra : 1; /**< [ 3: 3](R/W) Reset assert. Triggers a recovery and forces the LTSSM to the hot reset state (downstream
+ port only). */
+ uint32_t reserved_4 : 1;
+ uint32_t dllle : 1; /**< [ 5: 5](R/W) DLL link enable. Enables link initialization. If DLL link enable = 0, the PCI Express bus
+ does not transmit InitFC DLLPs and does not establish a link. */
+ uint32_t ldis : 1; /**< [ 6: 6](R/W) Link disable. Internally reserved field, do not set. */
+ uint32_t flm : 1; /**< [ 7: 7](R/W/H) Fast link mode. Sets all internal timers to fast mode for simulation purposes.
+ The scaling factor is configured by PCIERC_TIMER_CTL[FLMSF]. */
+ uint32_t link_rate : 4; /**< [ 11: 8](RO/H) Reserved. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t lme : 6; /**< [ 21: 16](R/W) Link mode enable set as follows:
+ 0x1 = x1.
+ 0x3 = x2.
+ 0x7 = x4.
+ 0xF = x8.
+ 0x1F = x16.
+ 0x3F = x32 (not supported).
+
+ This field indicates the maximum number of lanes supported by the PCIe port. The value can
+ be set less than 0x1F to limit the number of lanes that the PCIe will attempt to use. The
+ programming of this field needs to be done by software before enabling the link. See also
+ PCIERC_LINK_CAP[MLW].
+ The value of this field does not indicate the number of lanes in use by the PCIe. This
+ field sets the maximum number of lanes in the PCIe core that could be used. As per the
+ PCIe specification, the PCIe core can negotiate a smaller link width, so all of x16, x8,
+ x4, x2, and x1 are supported when
+ [LME] = 0x1F, for example. */
+ uint32_t cle : 2; /**< [ 23: 22](RAZ) Reserved. */
+ uint32_t beacon_en : 1; /**< [ 24: 24](R/W) Beacon enable. Internally reserved field, do not set. */
+ uint32_t clcrc_en : 1; /**< [ 25: 25](R/W) Corrupt LCRC enable. Internally reserved field, do not set. */
+ uint32_t ex_synch : 1; /**< [ 26: 26](R/W) Extended synch. Internally reserved field, do not set. */
+ uint32_t xlr_en : 1; /**< [ 27: 27](R/W) Transmit lane reversible enable. Internally reserved field, do not set. */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_port_ctl_s cn; */
+};
+typedef union bdk_pciercx_port_ctl bdk_pciercx_port_ctl_t;
+
+/* Config-space byte offset 0x710 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PORT_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PORT_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x710ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PORT_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PORT_CTL(a) bdk_pciercx_port_ctl_t
+#define bustype_BDK_PCIERCX_PORT_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PORT_CTL(a) "PCIERCX_PORT_CTL"
+#define busnum_BDK_PCIERCX_PORT_CTL(a) (a)
+#define arguments_BDK_PCIERCX_PORT_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_port_flink
+ *
+ * PCIe RC Port Force Link Register
+ */
+union bdk_pciercx_port_flink
+{
+ uint32_t u;
+ struct bdk_pciercx_port_flink_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t deskew_for_sris : 1; /**< [ 23: 23](R/W) Use the transitions from TS2 to logical idle symbol, SKP OS to logical idle symbol,
+ and FTS sequence to SKP OS to do deskew for SRIS instead of using received SKP OS
+ if [DESKEW_FOR_SRIS] is one. */
+ uint32_t reserved_22 : 1;
+ uint32_t link_state : 6; /**< [ 21: 16](R/W) Link state. The link state that the PCI Express bus is forced to when bit 15 (force link)
+ is set. State encoding:
+ 0x0 = DETECT_QUIET.
+ 0x1 = DETECT_ACT.
+ 0x2 = POLL_ACTIVE.
+ 0x3 = POLL_COMPLIANCE.
+ 0x4 = POLL_CONFIG.
+ 0x5 = PRE_DETECT_QUIET.
+ 0x6 = DETECT_WAIT.
+ 0x7 = CFG_LINKWD_START.
+ 0x8 = CFG_LINKWD_ACEPT.
+ 0x9 = CFG_LANENUM_WAIT.
+ 0xA = CFG_LANENUM_ACEPT.
+ 0xB = CFG_COMPLETE.
+ 0xC = CFG_IDLE.
+ 0xD = RCVRY_LOCK.
+ 0xE = RCVRY_SPEED.
+ 0xF = RCVRY_RCVRCFG.
+ 0x10 = RCVRY_IDLE.
+ 0x11 = L0.
+ 0x12 = L0S.
+ 0x13 = L123_SEND_EIDLE.
+ 0x14 = L1_IDLE.
+ 0x15 = L2_IDLE.
+ 0x16 = L2_WAKE.
+ 0x17 = DISABLED_ENTRY.
+ 0x18 = DISABLED_IDLE.
+ 0x19 = DISABLED.
+ 0x1A = LPBK_ENTRY.
+ 0x1B = LPBK_ACTIVE.
+ 0x1C = LPBK_EXIT.
+ 0x1D = LPBK_EXIT_TIMEOUT.
+ 0x1E = HOT_RESET_ENTRY.
+ 0x1F = HOT_RESET. */
+ uint32_t force_link : 1; /**< [ 15: 15](WO/H) Force link. Forces the link to the state specified by [LINK_STATE]. The force link
+ pulse triggers link renegotiation.
+ As the force link is a pulse, writing a 1 to it does trigger the forced link state event,
+ even though reading it always returns a 0. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t forced_ltssm : 4; /**< [ 11: 8](R/W) Forced link command. */
+ uint32_t link_num : 8; /**< [ 7: 0](R/W) Link number. */
+#else /* Word 0 - Little Endian */
+ uint32_t link_num : 8; /**< [ 7: 0](R/W) Link number. */
+ uint32_t forced_ltssm : 4; /**< [ 11: 8](R/W) Forced link command. */
+ uint32_t reserved_12_14 : 3;
+ uint32_t force_link : 1; /**< [ 15: 15](WO/H) Force link. Forces the link to the state specified by [LINK_STATE]. The force link
+ pulse triggers link renegotiation.
+ As the force link is a pulse, writing a 1 to it does trigger the forced link state event,
+ even though reading it always returns a 0. */
+ uint32_t link_state : 6; /**< [ 21: 16](R/W) Link state. The link state that the PCI Express bus is forced to when bit 15 (force link)
+ is set. State encoding:
+ 0x0 = DETECT_QUIET.
+ 0x1 = DETECT_ACT.
+ 0x2 = POLL_ACTIVE.
+ 0x3 = POLL_COMPLIANCE.
+ 0x4 = POLL_CONFIG.
+ 0x5 = PRE_DETECT_QUIET.
+ 0x6 = DETECT_WAIT.
+ 0x7 = CFG_LINKWD_START.
+ 0x8 = CFG_LINKWD_ACEPT.
+ 0x9 = CFG_LANENUM_WAIT.
+ 0xA = CFG_LANENUM_ACEPT.
+ 0xB = CFG_COMPLETE.
+ 0xC = CFG_IDLE.
+ 0xD = RCVRY_LOCK.
+ 0xE = RCVRY_SPEED.
+ 0xF = RCVRY_RCVRCFG.
+ 0x10 = RCVRY_IDLE.
+ 0x11 = L0.
+ 0x12 = L0S.
+ 0x13 = L123_SEND_EIDLE.
+ 0x14 = L1_IDLE.
+ 0x15 = L2_IDLE.
+ 0x16 = L2_WAKE.
+ 0x17 = DISABLED_ENTRY.
+ 0x18 = DISABLED_IDLE.
+ 0x19 = DISABLED.
+ 0x1A = LPBK_ENTRY.
+ 0x1B = LPBK_ACTIVE.
+ 0x1C = LPBK_EXIT.
+ 0x1D = LPBK_EXIT_TIMEOUT.
+ 0x1E = HOT_RESET_ENTRY.
+ 0x1F = HOT_RESET. */
+ uint32_t reserved_22 : 1;
+ uint32_t deskew_for_sris : 1; /**< [ 23: 23](R/W) Use the transitions from TS2 to logical idle symbol, SKP OS to logical idle symbol,
+ and FTS sequence to SKP OS to do deskew for SRIS instead of using received SKP OS
+ if [DESKEW_FOR_SRIS] is one. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_port_flink_s cn; */
+};
+typedef union bdk_pciercx_port_flink bdk_pciercx_port_flink_t;
+
+/* Config-space byte offset 0x708 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PORT_FLINK(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PORT_FLINK(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x708ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PORT_FLINK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PORT_FLINK(a) bdk_pciercx_port_flink_t
+#define bustype_BDK_PCIERCX_PORT_FLINK(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PORT_FLINK(a) "PCIERCX_PORT_FLINK"
+#define busnum_BDK_PCIERCX_PORT_FLINK(a) (a)
+#define arguments_BDK_PCIERCX_PORT_FLINK(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pre_base
+ *
+ * PCIe RC Prefetchable Base Upper 32 Bits Register
+ */
+union bdk_pciercx_pre_base
+{
+ uint32_t u;
+ struct bdk_pciercx_pre_base_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t umem_base : 32; /**< [ 31: 0](R/W) Upper 32 bits of base address of prefetchable memory space. Used only when 64-bit
+ prefetchable memory addressing is enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t umem_base : 32; /**< [ 31: 0](R/W) Upper 32 bits of base address of prefetchable memory space. Used only when 64-bit
+ prefetchable memory addressing is enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_pre_base_s cn; */
+};
+typedef union bdk_pciercx_pre_base bdk_pciercx_pre_base_t;
+
+/* Config-space byte offset 0x28 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PRE_BASE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PRE_BASE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x28ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PRE_BASE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PRE_BASE(a) bdk_pciercx_pre_base_t
+#define bustype_BDK_PCIERCX_PRE_BASE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PRE_BASE(a) "PCIERCX_PRE_BASE"
+#define busnum_BDK_PCIERCX_PRE_BASE(a) (a)
+#define arguments_BDK_PCIERCX_PRE_BASE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_pre_limit
+ *
+ * PCIe RC Prefetchable Limit Upper 32 Bits Register
+ */
+union bdk_pciercx_pre_limit
+{
+ uint32_t u;
+ struct bdk_pciercx_pre_limit_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t umem_limit : 32; /**< [ 31: 0](R/W) Upper 32 bits of limit address of prefetchable memory space. Used only when 64-bit
+ prefetchable memory addressing is enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t umem_limit : 32; /**< [ 31: 0](R/W) Upper 32 bits of limit address of prefetchable memory space. Used only when 64-bit
+ prefetchable memory addressing is enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_pre_limit_s cn; */
+};
+typedef union bdk_pciercx_pre_limit bdk_pciercx_pre_limit_t;
+
+/* Config-space byte offset 0x2c within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PRE_LIMIT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PRE_LIMIT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x2cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PRE_LIMIT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PRE_LIMIT(a) bdk_pciercx_pre_limit_t
+#define bustype_BDK_PCIERCX_PRE_LIMIT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PRE_LIMIT(a) "PCIERCX_PRE_LIMIT"
+#define busnum_BDK_PCIERCX_PRE_LIMIT(a) (a)
+#define arguments_BDK_PCIERCX_PRE_LIMIT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_cap
+ *
+ * PCIe RC Precision Time Measurement Capabilities Register
+ */
+union bdk_pciercx_ptm_cap
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_cap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t clkg : 8; /**< [ 15: 8](RO/WRSL) PTM local clock granularity. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t rtc : 1; /**< [ 2: 2](RO/WRSL) PTM root capable. */
+ uint32_t rsc : 1; /**< [ 1: 1](RO/WRSL) PTM responder capable. */
+ uint32_t rqc : 1; /**< [ 0: 0](RO/WRSL) PTM requester capable. */
+#else /* Word 0 - Little Endian */
+ uint32_t rqc : 1; /**< [ 0: 0](RO/WRSL) PTM requester capable. */
+ uint32_t rsc : 1; /**< [ 1: 1](RO/WRSL) PTM responder capable. */
+ uint32_t rtc : 1; /**< [ 2: 2](RO/WRSL) PTM root capable. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t clkg : 8; /**< [ 15: 8](RO/WRSL) PTM local clock granularity. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_cap_s cn; */
+};
+typedef union bdk_pciercx_ptm_cap bdk_pciercx_ptm_cap_t;
+
+/* Config-space byte offset 0x460 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PTM_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_CAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x460ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_CAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PTM_CAP(a) bdk_pciercx_ptm_cap_t
+#define bustype_BDK_PCIERCX_PTM_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_CAP(a) "PCIERCX_PTM_CAP"
+#define busnum_BDK_PCIERCX_PTM_CAP(a) (a)
+#define arguments_BDK_PCIERCX_PTM_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_ctl
+ *
+ * PCIe RC Precision Time Measurement Control Register
+ */
+union bdk_pciercx_ptm_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t eff_gran : 8; /**< [ 15: 8](R/W) PTM effective granularity. */
+ uint32_t reserved_2_7 : 6;
+ uint32_t rt_sel : 1; /**< [ 1: 1](R/W) PTM root select. When set this time source is the PTM root.
+ Writeable only when PCIERC_PTM_CAP[RTC] is set. */
+ uint32_t en : 1; /**< [ 0: 0](R/W) PTM enable. When set, this function is permitted to participate in the PTM mechanism. */
+#else /* Word 0 - Little Endian */
+ uint32_t en : 1; /**< [ 0: 0](R/W) PTM enable. When set, this function is permitted to participate in the PTM mechanism. */
+ uint32_t rt_sel : 1; /**< [ 1: 1](R/W) PTM root select. When set this time source is the PTM root.
+ Writeable only when PCIERC_PTM_CAP[RTC] is set. */
+ uint32_t reserved_2_7 : 6;
+ uint32_t eff_gran : 8; /**< [ 15: 8](R/W) PTM effective granularity. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_ctl_s cn; */
+};
+typedef union bdk_pciercx_ptm_ctl bdk_pciercx_ptm_ctl_t;
+
+/* Config-space byte offset 0x464 within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PTM_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x464ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PTM_CTL(a) bdk_pciercx_ptm_ctl_t
+#define bustype_BDK_PCIERCX_PTM_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_CTL(a) "PCIERCX_PTM_CTL"
+#define busnum_BDK_PCIERCX_PTM_CTL(a) (a)
+#define arguments_BDK_PCIERCX_PTM_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_ext_cap_hdr
+ *
+ * PCIe RC Precision Time Measurement Capability Header Register
+ */
+union bdk_pciercx_ptm_ext_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_ext_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_ext_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_ptm_ext_cap_hdr bdk_pciercx_ptm_ext_cap_hdr_t;
+
+/* Config-space byte offset 0x45c within PEM(a); valid only on CN9XXX with a <= 3,
+   any other chip/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_PTM_EXT_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_EXT_CAP_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x45cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_EXT_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_PTM_EXT_CAP_HDR(a) bdk_pciercx_ptm_ext_cap_hdr_t
+#define bustype_BDK_PCIERCX_PTM_EXT_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_EXT_CAP_HDR(a) "PCIERCX_PTM_EXT_CAP_HDR"
+#define busnum_BDK_PCIERCX_PTM_EXT_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_PTM_EXT_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_cap_hdr
+ *
+ * PCIe RC Precision Time Measurement Responder Capability Header Register
+ */
+union bdk_pciercx_ptm_res_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_cap_hdr bdk_pciercx_ptm_res_cap_hdr_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_CAP_HDR(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x468ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_CAP_HDR", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_CAP_HDR(a) bdk_pciercx_ptm_res_cap_hdr_t
+#define bustype_BDK_PCIERCX_PTM_RES_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_CAP_HDR(a) "PCIERCX_PTM_RES_CAP_HDR"
+#define busnum_BDK_PCIERCX_PTM_RES_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_ctl
+ *
+ * PCIe RC Precision Time Measurement Responder Control Register
+ */
+union bdk_pciercx_ptm_res_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t pres_ctx_vld : 1; /**< [ 0: 0](RO/WRSL/H) PTM responder control context valid - PTM local timing is valid.
+ A speed change or aux_clk_active will set this bit low. */
+#else /* Word 0 - Little Endian */
+ uint32_t pres_ctx_vld : 1; /**< [ 0: 0](RO/WRSL/H) PTM responder control context valid - PTM local timing is valid.
+ A speed change or aux_clk_active will set this bit low. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_ctl_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_ctl bdk_pciercx_ptm_res_ctl_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_CTL(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x470ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_CTL", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_CTL(a) bdk_pciercx_ptm_res_ctl_t
+#define bustype_BDK_PCIERCX_PTM_RES_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_CTL(a) "PCIERCX_PTM_RES_CTL"
+#define busnum_BDK_PCIERCX_PTM_RES_CTL(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_hdr
+ *
+ * PCIe RC Precision Time Measurement Responder Vendor Specific Header Register
+ */
+union bdk_pciercx_ptm_res_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vlen : 12; /**< [ 31: 20](RO/WRSL) PTM responder VSEC length. */
+ uint32_t vrev : 4; /**< [ 19: 16](RO/WRSL) PTM responder VSEC revision. */
+ uint32_t vid : 16; /**< [ 15: 0](RO/WRSL) PTM responder VSEC ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t vid : 16; /**< [ 15: 0](RO/WRSL) PTM responder VSEC ID. */
+ uint32_t vrev : 4; /**< [ 19: 16](RO/WRSL) PTM responder VSEC revision. */
+ uint32_t vlen : 12; /**< [ 31: 20](RO/WRSL) PTM responder VSEC length. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_hdr_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_hdr bdk_pciercx_ptm_res_hdr_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_HDR(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x46cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_HDR", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_HDR(a) bdk_pciercx_ptm_res_hdr_t
+#define bustype_BDK_PCIERCX_PTM_RES_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_HDR(a) "PCIERCX_PTM_RES_HDR"
+#define busnum_BDK_PCIERCX_PTM_RES_HDR(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_local_lsb
+ *
+ * PCIe RC PTM Responder Local Clock LSB Register
+ */
+union bdk_pciercx_ptm_res_local_lsb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_local_lsb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t clk_lsb : 32; /**< [ 31: 0](RO/WRSL) PTM responder local clock LSB. Lower 32 bits of local timer value. */
+#else /* Word 0 - Little Endian */
+ uint32_t clk_lsb : 32; /**< [ 31: 0](RO/WRSL) PTM responder local clock LSB. Lower 32 bits of local timer value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_local_lsb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_local_lsb bdk_pciercx_ptm_res_local_lsb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_LOCAL_LSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_LOCAL_LSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x478ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_LOCAL_LSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_LOCAL_LSB(a) bdk_pciercx_ptm_res_local_lsb_t
+#define bustype_BDK_PCIERCX_PTM_RES_LOCAL_LSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_LOCAL_LSB(a) "PCIERCX_PTM_RES_LOCAL_LSB"
+#define busnum_BDK_PCIERCX_PTM_RES_LOCAL_LSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_LOCAL_LSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_local_msb
+ *
+ * PCIe RC PTM Responder Local Clock MSB Register
+ */
+union bdk_pciercx_ptm_res_local_msb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_local_msb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t clk_msb : 32; /**< [ 31: 0](RO/WRSL) PTM responder local clock MSB. Upper 32 bits of local timer value. */
+#else /* Word 0 - Little Endian */
+ uint32_t clk_msb : 32; /**< [ 31: 0](RO/WRSL) PTM responder local clock MSB. Upper 32 bits of local timer value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_local_msb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_local_msb bdk_pciercx_ptm_res_local_msb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_LOCAL_MSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_LOCAL_MSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x47cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_LOCAL_MSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_LOCAL_MSB(a) bdk_pciercx_ptm_res_local_msb_t
+#define bustype_BDK_PCIERCX_PTM_RES_LOCAL_MSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_LOCAL_MSB(a) "PCIERCX_PTM_RES_LOCAL_MSB"
+#define busnum_BDK_PCIERCX_PTM_RES_LOCAL_MSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_LOCAL_MSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_rx_latency
+ *
+ * PCIe RC PTM Responder RX Latency Register
+ */
+union bdk_pciercx_ptm_res_rx_latency
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_rx_latency_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t rx_lat : 12; /**< [ 11: 0](R/W) PTM responder RX latency. */
+#else /* Word 0 - Little Endian */
+ uint32_t rx_lat : 12; /**< [ 11: 0](R/W) PTM responder RX latency. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_rx_latency_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_rx_latency bdk_pciercx_ptm_res_rx_latency_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_RX_LATENCY(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_RX_LATENCY(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x4a4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_RX_LATENCY", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_RX_LATENCY(a) bdk_pciercx_ptm_res_rx_latency_t
+#define bustype_BDK_PCIERCX_PTM_RES_RX_LATENCY(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_RX_LATENCY(a) "PCIERCX_PTM_RES_RX_LATENCY"
+#define busnum_BDK_PCIERCX_PTM_RES_RX_LATENCY(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_RX_LATENCY(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_status
+ *
+ * PCIe RC PTM Responder Status Register
+ */
+union bdk_pciercx_ptm_res_status
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t first_req_rcv : 1; /**< [ 1: 1](RO/H) PTM first request received. */
+ uint32_t ctxt_vld : 1; /**< [ 0: 0](RO/H) PTM responder status context valid. */
+#else /* Word 0 - Little Endian */
+ uint32_t ctxt_vld : 1; /**< [ 0: 0](RO/H) PTM responder status context valid. */
+ uint32_t first_req_rcv : 1; /**< [ 1: 1](RO/H) PTM first request received. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_status_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_status bdk_pciercx_ptm_res_status_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_STATUS(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x474ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_STATUS", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_STATUS(a) bdk_pciercx_ptm_res_status_t
+#define bustype_BDK_PCIERCX_PTM_RES_STATUS(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_STATUS(a) "PCIERCX_PTM_RES_STATUS"
+#define busnum_BDK_PCIERCX_PTM_RES_STATUS(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_t2_lsb
+ *
+ * PCIe RC PTM Responder T2 Timestamp LSB Register
+ */
+union bdk_pciercx_ptm_res_t2_lsb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_t2_lsb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ts_lsb : 32; /**< [ 31: 0](RO) PTM responder T2 timestamp LSB. Lower 32 bits of the T2 timestamp value. */
+#else /* Word 0 - Little Endian */
+ uint32_t ts_lsb : 32; /**< [ 31: 0](RO) PTM responder T2 timestamp LSB. Lower 32 bits of the T2 timestamp value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_t2_lsb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_t2_lsb bdk_pciercx_ptm_res_t2_lsb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_T2_LSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_T2_LSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x480ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_T2_LSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_T2_LSB(a) bdk_pciercx_ptm_res_t2_lsb_t
+#define bustype_BDK_PCIERCX_PTM_RES_T2_LSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_T2_LSB(a) "PCIERCX_PTM_RES_T2_LSB"
+#define busnum_BDK_PCIERCX_PTM_RES_T2_LSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_T2_LSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_t2_msb
+ *
+ * PCIe RC PTM Responder T2 Timestamp MSB Register
+ */
+union bdk_pciercx_ptm_res_t2_msb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_t2_msb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ts_msb : 32; /**< [ 31: 0](RO) PTM responder T2 timestamp MSB. Upper 32 bits of the T2 timestamp value. */
+#else /* Word 0 - Little Endian */
+ uint32_t ts_msb : 32; /**< [ 31: 0](RO) PTM responder T2 timestamp MSB. Upper 32 bits of the T2 timestamp value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_t2_msb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_t2_msb bdk_pciercx_ptm_res_t2_msb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_T2_MSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_T2_MSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x484ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_T2_MSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_T2_MSB(a) bdk_pciercx_ptm_res_t2_msb_t
+#define bustype_BDK_PCIERCX_PTM_RES_T2_MSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_T2_MSB(a) "PCIERCX_PTM_RES_T2_MSB"
+#define busnum_BDK_PCIERCX_PTM_RES_T2_MSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_T2_MSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_t2p_lsb
+ *
+ * PCIe RC PTM Responder T2 Previous Timestamp LSB Register
+ */
+union bdk_pciercx_ptm_res_t2p_lsb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_t2p_lsb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t t2p_lsb : 32; /**< [ 31: 0](RO) PTM responder T2 previous timestamp LSB. */
+#else /* Word 0 - Little Endian */
+ uint32_t t2p_lsb : 32; /**< [ 31: 0](RO) PTM responder T2 previous timestamp LSB. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_t2p_lsb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_t2p_lsb bdk_pciercx_ptm_res_t2p_lsb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_T2P_LSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_T2P_LSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x488ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_T2P_LSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_T2P_LSB(a) bdk_pciercx_ptm_res_t2p_lsb_t
+#define bustype_BDK_PCIERCX_PTM_RES_T2P_LSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_T2P_LSB(a) "PCIERCX_PTM_RES_T2P_LSB"
+#define busnum_BDK_PCIERCX_PTM_RES_T2P_LSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_T2P_LSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_t2p_msb
+ *
+ * PCIe RC PTM Responder T2 Previous Timestamp MSB Register
+ */
+union bdk_pciercx_ptm_res_t2p_msb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_t2p_msb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t t2p_msb : 32; /**< [ 31: 0](RO) PTM responder T2 previous timestamp MSB. */
+#else /* Word 0 - Little Endian */
+ uint32_t t2p_msb : 32; /**< [ 31: 0](RO) PTM responder T2 previous timestamp MSB. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_t2p_msb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_t2p_msb bdk_pciercx_ptm_res_t2p_msb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_T2P_MSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_T2P_MSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x48cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_T2P_MSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_T2P_MSB(a) bdk_pciercx_ptm_res_t2p_msb_t
+#define bustype_BDK_PCIERCX_PTM_RES_T2P_MSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_T2P_MSB(a) "PCIERCX_PTM_RES_T2P_MSB"
+#define busnum_BDK_PCIERCX_PTM_RES_T2P_MSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_T2P_MSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_t3_lsb
+ *
+ * PCIe RC PTM Responder T3 Timestamp LSB Register
+ */
+union bdk_pciercx_ptm_res_t3_lsb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_t3_lsb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t t3_lsb : 32; /**< [ 31: 0](RO/H) PTM responder T3 timestamp LSB. */
+#else /* Word 0 - Little Endian */
+ uint32_t t3_lsb : 32; /**< [ 31: 0](RO/H) PTM responder T3 timestamp LSB. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_t3_lsb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_t3_lsb bdk_pciercx_ptm_res_t3_lsb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_T3_LSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_T3_LSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x490ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_T3_LSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_T3_LSB(a) bdk_pciercx_ptm_res_t3_lsb_t
+#define bustype_BDK_PCIERCX_PTM_RES_T3_LSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_T3_LSB(a) "PCIERCX_PTM_RES_T3_LSB"
+#define busnum_BDK_PCIERCX_PTM_RES_T3_LSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_T3_LSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_t3_msb
+ *
+ * PCIe RC PTM Responder T3 Timestamp MSB Register
+ */
+union bdk_pciercx_ptm_res_t3_msb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_t3_msb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t t3 : 32; /**< [ 31: 0](RO/H) PTM responder T3 timestamp MSB. */
+#else /* Word 0 - Little Endian */
+ uint32_t t3 : 32; /**< [ 31: 0](RO/H) PTM responder T3 timestamp MSB. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_t3_msb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_t3_msb bdk_pciercx_ptm_res_t3_msb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_T3_MSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_T3_MSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x494ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_T3_MSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_T3_MSB(a) bdk_pciercx_ptm_res_t3_msb_t
+#define bustype_BDK_PCIERCX_PTM_RES_T3_MSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_T3_MSB(a) "PCIERCX_PTM_RES_T3_MSB"
+#define busnum_BDK_PCIERCX_PTM_RES_T3_MSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_T3_MSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_t3p_lsb
+ *
+ * PCIe RC PTM Responder T3 Previous Timestamp LSB Register
+ */
+union bdk_pciercx_ptm_res_t3p_lsb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_t3p_lsb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t t3p_lsb : 32; /**< [ 31: 0](RO/H) PTM responder T3 previous timestamp LSB. */
+#else /* Word 0 - Little Endian */
+ uint32_t t3p_lsb : 32; /**< [ 31: 0](RO/H) PTM responder T3 previous timestamp LSB. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_t3p_lsb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_t3p_lsb bdk_pciercx_ptm_res_t3p_lsb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_T3P_LSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_T3P_LSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x498ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_T3P_LSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_T3P_LSB(a) bdk_pciercx_ptm_res_t3p_lsb_t
+#define bustype_BDK_PCIERCX_PTM_RES_T3P_LSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_T3P_LSB(a) "PCIERCX_PTM_RES_T3P_LSB"
+#define busnum_BDK_PCIERCX_PTM_RES_T3P_LSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_T3P_LSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_t3p_msb
+ *
+ * PCIe RC PTM Responder T3 Previous Timestamp MSB Register
+ */
+union bdk_pciercx_ptm_res_t3p_msb
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_t3p_msb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t t3p_msb : 32; /**< [ 31: 0](RO/H) PTM responder T3 previous timestamp MSB. */
+#else /* Word 0 - Little Endian */
+ uint32_t t3p_msb : 32; /**< [ 31: 0](RO/H) PTM responder T3 previous timestamp MSB. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_t3p_msb_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_t3p_msb bdk_pciercx_ptm_res_t3p_msb_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_T3P_MSB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_T3P_MSB(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x49cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_T3P_MSB", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_T3P_MSB(a) bdk_pciercx_ptm_res_t3p_msb_t
+#define bustype_BDK_PCIERCX_PTM_RES_T3P_MSB(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_T3P_MSB(a) "PCIERCX_PTM_RES_T3P_MSB"
+#define busnum_BDK_PCIERCX_PTM_RES_T3P_MSB(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_T3P_MSB(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ptm_res_tx_latency
+ *
+ * PCIe RC PTM Responder TX Latency Register
+ */
+union bdk_pciercx_ptm_res_tx_latency
+{
+ uint32_t u;
+ struct bdk_pciercx_ptm_res_tx_latency_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t tx_lat : 12; /**< [ 11: 0](R/W) PTM responder TX latency. */
+#else /* Word 0 - Little Endian */
+ uint32_t tx_lat : 12; /**< [ 11: 0](R/W) PTM responder TX latency. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ptm_res_tx_latency_s cn; */
+};
+typedef union bdk_pciercx_ptm_res_tx_latency bdk_pciercx_ptm_res_tx_latency_t;
+
+static inline uint64_t BDK_PCIERCX_PTM_RES_TX_LATENCY(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_PTM_RES_TX_LATENCY(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x4a0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_PTM_RES_TX_LATENCY", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_PTM_RES_TX_LATENCY(a) bdk_pciercx_ptm_res_tx_latency_t
+#define bustype_BDK_PCIERCX_PTM_RES_TX_LATENCY(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_PTM_RES_TX_LATENCY(a) "PCIERCX_PTM_RES_TX_LATENCY"
+#define busnum_BDK_PCIERCX_PTM_RES_TX_LATENCY(a) (a)
+#define arguments_BDK_PCIERCX_PTM_RES_TX_LATENCY(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_queue_status
+ *
+ * PCIe RC Queue Status Register
+ */
+union bdk_pciercx_queue_status
+{
+ uint32_t u;
+ struct bdk_pciercx_queue_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t fcltoe : 1; /**< [ 31: 31](R/W) FC latency timer override enable. When this bit is set, the value in
+ PCIERC_QUEUE_STATUS[FCLTOV] will override the FC latency timer value that the
+ core calculates according to the PCIe specification. */
+ uint32_t reserved_29_30 : 2;
+ uint32_t fcltov : 13; /**< [ 28: 16](R/W) FC latency timer override value. When you set PCIERC_QUEUE_STATUS[FCLTOE], the
+ value in this field will override the FC latency timer value that the core
+ calculates according to the PCIe specification. */
+ uint32_t rsqre : 1; /**< [ 15: 15](R/W1C) Receive serialization queue read error. Indicates the serialization queue has
+ attempted to read an incorrectly formatted TLP. */
+ uint32_t rsqwe : 1; /**< [ 14: 14](R/W1C) Receive serialization queue write error. Indicates insufficient buffer space
+ available to write to the serialization queue. */
+ uint32_t rsqne : 1; /**< [ 13: 13](RO/H) Receive serialization queue not empty. Indicates there is data in the serialization queue. */
+ uint32_t reserved_4_12 : 9;
+ uint32_t rqof : 1; /**< [ 3: 3](R/W1C) Receive credit queue overflow. Indicates insufficient buffer space available to
+ write to the P/NP/CPL credit queue. */
+ uint32_t rqne : 1; /**< [ 2: 2](RO/H) Received queue not empty. Indicates there is data in one or more of the receive buffers. */
+ uint32_t trbne : 1; /**< [ 1: 1](RO/H) Transmit retry buffer not empty. Indicates that there is data in the transmit retry buffer. */
+ uint32_t rtlpfccnr : 1; /**< [ 0: 0](RO/H) Received TLP FC credits not returned. Indicates that the PCI Express bus has sent a TLP
+ but has not yet received an UpdateFC DLLP indicating that the credits for that TLP have
+ been restored by the receiver at the other end of the link. */
+#else /* Word 0 - Little Endian */
+ uint32_t rtlpfccnr : 1; /**< [ 0: 0](RO/H) Received TLP FC credits not returned. Indicates that the PCI Express bus has sent a TLP
+ but has not yet received an UpdateFC DLLP indicating that the credits for that TLP have
+ been restored by the receiver at the other end of the link. */
+ uint32_t trbne : 1; /**< [ 1: 1](RO/H) Transmit retry buffer not empty. Indicates that there is data in the transmit retry buffer. */
+ uint32_t rqne : 1; /**< [ 2: 2](RO/H) Received queue not empty. Indicates there is data in one or more of the receive buffers. */
+ uint32_t rqof : 1; /**< [ 3: 3](R/W1C) Receive credit queue overflow. Indicates insufficient buffer space available to
+ write to the P/NP/CPL credit queue. */
+ uint32_t reserved_4_12 : 9;
+ uint32_t rsqne : 1; /**< [ 13: 13](RO/H) Receive serialization queue not empty. Indicates there is data in the serialization queue. */
+ uint32_t rsqwe : 1; /**< [ 14: 14](R/W1C) Receive serialization queue write error. Indicates insufficient buffer space
+ available to write to the serialization queue. */
+ uint32_t rsqre : 1; /**< [ 15: 15](R/W1C) Receive serialization queue read error. Indicates the serialization queue has
+ attempted to read an incorrectly formatted TLP. */
+ uint32_t fcltov : 13; /**< [ 28: 16](R/W) FC latency timer override value. When you set PCIERC_QUEUE_STATUS[FCLTOE], the
+ value in this field will override the FC latency timer value that the core
+ calculates according to the PCIe specification. */
+ uint32_t reserved_29_30 : 2;
+ uint32_t fcltoe : 1; /**< [ 31: 31](R/W) FC latency timer override enable. When this bit is set, the value in
+ PCIERC_QUEUE_STATUS[FCLTOV] will override the FC latency timer value that the
+ core calculates according to the PCIe specification. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_queue_status_s cn; */
+};
+typedef union bdk_pciercx_queue_status bdk_pciercx_queue_status_t;
+
+static inline uint64_t BDK_PCIERCX_QUEUE_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_QUEUE_STATUS(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x73cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_QUEUE_STATUS", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_QUEUE_STATUS(a) bdk_pciercx_queue_status_t
+#define bustype_BDK_PCIERCX_QUEUE_STATUS(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_QUEUE_STATUS(a) "PCIERCX_QUEUE_STATUS"
+#define busnum_BDK_PCIERCX_QUEUE_STATUS(a) (a)
+#define arguments_BDK_PCIERCX_QUEUE_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_des_cap_hdr
+ *
+ * PCIe RC Vendor Specific RAS DES Capability Header Register
+ */
+union bdk_pciercx_ras_des_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_des_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_des_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_ras_des_cap_hdr bdk_pciercx_ras_des_cap_hdr_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_DES_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_DES_CAP_HDR(unsigned long a) /* a = PCIe RC (PEM) index; valid 0..3 on CN9XXX */
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x318ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_DES_CAP_HDR", 1, a, 0, 0, 0); /* invalid index/model; expected not to return */
+}
+
+#define typedef_BDK_PCIERCX_RAS_DES_CAP_HDR(a) bdk_pciercx_ras_des_cap_hdr_t
+#define bustype_BDK_PCIERCX_RAS_DES_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_DES_CAP_HDR(a) "PCIERCX_RAS_DES_CAP_HDR"
+#define busnum_BDK_PCIERCX_RAS_DES_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_RAS_DES_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_ec_ctl
+ *
+ * PCIe RC Vendor RAS DES Event Counter Control Register
+ */
+union bdk_pciercx_ras_ec_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_ec_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t ev_cntr_data_sel : 12; /**< [ 27: 16](R/W) Event counter data select. This field in conjunction with [EV_CNTR_LANE_SEL]
+ selects PCIERC_RAS_EC_DATA[EV_CNTR_DATA].
+ _ \<27:24\> = Group number (0..0x7).
+ _ \<23:16\> = Event number (0..0x13). */
+ uint32_t reserved_12_15 : 4;
+ uint32_t ev_cntr_lane_sel : 4; /**< [ 11: 8](R/W) Event counter lane select. This field in conjunction with [EV_CNTR_DATA_SEL]
+ indexes the event counter data returned in the PCIERC_RAS_EC_DATA[EV_CNTR_DATA].
+
+ 0x0-0x7 = Lane number.
+ 0x8-0xF = Reserved. */
+ uint32_t ev_cntr_stat : 1; /**< [ 7: 7](RO/H) Event counter status. Returns the enable status of the event counter
+ selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL]. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t ev_cntr_en : 3; /**< [ 4: 2](WO) Event counter enable. Enables/disables the event counter
+ selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL].
+ By default, all event counters are disabled. This field
+ always reads zeros.
+
+ 0x0 = No change.
+ 0x1 = Per event off.
+ 0x2 = No change.
+ 0x3 = Per event on.
+ 0x4 = No change.
+ 0x5 = All off.
+ 0x6 = No change.
+ 0x7 = All on. */
+ uint32_t ev_cntr_clr : 2; /**< [ 1: 0](WO) Event counter clear. Clears the event counters
+ selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL].
+ By default, all event counters are disabled. This field
+ always reads zeros.
+
+ 0x0 = No change.
+ 0x1 = Per clear.
+ 0x2 = No change.
+ 0x3 = All clear. */
+#else /* Word 0 - Little Endian */
+ uint32_t ev_cntr_clr : 2; /**< [ 1: 0](WO) Event counter clear. Clears the event counters
+ selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL].
+ By default, all event counters are disabled. This field
+ always reads zeros.
+
+ 0x0 = No change.
+ 0x1 = Per clear.
+ 0x2 = No change.
+ 0x3 = All clear. */
+ uint32_t ev_cntr_en : 3; /**< [ 4: 2](WO) Event counter enable. Enables/disables the event counter
+ selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL].
+ By default, all event counters are disabled. This field
+ always reads zeros.
+
+ 0x0 = No change.
+ 0x1 = Per event off.
+ 0x2 = No change.
+ 0x3 = Per event on.
+ 0x4 = No change.
+ 0x5 = All off.
+ 0x6 = No change.
+ 0x7 = All on. */
+ uint32_t reserved_5_6 : 2;
+ uint32_t ev_cntr_stat : 1; /**< [ 7: 7](RO/H) Event counter status. Returns the enable status of the event counter
+ selected by [EV_CNTR_DATA_SEL] and [EV_CNTR_LANE_SEL]. */
+ uint32_t ev_cntr_lane_sel : 4; /**< [ 11: 8](R/W) Event counter lane select. This field in conjunction with [EV_CNTR_DATA_SEL]
+ indexes the event counter data returned in the PCIERC_RAS_EC_DATA[EV_CNTR_DATA].
+
+ 0x0-0x7 = Lane number.
+ 0x8-0xF = Reserved. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t ev_cntr_data_sel : 12; /**< [ 27: 16](R/W) Event counter data select. This field in conjunction with [EV_CNTR_LANE_SEL]
+ selects PCIERC_RAS_EC_DATA[EV_CNTR_DATA].
+ _ \<27:24\> = Group number (0..0x7).
+ _ \<23:16\> = Event number (0..0x13). */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_ec_ctl_s cn; */
+};
+typedef union bdk_pciercx_ras_ec_ctl bdk_pciercx_ras_ec_ctl_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EC_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EC_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x320ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EC_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EC_CTL(a) bdk_pciercx_ras_ec_ctl_t
+#define bustype_BDK_PCIERCX_RAS_EC_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EC_CTL(a) "PCIERCX_RAS_EC_CTL"
+#define busnum_BDK_PCIERCX_RAS_EC_CTL(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EC_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_ec_data
+ *
+ * PCIe RC Vendor RAS DES Data Register
+ */
+union bdk_pciercx_ras_ec_data
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_ec_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ev_cntr_data : 32; /**< [ 31: 0](RO) Event counter data. This field returns data selected by
+ PCIERC_RAS_EC_CTL[EV_CNTR_DATA_SEL]
+ and PCIERC_RAS_EC_CTL[EV_CNTR_LANE_SEL]. */
+#else /* Word 0 - Little Endian */
+ uint32_t ev_cntr_data : 32; /**< [ 31: 0](RO) Event counter data. This field returns data selected by
+ PCIERC_RAS_EC_CTL[EV_CNTR_DATA_SEL]
+ and PCIERC_RAS_EC_CTL[EV_CNTR_LANE_SEL]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_ec_data_s cn; */
+};
+typedef union bdk_pciercx_ras_ec_data bdk_pciercx_ras_ec_data_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EC_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EC_DATA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x324ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EC_DATA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EC_DATA(a) bdk_pciercx_ras_ec_data_t
+#define bustype_BDK_PCIERCX_RAS_EC_DATA(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EC_DATA(a) "PCIERCX_RAS_EC_DATA"
+#define busnum_BDK_PCIERCX_RAS_EC_DATA(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EC_DATA(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl0
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 0 (CRC) Register
+ */
+union bdk_pciercx_ras_einj_ctl0
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t einj0_crc_type : 4; /**< [ 11: 8](R/W) Error injection type. Selects the type of CRC error tp in inserted.
+
+ TX path:
+ 0x0 = New TLP's LCRC error injection.
+ 0x1 = 16bCRC error injection of ACK/NAK DLLP.
+ 0x2 = 16bCRC error injection of Update-FC DLLP.
+ 0x3 = New TLP's ECRC error injection.
+ 0x4 = TLP's FCRC error injection (128b/130b).
+ 0x5 = Parity error of TSOS (128b/130b).
+ 0x6 = Parity error of SKPOS (128b/130b).
+ 0x7 = Reserved.
+
+ RX path:
+ 0x8 = LCRC error injection.
+ 0x9 = ECRC error injection.
+ 0xA - 0xF = Reserved. */
+ uint32_t einj0_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ0_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ0_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ0_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj0_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ0_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ0_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ0_EN] is cleared. */
+ uint32_t einj0_crc_type : 4; /**< [ 11: 8](R/W) Error injection type. Selects the type of CRC error tp in inserted.
+
+ TX path:
+ 0x0 = New TLP's LCRC error injection.
+ 0x1 = 16bCRC error injection of ACK/NAK DLLP.
+ 0x2 = 16bCRC error injection of Update-FC DLLP.
+ 0x3 = New TLP's ECRC error injection.
+ 0x4 = TLP's FCRC error injection (128b/130b).
+ 0x5 = Parity error of TSOS (128b/130b).
+ 0x6 = Parity error of SKPOS (128b/130b).
+ 0x7 = Reserved.
+
+ RX path:
+ 0x8 = LCRC error injection.
+ 0x9 = ECRC error injection.
+ 0xA - 0xF = Reserved. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl0_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl0 bdk_pciercx_ras_einj_ctl0_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x34cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL0(a) bdk_pciercx_ras_einj_ctl0_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL0(a) "PCIERCX_RAS_EINJ_CTL0"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL0(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl1
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 1 (SEQNUM) Register
+ */
+union bdk_pciercx_ras_einj_ctl1
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t einj1_bad_seqnum : 13; /**< [ 28: 16](R/W) Bad sequence number. Indicates the value to add/subtract
+ from the naturally-assigned sequence numbers. This value is
+ represented by two's complement.
+
+ 0x0FFF = +4095.
+
+ 0x0002 = +2.
+ 0x0001 = +1.
+ 0x0000 = 0.
+ 0x1FFF = -1.
+ 0x1FFE = -2.
+
+ 0x1001 = -4095. */
+ uint32_t reserved_9_15 : 7;
+ uint32_t einj1_seqnum_type : 1; /**< [ 8: 8](R/W) Sequence number type. Selects the type of sequence number.
+
+ 0x0 = Insertion of New TLP's SEQ error.
+ 0x1 = Insertion of ACK/NAK DLLP's SEQ error. */
+ uint32_t einj1_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ1_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ1_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ1_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj1_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ1_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ1_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ1_EN] is cleared. */
+ uint32_t einj1_seqnum_type : 1; /**< [ 8: 8](R/W) Sequence number type. Selects the type of sequence number.
+
+ 0x0 = Insertion of New TLP's SEQ error.
+ 0x1 = Insertion of ACK/NAK DLLP's SEQ error. */
+ uint32_t reserved_9_15 : 7;
+ uint32_t einj1_bad_seqnum : 13; /**< [ 28: 16](R/W) Bad sequence number. Indicates the value to add/subtract
+ from the naturally-assigned sequence numbers. This value is
+ represented by two's complement.
+
+ 0x0FFF = +4095.
+
+ 0x0002 = +2.
+ 0x0001 = +1.
+ 0x0000 = 0.
+ 0x1FFF = -1.
+ 0x1FFE = -2.
+
+ 0x1001 = -4095. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl1_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl1 bdk_pciercx_ras_einj_ctl1_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x350ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL1(a) bdk_pciercx_ras_einj_ctl1_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL1(a) "PCIERCX_RAS_EINJ_CTL1"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL1(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl2
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 2 (DLLP) Register
+ */
+union bdk_pciercx_ras_einj_ctl2
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_10_31 : 22;
+ uint32_t einj2_dllp_type : 2; /**< [ 9: 8](R/W) DLLP type. Selects the type of DLLP errors to be inserted.
+
+ 0x0 = ACK/NAK DLLP transmission block.
+ 0x1 = Update FC DLLP's transmission block.
+ 0x2 = Always transmission for NAK DLLP.
+ 0x3 = Reserved. */
+ uint32_t einj2_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ2_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ2_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ2_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj2_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ2_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ2_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ2_EN] is cleared. */
+ uint32_t einj2_dllp_type : 2; /**< [ 9: 8](R/W) DLLP type. Selects the type of DLLP errors to be inserted.
+
+ 0x0 = ACK/NAK DLLP transmission block.
+ 0x1 = Update FC DLLP's transmission block.
+ 0x2 = Always transmission for NAK DLLP.
+ 0x3 = Reserved. */
+ uint32_t reserved_10_31 : 22;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl2_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl2 bdk_pciercx_ras_einj_ctl2_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x354ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL2(a) bdk_pciercx_ras_einj_ctl2_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL2(a) "PCIERCX_RAS_EINJ_CTL2"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl3
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 3 (Symbol) Register
+ */
+union bdk_pciercx_ras_einj_ctl3
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_11_31 : 21;
+ uint32_t einj3_symbol_type : 3; /**< [ 10: 8](R/W) Error type, 8 b/10 b encoding - Mask K symbol.
+
+ 0x0 = Reserved.
+ 0x1 = COM/PAD(TS1 Order Set).
+ 0x2 = COM/PAD(TS2 Order Set).
+ 0x3 = COM/FTS(FTS Order Set).
+ 0x4 = COM/IDLE(E-Idle Order Set).
+ 0x5 = END/EDB Symbol.
+ 0x6 = STP/SDP Symbol.
+ 0x7 = COM/SKP(SKP Order set). */
+ uint32_t einj3_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ3_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ3_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ3_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj3_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ3_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ3_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ3_EN] is cleared. */
+ uint32_t einj3_symbol_type : 3; /**< [ 10: 8](R/W) Error type, 8 b/10 b encoding - Mask K symbol.
+
+ 0x0 = Reserved.
+ 0x1 = COM/PAD(TS1 Order Set).
+ 0x2 = COM/PAD(TS2 Order Set).
+ 0x3 = COM/FTS(FTS Order Set).
+ 0x4 = COM/IDLE(E-Idle Order Set).
+ 0x5 = END/EDB Symbol.
+ 0x6 = STP/SDP Symbol.
+ 0x7 = COM/SKP(SKP Order set). */
+ uint32_t reserved_11_31 : 21;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl3_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl3 bdk_pciercx_ras_einj_ctl3_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x358ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL3(a) bdk_pciercx_ras_einj_ctl3_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL3(a) "PCIERCX_RAS_EINJ_CTL3"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL3(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl4
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 4 (FC Credit) Register
+ */
+union bdk_pciercx_ras_einj_ctl4
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t einj4_bad_updfc_val : 13; /**< [ 28: 16](R/W) Bad update-FC credit value. Indicates the value to add/subtract
+ from the UpdateFC credit. The value is represented by two's
+ compliment.
+
+ 0x0FFF = +4095.
+
+ 0x0002 = +2.
+ 0x0001 = +1.
+ 0x0000 = 0.
+ 0x1FFF = -1.
+ 0x1FFE = -2.
+
+ 0x1001 = -4095. */
+ uint32_t reserved_15 : 1;
+ uint32_t einj4_vc_num : 3; /**< [ 14: 12](R/W) VC number. Indicates the target VC number. */
+ uint32_t reserved_11 : 1;
+ uint32_t einj4_vc_type : 3; /**< [ 10: 8](R/W) Update-FC type. Selects the credit type.
+
+ 0x0 = Posted TLP header credit value control.
+ 0x1 = Non-Posted TLP header credit value control.
+ 0x2 = Completion TLP header credit value control.
+ 0x3 = Reserved.
+ 0x4 = Posted TLP data credit value control.
+ 0x5 = Non-Posted TLP data credit value control.
+ 0x6 = Completion TLP data credit value control.
+ 0x7 = Reserved. */
+ uint32_t einj4_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ4_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ4_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ4_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj4_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ4_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ4_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ4_EN] is cleared. */
+ uint32_t einj4_vc_type : 3; /**< [ 10: 8](R/W) Update-FC type. Selects the credit type.
+
+ 0x0 = Posted TLP header credit value control.
+ 0x1 = Non-Posted TLP header credit value control.
+ 0x2 = Completion TLP header credit value control.
+ 0x3 = Reserved.
+ 0x4 = Posted TLP data credit value control.
+ 0x5 = Non-Posted TLP data credit value control.
+ 0x6 = Completion TLP data credit value control.
+ 0x7 = Reserved. */
+ uint32_t reserved_11 : 1;
+ uint32_t einj4_vc_num : 3; /**< [ 14: 12](R/W) VC number. Indicates the target VC number. */
+ uint32_t reserved_15 : 1;
+ uint32_t einj4_bad_updfc_val : 13; /**< [ 28: 16](R/W) Bad update-FC credit value. Indicates the value to add/subtract
+ from the UpdateFC credit. The value is represented by two's
+ compliment.
+
+ 0x0FFF = +4095.
+
+ 0x0002 = +2.
+ 0x0001 = +1.
+ 0x0000 = 0.
+ 0x1FFF = -1.
+ 0x1FFE = -2.
+
+ 0x1001 = -4095. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl4_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl4 bdk_pciercx_ras_einj_ctl4_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x35cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL4(a) bdk_pciercx_ras_einj_ctl4_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL4(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL4(a) "PCIERCX_RAS_EINJ_CTL4"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL4(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL4(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl5
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 5 (Specific TLP) Register
+ */
+union bdk_pciercx_ras_einj_ctl5
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t einj5_sp_tlp : 1; /**< [ 8: 8](R/W) Specified TLP. Selects the specified TLP to be inserted.
+
+ 0x0 = Generates duplicate TLPs by handling ACK DLLP as NAK DLLP.
+ 0x1 = Generates nullified TLP (Original TLP will be stored in retry buffer). */
+ uint32_t einj5_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ5_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ5_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ5_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj5_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ5_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ5_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ5_EN] is cleared. */
+ uint32_t einj5_sp_tlp : 1; /**< [ 8: 8](R/W) Specified TLP. Selects the specified TLP to be inserted.
+
+ 0x0 = Generates duplicate TLPs by handling ACK DLLP as NAK DLLP.
+ 0x1 = Generates nullified TLP (Original TLP will be stored in retry buffer). */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl5_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl5 bdk_pciercx_ras_einj_ctl5_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL5(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x360ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL5", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL5(a) bdk_pciercx_ras_einj_ctl5_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL5(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL5(a) "PCIERCX_RAS_EINJ_CTL5"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL5(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL5(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6chgp0
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Point H0) Register
+ */
+union bdk_pciercx_ras_einj_ctl6chgp0
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl6chgp0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_pt_h0 : 32; /**< [ 31: 0](R/W) Packet change point first DWORD.
+ Specifies which TX TLP header DWORD0 bits to replace
+ with the corresponding bits in PCIERC_RAS_EINJ_CTL6CHGV0[EINJ6_CHG_VAL_H0]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_pt_h0 : 32; /**< [ 31: 0](R/W) Packet change point first DWORD.
+ Specifies which TX TLP header DWORD0 bits to replace
+ with the corresponding bits in PCIERC_RAS_EINJ_CTL6CHGV0[EINJ6_CHG_VAL_H0]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl6chgp0_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6chgp0 bdk_pciercx_ras_einj_ctl6chgp0_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGP0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGP0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x384ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CHGP0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CHGP0(a) bdk_pciercx_ras_einj_ctl6chgp0_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CHGP0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CHGP0(a) "PCIERCX_RAS_EINJ_CTL6CHGP0"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CHGP0(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CHGP0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6chgp1
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Point H1) Register
+ */
+union bdk_pciercx_ras_einj_ctl6chgp1
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl6chgp1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_pt_h1 : 32; /**< [ 31: 0](R/W) Packet change point second DWORD.
+ Specifies which TX TLP header DWORD0 bits to replace
+ with the corresponding bits in PCIERC_RAS_EINJ_CTL6CHGV1[EINJ6_CHG_VAL_H1]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_pt_h1 : 32; /**< [ 31: 0](R/W) Packet change point second DWORD.
+ Specifies which TX TLP header DWORD0 bits to replace
+ with the corresponding bits in PCIERC_RAS_EINJ_CTL6CHGV1[EINJ6_CHG_VAL_H1]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl6chgp1_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6chgp1 bdk_pciercx_ras_einj_ctl6chgp1_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGP1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGP1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x388ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CHGP1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CHGP1(a) bdk_pciercx_ras_einj_ctl6chgp1_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CHGP1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CHGP1(a) "PCIERCX_RAS_EINJ_CTL6CHGP1"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CHGP1(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CHGP1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6chgp2
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Point H2) Register
+ */
+union bdk_pciercx_ras_einj_ctl6chgp2
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl6chgp2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_pt_h2 : 32; /**< [ 31: 0](R/W) Packet change point third DWORD.
+ Specifies which TX TLP header DWORD2 bits to replace
+ with the corresponding bits in PCIERC_RAS_EINJ_CTL6CHGV2[EINJ6_CHG_VAL_H2]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_pt_h2 : 32; /**< [ 31: 0](R/W) Packet change point third DWORD.
+ Specifies which TX TLP header DWORD2 bits to replace
+ with the corresponding bits in PCIERC_RAS_EINJ_CTL6CHGV2[EINJ6_CHG_VAL_H2]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl6chgp2_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6chgp2 bdk_pciercx_ras_einj_ctl6chgp2_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGP2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGP2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x38cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CHGP2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CHGP2(a) bdk_pciercx_ras_einj_ctl6chgp2_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CHGP2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CHGP2(a) "PCIERCX_RAS_EINJ_CTL6CHGP2"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CHGP2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CHGP2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6chgp3
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Point H3) Register
+ */
+union bdk_pciercx_ras_einj_ctl6chgp3
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl6chgp3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_pt_h3 : 32; /**< [ 31: 0](R/W) Packet change point first DWORD.
+ Specifies which TX TLP header DWORD3 bits to replace
+ with the corresponding bits in PCIERC_RAS_EINJ_CTL6CHGV3[EINJ6_CHG_VAL_H3]. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_pt_h3 : 32; /**< [ 31: 0](R/W) Packet change point first DWORD.
+ Specifies which TX TLP header DWORD3 bits to replace
+ with the corresponding bits in PCIERC_RAS_EINJ_CTL6CHGV3[EINJ6_CHG_VAL_H3]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl6chgp3_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6chgp3 bdk_pciercx_ras_einj_ctl6chgp3_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGP3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGP3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x390ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CHGP3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CHGP3(a) bdk_pciercx_ras_einj_ctl6chgp3_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CHGP3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CHGP3(a) "PCIERCX_RAS_EINJ_CTL6CHGP3"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CHGP3(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CHGP3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6chgv0
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Value H0) Register
+ */
+union bdk_pciercx_ras_einj_ctl6chgv0
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl6chgv0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_val_h0 : 32; /**< [ 31: 0](R/W) Packet change value first DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD0 bits defined in the PCIERC_RAS_EINJ_CTL6CHGP0[EINJ6_CHG_PT_H0].
+ Only applies when PCIERC_RAS_EINJ_CTL6PE[EINJ6_INV_CNTRL] is not set. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_val_h0 : 32; /**< [ 31: 0](R/W) Packet change value first DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD0 bits defined in the PCIERC_RAS_EINJ_CTL6CHGP0[EINJ6_CHG_PT_H0].
+ Only applies when PCIERC_RAS_EINJ_CTL6PE[EINJ6_INV_CNTRL] is not set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl6chgv0_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6chgv0 bdk_pciercx_ras_einj_ctl6chgv0_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGV0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGV0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x394ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CHGV0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CHGV0(a) bdk_pciercx_ras_einj_ctl6chgv0_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CHGV0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CHGV0(a) "PCIERCX_RAS_EINJ_CTL6CHGV0"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CHGV0(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CHGV0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6chgv1
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Value H1) Register
+ */
+union bdk_pciercx_ras_einj_ctl6chgv1
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl6chgv1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_val_h1 : 32; /**< [ 31: 0](R/W) Packet change value second DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD1 bits defined in the PCIERC_RAS_EINJ_CTL6CHGP1[EINJ6_CHG_PT_H1].
+ Only applies when PCIERC_RAS_EINJ_CTL6PE[EINJ6_INV_CNTRL] is not set. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_val_h1 : 32; /**< [ 31: 0](R/W) Packet change value second DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD1 bits defined in the PCIERC_RAS_EINJ_CTL6CHGP1[EINJ6_CHG_PT_H1].
+ Only applies when PCIERC_RAS_EINJ_CTL6PE[EINJ6_INV_CNTRL] is not set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl6chgv1_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6chgv1 bdk_pciercx_ras_einj_ctl6chgv1_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGV1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGV1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x398ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CHGV1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CHGV1(a) bdk_pciercx_ras_einj_ctl6chgv1_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CHGV1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CHGV1(a) "PCIERCX_RAS_EINJ_CTL6CHGV1"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CHGV1(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CHGV1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6chgv2
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Value H2) Register
+ */
+union bdk_pciercx_ras_einj_ctl6chgv2
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6chgv2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_chg_val_h2      : 32; /**< [ 31:  0](R/W) Packet change value third DWORD.
+                                                                 Specifies replacement values for the TX TLP header
+                                                                 DWORD2 bits defined in the PCIERC_RAS_EINJ_CTL6CHGP2[EINJ6_CHG_PT_H2].
+                                                                 Only applies when PCIERC_RAS_EINJ_CTL6PE[EINJ6_INV_CNTRL] is not set. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_chg_val_h2      : 32; /**< [ 31:  0](R/W) Packet change value third DWORD.
+                                                                 Specifies replacement values for the TX TLP header
+                                                                 DWORD2 bits defined in the PCIERC_RAS_EINJ_CTL6CHGP2[EINJ6_CHG_PT_H2].
+                                                                 Only applies when PCIERC_RAS_EINJ_CTL6PE[EINJ6_INV_CNTRL] is not set. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6chgv2_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6chgv2 bdk_pciercx_ras_einj_ctl6chgv2_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGV2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGV2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x39cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CHGV2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CHGV2(a) bdk_pciercx_ras_einj_ctl6chgv2_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CHGV2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CHGV2(a) "PCIERCX_RAS_EINJ_CTL6CHGV2"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CHGV2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CHGV2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6chgv3
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Change Value H3) Register
+ */
+union bdk_pciercx_ras_einj_ctl6chgv3
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl6chgv3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t einj6_chg_val_h3 : 32; /**< [ 31: 0](R/W) Packet change value fourth DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD3 bits defined in the PCIERC_RAS_EINJ_CTL6CHGP3[EINJ6_CHG_PT_H3].
+ Only applies when PCIERC_RAS_EINJ_CTL6PE[EINJ6_INV_CNTRL] is not set. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_chg_val_h3 : 32; /**< [ 31: 0](R/W) Packet change value fourth DWORD.
+ Specifies replacement values for the TX TLP header
+ DWORD3 bits defined in the PCIERC_RAS_EINJ_CTL6CHGP3[EINJ6_CHG_PT_H3].
+ Only applies when PCIERC_RAS_EINJ_CTL6PE[EINJ6_INV_CNTRL] is not set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl6chgv3_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6chgv3 bdk_pciercx_ras_einj_ctl6chgv3_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGV3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CHGV3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3a0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CHGV3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CHGV3(a) bdk_pciercx_ras_einj_ctl6chgv3_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CHGV3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CHGV3(a) "PCIERCX_RAS_EINJ_CTL6CHGV3"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CHGV3(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CHGV3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6cmpp0
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Point H0) Register
+ */
+union bdk_pciercx_ras_einj_ctl6cmpp0
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6cmpp0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_com_pt_h0       : 32; /**< [ 31:  0](R/W) Packet compare point first DWORD.
+                                                                 Specifies which TX TLP header DWORD0 bits to compare
+                                                                 with the corresponding bits in PCIERC_RAS_EINJ_CTL6CMPV0[EINJ6_COM_VAL_H0].
+                                                                 When all specified bits (in the TX TLP header and
+                                                                 PCIERC_RAS_EINJ_CTL6CMPV0[EINJ6_COM_VAL_H0]) match, an error is inserted into the TLP. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_com_pt_h0       : 32; /**< [ 31:  0](R/W) Packet compare point first DWORD.
+                                                                 Specifies which TX TLP header DWORD0 bits to compare
+                                                                 with the corresponding bits in PCIERC_RAS_EINJ_CTL6CMPV0[EINJ6_COM_VAL_H0].
+                                                                 When all specified bits (in the TX TLP header and
+                                                                 PCIERC_RAS_EINJ_CTL6CMPV0[EINJ6_COM_VAL_H0]) match, an error is inserted into the TLP. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6cmpp0_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6cmpp0 bdk_pciercx_ras_einj_ctl6cmpp0_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPP0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPP0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x364ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CMPP0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CMPP0(a) bdk_pciercx_ras_einj_ctl6cmpp0_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CMPP0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CMPP0(a) "PCIERCX_RAS_EINJ_CTL6CMPP0"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CMPP0(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CMPP0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6cmpp1
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Point H1) Register
+ */
+union bdk_pciercx_ras_einj_ctl6cmpp1
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6cmpp1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_com_pt_h1       : 32; /**< [ 31:  0](R/W) Packet compare point second DWORD.
+                                                                 Specifies which TX TLP header DWORD1 bits to compare
+                                                                 with the corresponding bits in PCIERC_RAS_EINJ_CTL6CMPV1[EINJ6_COM_VAL_H1].
+                                                                 When all specified bits (in the TX TLP header and
+                                                                 PCIERC_RAS_EINJ_CTL6CMPV1[EINJ6_COM_VAL_H1]) match, an error is inserted into the TLP. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_com_pt_h1       : 32; /**< [ 31:  0](R/W) Packet compare point second DWORD.
+                                                                 Specifies which TX TLP header DWORD1 bits to compare
+                                                                 with the corresponding bits in PCIERC_RAS_EINJ_CTL6CMPV1[EINJ6_COM_VAL_H1].
+                                                                 When all specified bits (in the TX TLP header and
+                                                                 PCIERC_RAS_EINJ_CTL6CMPV1[EINJ6_COM_VAL_H1]) match, an error is inserted into the TLP. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6cmpp1_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6cmpp1 bdk_pciercx_ras_einj_ctl6cmpp1_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPP1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPP1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x368ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CMPP1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CMPP1(a) bdk_pciercx_ras_einj_ctl6cmpp1_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CMPP1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CMPP1(a) "PCIERCX_RAS_EINJ_CTL6CMPP1"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CMPP1(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CMPP1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6cmpp2
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Point H2) Register
+ */
+union bdk_pciercx_ras_einj_ctl6cmpp2
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6cmpp2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_com_pt_h2       : 32; /**< [ 31:  0](R/W) Packet compare point third DWORD.
+                                                                 Specifies which TX TLP header DWORD2 bits to compare
+                                                                 with the corresponding bits in PCIERC_RAS_EINJ_CTL6CMPV2[EINJ6_COM_VAL_H2].
+                                                                 When all specified bits (in the TX TLP header and
+                                                                 PCIERC_RAS_EINJ_CTL6CMPV2[EINJ6_COM_VAL_H2]) match, an error is inserted into the TLP. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_com_pt_h2       : 32; /**< [ 31:  0](R/W) Packet compare point third DWORD.
+                                                                 Specifies which TX TLP header DWORD2 bits to compare
+                                                                 with the corresponding bits in PCIERC_RAS_EINJ_CTL6CMPV2[EINJ6_COM_VAL_H2].
+                                                                 When all specified bits (in the TX TLP header and
+                                                                 PCIERC_RAS_EINJ_CTL6CMPV2[EINJ6_COM_VAL_H2]) match, an error is inserted into the TLP. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6cmpp2_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6cmpp2 bdk_pciercx_ras_einj_ctl6cmpp2_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPP2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPP2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x36cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CMPP2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CMPP2(a) bdk_pciercx_ras_einj_ctl6cmpp2_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CMPP2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CMPP2(a) "PCIERCX_RAS_EINJ_CTL6CMPP2"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CMPP2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CMPP2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6cmpp3
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Point H3) Register
+ */
+union bdk_pciercx_ras_einj_ctl6cmpp3
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6cmpp3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_com_pt_h3       : 32; /**< [ 31:  0](R/W) Packet compare point fourth DWORD.
+                                                                 Specifies which TX TLP header DWORD3 bits to compare
+                                                                 with the corresponding bits in PCIERC_RAS_EINJ_CTL6CMPV3[EINJ6_COM_VAL_H3].
+                                                                 When all specified bits (in the TX TLP header and
+                                                                 PCIERC_RAS_EINJ_CTL6CMPV3[EINJ6_COM_VAL_H3]) match, an error is inserted into the TLP. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_com_pt_h3       : 32; /**< [ 31:  0](R/W) Packet compare point fourth DWORD.
+                                                                 Specifies which TX TLP header DWORD3 bits to compare
+                                                                 with the corresponding bits in PCIERC_RAS_EINJ_CTL6CMPV3[EINJ6_COM_VAL_H3].
+                                                                 When all specified bits (in the TX TLP header and
+                                                                 PCIERC_RAS_EINJ_CTL6CMPV3[EINJ6_COM_VAL_H3]) match, an error is inserted into the TLP. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6cmpp3_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6cmpp3 bdk_pciercx_ras_einj_ctl6cmpp3_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPP3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPP3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x370ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CMPP3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CMPP3(a) bdk_pciercx_ras_einj_ctl6cmpp3_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CMPP3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CMPP3(a) "PCIERCX_RAS_EINJ_CTL6CMPP3"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CMPP3(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CMPP3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6cmpv0
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Value H0) Register
+ */
+union bdk_pciercx_ras_einj_ctl6cmpv0
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6cmpv0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_com_val_h0      : 32; /**< [ 31:  0](R/W) Packet compare value first DWORD.
+                                                                 Specifies the value to compare against the TX TLP header
+                                                                 DWORD0 bits specified in PCIERC_RAS_EINJ_CTL6CMPP0[EINJ6_COM_PT_H0]. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_com_val_h0      : 32; /**< [ 31:  0](R/W) Packet compare value first DWORD.
+                                                                 Specifies the value to compare against the TX TLP header
+                                                                 DWORD0 bits specified in PCIERC_RAS_EINJ_CTL6CMPP0[EINJ6_COM_PT_H0]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6cmpv0_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6cmpv0 bdk_pciercx_ras_einj_ctl6cmpv0_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPV0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPV0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x374ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CMPV0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CMPV0(a) bdk_pciercx_ras_einj_ctl6cmpv0_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CMPV0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CMPV0(a) "PCIERCX_RAS_EINJ_CTL6CMPV0"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CMPV0(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CMPV0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6cmpv1
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Value H1) Register
+ */
+union bdk_pciercx_ras_einj_ctl6cmpv1
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6cmpv1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_com_val_h1      : 32; /**< [ 31:  0](R/W) Packet compare value second DWORD.
+                                                                 Specifies the value to compare against the TX TLP header
+                                                                 DWORD1 bits specified in PCIERC_RAS_EINJ_CTL6CMPP1[EINJ6_COM_PT_H1]. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_com_val_h1      : 32; /**< [ 31:  0](R/W) Packet compare value second DWORD.
+                                                                 Specifies the value to compare against the TX TLP header
+                                                                 DWORD1 bits specified in PCIERC_RAS_EINJ_CTL6CMPP1[EINJ6_COM_PT_H1]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6cmpv1_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6cmpv1 bdk_pciercx_ras_einj_ctl6cmpv1_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPV1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPV1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x378ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CMPV1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CMPV1(a) bdk_pciercx_ras_einj_ctl6cmpv1_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CMPV1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CMPV1(a) "PCIERCX_RAS_EINJ_CTL6CMPV1"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CMPV1(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CMPV1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6cmpv2
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Value H2) Register
+ */
+union bdk_pciercx_ras_einj_ctl6cmpv2
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6cmpv2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_com_val_h2      : 32; /**< [ 31:  0](R/W) Packet compare value third DWORD.
+                                                                 Specifies the value to compare against the TX TLP header
+                                                                 DWORD2 bits specified in the PCIERC_RAS_EINJ_CTL6CMPP2[EINJ6_COM_PT_H2]. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_com_val_h2      : 32; /**< [ 31:  0](R/W) Packet compare value third DWORD.
+                                                                 Specifies the value to compare against the TX TLP header
+                                                                 DWORD2 bits specified in the PCIERC_RAS_EINJ_CTL6CMPP2[EINJ6_COM_PT_H2]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6cmpv2_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6cmpv2 bdk_pciercx_ras_einj_ctl6cmpv2_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPV2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPV2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x37cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CMPV2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CMPV2(a) bdk_pciercx_ras_einj_ctl6cmpv2_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CMPV2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CMPV2(a) "PCIERCX_RAS_EINJ_CTL6CMPV2"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CMPV2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CMPV2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6cmpv3
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Compare Value H3) Register
+ */
+union bdk_pciercx_ras_einj_ctl6cmpv3
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_ctl6cmpv3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t einj6_com_val_h3      : 32; /**< [ 31:  0](R/W) Packet compare value fourth DWORD.
+                                                                 Specifies the value to compare against the TX TLP header
+                                                                 DWORD3 bits specified in the PCIERC_RAS_EINJ_CTL6CMPP3[EINJ6_COM_PT_H3]. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj6_com_val_h3      : 32; /**< [ 31:  0](R/W) Packet compare value fourth DWORD.
+                                                                 Specifies the value to compare against the TX TLP header
+                                                                 DWORD3 bits specified in the PCIERC_RAS_EINJ_CTL6CMPP3[EINJ6_COM_PT_H3]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_ctl6cmpv3_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6cmpv3 bdk_pciercx_ras_einj_ctl6cmpv3_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPV3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6CMPV3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x380ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6CMPV3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6CMPV3(a) bdk_pciercx_ras_einj_ctl6cmpv3_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6CMPV3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6CMPV3(a) "PCIERCX_RAS_EINJ_CTL6CMPV3"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6CMPV3(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6CMPV3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_ctl6pe
+ *
+ * PCIe RC Vendor RAS DES Error Injection Control 6 (Packet Error) Register
+ */
+union bdk_pciercx_ras_einj_ctl6pe
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_einj_ctl6pe_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t einj6_pkt_typ : 3; /**< [ 11: 9](R/W) Packet type. Selects the TLP packets to inject errors into.
+
+ 0x0 = TLP header.
+ 0x1 = TLP prefix 1st 4-DWORDs.
+ 0x2 = TLP prefix 2nd 4-DWORDs.
+ 0x3 - 0x7 = Reserved. */
+ uint32_t einj6_inv_cntrl : 1; /**< [ 8: 8](R/W) Inverted error injection control.
+
+ 0x0 = EINJ6_CHG_VAL_H[0/1/2/3] is used to replace bits specified by
+ EINJ6_CHG_PT_H[0/1/2/3].
+ 0x1 = EINJ6_CHG_VAL_H[0/1/2/3] is ignored and inverts bits specified by
+ EINJ6_CHG_PT_H[0/1/2/3]. */
+ uint32_t einj6_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ6_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ6_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ6_EN] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint32_t einj6_cnt : 8; /**< [ 7: 0](R/W) Error injection count. Indicates the number of errors.
+ This register is decremented when errors are inserted.
+
+ If the counter value is 0x1 and error is inserted,
+ PCIERC_RAS_EINJ_EN[EINJ6_EN] returns zero.
+
+ If the counter value is 0x0 and PCIERC_RAS_EINJ_EN[EINJ6_EN] is set,
+ errors are inserted until PCIERC_RAS_EINJ_EN[EINJ6_EN] is cleared. */
+ uint32_t einj6_inv_cntrl : 1; /**< [ 8: 8](R/W) Inverted error injection control.
+
+ 0x0 = EINJ6_CHG_VAL_H[0/1/2/3] is used to replace bits specified by
+ EINJ6_CHG_PT_H[0/1/2/3].
+ 0x1 = EINJ6_CHG_VAL_H[0/1/2/3] is ignored and inverts bits specified by
+ EINJ6_CHG_PT_H[0/1/2/3]. */
+ uint32_t einj6_pkt_typ : 3; /**< [ 11: 9](R/W) Packet type. Selects the TLP packets to inject errors into.
+
+ 0x0 = TLP header.
+ 0x1 = TLP prefix 1st 4-DWORDs.
+ 0x2 = TLP prefix 2nd 4-DWORDs.
+ 0x3 - 0x7 = Reserved. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_einj_ctl6pe_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_ctl6pe bdk_pciercx_ras_einj_ctl6pe_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6PE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_CTL6PE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3a4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_EINJ_CTL6PE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_CTL6PE(a) bdk_pciercx_ras_einj_ctl6pe_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_CTL6PE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_CTL6PE(a) "PCIERCX_RAS_EINJ_CTL6PE"
+#define busnum_BDK_PCIERCX_RAS_EINJ_CTL6PE(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_CTL6PE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_einj_en
+ *
+ * PCIe RC Vendor RAS DES Error Injection Enable Register
+ */
+union bdk_pciercx_ras_einj_en
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_einj_en_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_7_31         : 25;
+        uint32_t einj6_en              : 1;  /**< [  6:  6](R/W) Specific TLP error injection enable. Enables insertion of errors into the
+                                                                 packet selected. For more details, refer to PCIERC_RAS_EINJ_CTL6CMPP0. */
+        uint32_t einj5_en              : 1;  /**< [  5:  5](R/W) TLP duplicate/nullify error injection enable. Enables insertion of duplicate/nullified
+                                                                 TLPs. For more details, refer to PCIERC_RAS_EINJ_CTL5. */
+        uint32_t einj4_en              : 1;  /**< [  4:  4](R/W) FC credit update error injection enable. Enables insertion of errors into
+                                                                 Updated FCs. See PCIERC_RAS_EINJ_CTL4. */
+        uint32_t einj3_en              : 1;  /**< [  3:  3](R/W) Symbol datak mask or sync header error enable. Enables data masking of special
+                                                                 symbols or the breaking of the sync header. See PCIERC_RAS_EINJ_CTL3. */
+        uint32_t einj2_en              : 1;  /**< [  2:  2](R/W) DLLP error injection enable. Enables insertion of DLLP errors.
+                                                                 See PCIERC_RAS_EINJ_CTL2. */
+        uint32_t einj1_en              : 1;  /**< [  1:  1](R/W) Sequence number error injection enable. Enables insertion of errors into
+                                                                 sequence numbers.
+                                                                 See PCIERC_RAS_EINJ_CTL1. */
+        uint32_t einj0_en              : 1;  /**< [  0:  0](R/W) CRC error injection enable. Enables insertion of errors into various CRC.
+                                                                 See PCIERC_RAS_EINJ_CTL0. */
+#else /* Word 0 - Little Endian */
+        uint32_t einj0_en              : 1;  /**< [  0:  0](R/W) CRC error injection enable. Enables insertion of errors into various CRC.
+                                                                 See PCIERC_RAS_EINJ_CTL0. */
+        uint32_t einj1_en              : 1;  /**< [  1:  1](R/W) Sequence number error injection enable. Enables insertion of errors into
+                                                                 sequence numbers.
+                                                                 See PCIERC_RAS_EINJ_CTL1. */
+        uint32_t einj2_en              : 1;  /**< [  2:  2](R/W) DLLP error injection enable. Enables insertion of DLLP errors.
+                                                                 See PCIERC_RAS_EINJ_CTL2. */
+        uint32_t einj3_en              : 1;  /**< [  3:  3](R/W) Symbol datak mask or sync header error enable. Enables data masking of special
+                                                                 symbols or the breaking of the sync header. See PCIERC_RAS_EINJ_CTL3. */
+        uint32_t einj4_en              : 1;  /**< [  4:  4](R/W) FC credit update error injection enable. Enables insertion of errors into
+                                                                 Updated FCs. See PCIERC_RAS_EINJ_CTL4. */
+        uint32_t einj5_en              : 1;  /**< [  5:  5](R/W) TLP duplicate/nullify error injection enable. Enables insertion of duplicate/nullified
+                                                                 TLPs. For more details, refer to PCIERC_RAS_EINJ_CTL5. */
+        uint32_t einj6_en              : 1;  /**< [  6:  6](R/W) Specific TLP error injection enable. Enables insertion of errors into the
+                                                                 packet selected. For more details, refer to PCIERC_RAS_EINJ_CTL6CMPP0. */
+        uint32_t reserved_7_31         : 25;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_einj_en_s cn; */
+};
+typedef union bdk_pciercx_ras_einj_en bdk_pciercx_ras_einj_en_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_EN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_EINJ_EN(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x348ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_EINJ_EN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_EINJ_EN(a) bdk_pciercx_ras_einj_en_t
+#define bustype_BDK_PCIERCX_RAS_EINJ_EN(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_EINJ_EN(a) "PCIERCX_RAS_EINJ_EN"
+#define busnum_BDK_PCIERCX_RAS_EINJ_EN(a) (a)
+#define arguments_BDK_PCIERCX_RAS_EINJ_EN(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_hdr
+ *
+ * PCIe RC Vendor RAS DES Header Register
+ */
+union bdk_pciercx_ras_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vsec_length : 12; /**< [ 31: 20](RO) VSEC length. */
+ uint32_t vsec_rev : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t vsec_id : 16; /**< [ 15: 0](RO) VSEC ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t vsec_id : 16; /**< [ 15: 0](RO) VSEC ID. */
+ uint32_t vsec_rev : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t vsec_length : 12; /**< [ 31: 20](RO) VSEC length. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_hdr_s cn; */
+};
+typedef union bdk_pciercx_ras_hdr bdk_pciercx_ras_hdr_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x31cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_HDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_HDR(a) bdk_pciercx_ras_hdr_t
+#define bustype_BDK_PCIERCX_RAS_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_HDR(a) "PCIERCX_RAS_HDR"
+#define busnum_BDK_PCIERCX_RAS_HDR(a) (a)
+#define arguments_BDK_PCIERCX_RAS_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_ctl1
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Control 1 Register
+ */
+union bdk_pciercx_ras_sd_ctl1
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_sd_ctl1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_24_31        : 8;
+        uint32_t lp_intv               : 2;  /**< [ 23: 22](R/W) Low power entry interval time.
+                                                                 Interval time that the core starts monitoring RXELECIDLE
+                                                                 signal after L0s/L1/L2 entry. You should set the value
+                                                                 according to the latency from receiving EIOS to
+                                                                 RXELECIDLE assertion at the PHY.
+
+                                                                 0x0 = 40ns.
+                                                                 0x1 = 160ns.
+                                                                 0x2 = 320ns.
+                                                                 0x3 = 640ns. */
+        uint32_t tx_eios_num           : 2;  /**< [ 21: 20](R/W) Number of TX EIOS.
+                                                                 This register sets the number of transmit EIOS for L0s/L1
+                                                                 entry and disable/loopback/hot-reset exit. The core selects
+                                                                 the greater value between this register and the value defined
+                                                                 by the PCI-SIG specification.
+
+                                                                 Gen1 or Gen3
+                                                                 0x0 = 1.
+                                                                 0x1 = 4.
+                                                                 0x2 = 8.
+                                                                 0x3 = 16.
+
+                                                                 Gen2
+                                                                 0x0 = 2.
+                                                                 0x1 = 8.
+                                                                 0x2 = 16.
+                                                                 0x3 = 32. */
+        uint32_t reserved_17_19        : 3;
+        uint32_t force_detect_lane_en  : 1;  /**< [ 16: 16](R/W) Force detect lane enable.
+                                                                 When this bit is set, the core ignores receiver detection from
+                                                                 PHY during LTSSM detect state and uses
+                                                                 [FORCE_DETECT_LANE]. */
+        uint32_t force_detect_lane     : 16; /**< [ 15:  0](R/W) Force detect lane.
+                                                                 When set, the core
+                                                                 ignores receiver detection from PHY during LTSSM detect
+                                                                 state and uses this value instead.
+                                                                 0x0 = Lane0.
+                                                                 0x1 = Lane1.
+                                                                 0x2 = Lane2.
+
+                                                                 0x7 = Lane7. */
+#else /* Word 0 - Little Endian */
+        uint32_t force_detect_lane     : 16; /**< [ 15:  0](R/W) Force detect lane.
+                                                                 When set, the core
+                                                                 ignores receiver detection from PHY during LTSSM detect
+                                                                 state and uses this value instead.
+                                                                 0x0 = Lane0.
+                                                                 0x1 = Lane1.
+                                                                 0x2 = Lane2.
+
+                                                                 0x7 = Lane7. */
+        uint32_t force_detect_lane_en  : 1;  /**< [ 16: 16](R/W) Force detect lane enable.
+                                                                 When this bit is set, the core ignores receiver detection from
+                                                                 PHY during LTSSM detect state and uses
+                                                                 [FORCE_DETECT_LANE]. */
+        uint32_t reserved_17_19        : 3;
+        uint32_t tx_eios_num           : 2;  /**< [ 21: 20](R/W) Number of TX EIOS.
+                                                                 This register sets the number of transmit EIOS for L0s/L1
+                                                                 entry and disable/loopback/hot-reset exit. The core selects
+                                                                 the greater value between this register and the value defined
+                                                                 by the PCI-SIG specification.
+
+                                                                 Gen1 or Gen3
+                                                                 0x0 = 1.
+                                                                 0x1 = 4.
+                                                                 0x2 = 8.
+                                                                 0x3 = 16.
+
+                                                                 Gen2
+                                                                 0x0 = 2.
+                                                                 0x1 = 8.
+                                                                 0x2 = 16.
+                                                                 0x3 = 32. */
+        uint32_t lp_intv               : 2;  /**< [ 23: 22](R/W) Low power entry interval time.
+                                                                 Interval time that the core starts monitoring RXELECIDLE
+                                                                 signal after L0s/L1/L2 entry. You should set the value
+                                                                 according to the latency from receiving EIOS to
+                                                                 RXELECIDLE assertion at the PHY.
+
+                                                                 0x0 = 40ns.
+                                                                 0x1 = 160ns.
+                                                                 0x2 = 320ns.
+                                                                 0x3 = 640ns. */
+        uint32_t reserved_24_31        : 8;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_ctl1_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_ctl1 bdk_pciercx_ras_sd_ctl1_t;
+
+static inline uint64_t BDK_PCIERCX_RAS_SD_CTL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_CTL1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3b8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_CTL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RAS_SD_CTL1(a) bdk_pciercx_ras_sd_ctl1_t
+#define bustype_BDK_PCIERCX_RAS_SD_CTL1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_CTL1(a) "PCIERCX_RAS_SD_CTL1"
+#define busnum_BDK_PCIERCX_RAS_SD_CTL1(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_CTL1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_ctl2
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Control 2 Register
+ */
+union bdk_pciercx_ras_sd_ctl2
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_sd_ctl2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_17_31 : 15;
+        uint32_t fr_err_rcvy_dis : 1; /**< [ 16: 16](R/W) Framing error recovery disable.
+                                          This bit disables a transition to recovery state when a framing
+                                          error has occurred. */
+        uint32_t reserved_11_15 : 5;
+        uint32_t dir_lpbslv_to_exit : 1; /**< [ 10: 10](R/W) Direct loopback slave to exit.
+                                          When set and the LTSSM is in loopback slave active state,
+                                          the LTSSM transitions to the loopback slave exit state. */
+        uint32_t dir_polcmp_to_det : 1; /**< [  9:  9](R/W) Direct Polling.Compliance to detect.
+                                          When this bit is set and the LTSSM is in polling compliance
+                                          state, the LTSSM transitions to detect state. */
+        uint32_t dir_recidle_config : 1; /**< [  8:  8](R/W) Direct Recovery.Idle to configuration.
+                                          When this bit is set and the LTSSM is in recovery idle state,
+                                          the LTSSM transitions to configuration state. */
+        uint32_t reserved_3_7 : 5;
+        uint32_t noack_force_lnkdn : 1; /**< [  2:  2](R/W) Force link down.
+                                          When this bit is set and the core detects REPLY_NUM rolling
+                                          over 4 times, the LTSSM transitions to detect state. */
+        uint32_t rcry_req : 1; /**< [  1:  1](WO) Recovery request.
+                                          When this bit is set in L0 or L0s, the LTSSM starts
+                                          transitioning to recovery state. This request does not cause
+                                          a speed change or re-equalization. This bit always reads
+                                          a zero. */
+        uint32_t hold_ltssm : 1; /**< [  0:  0](R/W) Hold and release LTSSM.
+                                          For as long as this is set, the core stays in the current
+                                          LTSSM. */
+#else /* Word 0 - Little Endian */
+        uint32_t hold_ltssm : 1; /**< [  0:  0](R/W) Hold and release LTSSM.
+                                          For as long as this is set, the core stays in the current
+                                          LTSSM. */
+        uint32_t rcry_req : 1; /**< [  1:  1](WO) Recovery request.
+                                          When this bit is set in L0 or L0s, the LTSSM starts
+                                          transitioning to recovery state. This request does not cause
+                                          a speed change or re-equalization. This bit always reads
+                                          a zero. */
+        uint32_t noack_force_lnkdn : 1; /**< [  2:  2](R/W) Force link down.
+                                          When this bit is set and the core detects REPLY_NUM rolling
+                                          over 4 times, the LTSSM transitions to detect state. */
+        uint32_t reserved_3_7 : 5;
+        uint32_t dir_recidle_config : 1; /**< [  8:  8](R/W) Direct Recovery.Idle to configuration.
+                                          When this bit is set and the LTSSM is in recovery idle state,
+                                          the LTSSM transitions to configuration state. */
+        uint32_t dir_polcmp_to_det : 1; /**< [  9:  9](R/W) Direct Polling.Compliance to detect.
+                                          When this bit is set and the LTSSM is in polling compliance
+                                          state, the LTSSM transitions to detect state. */
+        uint32_t dir_lpbslv_to_exit : 1; /**< [ 10: 10](R/W) Direct loopback slave to exit.
+                                          When set and the LTSSM is in loopback slave active state,
+                                          the LTSSM transitions to the loopback slave exit state. */
+        uint32_t reserved_11_15 : 5;
+        uint32_t fr_err_rcvy_dis : 1; /**< [ 16: 16](R/W) Framing error recovery disable.
+                                          This bit disables a transition to recovery state when a framing
+                                          error has occurred. */
+        uint32_t reserved_17_31 : 15;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_ctl2_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_ctl2 bdk_pciercx_ras_sd_ctl2_t;
+
+/* Byte offset of the PCIERC(a)_RAS_SD_CTL2 register in RC config space.
+   Only CN9XXX parts with a <= 3 are valid; any other input reports a fatal
+   CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_CTL2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3bcll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_CTL2", 1, a, 0, 0, 0);
+}
+
+/* Generated helper macros: C type, bus type, printable name, bus number and
+   argument list consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_PCIERCX_RAS_SD_CTL2(a) bdk_pciercx_ras_sd_ctl2_t
+#define bustype_BDK_PCIERCX_RAS_SD_CTL2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_CTL2(a) "PCIERCX_RAS_SD_CTL2"
+#define busnum_BDK_PCIERCX_RAS_SD_CTL2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_CTL2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_eq_ctl1
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Control 1 Register
+ */
+union bdk_pciercx_ras_sd_eq_ctl1
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_sd_eq_ctl1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t fom_target : 8; /**< [ 31: 24](R/W) FOM target.
+                                          Indicates figure of merit target criteria value of EQ
+                                          master (DSP in EQ Phase3/USP in EQ Phase2).
+                                          This field is only valid when PCIERC_GEN3_EQ_CTL[FM] is
+                                          0x1 (figure of merit). */
+        uint32_t fom_target_en : 1; /**< [ 23: 23](R/W) FOM target enable.
+                                          Enables the [FOM_TARGET] field. */
+        uint32_t reserved_18_22 : 5;
+        uint32_t eval_interval_time : 2; /**< [ 17: 16](R/W) Eval interval time.
+                                          Indicates interval time of RxEqEval assertion.
+                                          0x0 = 500 ns.
+                                          0x1 = 1 us.
+                                          0x2 = 2 us.
+                                          0x3 = 4 us.
+
+                                          This field is used for EQ master (DSP in EQ Phase3/USP in
+                                          EQ Phase2). */
+        uint32_t reserved_10_15 : 6;
+        uint32_t ext_eq_timeout : 2; /**< [  9:  8](R/W) Extends EQ Phase2/3 timeout.
+                                          This field is used when the ltssm is in Recovery.EQ2/3.
+                                          When this field is set, the value of the EQ2/3 timeout is
+                                          extended.
+
+                                          EQ master (DSP in EQ Phase 3/USP in EQ Phase2)
+                                          0x0 = 24 ms (default).
+                                          0x1 = 48 ms.
+                                          0x2 = 240 ms.
+                                          0x3 = No timeout.
+
+                                          EQ slave (DSP in EQ Phase 2/USP in EQ Phase3)
+                                          0x0 = 32 ms (default).
+                                          0x1 = 56 ms.
+                                          0x2 = 248 ms.
+                                          0x3 = No timeout. */
+        uint32_t reserved_5_7 : 3;
+        uint32_t eq_rate_sel : 1; /**< [  4:  4](R/W) EQ status rate select.
+                                          Setting this field in conjunction with [EQ_LANE_SEL]
+                                          determines the per-lane silicon debug EQ status data
+                                          returned by the SD_EQ_CONTROL[2/3] and
+                                          SD_EQ_STATUS[1/2/3] viewport registers.
+                                          0x0 = 8.0 GT/s speed.
+                                          0x1 = 16.0 GT/s speed. */
+        uint32_t eq_lane_sel : 4; /**< [  3:  0](R/W) EQ status lane select.
+                                          Setting this field in conjunction with [EQ_RATE_SEL]
+                                          determines the per-lane silicon debug EQ status data
+                                          returned by the SD_EQ_CONTROL[2/3] and
+                                          SD_EQ_STATUS[1/2/3] viewport registers.
+                                          0x0 = Lane0.
+                                          0x1 = Lane1.
+                                          0x2 = Lane2.
+                                          _ ...
+                                          0x7 = Lane7.
+                                          0x8-0xF = Reserved. */
+#else /* Word 0 - Little Endian */
+        uint32_t eq_lane_sel : 4; /**< [  3:  0](R/W) EQ status lane select.
+                                          Setting this field in conjunction with [EQ_RATE_SEL]
+                                          determines the per-lane silicon debug EQ status data
+                                          returned by the SD_EQ_CONTROL[2/3] and
+                                          SD_EQ_STATUS[1/2/3] viewport registers.
+                                          0x0 = Lane0.
+                                          0x1 = Lane1.
+                                          0x2 = Lane2.
+                                          _ ...
+                                          0x7 = Lane7.
+                                          0x8-0xF = Reserved. */
+        uint32_t eq_rate_sel : 1; /**< [  4:  4](R/W) EQ status rate select.
+                                          Setting this field in conjunction with [EQ_LANE_SEL]
+                                          determines the per-lane silicon debug EQ status data
+                                          returned by the SD_EQ_CONTROL[2/3] and
+                                          SD_EQ_STATUS[1/2/3] viewport registers.
+                                          0x0 = 8.0 GT/s speed.
+                                          0x1 = 16.0 GT/s speed. */
+        uint32_t reserved_5_7 : 3;
+        uint32_t ext_eq_timeout : 2; /**< [  9:  8](R/W) Extends EQ Phase2/3 timeout.
+                                          This field is used when the ltssm is in Recovery.EQ2/3.
+                                          When this field is set, the value of the EQ2/3 timeout is
+                                          extended.
+
+                                          EQ master (DSP in EQ Phase 3/USP in EQ Phase2)
+                                          0x0 = 24 ms (default).
+                                          0x1 = 48 ms.
+                                          0x2 = 240 ms.
+                                          0x3 = No timeout.
+
+                                          EQ slave (DSP in EQ Phase 2/USP in EQ Phase3)
+                                          0x0 = 32 ms (default).
+                                          0x1 = 56 ms.
+                                          0x2 = 248 ms.
+                                          0x3 = No timeout. */
+        uint32_t reserved_10_15 : 6;
+        uint32_t eval_interval_time : 2; /**< [ 17: 16](R/W) Eval interval time.
+                                          Indicates interval time of RxEqEval assertion.
+                                          0x0 = 500 ns.
+                                          0x1 = 1 us.
+                                          0x2 = 2 us.
+                                          0x3 = 4 us.
+
+                                          This field is used for EQ master (DSP in EQ Phase3/USP in
+                                          EQ Phase2). */
+        uint32_t reserved_18_22 : 5;
+        uint32_t fom_target_en : 1; /**< [ 23: 23](R/W) FOM target enable.
+                                          Enables the [FOM_TARGET] field. */
+        uint32_t fom_target : 8; /**< [ 31: 24](R/W) FOM target.
+                                          Indicates figure of merit target criteria value of EQ
+                                          master (DSP in EQ Phase3/USP in EQ Phase2).
+                                          This field is only valid when PCIERC_GEN3_EQ_CTL[FM] is
+                                          0x1 (figure of merit). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_eq_ctl1_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_eq_ctl1 bdk_pciercx_ras_sd_eq_ctl1_t;
+
+/* Byte offset of the PCIERC(a)_RAS_SD_EQ_CTL1 register in RC config space.
+   Only CN9XXX parts with a <= 3 are valid; any other input reports a fatal
+   CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_CTL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_CTL1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3e8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_EQ_CTL1", 1, a, 0, 0, 0);
+}
+
+/* Generated helper macros: C type, bus type, printable name, bus number and
+   argument list consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_PCIERCX_RAS_SD_EQ_CTL1(a) bdk_pciercx_ras_sd_eq_ctl1_t
+#define bustype_BDK_PCIERCX_RAS_SD_EQ_CTL1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_EQ_CTL1(a) "PCIERCX_RAS_SD_EQ_CTL1"
+#define busnum_BDK_PCIERCX_RAS_SD_EQ_CTL1(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_EQ_CTL1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_eq_ctl2
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Control 2 Register
+ */
+union bdk_pciercx_ras_sd_eq_ctl2
+{
+    uint32_t u; /* Entire 32-bit register value. */
+    struct bdk_pciercx_ras_sd_eq_ctl2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t force_loc_txpre_en : 1; /**< [ 30: 30](R/W) Force local transmitter preset enable. Enables [FORCE_LOC_TXPRE]. */
+        uint32_t force_loc_rxhint_en : 1; /**< [ 29: 29](R/W) Force local receiver preset hint enable. Enables [FORCE_LOC_RXHINT]. */
+        uint32_t force_loc_txcoef_en : 1; /**< [ 28: 28](R/W) Force local transmitter coefficient enable.
+                                          Enables the following fields:
+                                          [FORCE_LOC_TXPRE_CUR],
+                                          [FORCE_LOC_TX_CUR],
+                                          [FORCE_LOC_TXPOST_CUR]. */
+        uint32_t force_loc_txpre : 4; /**< [ 27: 24](R/W) Force local transmitter preset.
+                                          Indicates initial preset value of USP in EQ slave (EQ Phase2)
+                                          instead of receiving EQ TS2. */
+        uint32_t reserved_21_23 : 3;
+        uint32_t force_loc_rxhint : 3; /**< [ 20: 18](R/W) Force local receiver preset hint.
+                                          Indicates the RxPresetHint value of EQ slave (DSP in EQ
+                                          Phase2/USP in EQ Phase3), instead of received or set value. */
+        uint32_t force_loc_txpost_cur : 6; /**< [ 17: 12](R/W) Force local transmitter postcursor.
+                                          Indicates the coefficient value of EQ slave (DSP in EQ
+                                          Phase2/USP in EQ Phase3), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_loc_tx_cur : 6; /**< [ 11:  6](R/W) Force local transmitter cursor.
+                                          Indicates the coefficient value of EQ slave (DSP in EQ
+                                          Phase2/USP in EQ Phase3), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_loc_txpre_cur : 6; /**< [  5:  0](R/W) Force local transmitter precursor.
+                                          Indicates the coefficient value of EQ slave (DSP in EQ
+                                          Phase2/USP in EQ Phase3), instead of the value instructed
+                                          from link partner. */
+#else /* Word 0 - Little Endian */
+        uint32_t force_loc_txpre_cur : 6; /**< [  5:  0](R/W) Force local transmitter precursor.
+                                          Indicates the coefficient value of EQ slave (DSP in EQ
+                                          Phase2/USP in EQ Phase3), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_loc_tx_cur : 6; /**< [ 11:  6](R/W) Force local transmitter cursor.
+                                          Indicates the coefficient value of EQ slave (DSP in EQ
+                                          Phase2/USP in EQ Phase3), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_loc_txpost_cur : 6; /**< [ 17: 12](R/W) Force local transmitter postcursor.
+                                          Indicates the coefficient value of EQ slave (DSP in EQ
+                                          Phase2/USP in EQ Phase3), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_loc_rxhint : 3; /**< [ 20: 18](R/W) Force local receiver preset hint.
+                                          Indicates the RxPresetHint value of EQ slave (DSP in EQ
+                                          Phase2/USP in EQ Phase3), instead of received or set value. */
+        uint32_t reserved_21_23 : 3;
+        uint32_t force_loc_txpre : 4; /**< [ 27: 24](R/W) Force local transmitter preset.
+                                          Indicates initial preset value of USP in EQ slave (EQ Phase2)
+                                          instead of receiving EQ TS2. */
+        uint32_t force_loc_txcoef_en : 1; /**< [ 28: 28](R/W) Force local transmitter coefficient enable.
+                                          Enables the following fields:
+                                          [FORCE_LOC_TXPRE_CUR],
+                                          [FORCE_LOC_TX_CUR],
+                                          [FORCE_LOC_TXPOST_CUR]. */
+        uint32_t force_loc_rxhint_en : 1; /**< [ 29: 29](R/W) Force local receiver preset hint enable. Enables [FORCE_LOC_RXHINT]. */
+        uint32_t force_loc_txpre_en : 1; /**< [ 30: 30](R/W) Force local transmitter preset enable. Enables [FORCE_LOC_TXPRE]. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_eq_ctl2_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_eq_ctl2 bdk_pciercx_ras_sd_eq_ctl2_t;
+
+/* Byte offset of the PCIERC(a)_RAS_SD_EQ_CTL2 register in RC config space.
+   Only CN9XXX parts with a <= 3 are valid; any other input reports a fatal
+   CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_CTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_CTL2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3ecll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_EQ_CTL2", 1, a, 0, 0, 0);
+}
+
+/* Generated helper macros: C type, bus type, printable name, bus number and
+   argument list consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_PCIERCX_RAS_SD_EQ_CTL2(a) bdk_pciercx_ras_sd_eq_ctl2_t
+#define bustype_BDK_PCIERCX_RAS_SD_EQ_CTL2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_EQ_CTL2(a) "PCIERCX_RAS_SD_EQ_CTL2"
+#define busnum_BDK_PCIERCX_RAS_SD_EQ_CTL2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_EQ_CTL2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_eq_ctl3
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Control 3 Register
+ */
+union bdk_pciercx_ras_sd_eq_ctl3
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_sd_eq_ctl3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_29_31 : 3;
+        uint32_t force_rem_txcoef_en : 1; /**< [ 28: 28](R/W) Force remote transmitter coefficient enable as selected by
+                                          PCIERC_RAS_SD_EQ_CTL1[EQ_LANE_SEL].
+                                          Enables the following fields:
+                                          [FORCE_REM_TXPRE_CUR],
+                                          [FORCE_REM_TX_CUR],
+                                          [FORCE_REM_TXPOST_CUR]. */
+        uint32_t reserved_18_27 : 10;
+        uint32_t force_rem_txpost_cur : 6; /**< [ 17: 12](R/W) Force remote transmitter postcursor as selected by
+                                          PCIERC_RAS_SD_EQ_CTL1[EQ_LANE_SEL].
+                                          Indicates the coefficient value of EQ master (DSP in EQ
+                                          Phase3/USP in EQ Phase2), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_rem_tx_cur : 6; /**< [ 11:  6](R/W) Force remote transmitter cursor as selected by
+                                          PCIERC_RAS_SD_EQ_CTL1[EQ_LANE_SEL].
+                                          Indicates the coefficient value of EQ master (DSP in EQ
+                                          Phase3/USP in EQ Phase2), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_rem_txpre_cur : 6; /**< [  5:  0](RAZ) Force remote transmitter precursor as selected by
+                                          PCIERC_RAS_SD_EQ_CTL1[EQ_LANE_SEL].
+                                          Indicates the coefficient value of EQ master (DSP in EQ
+                                          Phase3/USP in EQ Phase2), instead of the value instructed
+                                          from link partner. */
+#else /* Word 0 - Little Endian */
+        uint32_t force_rem_txpre_cur : 6; /**< [  5:  0](RAZ) Force remote transmitter precursor as selected by
+                                          PCIERC_RAS_SD_EQ_CTL1[EQ_LANE_SEL].
+                                          Indicates the coefficient value of EQ master (DSP in EQ
+                                          Phase3/USP in EQ Phase2), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_rem_tx_cur : 6; /**< [ 11:  6](R/W) Force remote transmitter cursor as selected by
+                                          PCIERC_RAS_SD_EQ_CTL1[EQ_LANE_SEL].
+                                          Indicates the coefficient value of EQ master (DSP in EQ
+                                          Phase3/USP in EQ Phase2), instead of the value instructed
+                                          from link partner. */
+        uint32_t force_rem_txpost_cur : 6; /**< [ 17: 12](R/W) Force remote transmitter postcursor as selected by
+                                          PCIERC_RAS_SD_EQ_CTL1[EQ_LANE_SEL].
+                                          Indicates the coefficient value of EQ master (DSP in EQ
+                                          Phase3/USP in EQ Phase2), instead of the value instructed
+                                          from link partner. */
+        uint32_t reserved_18_27 : 10;
+        uint32_t force_rem_txcoef_en : 1; /**< [ 28: 28](R/W) Force remote transmitter coefficient enable as selected by
+                                          PCIERC_RAS_SD_EQ_CTL1[EQ_LANE_SEL].
+                                          Enables the following fields:
+                                          [FORCE_REM_TXPRE_CUR],
+                                          [FORCE_REM_TX_CUR],
+                                          [FORCE_REM_TXPOST_CUR]. */
+        uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_eq_ctl3_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_eq_ctl3 bdk_pciercx_ras_sd_eq_ctl3_t;
+
+/* Byte offset of the PCIERC(a)_RAS_SD_EQ_CTL3 register in RC config space.
+   Only CN9XXX parts with a <= 3 are valid; any other input reports a fatal
+   CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_CTL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_CTL3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3f0ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_EQ_CTL3", 1, a, 0, 0, 0);
+}
+
+/* Generated helper macros: C type, bus type, printable name, bus number and
+   argument list consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_PCIERCX_RAS_SD_EQ_CTL3(a) bdk_pciercx_ras_sd_eq_ctl3_t
+#define bustype_BDK_PCIERCX_RAS_SD_EQ_CTL3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_EQ_CTL3(a) "PCIERCX_RAS_SD_EQ_CTL3"
+#define busnum_BDK_PCIERCX_RAS_SD_EQ_CTL3(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_EQ_CTL3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_eq_stat1
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Status 1 Register
+ */
+union bdk_pciercx_ras_sd_eq_stat1
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_sd_eq_stat1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_8_31 : 24;
+        uint32_t eq_reject_event : 1; /**< [  7:  7](RO/H) EQ reject event.
+                                          Indicates that the core receives two consecutive TS1 OS
+                                          w/Reject=1b during EQ master phase (DSP in EQ
+                                          Phase3/USP in EQ Phase2). This bit is automatically cleared
+                                          when the core starts EQ master phase again. */
+        uint32_t eq_rulec_viol : 1; /**< [  6:  6](RO/H) EQ rule C violation.
+                                          Indicates that coefficient rule C violation is detected in the
+                                          values provided by PHY using direction change method
+                                          during EQ master phase (DSP in EQ Phase3/USP in EQ
+                                          Phase2). The coefficient rule C
+                                          corresponds to rule c) from section "Rules for
+                                          Transmitter Coefficients" in the PCI Express Base Specification.
+                                          This bit is automatically cleared when the controller starts
+                                          EQ master phase again. */
+        uint32_t eq_ruleb_viol : 1; /**< [  5:  5](RO/H) EQ rule B violation.
+                                          Indicates that coefficient rule B violation is detected in the
+                                          values provided by PHY using direction change method
+                                          during EQ master phase (DSP in EQ Phase3/USP in EQ
+                                          Phase2). The coefficient rule B
+                                          corresponds to rule b) from section "Rules for
+                                          Transmitter Coefficients" in the PCI Express Base Specification.
+                                          This bit is automatically cleared when the controller starts
+                                          EQ master phase again. */
+        uint32_t eq_rulea_viol : 1; /**< [  4:  4](RO/H) EQ rule A violation.
+                                          Indicates that coefficient rule A violation is detected in the
+                                          values provided by PHY using direction change method
+                                          during EQ master phase (DSP in EQ Phase3/USP in EQ
+                                          Phase2). The coefficient rule A
+                                          corresponds to rule a) from section "Rules for
+                                          Transmitter Coefficients" in the PCI Express Base Specification.
+                                          This bit is automatically cleared when the controller starts
+                                          EQ master phase again. */
+        uint32_t reserved_3 : 1;
+        uint32_t eq_conv_info : 2; /**< [  2:  1](RO/H) EQ convergence info.
+                                          Indicates equalization convergence information.
+                                          0x0 = Equalization is not attempted.
+                                          0x1 = Equalization finished successfully.
+                                          0x2 = Equalization finished unsuccessfully.
+                                          0x3 = Reserved.
+                                          This bit is automatically cleared when the core starts EQ
+                                          master phase again. */
+        uint32_t eq_sequence : 1; /**< [  0:  0](RO) EQ sequence.
+                                          Indicates that the core is starting the equalization sequence. */
+#else /* Word 0 - Little Endian */
+        uint32_t eq_sequence : 1; /**< [  0:  0](RO) EQ sequence.
+                                          Indicates that the core is starting the equalization sequence. */
+        uint32_t eq_conv_info : 2; /**< [  2:  1](RO/H) EQ convergence info.
+                                          Indicates equalization convergence information.
+                                          0x0 = Equalization is not attempted.
+                                          0x1 = Equalization finished successfully.
+                                          0x2 = Equalization finished unsuccessfully.
+                                          0x3 = Reserved.
+                                          This bit is automatically cleared when the core starts EQ
+                                          master phase again. */
+        uint32_t reserved_3 : 1;
+        uint32_t eq_rulea_viol : 1; /**< [  4:  4](RO/H) EQ rule A violation.
+                                          Indicates that coefficient rule A violation is detected in the
+                                          values provided by PHY using direction change method
+                                          during EQ master phase (DSP in EQ Phase3/USP in EQ
+                                          Phase2). The coefficient rule A
+                                          corresponds to rule a) from section "Rules for
+                                          Transmitter Coefficients" in the PCI Express Base Specification.
+                                          This bit is automatically cleared when the controller starts
+                                          EQ master phase again. */
+        uint32_t eq_ruleb_viol : 1; /**< [  5:  5](RO/H) EQ rule B violation.
+                                          Indicates that coefficient rule B violation is detected in the
+                                          values provided by PHY using direction change method
+                                          during EQ master phase (DSP in EQ Phase3/USP in EQ
+                                          Phase2). The coefficient rule B
+                                          corresponds to rule b) from section "Rules for
+                                          Transmitter Coefficients" in the PCI Express Base Specification.
+                                          This bit is automatically cleared when the controller starts
+                                          EQ master phase again. */
+        uint32_t eq_rulec_viol : 1; /**< [  6:  6](RO/H) EQ rule C violation.
+                                          Indicates that coefficient rule C violation is detected in the
+                                          values provided by PHY using direction change method
+                                          during EQ master phase (DSP in EQ Phase3/USP in EQ
+                                          Phase2). The coefficient rule C
+                                          corresponds to rule c) from section "Rules for
+                                          Transmitter Coefficients" in the PCI Express Base Specification.
+                                          This bit is automatically cleared when the controller starts
+                                          EQ master phase again. */
+        uint32_t eq_reject_event : 1; /**< [  7:  7](RO/H) EQ reject event.
+                                          Indicates that the core receives two consecutive TS1 OS
+                                          w/Reject=1b during EQ master phase (DSP in EQ
+                                          Phase3/USP in EQ Phase2). This bit is automatically cleared
+                                          when the core starts EQ master phase again. */
+        uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_eq_stat1_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_eq_stat1 bdk_pciercx_ras_sd_eq_stat1_t;
+
+/* Byte offset of the PCIERC(a)_RAS_SD_EQ_STAT1 register in RC config space.
+   Only CN9XXX parts with a <= 3 are valid; any other input reports a fatal
+   CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_STAT1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_STAT1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3f8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_EQ_STAT1", 1, a, 0, 0, 0);
+}
+
+/* Generated helper macros: C type, bus type, printable name, bus number and
+   argument list consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_PCIERCX_RAS_SD_EQ_STAT1(a) bdk_pciercx_ras_sd_eq_stat1_t
+#define bustype_BDK_PCIERCX_RAS_SD_EQ_STAT1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_EQ_STAT1(a) "PCIERCX_RAS_SD_EQ_STAT1"
+#define busnum_BDK_PCIERCX_RAS_SD_EQ_STAT1(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_EQ_STAT1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_eq_stat2
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Status 2 Register
+ */
+union bdk_pciercx_ras_sd_eq_stat2
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_sd_eq_stat2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t eq_loc_fom_val : 8; /**< [ 31: 24](RO/H) EQ local figure of merit.
+                                          Indicates local maximum figure of merit value. */
+        uint32_t reserved_21_23 : 3;
+        uint32_t eq_loc_rxhint : 3; /**< [ 20: 18](RO/H) EQ local receiver preset hint.
+                                          Indicates local receiver preset hint value. */
+        uint32_t eq_loc_post_cur : 6; /**< [ 17: 12](RO/H) EQ local postcursor.
+                                          Indicates local postcursor coefficient value. */
+        uint32_t eq_loc_cur : 6; /**< [ 11:  6](RO/H) EQ local cursor.
+                                          Indicates local cursor coefficient value. */
+        uint32_t eq_loc_pre_cur : 6; /**< [  5:  0](RO/H) EQ local precursor.
+                                          Indicates local precursor coefficient value. */
+#else /* Word 0 - Little Endian */
+        uint32_t eq_loc_pre_cur : 6; /**< [  5:  0](RO/H) EQ local precursor.
+                                          Indicates local precursor coefficient value. */
+        uint32_t eq_loc_cur : 6; /**< [ 11:  6](RO/H) EQ local cursor.
+                                          Indicates local cursor coefficient value. */
+        uint32_t eq_loc_post_cur : 6; /**< [ 17: 12](RO/H) EQ local postcursor.
+                                          Indicates local postcursor coefficient value. */
+        uint32_t eq_loc_rxhint : 3; /**< [ 20: 18](RO/H) EQ local receiver preset hint.
+                                          Indicates local receiver preset hint value. */
+        uint32_t reserved_21_23 : 3;
+        uint32_t eq_loc_fom_val : 8; /**< [ 31: 24](RO/H) EQ local figure of merit.
+                                          Indicates local maximum figure of merit value. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_eq_stat2_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_eq_stat2 bdk_pciercx_ras_sd_eq_stat2_t;
+
+/* Byte offset of the PCIERC(a)_RAS_SD_EQ_STAT2 register in RC config space.
+   Only CN9XXX parts with a <= 3 are valid; any other input reports a fatal
+   CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_STAT2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_STAT2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3fcll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_EQ_STAT2", 1, a, 0, 0, 0);
+}
+
+/* Generated helper macros: C type, bus type, printable name, bus number and
+   argument list consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_PCIERCX_RAS_SD_EQ_STAT2(a) bdk_pciercx_ras_sd_eq_stat2_t
+#define bustype_BDK_PCIERCX_RAS_SD_EQ_STAT2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_EQ_STAT2(a) "PCIERCX_RAS_SD_EQ_STAT2"
+#define busnum_BDK_PCIERCX_RAS_SD_EQ_STAT2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_EQ_STAT2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_eq_stat3
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug EQ Status 3 Register
+ */
+union bdk_pciercx_ras_sd_eq_stat3
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_sd_eq_stat3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_30_31 : 2;
+        uint32_t eq_rem_fs : 6; /**< [ 29: 24](RO/H) EQ remote FS.
+                                          Indicates remote FS value. */
+        uint32_t eq_rem_lf : 6; /**< [ 23: 18](RO/H) EQ remote LF.
+                                          Indicates remote LF value. */
+        uint32_t eq_rem_post_cur : 6; /**< [ 17: 12](RO/H) EQ remote postcursor.
+                                          Indicates remote postcursor coefficient value. */
+        uint32_t eq_rem_cur : 6; /**< [ 11:  6](RO/H) EQ remote cursor.
+                                          Indicates remote cursor coefficient value. */
+        uint32_t eq_rem_pre_cur : 6; /**< [  5:  0](RO/H) EQ remote precursor.
+                                          Indicates remote precursor coefficient value. */
+#else /* Word 0 - Little Endian */
+        uint32_t eq_rem_pre_cur : 6; /**< [  5:  0](RO/H) EQ remote precursor.
+                                          Indicates remote precursor coefficient value. */
+        uint32_t eq_rem_cur : 6; /**< [ 11:  6](RO/H) EQ remote cursor.
+                                          Indicates remote cursor coefficient value. */
+        uint32_t eq_rem_post_cur : 6; /**< [ 17: 12](RO/H) EQ remote postcursor.
+                                          Indicates remote postcursor coefficient value. */
+        uint32_t eq_rem_lf : 6; /**< [ 23: 18](RO/H) EQ remote LF.
+                                          Indicates remote LF value. */
+        uint32_t eq_rem_fs : 6; /**< [ 29: 24](RO/H) EQ remote FS.
+                                          Indicates remote FS value. */
+        uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_eq_stat3_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_eq_stat3 bdk_pciercx_ras_sd_eq_stat3_t;
+
+/* Byte offset of the PCIERC(a)_RAS_SD_EQ_STAT3 register in RC config space.
+   Only CN9XXX parts with a <= 3 are valid; any other input reports a fatal
+   CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_STAT3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_EQ_STAT3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x400ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_EQ_STAT3", 1, a, 0, 0, 0);
+}
+
+/* Generated helper macros: C type, bus type, printable name, bus number and
+   argument list consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_PCIERCX_RAS_SD_EQ_STAT3(a) bdk_pciercx_ras_sd_eq_stat3_t
+#define bustype_BDK_PCIERCX_RAS_SD_EQ_STAT3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_EQ_STAT3(a) "PCIERCX_RAS_SD_EQ_STAT3"
+#define busnum_BDK_PCIERCX_RAS_SD_EQ_STAT3(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_EQ_STAT3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_l1lane
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L1Lane Register
+ */
+union bdk_pciercx_ras_sd_l1lane
+{
+    uint32_t u;
+    struct bdk_pciercx_ras_sd_l1lane_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t deskew_ptr : 8; /**< [ 31: 24](RO/H) Deskew pointer.
+                                          Indicates deskew pointer of internal deskew buffer of
+                                          selected lane number ([LANE_SELECT]). */
+        uint32_t reserved_21_23 : 3;
+        uint32_t pipe_txelecidle : 1; /**< [ 20: 20](RO/H) PIPE:TxElecIdle.
+                                          Indicates PIPE TXELECIDLE signal of selected lane
+                                          number ([LANE_SELECT]). */
+        uint32_t pipe_rxelecidle : 1; /**< [ 19: 19](RO/H) PIPE:RxElecIdle.
+                                          Indicates PIPE RXELECIDLE signal of selected lane
+                                          number ([LANE_SELECT]). */
+        uint32_t pipe_rxvalid : 1; /**< [ 18: 18](RO/H) PIPE:RxValid.
+                                          Indicates PIPE RXVALID signal of selected lane
+                                          number ([LANE_SELECT]). */
+        uint32_t pipe_det_lane : 1; /**< [ 17: 17](RO/H) PIPE:Detect Lane.
+                                          Indicates whether PHY indicates receiver detection or not on
+                                          selected lane number ([LANE_SELECT]). */
+        uint32_t pipe_rxpol : 1; /**< [ 16: 16](RO/H) PIPE:RxPolarity.
+                                          Indicates PIPE RXPOLARITY signal of selected lane
+                                          number ([LANE_SELECT]). */
+        uint32_t reserved_4_15 : 12;
+        uint32_t lane_select : 4; /**< [  3:  0](R/W) Lane select.
+                                          Lane select register for silicon debug status register of
+                                          Layer1-PerLane.
+                                          0x0 = Lane0.
+                                          0x1 = Lane1.
+                                          0x2 = Lane2.
+                                          _ ...
+                                          0x7 = Lane7.
+                                          0x8-0xF = Reserved. */
+#else /* Word 0 - Little Endian */
+        uint32_t lane_select : 4; /**< [  3:  0](R/W) Lane select.
+                                          Lane select register for silicon debug status register of
+                                          Layer1-PerLane.
+                                          0x0 = Lane0.
+                                          0x1 = Lane1.
+                                          0x2 = Lane2.
+                                          _ ...
+                                          0x7 = Lane7.
+                                          0x8-0xF = Reserved. */
+        uint32_t reserved_4_15 : 12;
+        uint32_t pipe_rxpol : 1; /**< [ 16: 16](RO/H) PIPE:RxPolarity.
+                                          Indicates PIPE RXPOLARITY signal of selected lane
+                                          number ([LANE_SELECT]). */
+        uint32_t pipe_det_lane : 1; /**< [ 17: 17](RO/H) PIPE:Detect Lane.
+                                          Indicates whether PHY indicates receiver detection or not on
+                                          selected lane number ([LANE_SELECT]). */
+        uint32_t pipe_rxvalid : 1; /**< [ 18: 18](RO/H) PIPE:RxValid.
+                                          Indicates PIPE RXVALID signal of selected lane
+                                          number ([LANE_SELECT]). */
+        uint32_t pipe_rxelecidle : 1; /**< [ 19: 19](RO/H) PIPE:RxElecIdle.
+                                          Indicates PIPE RXELECIDLE signal of selected lane
+                                          number ([LANE_SELECT]). */
+        uint32_t pipe_txelecidle : 1; /**< [ 20: 20](RO/H) PIPE:TxElecIdle.
+                                          Indicates PIPE TXELECIDLE signal of selected lane
+                                          number ([LANE_SELECT]). */
+        uint32_t reserved_21_23 : 3;
+        uint32_t deskew_ptr : 8; /**< [ 31: 24](RO/H) Deskew pointer.
+                                          Indicates deskew pointer of internal deskew buffer of
+                                          selected lane number ([LANE_SELECT]). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ras_sd_l1lane_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_l1lane bdk_pciercx_ras_sd_l1lane_t;
+
+/* Byte offset of the PCIERC(a)_RAS_SD_L1LANE register in RC config space.
+   Only CN9XXX parts with a <= 3 are valid; any other input reports a fatal
+   CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_L1LANE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_L1LANE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x3c8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RAS_SD_L1LANE", 1, a, 0, 0, 0);
+}
+
+/* Generated helper macros: C type, bus type, printable name, bus number and
+   argument list consumed by the generic BDK_CSR_* accessor machinery. */
+#define typedef_BDK_PCIERCX_RAS_SD_L1LANE(a) bdk_pciercx_ras_sd_l1lane_t
+#define bustype_BDK_PCIERCX_RAS_SD_L1LANE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_L1LANE(a) "PCIERCX_RAS_SD_L1LANE"
+#define busnum_BDK_PCIERCX_RAS_SD_L1LANE(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_L1LANE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_l1ltssm
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L1LTSSM Register
+ */
+union bdk_pciercx_ras_sd_l1ltssm
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_sd_l1ltssm_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ltssm_var : 16; /**< [ 31: 16](RO/H) LTSSM variable.
+ Indicates internal LTSSM variables defined in the PCI
+ Express base specification.
+ 0x0 = directed_speed change.
+ 0x1 = changed_speed_recovery.
+ 0x2 = successful_speed_negotiation.
+ 0x3 = upconfigure_capable; Set to one if both ports advertised
+ the UpConfigure capability in the last Config.Complete.
+ 0x4 = select_deemphasis.
+ 0x5 = start_equalization_w_preset.
+ 0x6 = equalization_done_8GT_data_rate.
+ 0x7 = equalization_done_16GT_data_rate.
+ 0x8-0xF = idle_to_rlock_transitioned. */
+ uint32_t lane_rev : 1; /**< [ 15: 15](RO/H) Lane reversal operation.
+ Receiver detected lane reversal. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t pipe_pwr_dwn : 3; /**< [ 10: 8](RO/H) PIPE:PowerDown.
+ Indicates PIPE PowerDown signal. */
+ uint32_t framing_err : 1; /**< [ 7: 7](R/W1C) Framing error.
+ Indicates framing error detection status. */
+ uint32_t framing_err_ptr : 7; /**< [ 6: 0](RO) First framing error pointer.
+ Identifies the first framing error using the following
+ encoding. The field contents are only valid value when
+ [FRAMING_ERR] = 1.
+
+ Received unexpected framing token:
+ 0x1 = When non-STP/SDP/IDL token was received and it
+ was not in TLP/DLLP reception.
+ 0x02 = When current token was not a valid EDB token and
+ previous token was an EDB. (128/256 bit core only).
+ 0x03 = When SDP token was received but not expected.
+ 0x04 = When STP token was received but not expected.
+ 0x05 = When EDS token was expected but not received or
+ whenever an EDS token was received but not expected.
+ 0x06 = When a framing error was detected in the deskew
+ block while a packet has been in progress in token_finder.
+ Received Unexpected STP Token
+ 0x11 = When framing CRC in STP token did not match.
+ 0x12 = When framing parity in STP token did not match.
+ 0x13 = When framing TLP length in STP token was
+ smaller than 5 DWORDs.
+
+ \<page\>
+
+ Received unexpected block:
+ 0x21 = When receiving an OS block following SDS in datastream state.n.
+ 0x22 = When data block followed by OS block different.
+ from SKP, EI, EIE in datastream state.
+ 0x23 = When block with an undefined block type in datastream state.
+ 0x24 = When data stream without data over three cycles in datastream state.
+ 0x25 = When OS block during data stream in datastream state.
+ 0x26 = When RxStatus error was detected in datastream state.
+ 0x27 = When not all active lanes receiving SKP OS starting
+ at same cycle time in SKPOS state.
+ 0x28 = When a two-block timeout occurs for SKP OS in SKPOS state.
+ 0x29 = When receiving consecutive OS blocks within a data stream in SKPOS state.
+ 0x2A = When Phy status error was detected in SKPOS state.
+ 0x2B = When not all active lanes receiving EIOS starting at
+ same cycle time in EIOS state.
+ 0x2C = When at least one symbol from the first 4 symbols
+ is not EIOS symbol in EIOS state (CX_NB=2 only).
+ 0x2D = When not all active lanes receiving EIEOS starting
+ at same cycle time in EIEOS state.
+ 0x2E = When not full 16 eieos symbols are received in EIEOS state.
+
+ All other values not listed above are reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t framing_err_ptr : 7; /**< [ 6: 0](RO) First framing error pointer.
+ Identifies the first framing error using the following
+ encoding. The field contents are only valid value when
+ [FRAMING_ERR] = 1.
+
+ Received unexpected framing token:
+ 0x1 = When non-STP/SDP/IDL token was received and it
+ was not in TLP/DLLP reception.
+ 0x02 = When current token was not a valid EDB token and
+ previous token was an EDB. (128/256 bit core only).
+ 0x03 = When SDP token was received but not expected.
+ 0x04 = When STP token was received but not expected.
+ 0x05 = When EDS token was expected but not received or
+ whenever an EDS token was received but not expected.
+ 0x06 = When a framing error was detected in the deskew
+ block while a packet has been in progress in token_finder.
+ Received Unexpected STP Token
+ 0x11 = When framing CRC in STP token did not match.
+ 0x12 = When framing parity in STP token did not match.
+ 0x13 = When framing TLP length in STP token was
+ smaller than 5 DWORDs.
+
+ \<page\>
+
+ Received unexpected block:
+ 0x21 = When receiving an OS block following SDS in datastream state.
+ 0x22 = When data block followed by OS block different
+ from SKP, EI, EIE in datastream state.
+ 0x23 = When block with an undefined block type in datastream state.
+ 0x24 = When data stream without data over three cycles in datastream state.
+ 0x25 = When OS block during data stream in datastream state.
+ 0x26 = When RxStatus error was detected in datastream state.
+ 0x27 = When not all active lanes receiving SKP OS starting
+ at same cycle time in SKPOS state.
+ 0x28 = When a two-block timeout occurs for SKP OS in SKPOS state.
+ 0x29 = When receiving consecutive OS blocks within a data stream in SKPOS state.
+ 0x2A = When Phy status error was detected in SKPOS state.
+ 0x2B = When not all active lanes receiving EIOS starting at
+ same cycle time in EIOS state.
+ 0x2C = When at least one symbol from the first 4 symbols
+ is not EIOS symbol in EIOS state (CX_NB=2 only).
+ 0x2D = When not all active lanes receiving EIEOS starting
+ at same cycle time in EIEOS state.
+ 0x2E = When not full 16 eieos symbols are received in EIEOS state.
+
+ All other values not listed above are reserved. */
+ uint32_t framing_err : 1; /**< [ 7: 7](R/W1C) Framing error.
+ Indicates framing error detection status. */
+ uint32_t pipe_pwr_dwn : 3; /**< [ 10: 8](RO/H) PIPE:PowerDown.
+ Indicates PIPE PowerDown signal. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t lane_rev : 1; /**< [ 15: 15](RO/H) Lane reversal operation.
+ Receiver detected lane reversal. */
+ uint32_t ltssm_var : 16; /**< [ 31: 16](RO/H) LTSSM variable.
+ Indicates internal LTSSM variables defined in the PCI
+ Express base specification.
+ 0x0 = directed_speed change.
+ 0x1 = changed_speed_recovery.
+ 0x2 = successful_speed_negotiation.
+ 0x3 = upconfigure_capable; Set to one if both ports advertised
+ the UpConfigure capability in the last Config.Complete.
+ 0x4 = select_deemphasis.
+ 0x5 = start_equalization_w_preset.
+ 0x6 = equalization_done_8GT_data_rate.
+ 0x7 = equalization_done_16GT_data_rate.
+ 0x8-0xF = idle_to_rlock_transitioned. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_sd_l1ltssm_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_l1ltssm bdk_pciercx_ras_sd_l1ltssm_t;
+
+/* Config-space address lookup for PCIERC(a)_RAS_SD_L1LTSSM. Valid only on
+ CN9XXX parts with controller index a <= 3; any other combination reports a
+ fatal CSR lookup error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_PCIERCX_RAS_SD_L1LTSSM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_L1LTSSM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3ccll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_SD_L1LTSSM", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RAS_SD_L1LTSSM(a) bdk_pciercx_ras_sd_l1ltssm_t
+#define bustype_BDK_PCIERCX_RAS_SD_L1LTSSM(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_L1LTSSM(a) "PCIERCX_RAS_SD_L1LTSSM"
+#define busnum_BDK_PCIERCX_RAS_SD_L1LTSSM(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_L1LTSSM(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_statusl2
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L2 Register
+ * All fields are read-only hardware status (RO / RO/H); the two bitfield
+ * layouts below are the same fields in byte-order-dependent order.
+ */
+union bdk_pciercx_ras_sd_statusl2
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_sd_statusl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t fc_init2 : 1; /**< [ 27: 27](RO) Indicates the core is in FC_INIT2(VC0) state. */
+ uint32_t fc_init1 : 1; /**< [ 26: 26](RO) Indicates the core is in FC_INIT1(VC0) state. */
+ uint32_t dlcmsm : 2; /**< [ 25: 24](RO/H) Indicates the current DLCMSM.
+ 0x0 = DL_INACTIVE.
+ 0x1 = DL_FC_INIT.
+ 0x2 = Reserved.
+ 0x3 = DL_ACTIVE. */
+ uint32_t rx_ack_seq_no : 12; /**< [ 23: 12](RO/H) RX ACK sequence number.
+ Indicates the ack sequence number which is updated by receiving
+ ACK/NAK DLLP. */
+ uint32_t tx_ack_seq_no : 12; /**< [ 11: 0](RO/H) TX ACK sequence number.
+ Indicates next transmit sequence number for transmit TLP. */
+#else /* Word 0 - Little Endian */
+ uint32_t tx_ack_seq_no : 12; /**< [ 11: 0](RO/H) TX ACK sequence number.
+ Indicates next transmit sequence number for transmit TLP. */
+ uint32_t rx_ack_seq_no : 12; /**< [ 23: 12](RO/H) RX ACK sequence number.
+ Indicates the ack sequence number which is updated by receiving
+ ACK/NAK DLLP. */
+ uint32_t dlcmsm : 2; /**< [ 25: 24](RO/H) Indicates the current DLCMSM.
+ 0x0 = DL_INACTIVE.
+ 0x1 = DL_FC_INIT.
+ 0x2 = Reserved.
+ 0x3 = DL_ACTIVE. */
+ uint32_t fc_init1 : 1; /**< [ 26: 26](RO) Indicates the core is in FC_INIT1(VC0) state. */
+ uint32_t fc_init2 : 1; /**< [ 27: 27](RO) Indicates the core is in FC_INIT2(VC0) state. */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_sd_statusl2_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_statusl2 bdk_pciercx_ras_sd_statusl2_t;
+
+/* Config-space address lookup for PCIERC(a)_RAS_SD_STATUSL2. Valid only on
+ CN9XXX parts with controller index a <= 3; otherwise reports a fatal CSR
+ lookup error. */
+static inline uint64_t BDK_PCIERCX_RAS_SD_STATUSL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_STATUSL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3d4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_SD_STATUSL2", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RAS_SD_STATUSL2(a) bdk_pciercx_ras_sd_statusl2_t
+#define bustype_BDK_PCIERCX_RAS_SD_STATUSL2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_STATUSL2(a) "PCIERCX_RAS_SD_STATUSL2"
+#define busnum_BDK_PCIERCX_RAS_SD_STATUSL2(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_STATUSL2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_statusl3
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L3 Register
+ * Reports the first malformed TLP seen; [MFTLP_PTR] is only meaningful while
+ * [MFTLP_STATUS] is set, and [MFTLP_STATUS] is write-1-to-clear.
+ */
+union bdk_pciercx_ras_sd_statusl3
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_sd_statusl3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t mftlp_status : 1; /**< [ 7: 7](R/W1C) Malformed TLP status.
+ Indicates malformed TLP has occurred. */
+ uint32_t mftlp_ptr : 7; /**< [ 6: 0](RO) First malformed TLP error pointer.
+ Indicates the element of the received first malformed TLP.
+ This pointer is validated by [MFTLP_STATUS].
+ 0x01 = AtomicOp address alignment.
+ 0x02 = AtomicOp operand.
+ 0x03 = AtomicOp byte enable.
+ 0x04 = TLP length mismatch.
+ 0x05 = Max payload size.
+ 0x06 = Message TLP without TC0.
+ 0x07 = Invalid TC.
+ 0x08 = Unexpected route bit in message TLP.
+ 0x09 = Unexpected CRS status in completion TLP.
+ 0x0A = Byte enable.
+ 0x0B = Memory address 4KB boundary.
+ 0x0C = TLP prefix rules.
+ 0x0D = Translation request rules.
+ 0x0E = Invalid TLP type.
+ 0x0F = Completion rules.
+ 0x10-0x7E = Reserved.
+ 0x7F = Application. */
+#else /* Word 0 - Little Endian */
+ uint32_t mftlp_ptr : 7; /**< [ 6: 0](RO) First malformed TLP error pointer.
+ Indicates the element of the received first malformed TLP.
+ This pointer is validated by [MFTLP_STATUS].
+ 0x01 = AtomicOp address alignment.
+ 0x02 = AtomicOp operand.
+ 0x03 = AtomicOp byte enable.
+ 0x04 = TLP length mismatch.
+ 0x05 = Max payload size.
+ 0x06 = Message TLP without TC0.
+ 0x07 = Invalid TC.
+ 0x08 = Unexpected route bit in message TLP.
+ 0x09 = Unexpected CRS status in completion TLP.
+ 0x0A = Byte enable.
+ 0x0B = Memory address 4KB boundary.
+ 0x0C = TLP prefix rules.
+ 0x0D = Translation request rules.
+ 0x0E = Invalid TLP type.
+ 0x0F = Completion rules.
+ 0x10-0x7E = Reserved.
+ 0x7F = Application. */
+ uint32_t mftlp_status : 1; /**< [ 7: 7](R/W1C) Malformed TLP status.
+ Indicates malformed TLP has occurred. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_sd_statusl3_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_statusl3 bdk_pciercx_ras_sd_statusl3_t;
+
+/* Config-space address lookup for PCIERC(a)_RAS_SD_STATUSL3. Valid only on
+ CN9XXX parts with controller index a <= 3; otherwise reports a fatal CSR
+ lookup error. */
+static inline uint64_t BDK_PCIERCX_RAS_SD_STATUSL3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_STATUSL3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3dcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_SD_STATUSL3", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RAS_SD_STATUSL3(a) bdk_pciercx_ras_sd_statusl3_t
+#define bustype_BDK_PCIERCX_RAS_SD_STATUSL3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_STATUSL3(a) "PCIERCX_RAS_SD_STATUSL3"
+#define busnum_BDK_PCIERCX_RAS_SD_STATUSL3(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_STATUSL3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_statusl3fc
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status L3 FC Register
+ * (Title said "Status L2" in the vendor text; register name indicates the
+ * L3 flow-control debug viewport.)
+ * The CREDIT_SEL_* fields select which FC credit counter is exposed through
+ * the read-only [CREDIT_DATA0]/[CREDIT_DATA1] fields.
+ */
+union bdk_pciercx_ras_sd_statusl3fc
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_sd_statusl3fc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t credit_data1 : 12; /**< [ 31: 20](RO/H) Credit data 1.
+ Current FC credit data selected by the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields.
+ RX = Credit allocated value.
+ TX = Credit limit value. This value is valid when DLCMSM=0x3(DL_ACTIVE). */
+ uint32_t credit_data0 : 12; /**< [ 19: 8](RO/H) Credit data 0.
+ Current FC credit data selected by the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields.
+ RX = Credit received value.
+ TX = Credit consumed value. */
+ uint32_t reserved_7 : 1;
+ uint32_t credit_sel_hd : 1; /**< [ 6: 6](R/W) Credit select (HeaderData).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], and
+ [CREDIT_SEL_TLP_TYPE] viewport-select fields determines
+ the data that is returned by the [CREDIT_DATA0] and
+ [CREDIT_DATA1] data fields.
+ 0x0 = Header credit.
+ 0x1 = Data credit. */
+ uint32_t credit_sel_tlp_type : 2; /**< [ 5: 4](R/W) Credit select (TLP Type).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], and [CREDIT_SEL_HD]
+ viewport-select fields determines the data that is returned
+ by the [CREDIT_DATA0] and [CREDIT_DATA1] data fields.
+ 0x0 = Posted.
+ 0x1 = Non-posted.
+ 0x2 = Completion.
+ 0x3 = Reserved. */
+ uint32_t credit_sel_credit_type : 1; /**< [ 3: 3](R/W) Credit select (credit type).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_TLP_TYPE], and [CREDIT_SEL_HD] viewport-select
+ fields determines the data that is returned by the
+ [CREDIT_DATA0] and [CREDIT_DATA1] data fields.
+ 0x0 = RX.
+ 0x1 = TX. */
+ uint32_t credit_sel_vc : 3; /**< [ 2: 0](R/W) Credit select (VC).
+ This field in conjunction with the
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields determines the
+ data that is returned by the [CREDIT_DATA0] and
+ [CREDIT_DATA1] data fields.
+ 0x0 = VC0.
+ 0x1 = VC1.
+ 0x2 = VC2.
+ ...
+ 0x7 = VC7. */
+#else /* Word 0 - Little Endian */
+ uint32_t credit_sel_vc : 3; /**< [ 2: 0](R/W) Credit select (VC).
+ This field in conjunction with the
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields determines the
+ data that is returned by the [CREDIT_DATA0] and
+ [CREDIT_DATA1] data fields.
+ 0x0 = VC0.
+ 0x1 = VC1.
+ 0x2 = VC2.
+ ...
+ 0x7 = VC7. */
+ uint32_t credit_sel_credit_type : 1; /**< [ 3: 3](R/W) Credit select (credit type).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_TLP_TYPE], and [CREDIT_SEL_HD] viewport-select
+ fields determines the data that is returned by the
+ [CREDIT_DATA0] and [CREDIT_DATA1] data fields.
+ 0x0 = RX.
+ 0x1 = TX. */
+ uint32_t credit_sel_tlp_type : 2; /**< [ 5: 4](R/W) Credit select (TLP Type).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], and [CREDIT_SEL_HD]
+ viewport-select fields determines the data that is returned
+ by the [CREDIT_DATA0] and [CREDIT_DATA1] data fields.
+ 0x0 = Posted.
+ 0x1 = Non-posted.
+ 0x2 = Completion.
+ 0x3 = Reserved. */
+ uint32_t credit_sel_hd : 1; /**< [ 6: 6](R/W) Credit select (HeaderData).
+ This field in conjunction with the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], and
+ [CREDIT_SEL_TLP_TYPE] viewport-select fields determines
+ the data that is returned by the [CREDIT_DATA0] and
+ [CREDIT_DATA1] data fields.
+ 0x0 = Header credit.
+ 0x1 = Data credit. */
+ uint32_t reserved_7 : 1;
+ uint32_t credit_data0 : 12; /**< [ 19: 8](RO/H) Credit data 0.
+ Current FC credit data selected by the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields.
+ RX = Credit received value.
+ TX = Credit consumed value. */
+ uint32_t credit_data1 : 12; /**< [ 31: 20](RO/H) Credit data 1.
+ Current FC credit data selected by the [CREDIT_SEL_VC],
+ [CREDIT_SEL_CREDIT_TYPE], [CREDIT_SEL_TLP_TYPE],
+ and [CREDIT_SEL_HD] viewport-select fields.
+ RX = Credit allocated value.
+ TX = Credit limit value. This value is valid when DLCMSM=0x3(DL_ACTIVE). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_sd_statusl3fc_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_statusl3fc bdk_pciercx_ras_sd_statusl3fc_t;
+
+/* Config-space address lookup for PCIERC(a)_RAS_SD_STATUSL3FC. Valid only on
+ CN9XXX parts with controller index a <= 3; otherwise reports a fatal CSR
+ lookup error. */
+static inline uint64_t BDK_PCIERCX_RAS_SD_STATUSL3FC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_STATUSL3FC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3d8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_SD_STATUSL3FC", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RAS_SD_STATUSL3FC(a) bdk_pciercx_ras_sd_statusl3fc_t
+#define bustype_BDK_PCIERCX_RAS_SD_STATUSL3FC(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_STATUSL3FC(a) "PCIERCX_RAS_SD_STATUSL3FC"
+#define busnum_BDK_PCIERCX_RAS_SD_STATUSL3FC(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_STATUSL3FC(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_sd_statuspm
+ *
+ * PCIe RC Vendor RAS DES Silicon Debug Status PM Register
+ * All fields are read-only power-management state snapshots (RO / RO/H).
+ * NOTE(review): some encoding ranges below look inconsistent in the vendor
+ * text (e.g. "0x10-0x1F = Reserved" directly after 0x10 is defined, and
+ * "0x20-0x1F = Reserved"); confirm against the CN9XXX hardware manual.
+ */
+union bdk_pciercx_ras_sd_statuspm
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_sd_statuspm_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t latched_nfts : 8; /**< [ 23: 16](RO/H) Latched N_FTS.
+ Indicates the value of N_FTS in the received TS ordered
+ sets from the link partner. */
+ uint32_t l1sub_state : 3; /**< [ 15: 13](RO/H) Indicates the internal L1Sub state machine state.
+ Internal:
+ 0x0 = Idle state.
+ 0x1 = Wait for aux_clk_active.
+ 0x2 = Wait for pclkack.
+ 0x3 = Wait for clkreq.
+ 0x4 = Check clkreq_in_n is de-asserted for t_power_off time.
+ 0x5 = L1 substate, turn off txcommonmode circuits (L1.2 only)
+ and rx electrical idle detection circuits.
+ 0x6 = Locally/remotely initiated exit, assert pclkreq, wait for pclkack.
+ 0x7 = Wait for pclkack when aborting an attempt to enter L1_N. */
+ uint32_t pme_rsnd_flag : 1; /**< [ 12: 12](RO) PME resend flag.
+ When the DUT sends a PM_PME message TLP, the DUT
+ sets PME_Status bit. If host software does not clear
+ PME_Status bit for 100ms (+50%/-5%), the DUT resends the
+ PM_PME message. This bit indicates that a PM_PME was
+ resent. */
+ uint32_t int_pm_sstate : 4; /**< [ 11: 8](RO/H) Internal PM state (slave).
+ Indicates internal state machine of power management
+ slave controller.
+ 0x00 = S_IDLE.
+ 0x01 = S_RESPOND_NAK.
+ 0x02 = S_BLOCK_TLP.
+ 0x03 = S_WAIT_LAST_TLP_ACK.
+ 0x04 = S_WAIT_EIDLE.
+ 0x08 = S_LINK_ENTR_L1.
+ 0x09 = S_L1.
+ 0x0A = S_L1_EXIT.
+ 0x0B = S_L23RDY.
+ 0x0C = S_LINK_ENTR_L23.
+ 0x0D = S_L23RDY_WAIT4ALIVE.
+ 0x0F = S_L23RDY_WAIT4IDLE.
+ 0x10 = S_WAIT_LAST_PMDLLP.
+ 0x10-0x1F = Reserved. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t int_pm_mstate : 5; /**< [ 4: 0](RO/H) Internal PM state (master).
+ Indicates internal state machine of power management
+ master controller.
+ 0x00 = IDLE.
+ 0x01 = L0.
+ 0x02 = L0S.
+ 0x03 = ENTER_L0S.
+ 0x04 = L0S_EXIT.
+ 0x08 = L1.
+ 0x09 = L1_BLOCK_TLP.
+ 0x0A = L1_WAIT_LAST_TLP_ACK.
+ 0x0B = L1_WAIT_PMDLLP_ACK.
+ 0x0C = L1_LINK_ENTR_L1.
+ 0x0D = L1_EXIT.
+ 0x0F = PREP_4L1.
+ 0x10 = L23_BLOCK_TLP.
+ 0x11 = L23_WAIT_LAST_TLP_ACK.
+ 0x12 = L23_WAIT_PMDLLP_ACK.
+ 0x13 = L23_ENTR_L23.
+ 0x14 = L23RDY.
+ 0x15 = PREP_4L23.
+ 0x16 = L23RDY_WAIT4ALIVE.
+ 0x17 = L0S_BLOCK_TLP.
+ 0x18 = WAIT_LAST_PMDLLP.
+ 0x19 = WAIT_DSTATE_UPDATE.
+ 0x20-0x1F = Reserved. */
+#else /* Word 0 - Little Endian */
+ uint32_t int_pm_mstate : 5; /**< [ 4: 0](RO/H) Internal PM state (master).
+ Indicates internal state machine of power management
+ master controller.
+ 0x00 = IDLE.
+ 0x01 = L0.
+ 0x02 = L0S.
+ 0x03 = ENTER_L0S.
+ 0x04 = L0S_EXIT.
+ 0x08 = L1.
+ 0x09 = L1_BLOCK_TLP.
+ 0x0A = L1_WAIT_LAST_TLP_ACK.
+ 0x0B = L1_WAIT_PMDLLP_ACK.
+ 0x0C = L1_LINK_ENTR_L1.
+ 0x0D = L1_EXIT.
+ 0x0F = PREP_4L1.
+ 0x10 = L23_BLOCK_TLP.
+ 0x11 = L23_WAIT_LAST_TLP_ACK.
+ 0x12 = L23_WAIT_PMDLLP_ACK.
+ 0x13 = L23_ENTR_L23.
+ 0x14 = L23RDY.
+ 0x15 = PREP_4L23.
+ 0x16 = L23RDY_WAIT4ALIVE.
+ 0x17 = L0S_BLOCK_TLP.
+ 0x18 = WAIT_LAST_PMDLLP.
+ 0x19 = WAIT_DSTATE_UPDATE.
+ 0x20-0x1F = Reserved. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t int_pm_sstate : 4; /**< [ 11: 8](RO/H) Internal PM state (slave).
+ Indicates internal state machine of power management
+ slave controller.
+ 0x00 = S_IDLE.
+ 0x01 = S_RESPOND_NAK.
+ 0x02 = S_BLOCK_TLP.
+ 0x03 = S_WAIT_LAST_TLP_ACK.
+ 0x04 = S_WAIT_EIDLE.
+ 0x08 = S_LINK_ENTR_L1.
+ 0x09 = S_L1.
+ 0x0A = S_L1_EXIT.
+ 0x0B = S_L23RDY.
+ 0x0C = S_LINK_ENTR_L23.
+ 0x0D = S_L23RDY_WAIT4ALIVE.
+ 0x0F = S_L23RDY_WAIT4IDLE.
+ 0x10 = S_WAIT_LAST_PMDLLP.
+ 0x10-0x1F = Reserved. */
+ uint32_t pme_rsnd_flag : 1; /**< [ 12: 12](RO) PME resend flag.
+ When the DUT sends a PM_PME message TLP, the DUT
+ sets PME_Status bit. If host software does not clear
+ PME_Status bit for 100ms (+50%/-5%), the DUT resends the
+ PM_PME message. This bit indicates that a PM_PME was
+ resent. */
+ uint32_t l1sub_state : 3; /**< [ 15: 13](RO/H) Indicates the internal L1Sub state machine state.
+ Internal:
+ 0x0 = Idle state.
+ 0x1 = Wait for aux_clk_active.
+ 0x2 = Wait for pclkack.
+ 0x3 = Wait for clkreq.
+ 0x4 = Check clkreq_in_n is de-asserted for t_power_off time.
+ 0x5 = L1 substate, turn off txcommonmode circuits (L1.2 only)
+ and rx electrical idle detection circuits.
+ 0x6 = Locally/remotely initiated exit, assert pclkreq, wait for pclkack.
+ 0x7 = Wait for pclkack when aborting an attempt to enter L1_N. */
+ uint32_t latched_nfts : 8; /**< [ 23: 16](RO/H) Latched N_FTS.
+ Indicates the value of N_FTS in the received TS ordered
+ sets from the link partner. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_sd_statuspm_s cn; */
+};
+typedef union bdk_pciercx_ras_sd_statuspm bdk_pciercx_ras_sd_statuspm_t;
+
+/* Config-space address lookup for PCIERC(a)_RAS_SD_STATUSPM. Valid only on
+ CN9XXX parts with controller index a <= 3; otherwise reports a fatal CSR
+ lookup error. */
+static inline uint64_t BDK_PCIERCX_RAS_SD_STATUSPM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_SD_STATUSPM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x3d0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_SD_STATUSPM", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RAS_SD_STATUSPM(a) bdk_pciercx_ras_sd_statuspm_t
+#define bustype_BDK_PCIERCX_RAS_SD_STATUSPM(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_SD_STATUSPM(a) "PCIERCX_RAS_SD_STATUSPM"
+#define busnum_BDK_PCIERCX_RAS_SD_STATUSPM(a) (a)
+#define arguments_BDK_PCIERCX_RAS_SD_STATUSPM(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_tba_ctl
+ *
+ * PCIe RC Vendor RAS DES Time Based Analysis Control Register
+ * Selects what is measured and for how long; measurement results are read
+ * back through PCIERC_RAS_TBA_DATA[TBASE_DATA].
+ */
+union bdk_pciercx_ras_tba_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_tba_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tbase_rpt_sel : 8; /**< [ 31: 24](R/W) Time-based report select. Selects what type of data is measured for the selected
+ duration [TBASE_DUR_SEL]. Data is returned in PCIERC_RAS_TBA_DATA[TBASE_DATA].
+
+ Each type of data is measured using one of three types of units.
+
+ Core clock cycles.
+ 0x0 = Duration of 1 cycle.
+ 0x1 = TxL0s.
+ 0x2 = RxL0s.
+ 0x3 = L0.
+ 0x4 = L1.
+ 0x7 = Configuration/recovery.
+
+ Aux_clk cycles.
+ 0x5 = L1.1.
+ 0x6 = L1.2.
+
+ Data bytes. Actual amount is 16x value.
+ 0x20 = TX TLP Bytes.
+ 0x21 = RX TLP Bytes. */
+ uint32_t reserved_16_23 : 8;
+ uint32_t tbase_dur_sel : 8; /**< [ 15: 8](R/W) Time-based duration select. Selects the duration of time-based
+ analysis.
+
+ 0x0 = Manual control. Analysis controlled by [TIMER_START].
+ 0x1 = 1 ms.
+ 0x2 = 10 ms.
+ 0x3 = 100 ms.
+ 0x4 = 1 s.
+ 0x5 = 2 s.
+ 0x6 = 4 s.
+ 0x7 - 0xF = Reserved. */
+ uint32_t reserved_1_7 : 7;
+ uint32_t timer_start : 1; /**< [ 0: 0](R/W) Timer start.
+
+ 0x0 = Start/restart.
+ 0x1 = Stop.
+
+ This bit will be cleared automatically when the measurement is finished. */
+#else /* Word 0 - Little Endian */
+ uint32_t timer_start : 1; /**< [ 0: 0](R/W) Timer start.
+
+ 0x0 = Start/restart.
+ 0x1 = Stop.
+
+ This bit will be cleared automatically when the measurement is finished. */
+ uint32_t reserved_1_7 : 7;
+ uint32_t tbase_dur_sel : 8; /**< [ 15: 8](R/W) Time-based duration select. Selects the duration of time-based
+ analysis.
+
+ 0x0 = Manual control. Analysis controlled by [TIMER_START].
+ 0x1 = 1 ms.
+ 0x2 = 10 ms.
+ 0x3 = 100 ms.
+ 0x4 = 1 s.
+ 0x5 = 2 s.
+ 0x6 = 4 s.
+ 0x7 - 0xF = Reserved. */
+ uint32_t reserved_16_23 : 8;
+ uint32_t tbase_rpt_sel : 8; /**< [ 31: 24](R/W) Time-based report select. Selects what type of data is measured for the selected
+ duration [TBASE_DUR_SEL]. Data is returned in PCIERC_RAS_TBA_DATA[TBASE_DATA].
+
+ Each type of data is measured using one of three types of units.
+
+ Core clock cycles.
+ 0x0 = Duration of 1 cycle.
+ 0x1 = TxL0s.
+ 0x2 = RxL0s.
+ 0x3 = L0.
+ 0x4 = L1.
+ 0x7 = Configuration/recovery.
+
+ Aux_clk cycles.
+ 0x5 = L1.1.
+ 0x6 = L1.2.
+
+ Data bytes. Actual amount is 16x value.
+ 0x20 = TX TLP Bytes.
+ 0x21 = RX TLP Bytes. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_tba_ctl_s cn; */
+};
+typedef union bdk_pciercx_ras_tba_ctl bdk_pciercx_ras_tba_ctl_t;
+
+/* Config-space address lookup for PCIERC(a)_RAS_TBA_CTL. Valid only on
+ CN9XXX parts with controller index a <= 3; otherwise reports a fatal CSR
+ lookup error. */
+static inline uint64_t BDK_PCIERCX_RAS_TBA_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_TBA_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x328ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_TBA_CTL", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RAS_TBA_CTL(a) bdk_pciercx_ras_tba_ctl_t
+#define bustype_BDK_PCIERCX_RAS_TBA_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_TBA_CTL(a) "PCIERCX_RAS_TBA_CTL"
+#define busnum_BDK_PCIERCX_RAS_TBA_CTL(a) (a)
+#define arguments_BDK_PCIERCX_RAS_TBA_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ras_tba_data
+ *
+ * PCIe RC Vendor RAS DES Time Based Analysis Data Register
+ * Read-only result of the measurement configured in PCIERC_RAS_TBA_CTL.
+ */
+union bdk_pciercx_ras_tba_data
+{
+ uint32_t u;
+ struct bdk_pciercx_ras_tba_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tbase_data : 32; /**< [ 31: 0](RO/H) Time-based analysis data. This register returns data selected in the
+ PCIERC_RAS_TBA_CTL[TBASE_RPT_SEL] field. The results are cleared when
+ the next measurement starts. */
+#else /* Word 0 - Little Endian */
+ uint32_t tbase_data : 32; /**< [ 31: 0](RO/H) Time-based analysis data. This register returns data selected in the
+ PCIERC_RAS_TBA_CTL[TBASE_RPT_SEL] field. The results are cleared when
+ the next measurement starts. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ras_tba_data_s cn; */
+};
+typedef union bdk_pciercx_ras_tba_data bdk_pciercx_ras_tba_data_t;
+
+/* Config-space address lookup for PCIERC(a)_RAS_TBA_DATA. Valid only on
+ CN9XXX parts with controller index a <= 3; otherwise reports a fatal CSR
+ lookup error. */
+static inline uint64_t BDK_PCIERCX_RAS_TBA_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RAS_TBA_DATA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x32cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RAS_TBA_DATA", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RAS_TBA_DATA(a) bdk_pciercx_ras_tba_data_t
+#define bustype_BDK_PCIERCX_RAS_TBA_DATA(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RAS_TBA_DATA(a) "PCIERCX_RAS_TBA_DATA"
+#define busnum_BDK_PCIERCX_RAS_TBA_DATA(a) (a)
+#define arguments_BDK_PCIERCX_RAS_TBA_DATA(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_cap_hdr
+ *
+ * PCIe RC Vendor RAS Data Path Protection Header Register
+ * Standard PCIe extended capability header layout: capability ID, capability
+ * version, and next-capability offset.
+ */
+union bdk_pciercx_rasdp_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_rasdp_cap_hdr bdk_pciercx_rasdp_cap_hdr_t;
+
+/* Config-space address lookup for PCIERC(a)_RASDP_CAP_HDR. Valid only on
+ CN9XXX parts with controller index a <= 3; otherwise reports a fatal CSR
+ lookup error. */
+static inline uint64_t BDK_PCIERCX_RASDP_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_CAP_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x418ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RASDP_CAP_HDR(a) bdk_pciercx_rasdp_cap_hdr_t
+#define bustype_BDK_PCIERCX_RASDP_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_CAP_HDR(a) "PCIERCX_RASDP_CAP_HDR"
+#define busnum_BDK_PCIERCX_RASDP_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_ce_ctl
+ *
+ * PCIe RC RAS Data Path Correctable Error Control Register
+ * NOTE(review): the descriptions of [CORR_EN_CNTRS] and [EP_DIS_L3_RX] below
+ * do not match the field names (they read as if swapped/garbled in the vendor
+ * text); verify against the CN9XXX hardware reference manual before relying
+ * on them.
+ */
+union bdk_pciercx_rasdp_ce_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_ce_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t corr_cnt_sel : 8; /**< [ 31: 24](R/W) Counter selection. This field selects the counter ID (within
+ the region defined by [CORR_CNT_SEL_REG]) whose contents
+ can be read from PCIERC_RAS_TBA_CTL. You can
+ cycle this field value from 0 to 255 to access all counters. */
+ uint32_t corr_cnt_sel_reg : 4; /**< [ 23: 20](R/W) Selected correctable counter region.
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA read engine inbound (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion buffer path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_5_19 : 15;
+ uint32_t corr_en_cntrs : 1; /**< [ 4: 4](R/W) Error correction disable for ADM RX path. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t ep_dis_l3_rx : 1; /**< [ 0: 0](R/W1C) Clears all correctable error counters. */
+#else /* Word 0 - Little Endian */
+ uint32_t ep_dis_l3_rx : 1; /**< [ 0: 0](R/W1C) Clears all correctable error counters. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t corr_en_cntrs : 1; /**< [ 4: 4](R/W) Error correction disable for ADM RX path. */
+ uint32_t reserved_5_19 : 15;
+ uint32_t corr_cnt_sel_reg : 4; /**< [ 23: 20](R/W) Selected correctable counter region.
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA read engine inbound (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion buffer path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t corr_cnt_sel : 8; /**< [ 31: 24](R/W) Counter selection. This field selects the counter ID (within
+ the region defined by [CORR_CNT_SEL_REG]) whose contents
+ can be read from PCIERC_RAS_TBA_CTL. You can
+ cycle this field value from 0 to 255 to access all counters. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_ce_ctl_s cn; */
+};
+typedef union bdk_pciercx_rasdp_ce_ctl bdk_pciercx_rasdp_ce_ctl_t;
+
+/* Config-space address lookup for PCIERC(a)_RASDP_CE_CTL. Valid only on
+ CN9XXX parts with controller index a <= 3; otherwise reports a fatal CSR
+ lookup error. */
+static inline uint64_t BDK_PCIERCX_RASDP_CE_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_CE_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x424ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_CE_CTL", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (CSR type, bus type, printable name, bus number, argument list). */
+#define typedef_BDK_PCIERCX_RASDP_CE_CTL(a) bdk_pciercx_rasdp_ce_ctl_t
+#define bustype_BDK_PCIERCX_RASDP_CE_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_CE_CTL(a) "PCIERCX_RASDP_CE_CTL"
+#define busnum_BDK_PCIERCX_RASDP_CE_CTL(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_CE_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_ce_ictl
+ *
+ * PCIe RC RAS Data Correctable Error Injection Control Register
+ * Controls the data-path error-injection logic: global enable, injection
+ * type (1-bit/2-bit), count, and location.
+ */
+union bdk_pciercx_rasdp_ce_ictl
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_ce_ictl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t err_inj_loc : 8; /**< [ 23: 16](R/W) Error injection location. Selects where error injection takes place. You
+ can cycle this field value from 0 to 255 to access all locations. */
+ uint32_t err_inj_cnt : 8; /**< [ 15: 8](R/W) Error injection count.
+ 0x0 = errors are injected in every TLP until [ERR_INJ_EN] is cleared.
+ 0x1 - 0xFF = number of errors injected. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t err_inj_type : 2; /**< [ 5: 4](R/W) Error injection type.
+ 0x0 = None.
+ 0x1 = 1-bit.
+ 0x2 = 2-bit.
+ 0x3 = Reserved. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t err_inj_en : 1; /**< [ 0: 0](R/W) Error injection global enable. When set, enables the error
+ insertion logic. */
+#else /* Word 0 - Little Endian */
+ uint32_t err_inj_en : 1; /**< [ 0: 0](R/W) Error injection global enable. When set, enables the error
+ insertion logic. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t err_inj_type : 2; /**< [ 5: 4](R/W) Error injection type.
+ 0x0 = None.
+ 0x1 = 1-bit.
+ 0x2 = 2-bit.
+ 0x3 = Reserved. */
+ uint32_t reserved_6_7 : 2;
+ uint32_t err_inj_cnt : 8; /**< [ 15: 8](R/W) Error injection count.
+ 0x0 = errors are injected in every TLP until [ERR_INJ_EN] is cleared.
+ 0x1 - 0xFF = number of errors injected. */
+ uint32_t err_inj_loc : 8; /**< [ 23: 16](R/W) Error injection location. Selects where error injection takes place. You
+ can cycle this field value from 0 to 255 to access all locations. */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_ce_ictl_s cn; */
+};
+typedef union bdk_pciercx_rasdp_ce_ictl bdk_pciercx_rasdp_ce_ictl_t;
+
+static inline uint64_t BDK_PCIERCX_RASDP_CE_ICTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_CE_ICTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x434ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_CE_ICTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_RASDP_CE_ICTL(a) bdk_pciercx_rasdp_ce_ictl_t
+#define bustype_BDK_PCIERCX_RASDP_CE_ICTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_CE_ICTL(a) "PCIERCX_RASDP_CE_ICTL"
+#define busnum_BDK_PCIERCX_RASDP_CE_ICTL(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_CE_ICTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_ce_loc
+ *
+ * PCIe RC RAS Data Correctable Error Location Register
+ */
+union bdk_pciercx_rasdp_ce_loc
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_ce_loc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t loc_last_corr_err : 8; /**< [ 31: 24](RO) Location/ID of the last corrected error within the region defined by
+ [REG_LAST_CORR_ERR]. */
+ uint32_t reg_last_corr_err : 4; /**< [ 23: 20](RO) Region of last corrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t loc_first_corr_err : 8; /**< [ 15: 8](RO) Location/ID of the first corrected error within the region defined by
+ [REG_FIRST_CORR_ERR]. */
+ uint32_t reg_first_corr_err : 4; /**< [ 7: 4](RO) Region of first corrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA read engine (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA write engine (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t reg_first_corr_err : 4; /**< [ 7: 4](RO) Region of first corrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA read engine (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA write engine (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t loc_first_corr_err : 8; /**< [ 15: 8](RO) Location/ID of the first corrected error within the region defined by
+ [REG_FIRST_CORR_ERR]. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t reg_last_corr_err : 4; /**< [ 23: 20](RO) Region of last corrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t loc_last_corr_err : 8; /**< [ 31: 24](RO) Location/ID of the last corrected error within the region defined by
+ [REG_LAST_CORR_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_ce_loc_s cn; */
+};
+typedef union bdk_pciercx_rasdp_ce_loc bdk_pciercx_rasdp_ce_loc_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_CE_LOC; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_CE_LOC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_CE_LOC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x438ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_CE_LOC", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_CE_LOC(a) bdk_pciercx_rasdp_ce_loc_t
+#define bustype_BDK_PCIERCX_RASDP_CE_LOC(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_CE_LOC(a) "PCIERCX_RASDP_CE_LOC"
+#define busnum_BDK_PCIERCX_RASDP_CE_LOC(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_CE_LOC(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_ce_rp
+ *
+ * PCIe RC RAS Data Path Correctable Error Report Register
+ */
+union bdk_pciercx_rasdp_ce_rp
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_ce_rp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t corr_cnt_sel : 8; /**< [ 31: 24](RO/H) Counter selection. Returns the value set in PCIERC_RASDP_CE_CTL[CORR_CNT_SEL]. */
+ uint32_t corr_cnt_sel_reg : 4; /**< [ 23: 20](RO/H) Selected correctable counter region.
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_8_19 : 12;
+ uint32_t corr_count : 8; /**< [ 7: 0](RO) Current corrected count for the selected counter. */
+#else /* Word 0 - Little Endian */
+ uint32_t corr_count : 8; /**< [ 7: 0](RO) Current corrected count for the selected counter. */
+ uint32_t reserved_8_19 : 12;
+ uint32_t corr_cnt_sel_reg : 4; /**< [ 23: 20](RO/H) Selected correctable counter region.
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t corr_cnt_sel : 8; /**< [ 31: 24](RO/H) Counter selection. Returns the value set in PCIERC_RASDP_CE_CTL[CORR_CNT_SEL]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_ce_rp_s cn; */
+};
+typedef union bdk_pciercx_rasdp_ce_rp bdk_pciercx_rasdp_ce_rp_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_CE_RP; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_CE_RP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_CE_RP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x428ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_CE_RP", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_CE_RP(a) bdk_pciercx_rasdp_ce_rp_t
+#define bustype_BDK_PCIERCX_RASDP_CE_RP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_CE_RP(a) "PCIERCX_RASDP_CE_RP"
+#define busnum_BDK_PCIERCX_RASDP_CE_RP(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_CE_RP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_de_mc
+ *
+ * PCIe RC RAS Data Error Mode Clear Register
+ */
+union bdk_pciercx_rasdp_de_mc
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_de_mc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t err_mode_clr : 1; /**< [ 0: 0](R/W1C) Set this bit to take the core out of RASDP error mode. The core will then report
+ uncorrectable
+ errors (through AER internal error reporting) and also stop nullifying/discarding TLPs. */
+#else /* Word 0 - Little Endian */
+ uint32_t err_mode_clr : 1; /**< [ 0: 0](R/W1C) Set this bit to take the core out of RASDP error mode. The core will then report
+ uncorrectable
+ errors (through AER internal error reporting) and also stop nullifying/discarding TLPs. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_de_mc_s cn; */
+};
+typedef union bdk_pciercx_rasdp_de_mc bdk_pciercx_rasdp_de_mc_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_DE_MC; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_DE_MC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_DE_MC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x444ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_DE_MC", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_DE_MC(a) bdk_pciercx_rasdp_de_mc_t
+#define bustype_BDK_PCIERCX_RASDP_DE_MC(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_DE_MC(a) "PCIERCX_RASDP_DE_MC"
+#define busnum_BDK_PCIERCX_RASDP_DE_MC(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_DE_MC(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_de_me
+ *
+ * PCIe RC RAS Data Error Mode Enable Register
+ */
+union bdk_pciercx_rasdp_de_me
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_de_me_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t auto_lnk_dn_en : 1; /**< [ 1: 1](R/W) Set this bit to enable the core to bring the link down when RASDP error mode is entered. */
+ uint32_t err_mode_en : 1; /**< [ 0: 0](R/W) Set this bit to enable the core to enter RASDP error mode when it detects an uncorrectable error. */
+#else /* Word 0 - Little Endian */
+ uint32_t err_mode_en : 1; /**< [ 0: 0](R/W) Set this bit to enable the core to enter RASDP error mode when it detects an uncorrectable error. */
+ uint32_t auto_lnk_dn_en : 1; /**< [ 1: 1](R/W) Set this bit to enable the core to bring the link down when RASDP error mode is entered. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_de_me_s cn; */
+};
+typedef union bdk_pciercx_rasdp_de_me bdk_pciercx_rasdp_de_me_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_DE_ME; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_DE_ME(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_DE_ME(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x440ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_DE_ME", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_DE_ME(a) bdk_pciercx_rasdp_de_me_t
+#define bustype_BDK_PCIERCX_RASDP_DE_ME(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_DE_ME(a) "PCIERCX_RASDP_DE_ME"
+#define busnum_BDK_PCIERCX_RASDP_DE_ME(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_DE_ME(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_ep_ctl
+ *
+ * PCIe RC RAS Data Path Error Protection Control Register
+ */
+union bdk_pciercx_rasdp_ep_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_ep_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_23_31 : 9;
+ uint32_t ep_dis_adm_rx : 1; /**< [ 22: 22](R/W) Error correction disable for ADM RX path. */
+ uint32_t ep_dis_l3_rx : 1; /**< [ 21: 21](R/W) Error correction disable for layer 3 RX path. */
+ uint32_t ep_dis_l2_rx : 1; /**< [ 20: 20](R/W) Error correction disable for layer 2 RX path. */
+ uint32_t ep_dis_dma_rd : 1; /**< [ 19: 19](R/W) Error correction disable for DMA read (not supported). */
+ uint32_t ep_dis_axib_inbr : 1; /**< [ 18: 18](R/W) Error correction disable for AXI bridge inbound request path (not supported). */
+ uint32_t ep_dis_axib_inbc : 1; /**< [ 17: 17](R/W) Error correction disable for AXI bridge inbound completion composer (not supported). */
+ uint32_t ep_dis_rx : 1; /**< [ 16: 16](R/W) Global error correction disable for all RX layers. */
+ uint32_t reserved_7_15 : 9;
+ uint32_t ep_dis_adm_tx : 1; /**< [ 6: 6](R/W) Error correction disable for ADM TX path. */
+ uint32_t ep_dis_l3_tx : 1; /**< [ 5: 5](R/W) Error correction disable for layer 3 TX path. */
+ uint32_t ep_dis_l2_tx : 1; /**< [ 4: 4](R/W) Error correction disable for layer 2 TX path. */
+ uint32_t ep_dis_dma_wr : 1; /**< [ 3: 3](R/W) Error correction disable for DMA write (not supported). */
+ uint32_t ep_dis_axib_outb : 1; /**< [ 2: 2](R/W) Error correction disable for AXI bridge outbound request path (not supported). */
+ uint32_t ep_dis_axib_masc : 1; /**< [ 1: 1](R/W) Error correction disable for AXI bridge master completion buffer (not supported). */
+ uint32_t ep_dis_tx : 1; /**< [ 0: 0](R/W) Global error correction disable for all TX layers. */
+#else /* Word 0 - Little Endian */
+ uint32_t ep_dis_tx : 1; /**< [ 0: 0](R/W) Global error correction disable for all TX layers. */
+ uint32_t ep_dis_axib_masc : 1; /**< [ 1: 1](R/W) Error correction disable for AXI bridge master completion buffer (not supported). */
+ uint32_t ep_dis_axib_outb : 1; /**< [ 2: 2](R/W) Error correction disable for AXI bridge outbound request path (not supported). */
+ uint32_t ep_dis_dma_wr : 1; /**< [ 3: 3](R/W) Error correction disable for DMA write (not supported). */
+ uint32_t ep_dis_l2_tx : 1; /**< [ 4: 4](R/W) Error correction disable for layer 2 TX path. */
+ uint32_t ep_dis_l3_tx : 1; /**< [ 5: 5](R/W) Error correction disable for layer 3 TX path. */
+ uint32_t ep_dis_adm_tx : 1; /**< [ 6: 6](R/W) Error correction disable for ADM TX path. */
+ uint32_t reserved_7_15 : 9;
+ uint32_t ep_dis_rx : 1; /**< [ 16: 16](R/W) Global error correction disable for all RX layers. */
+ uint32_t ep_dis_axib_inbc : 1; /**< [ 17: 17](R/W) Error correction disable for AXI bridge inbound completion composer (not supported). */
+ uint32_t ep_dis_axib_inbr : 1; /**< [ 18: 18](R/W) Error correction disable for AXI bridge inbound request path (not supported). */
+ uint32_t ep_dis_dma_rd : 1; /**< [ 19: 19](R/W) Error correction disable for DMA read (not supported). */
+ uint32_t ep_dis_l2_rx : 1; /**< [ 20: 20](R/W) Error correction disable for layer 2 RX path. */
+ uint32_t ep_dis_l3_rx : 1; /**< [ 21: 21](R/W) Error correction disable for layer 3 RX path. */
+ uint32_t ep_dis_adm_rx : 1; /**< [ 22: 22](R/W) Error correction disable for ADM RX path. */
+ uint32_t reserved_23_31 : 9;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_ep_ctl_s cn; */
+};
+typedef union bdk_pciercx_rasdp_ep_ctl bdk_pciercx_rasdp_ep_ctl_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_EP_CTL; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_EP_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_EP_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x420ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_EP_CTL", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_EP_CTL(a) bdk_pciercx_rasdp_ep_ctl_t
+#define bustype_BDK_PCIERCX_RASDP_EP_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_EP_CTL(a) "PCIERCX_RASDP_EP_CTL"
+#define busnum_BDK_PCIERCX_RASDP_EP_CTL(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_EP_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_hdr
+ *
+ * PCIe RC RAS Data Path Extended Capability Register
+ */
+union bdk_pciercx_rasdp_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vsec_length : 12; /**< [ 31: 20](RO) VSEC length. */
+ uint32_t vsec_rev : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t vsec_id : 16; /**< [ 15: 0](RO) VSEC ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t vsec_id : 16; /**< [ 15: 0](RO) VSEC ID. */
+ uint32_t vsec_rev : 4; /**< [ 19: 16](RO) Capability version. */
+ uint32_t vsec_length : 12; /**< [ 31: 20](RO) VSEC length. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_hdr_s cn; */
+};
+typedef union bdk_pciercx_rasdp_hdr bdk_pciercx_rasdp_hdr_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_HDR; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_HDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x41cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_HDR", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_HDR(a) bdk_pciercx_rasdp_hdr_t
+#define bustype_BDK_PCIERCX_RASDP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_HDR(a) "PCIERCX_RASDP_HDR"
+#define busnum_BDK_PCIERCX_RASDP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_radr_ce
+ *
+ * PCIe RC RAS RAM Address Corrected Error Register
+ */
+union bdk_pciercx_rasdp_radr_ce
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_radr_ce_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ram_idx_corr_err : 4; /**< [ 31: 28](RO) RAM index where a corrected error has been detected. */
+ uint32_t reserved_27 : 1;
+ uint32_t ram_addr_corr_err : 27; /**< [ 26: 0](RO) RAM address where a corrected error has been detected. */
+#else /* Word 0 - Little Endian */
+ uint32_t ram_addr_corr_err : 27; /**< [ 26: 0](RO) RAM address where a corrected error has been detected. */
+ uint32_t reserved_27 : 1;
+ uint32_t ram_idx_corr_err : 4; /**< [ 31: 28](RO) RAM index where a corrected error has been detected. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_radr_ce_s cn; */
+};
+typedef union bdk_pciercx_rasdp_radr_ce bdk_pciercx_rasdp_radr_ce_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_RADR_CE; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_RADR_CE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_RADR_CE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x448ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_RADR_CE", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_RADR_CE(a) bdk_pciercx_rasdp_radr_ce_t
+#define bustype_BDK_PCIERCX_RASDP_RADR_CE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_RADR_CE(a) "PCIERCX_RASDP_RADR_CE"
+#define busnum_BDK_PCIERCX_RASDP_RADR_CE(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_RADR_CE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_radr_uce
+ *
+ * PCIe RC RAS RAM Address Uncorrected Error Register
+ */
+union bdk_pciercx_rasdp_radr_uce
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_radr_uce_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ram_idx_ucorr_err : 4; /**< [ 31: 28](RO) RAM index where an uncorrected error has been detected. */
+ uint32_t reserved_27 : 1;
+ uint32_t ram_addr_ucorr_err : 27; /**< [ 26: 0](RO) RAM address where an uncorrected error has been detected. */
+#else /* Word 0 - Little Endian */
+ uint32_t ram_addr_ucorr_err : 27; /**< [ 26: 0](RO) RAM address where an uncorrected error has been detected. */
+ uint32_t reserved_27 : 1;
+ uint32_t ram_idx_ucorr_err : 4; /**< [ 31: 28](RO) RAM index where an uncorrected error has been detected. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_radr_uce_s cn; */
+};
+typedef union bdk_pciercx_rasdp_radr_uce bdk_pciercx_rasdp_radr_uce_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_RADR_UCE; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_RADR_UCE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_RADR_UCE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x44cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_RADR_UCE", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* accessor macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_RADR_UCE(a) bdk_pciercx_rasdp_radr_uce_t
+#define bustype_BDK_PCIERCX_RASDP_RADR_UCE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_RADR_UCE(a) "PCIERCX_RASDP_RADR_UCE"
+#define busnum_BDK_PCIERCX_RASDP_RADR_UCE(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_RADR_UCE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_uce_ctl
+ *
+ * PCIe RC RAS Data Path Uncorrectable Error Control Register
+ */
+union bdk_pciercx_rasdp_uce_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_uce_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ucorr_cnt_sel : 8; /**< [ 31: 24](R/W) Counter selection. This field selects the counter ID (within
+ the region defined by [UCORR_CNT_SEL_REG]) whose contents
+ can be read from PCIERC_RAS_TBA_CTL. You can
+ cycle this field value from 0 to 255 to access all counters. */
+ uint32_t ucorr_cnt_sel_reg : 4; /**< [ 23: 20](R/W) Selected counter region (vendor text says "correctable", but in this
+ register it presumably selects the uncorrectable counter region -- confirm).
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_5_19 : 15;
+ uint32_t ucorr_en_cntrs : 1; /**< [ 4: 4](R/W) NOTE(review): vendor description read "Error correction disable for ADM
+ RX path", which looks like a copy-paste error from RASDP_EP_CTL; the
+ field name suggests it enables the uncorrectable error counters --
+ confirm against the hardware manual. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t ep_dis_l3_rx : 1; /**< [ 0: 0](R/W1C) Clears all uncorrectable error counters. NOTE(review): the field name
+ looks copy-pasted from RASDP_EP_CTL; the description (counter clear)
+ appears to be the intended behavior -- confirm. */
+#else /* Word 0 - Little Endian */
+ uint32_t ep_dis_l3_rx : 1; /**< [ 0: 0](R/W1C) Clears all uncorrectable error counters. NOTE(review): the field name
+ looks copy-pasted from RASDP_EP_CTL; the description (counter clear)
+ appears to be the intended behavior -- confirm. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t ucorr_en_cntrs : 1; /**< [ 4: 4](R/W) NOTE(review): vendor description read "Error correction disable for ADM
+ RX path", which looks like a copy-paste error from RASDP_EP_CTL; the
+ field name suggests it enables the uncorrectable error counters --
+ confirm against the hardware manual. */
+ uint32_t reserved_5_19 : 15;
+ uint32_t ucorr_cnt_sel_reg : 4; /**< [ 23: 20](R/W) Selected counter region (vendor text says "correctable", but in this
+ register it presumably selects the uncorrectable counter region -- confirm).
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t ucorr_cnt_sel : 8; /**< [ 31: 24](R/W) Counter selection. This field selects the counter ID (within
+ the region defined by [UCORR_CNT_SEL_REG]) whose contents
+ can be read from PCIERC_RAS_TBA_CTL. You can
+ cycle this field value from 0 to 255 to access all counters. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_uce_ctl_s cn; */
+};
+typedef union bdk_pciercx_rasdp_uce_ctl bdk_pciercx_rasdp_uce_ctl_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_UCE_CTL; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_UCE_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_UCE_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x42cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_UCE_CTL", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_UCE_CTL(a) bdk_pciercx_rasdp_uce_ctl_t
+#define bustype_BDK_PCIERCX_RASDP_UCE_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_UCE_CTL(a) "PCIERCX_RASDP_UCE_CTL"
+#define busnum_BDK_PCIERCX_RASDP_UCE_CTL(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_UCE_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_uce_loc
+ *
+ * PCIe RC RAS Data Uncorrectable Error Location Register
+ */
+union bdk_pciercx_rasdp_uce_loc
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_uce_loc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t loc_last_ucorr_err : 8; /**< [ 31: 24](RO) Location/ID of the last uncorrected error within the region defined by
+ [REG_LAST_UCORR_ERR]. */
+ uint32_t reg_last_ucorr_err : 4; /**< [ 23: 20](RO) Region of last uncorrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t loc_first_ucorr_err : 8; /**< [ 15: 8](RO) Location/ID of the first uncorrected error within the region defined by
+ [REG_FIRST_UCORR_ERR]. */
+ uint32_t reg_first_ucorr_err : 4; /**< [ 7: 4](RO) Region of first uncorrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t reg_first_ucorr_err : 4; /**< [ 7: 4](RO) Region of first uncorrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t loc_first_ucorr_err : 8; /**< [ 15: 8](RO) Location/ID of the first uncorrected error within the region defined by
+ [REG_FIRST_UCORR_ERR]. */
+ uint32_t reserved_16_19 : 4;
+ uint32_t reg_last_ucorr_err : 4; /**< [ 23: 20](RO) Region of last uncorrected error
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t loc_last_ucorr_err : 8; /**< [ 31: 24](RO) Location/ID of the last uncorrected error within the region defined by
+ [REG_LAST_UCORR_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_uce_loc_s cn; */
+};
+typedef union bdk_pciercx_rasdp_uce_loc bdk_pciercx_rasdp_uce_loc_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_UCE_LOC; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_UCE_LOC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_UCE_LOC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x43cll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_UCE_LOC", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_UCE_LOC(a) bdk_pciercx_rasdp_uce_loc_t
+#define bustype_BDK_PCIERCX_RASDP_UCE_LOC(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_UCE_LOC(a) "PCIERCX_RASDP_UCE_LOC"
+#define busnum_BDK_PCIERCX_RASDP_UCE_LOC(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_UCE_LOC(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rasdp_uce_rp
+ *
+ * PCIe RC RAS Data Path Uncorrectable Error Report Register
+ */
+union bdk_pciercx_rasdp_uce_rp
+{
+ uint32_t u;
+ struct bdk_pciercx_rasdp_uce_rp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ucorr_cnt_sel : 8; /**< [ 31: 24](RO/H) Counter selection. Returns the value set in PCIERC_RASDP_UCE_CTL[UCORR_CNT_SEL]. */
+ uint32_t ucorr_cnt_sel_reg : 4; /**< [ 23: 20](RO/H) Selected correctable counter region.
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion buffer path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t reserved_8_19 : 12;
+ uint32_t ucorr_count : 8; /**< [ 7: 0](RO) Current uncorrected count for the selected counter. */
+#else /* Word 0 - Little Endian */
+ uint32_t ucorr_count : 8; /**< [ 7: 0](RO) Current uncorrected count for the selected counter. */
+ uint32_t reserved_8_19 : 12;
+ uint32_t ucorr_cnt_sel_reg : 4; /**< [ 23: 20](RO/H) Selected correctable counter region.
+ 0x0 = ADM RX path.
+ 0x1 = Layer 3 RX path.
+ 0x2 = Layer 2 RX path.
+ 0x3 = DMA inbound path (not supported).
+ 0x4 = AXI bridge inbound request path (not supported).
+ 0x5 = AXI bridge inbound completion composer path (not supported).
+ 0x6 = ADM TX path.
+ 0x7 = Layer 3 TX path.
+ 0x8 = Layer 2 TX path.
+ 0x9 = DMA outbound path (not supported).
+ 0xA = AXI bridge outbound request path (not supported).
+ 0xB = AXI bridge outbound master completion buffer path (not supported).
+ 0xC - 0xF = Reserved. */
+ uint32_t ucorr_cnt_sel : 8; /**< [ 31: 24](RO/H) Counter selection. Returns the value set in PCIERC_RASDP_UCE_CTL[UCORR_CNT_SEL]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rasdp_uce_rp_s cn; */
+};
+typedef union bdk_pciercx_rasdp_uce_rp bdk_pciercx_rasdp_uce_rp_t;
+
+/* Config-space byte offset of PCIERC(a)_RASDP_UCE_RP; CN9XXX only, a <= 3,
+   otherwise a fatal CSR error is raised. */
+static inline uint64_t BDK_PCIERCX_RASDP_UCE_RP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RASDP_UCE_RP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x430ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_RASDP_UCE_RP", 1, a, 0, 0, 0);
+}
+
+/* Register metadata consumed by the generic BDK_CSR_* macros via token pasting. */
+#define typedef_BDK_PCIERCX_RASDP_UCE_RP(a) bdk_pciercx_rasdp_uce_rp_t
+#define bustype_BDK_PCIERCX_RASDP_UCE_RP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RASDP_UCE_RP(a) "PCIERCX_RASDP_UCE_RP"
+#define busnum_BDK_PCIERCX_RASDP_UCE_RP(a) (a)
+#define arguments_BDK_PCIERCX_RASDP_UCE_RP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rev
+ *
+ * PCIe RC Class Code/Revision ID Register
+ */
+union bdk_pciercx_rev
+{
+ /* Raw 32-bit view of the register word. */
+ uint32_t u;
+ struct bdk_pciercx_rev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/WRSL) Base class code, writable through PEM()_CFG_WR.
+ However, the application must not change this field.
+ 0x6 = Bridge. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/WRSL) Subclass code, writable through PEM()_CFG_WR.
+ However, the application must not change this field.
+ 0x4 = PCI-to-PCI. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/WRSL) Programming interface, writable through PEM()_CFG_WR.
+ However, the application must not change this field.
+ 0x0 = No standard interface. */
+ uint32_t rid : 8; /**< [ 7: 0](RO/WRSL) Revision ID, writable through PEM()_CFG_WR.
+ However, the application must not change this field.
+ See FUS_FUSE_NUM_E::CHIP_ID() for more information. */
+#else /* Word 0 - Little Endian */
+ uint32_t rid : 8; /**< [ 7: 0](RO/WRSL) Revision ID, writable through PEM()_CFG_WR.
+ However, the application must not change this field.
+ See FUS_FUSE_NUM_E::CHIP_ID() for more information. */
+ uint32_t pi : 8; /**< [ 15: 8](RO/WRSL) Programming interface, writable through PEM()_CFG_WR.
+ However, the application must not change this field.
+ 0x0 = No standard interface. */
+ uint32_t sc : 8; /**< [ 23: 16](RO/WRSL) Subclass code, writable through PEM()_CFG_WR.
+ However, the application must not change this field.
+ 0x4 = PCI-to-PCI. */
+ uint32_t bcc : 8; /**< [ 31: 24](RO/WRSL) Base class code, writable through PEM()_CFG_WR.
+ However, the application must not change this field.
+ 0x6 = Bridge. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_rev_s cn; */
+};
+typedef union bdk_pciercx_rev bdk_pciercx_rev_t;
+
+static inline uint64_t BDK_PCIERCX_REV(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_REV(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_REV", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_REV(a) bdk_pciercx_rev_t
+#define bustype_BDK_PCIERCX_REV(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_REV(a) "PCIERCX_REV"
+#define busnum_BDK_PCIERCX_REV(a) (a)
+#define arguments_BDK_PCIERCX_REV(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_root_ctl_cap
+ *
+ * PCIe RC Root Control/Root Capabilities Register
+ */
+union bdk_pciercx_root_ctl_cap
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_root_ctl_cap_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_17_31 : 15;
+        uint32_t crssv : 1; /**< [ 16: 16](RO) CRS software visibility. Not supported, hardwired to zero. */
+        uint32_t reserved_5_15 : 11;
+        uint32_t crssve : 1; /**< [ 4: 4](RO) CRS software visibility enable. Not supported, hardwired to zero. */
+        uint32_t pmeie : 1; /**< [ 3: 3](R/W) PME interrupt enable. */
+        uint32_t sefee : 1; /**< [ 2: 2](R/W) System error on fatal error enable. */
+        uint32_t senfee : 1; /**< [ 1: 1](R/W) System error on nonfatal error enable. */
+        uint32_t secee : 1; /**< [ 0: 0](R/W) System error on correctable error enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t secee : 1; /**< [ 0: 0](R/W) System error on correctable error enable. */
+        uint32_t senfee : 1; /**< [ 1: 1](R/W) System error on nonfatal error enable. */
+        uint32_t sefee : 1; /**< [ 2: 2](R/W) System error on fatal error enable. */
+        uint32_t pmeie : 1; /**< [ 3: 3](R/W) PME interrupt enable. */
+        uint32_t crssve : 1; /**< [ 4: 4](RO) CRS software visibility enable. Not supported, hardwired to zero. */
+        uint32_t reserved_5_15 : 11;
+        uint32_t crssv : 1; /**< [ 16: 16](RO) CRS software visibility. Not supported, hardwired to zero. */
+        uint32_t reserved_17_31 : 15;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_root_ctl_cap_s cn; */
+};
+typedef union bdk_pciercx_root_ctl_cap bdk_pciercx_root_ctl_cap_t;
+
+/* Byte offset of PCIERC()_ROOT_CTL_CAP in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_ROOT_CTL_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ROOT_CTL_CAP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_ROOT_CTL_CAP", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_ROOT_CTL_CAP(a) bdk_pciercx_root_ctl_cap_t
+#define bustype_BDK_PCIERCX_ROOT_CTL_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ROOT_CTL_CAP(a) "PCIERCX_ROOT_CTL_CAP"
+#define busnum_BDK_PCIERCX_ROOT_CTL_CAP(a) (a)
+#define arguments_BDK_PCIERCX_ROOT_CTL_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_root_err_cmd
+ *
+ * PCIe RC Root Error Command Register
+ */
+union bdk_pciercx_root_err_cmd
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_root_err_cmd_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_3_31 : 29;
+        uint32_t fere : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+        uint32_t nfere : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+        uint32_t cere : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t cere : 1; /**< [ 0: 0](R/W) Correctable error reporting enable. */
+        uint32_t nfere : 1; /**< [ 1: 1](R/W) Nonfatal error reporting enable. */
+        uint32_t fere : 1; /**< [ 2: 2](R/W) Fatal error reporting enable. */
+        uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_root_err_cmd_s cn; */
+};
+typedef union bdk_pciercx_root_err_cmd bdk_pciercx_root_err_cmd_t;
+
+/* Byte offset of PCIERC()_ROOT_ERR_CMD in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_ROOT_ERR_CMD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ROOT_ERR_CMD(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x12cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_ROOT_ERR_CMD", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_ROOT_ERR_CMD(a) bdk_pciercx_root_err_cmd_t
+#define bustype_BDK_PCIERCX_ROOT_ERR_CMD(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ROOT_ERR_CMD(a) "PCIERCX_ROOT_ERR_CMD"
+#define busnum_BDK_PCIERCX_ROOT_ERR_CMD(a) (a)
+#define arguments_BDK_PCIERCX_ROOT_ERR_CMD(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_root_err_stat
+ *
+ * PCIe RC Root Error Status Register
+ */
+union bdk_pciercx_root_err_stat
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_root_err_stat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t aeimn : 5; /**< [ 31: 27](RO/WRSL) Advanced error interrupt message number, writable through
+                                 PEM()_CFG_WR. */
+        uint32_t reserved_7_26 : 20;
+        uint32_t femr : 1; /**< [ 6: 6](R/W1C/H) Fatal error messages received. */
+        uint32_t nfemr : 1; /**< [ 5: 5](R/W1C/H) Nonfatal error messages received. */
+        uint32_t fuf : 1; /**< [ 4: 4](R/W1C/H) First uncorrectable fatal. */
+        uint32_t multi_efnfr : 1; /**< [ 3: 3](R/W1C/H) Multiple ERR_FATAL/NONFATAL received. */
+        uint32_t efnfr : 1; /**< [ 2: 2](R/W1C/H) ERR_FATAL/NONFATAL received. */
+        uint32_t multi_ecr : 1; /**< [ 1: 1](R/W1C/H) Multiple ERR_COR received. */
+        uint32_t ecr : 1; /**< [ 0: 0](R/W1C/H) ERR_COR received. */
+#else /* Word 0 - Little Endian */
+        uint32_t ecr : 1; /**< [ 0: 0](R/W1C/H) ERR_COR received. */
+        uint32_t multi_ecr : 1; /**< [ 1: 1](R/W1C/H) Multiple ERR_COR received. */
+        uint32_t efnfr : 1; /**< [ 2: 2](R/W1C/H) ERR_FATAL/NONFATAL received. */
+        uint32_t multi_efnfr : 1; /**< [ 3: 3](R/W1C/H) Multiple ERR_FATAL/NONFATAL received. */
+        uint32_t fuf : 1; /**< [ 4: 4](R/W1C/H) First uncorrectable fatal. */
+        uint32_t nfemr : 1; /**< [ 5: 5](R/W1C/H) Nonfatal error messages received. */
+        uint32_t femr : 1; /**< [ 6: 6](R/W1C/H) Fatal error messages received. */
+        uint32_t reserved_7_26 : 20;
+        uint32_t aeimn : 5; /**< [ 31: 27](RO/WRSL) Advanced error interrupt message number, writable through
+                                 PEM()_CFG_WR. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_root_err_stat_s cn; */
+};
+typedef union bdk_pciercx_root_err_stat bdk_pciercx_root_err_stat_t;
+
+/* Byte offset of PCIERC()_ROOT_ERR_STAT in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_ROOT_ERR_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ROOT_ERR_STAT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x130ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_ROOT_ERR_STAT", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_ROOT_ERR_STAT(a) bdk_pciercx_root_err_stat_t
+#define bustype_BDK_PCIERCX_ROOT_ERR_STAT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ROOT_ERR_STAT(a) "PCIERCX_ROOT_ERR_STAT"
+#define busnum_BDK_PCIERCX_ROOT_ERR_STAT(a) (a)
+#define arguments_BDK_PCIERCX_ROOT_ERR_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_root_stat
+ *
+ * PCIe RC Root Status Register
+ */
+union bdk_pciercx_root_stat
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_root_stat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_18_31 : 14;
+        uint32_t pme_pend : 1; /**< [ 17: 17](RO) PME pending. */
+        uint32_t pme_stat : 1; /**< [ 16: 16](R/W1C/H) PME status. */
+        uint32_t pme_rid : 16; /**< [ 15: 0](RO) PME requester ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t pme_rid : 16; /**< [ 15: 0](RO) PME requester ID. */
+        uint32_t pme_stat : 1; /**< [ 16: 16](R/W1C/H) PME status. */
+        uint32_t pme_pend : 1; /**< [ 17: 17](RO) PME pending. */
+        uint32_t reserved_18_31 : 14;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_root_stat_s cn; */
+};
+typedef union bdk_pciercx_root_stat bdk_pciercx_root_stat_t;
+
+/* Byte offset of PCIERC()_ROOT_STAT in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_ROOT_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_ROOT_STAT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x90ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_ROOT_STAT", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_ROOT_STAT(a) bdk_pciercx_root_stat_t
+#define bustype_BDK_PCIERCX_ROOT_STAT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_ROOT_STAT(a) "PCIERCX_ROOT_STAT"
+#define busnum_BDK_PCIERCX_ROOT_STAT(a) (a)
+#define arguments_BDK_PCIERCX_ROOT_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_rx_ser_q_ctrl
+ *
+ * PCIe RC Receive Serialization Queue Control Register
+ */
+union bdk_pciercx_rx_ser_q_ctrl
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_rx_ser_q_ctrl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t qof_prv_en : 1; /**< [ 31: 31](R/W) Enable receive serialization queue overflow prevention. */
+        uint32_t af_thres_sign : 1; /**< [ 30: 30](R/W) Almost full threshold adjustment sign. */
+        uint32_t reserved_28_29 : 2;
+        uint32_t af_thres_val : 12; /**< [ 27: 16](R/W) Almost full threshold adjustment value. */
+        uint32_t af_thres : 16; /**< [ 15: 0](RO) Current almost full threshold. */
+#else /* Word 0 - Little Endian */
+        uint32_t af_thres : 16; /**< [ 15: 0](RO) Current almost full threshold. */
+        uint32_t af_thres_val : 12; /**< [ 27: 16](R/W) Almost full threshold adjustment value. */
+        uint32_t reserved_28_29 : 2;
+        uint32_t af_thres_sign : 1; /**< [ 30: 30](R/W) Almost full threshold adjustment sign. */
+        uint32_t qof_prv_en : 1; /**< [ 31: 31](R/W) Enable receive serialization queue overflow prevention. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_rx_ser_q_ctrl_s cn; */
+};
+typedef union bdk_pciercx_rx_ser_q_ctrl bdk_pciercx_rx_ser_q_ctrl_t;
+
+/* Byte offset of PCIERC()_RX_SER_Q_CTRL in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_RX_SER_Q_CTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_RX_SER_Q_CTRL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0xc00ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_RX_SER_Q_CTRL", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_RX_SER_Q_CTRL(a) bdk_pciercx_rx_ser_q_ctrl_t
+#define bustype_BDK_PCIERCX_RX_SER_Q_CTRL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_RX_SER_Q_CTRL(a) "PCIERCX_RX_SER_Q_CTRL"
+#define busnum_BDK_PCIERCX_RX_SER_Q_CTRL(a) (a)
+#define arguments_BDK_PCIERCX_RX_SER_Q_CTRL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_scap_hdr
+ *
+ * PCIe RC PCI Express Secondary Capability (Gen3) Header Register
+ */
+union bdk_pciercx_scap_hdr
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_scap_hdr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_scap_hdr_s cn; */
+};
+typedef union bdk_pciercx_scap_hdr bdk_pciercx_scap_hdr_t;
+
+/* Byte offset of PCIERC()_SCAP_HDR in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SCAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_SCAP_HDR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x178ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_SCAP_HDR", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_SCAP_HDR(a) bdk_pciercx_scap_hdr_t
+#define bustype_BDK_PCIERCX_SCAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SCAP_HDR(a) "PCIERCX_SCAP_HDR"
+#define busnum_BDK_PCIERCX_SCAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_SCAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ser_num_1
+ *
+ * PCIe RC Serial Number 1 Register
+ */
+union bdk_pciercx_ser_num_1
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_ser_num_1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dword1 : 32; /**< [ 31: 0](R/W) IEEE 64-bit device serial number (doubleword 1). */
+#else /* Word 0 - Little Endian */
+        uint32_t dword1 : 32; /**< [ 31: 0](R/W) IEEE 64-bit device serial number (doubleword 1). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ser_num_1_s cn; */
+};
+typedef union bdk_pciercx_ser_num_1 bdk_pciercx_ser_num_1_t;
+
+/* Byte offset of PCIERC()_SER_NUM_1 in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SER_NUM_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_SER_NUM_1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x14cll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_SER_NUM_1", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_SER_NUM_1(a) bdk_pciercx_ser_num_1_t
+#define bustype_BDK_PCIERCX_SER_NUM_1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SER_NUM_1(a) "PCIERCX_SER_NUM_1"
+#define busnum_BDK_PCIERCX_SER_NUM_1(a) (a)
+#define arguments_BDK_PCIERCX_SER_NUM_1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ser_num_2
+ *
+ * PCIe RC Serial Number 2 Register
+ */
+union bdk_pciercx_ser_num_2
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_ser_num_2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dword2 : 32; /**< [ 31: 0](R/W) IEEE 64-bit device serial number (doubleword 2). */
+#else /* Word 0 - Little Endian */
+        uint32_t dword2 : 32; /**< [ 31: 0](R/W) IEEE 64-bit device serial number (doubleword 2). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_ser_num_2_s cn; */
+};
+typedef union bdk_pciercx_ser_num_2 bdk_pciercx_ser_num_2_t;
+
+/* Byte offset of PCIERC()_SER_NUM_2 in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SER_NUM_2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_SER_NUM_2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x150ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_SER_NUM_2", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_SER_NUM_2(a) bdk_pciercx_ser_num_2_t
+#define bustype_BDK_PCIERCX_SER_NUM_2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SER_NUM_2(a) "PCIERCX_SER_NUM_2"
+#define busnum_BDK_PCIERCX_SER_NUM_2(a) (a)
+#define arguments_BDK_PCIERCX_SER_NUM_2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_slot_cap
+ *
+ * PCIe RC Slot Capabilities Register
+ */
+union bdk_pciercx_slot_cap
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_slot_cap_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ps_num : 13; /**< [ 31: 19](RO/WRSL) Physical slot number, writable through PEM()_CFG_WR. */
+        uint32_t nccs : 1; /**< [ 18: 18](RO/WRSL) No command complete support, writable through PEM()_CFG_WR. However, the application
+                                 must not change this field. */
+        uint32_t emip : 1; /**< [ 17: 17](RO/WRSL) Electromechanical interlock present, writable through PEM()_CFG_WR. However, the
+                                 application must not change this field. */
+        uint32_t sp_ls : 2; /**< [ 16: 15](RO/WRSL) Slot power limit scale, writable through PEM()_CFG_WR. */
+        uint32_t sp_lv : 8; /**< [ 14: 7](RO/WRSL) Slot power limit value, writable through PEM()_CFG_WR. */
+        uint32_t hp_c : 1; /**< [ 6: 6](RO/WRSL) Hot plug capable, writable through PEM()_CFG_WR. However, the application must not
+                                 change this field. */
+        uint32_t hp_s : 1; /**< [ 5: 5](RO/WRSL) Hot plug surprise, writable through PEM()_CFG_WR. However, the application must not
+                                 change this field. */
+        uint32_t pip : 1; /**< [ 4: 4](RO/WRSL) Power indicator present, writable through PEM()_CFG_WR. However, the application must
+                                 not change this field. */
+        uint32_t aip : 1; /**< [ 3: 3](RO/WRSL) Attention indicator present, writable through PEM()_CFG_WR. However, the application
+                                 must not change this field. */
+        uint32_t mrlsp : 1; /**< [ 2: 2](RO/WRSL) MRL sensor present, writable through PEM()_CFG_WR. However, the application must not
+                                 change this field. */
+        uint32_t pcp : 1; /**< [ 1: 1](RO/WRSL) Power controller present, writable through PEM()_CFG_WR. However, the application must
+                                 not change this field. */
+        uint32_t abp : 1; /**< [ 0: 0](RO/WRSL) Attention button present, writable through PEM()_CFG_WR. However, the application must
+                                 not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t abp : 1; /**< [ 0: 0](RO/WRSL) Attention button present, writable through PEM()_CFG_WR. However, the application must
+                                 not change this field. */
+        uint32_t pcp : 1; /**< [ 1: 1](RO/WRSL) Power controller present, writable through PEM()_CFG_WR. However, the application must
+                                 not change this field. */
+        uint32_t mrlsp : 1; /**< [ 2: 2](RO/WRSL) MRL sensor present, writable through PEM()_CFG_WR. However, the application must not
+                                 change this field. */
+        uint32_t aip : 1; /**< [ 3: 3](RO/WRSL) Attention indicator present, writable through PEM()_CFG_WR. However, the application
+                                 must not change this field. */
+        uint32_t pip : 1; /**< [ 4: 4](RO/WRSL) Power indicator present, writable through PEM()_CFG_WR. However, the application must
+                                 not change this field. */
+        uint32_t hp_s : 1; /**< [ 5: 5](RO/WRSL) Hot plug surprise, writable through PEM()_CFG_WR. However, the application must not
+                                 change this field. */
+        uint32_t hp_c : 1; /**< [ 6: 6](RO/WRSL) Hot plug capable, writable through PEM()_CFG_WR. However, the application must not
+                                 change this field. */
+        uint32_t sp_lv : 8; /**< [ 14: 7](RO/WRSL) Slot power limit value, writable through PEM()_CFG_WR. */
+        uint32_t sp_ls : 2; /**< [ 16: 15](RO/WRSL) Slot power limit scale, writable through PEM()_CFG_WR. */
+        uint32_t emip : 1; /**< [ 17: 17](RO/WRSL) Electromechanical interlock present, writable through PEM()_CFG_WR. However, the
+                                 application must not change this field. */
+        uint32_t nccs : 1; /**< [ 18: 18](RO/WRSL) No command complete support, writable through PEM()_CFG_WR. However, the application
+                                 must not change this field. */
+        uint32_t ps_num : 13; /**< [ 31: 19](RO/WRSL) Physical slot number, writable through PEM()_CFG_WR. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_slot_cap_s cn; */
+};
+typedef union bdk_pciercx_slot_cap bdk_pciercx_slot_cap_t;
+
+/* Byte offset of PCIERC()_SLOT_CAP in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SLOT_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_SLOT_CAP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x84ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_SLOT_CAP", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_SLOT_CAP(a) bdk_pciercx_slot_cap_t
+#define bustype_BDK_PCIERCX_SLOT_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SLOT_CAP(a) "PCIERCX_SLOT_CAP"
+#define busnum_BDK_PCIERCX_SLOT_CAP(a) (a)
+#define arguments_BDK_PCIERCX_SLOT_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_slot_cap2
+ *
+ * PCIe RC Slot Capabilities 2 Register
+ */
+union bdk_pciercx_slot_cap2
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word (all bits reserved on this chip). */
+    struct bdk_pciercx_slot_cap2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_slot_cap2_s cn; */
+};
+typedef union bdk_pciercx_slot_cap2 bdk_pciercx_slot_cap2_t;
+
+/* Byte offset of PCIERC()_SLOT_CAP2 in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SLOT_CAP2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_SLOT_CAP2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0xa4ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_SLOT_CAP2", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_SLOT_CAP2(a) bdk_pciercx_slot_cap2_t
+#define bustype_BDK_PCIERCX_SLOT_CAP2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SLOT_CAP2(a) "PCIERCX_SLOT_CAP2"
+#define busnum_BDK_PCIERCX_SLOT_CAP2(a) (a)
+#define arguments_BDK_PCIERCX_SLOT_CAP2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_slot_ctl
+ *
+ * PCIe RC Slot Control/Slot Status Register
+ */
+union bdk_pciercx_slot_ctl
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word (Slot Control in [15:0], Slot Status in [31:16]). */
+    struct bdk_pciercx_slot_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_25_31 : 7;
+        uint32_t dlls_c : 1; /**< [ 24: 24](R/W1C/H) Data link layer state changed. */
+        uint32_t emis : 1; /**< [ 23: 23](RO) Electromechanical interlock status. */
+        uint32_t pds : 1; /**< [ 22: 22](RO/H) Presence detect state. */
+        uint32_t mrlss : 1; /**< [ 21: 21](RO) MRL sensor state. */
+        uint32_t ccint_d : 1; /**< [ 20: 20](R/W1C/H) Command completed. */
+        uint32_t pd_c : 1; /**< [ 19: 19](R/W1C/H) Presence detect changed. */
+        uint32_t mrls_c : 1; /**< [ 18: 18](R/W1C/H) MRL sensor changed. */
+        uint32_t pf_d : 1; /**< [ 17: 17](R/W1C/H) Power fault detected. */
+        uint32_t abp_d : 1; /**< [ 16: 16](R/W1C/H) Attention button pressed. */
+        uint32_t reserved_13_15 : 3;
+        uint32_t dlls_en : 1; /**< [ 12: 12](R/W) Data link layer state changed enable. */
+        uint32_t emic : 1; /**< [ 11: 11](WO) Electromechanical interlock control. */
+        uint32_t pcc : 1; /**< [ 10: 10](R/W) Power controller control. */
+        uint32_t pic : 2; /**< [ 9: 8](R/W) Power indicator control. */
+        uint32_t aic : 2; /**< [ 7: 6](R/W) Attention indicator control. */
+        uint32_t hpint_en : 1; /**< [ 5: 5](R/W) Hot-plug interrupt enable. */
+        uint32_t ccint_en : 1; /**< [ 4: 4](R/W) Command completed interrupt enable. */
+        uint32_t pd_en : 1; /**< [ 3: 3](R/W) Presence detect changed enable. */
+        uint32_t mrls_en : 1; /**< [ 2: 2](R/W) MRL sensor changed enable. */
+        uint32_t pf_en : 1; /**< [ 1: 1](R/W) Power fault detected enable. */
+        uint32_t abp_en : 1; /**< [ 0: 0](R/W) Attention button pressed enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t abp_en : 1; /**< [ 0: 0](R/W) Attention button pressed enable. */
+        uint32_t pf_en : 1; /**< [ 1: 1](R/W) Power fault detected enable. */
+        uint32_t mrls_en : 1; /**< [ 2: 2](R/W) MRL sensor changed enable. */
+        uint32_t pd_en : 1; /**< [ 3: 3](R/W) Presence detect changed enable. */
+        uint32_t ccint_en : 1; /**< [ 4: 4](R/W) Command completed interrupt enable. */
+        uint32_t hpint_en : 1; /**< [ 5: 5](R/W) Hot-plug interrupt enable. */
+        uint32_t aic : 2; /**< [ 7: 6](R/W) Attention indicator control. */
+        uint32_t pic : 2; /**< [ 9: 8](R/W) Power indicator control. */
+        uint32_t pcc : 1; /**< [ 10: 10](R/W) Power controller control. */
+        uint32_t emic : 1; /**< [ 11: 11](WO) Electromechanical interlock control. */
+        uint32_t dlls_en : 1; /**< [ 12: 12](R/W) Data link layer state changed enable. */
+        uint32_t reserved_13_15 : 3;
+        uint32_t abp_d : 1; /**< [ 16: 16](R/W1C/H) Attention button pressed. */
+        uint32_t pf_d : 1; /**< [ 17: 17](R/W1C/H) Power fault detected. */
+        uint32_t mrls_c : 1; /**< [ 18: 18](R/W1C/H) MRL sensor changed. */
+        uint32_t pd_c : 1; /**< [ 19: 19](R/W1C/H) Presence detect changed. */
+        uint32_t ccint_d : 1; /**< [ 20: 20](R/W1C/H) Command completed. */
+        uint32_t mrlss : 1; /**< [ 21: 21](RO) MRL sensor state. */
+        uint32_t pds : 1; /**< [ 22: 22](RO/H) Presence detect state. */
+        uint32_t emis : 1; /**< [ 23: 23](RO) Electromechanical interlock status. */
+        uint32_t dlls_c : 1; /**< [ 24: 24](R/W1C/H) Data link layer state changed. */
+        uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_slot_ctl_s cn; */
+};
+typedef union bdk_pciercx_slot_ctl bdk_pciercx_slot_ctl_t;
+
+/* Byte offset of PCIERC()_SLOT_CTL in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SLOT_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_SLOT_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x88ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_SLOT_CTL", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_SLOT_CTL(a) bdk_pciercx_slot_ctl_t
+#define bustype_BDK_PCIERCX_SLOT_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SLOT_CTL(a) "PCIERCX_SLOT_CTL"
+#define busnum_BDK_PCIERCX_SLOT_CTL(a) (a)
+#define arguments_BDK_PCIERCX_SLOT_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_slot_ctl_stat2
+ *
+ * PCIe RC Slot Control 2 Register/Slot Status 2 Register
+ */
+union bdk_pciercx_slot_ctl_stat2
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word (all bits reserved on this chip). */
+    struct bdk_pciercx_slot_ctl_stat2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_slot_ctl_stat2_s cn; */
+};
+typedef union bdk_pciercx_slot_ctl_stat2 bdk_pciercx_slot_ctl_stat2_t;
+
+/* Byte offset of PCIERC()_SLOT_CTL_STAT2 in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SLOT_CTL_STAT2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_SLOT_CTL_STAT2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0xa8ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_SLOT_CTL_STAT2", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_SLOT_CTL_STAT2(a) bdk_pciercx_slot_ctl_stat2_t
+#define bustype_BDK_PCIERCX_SLOT_CTL_STAT2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SLOT_CTL_STAT2(a) "PCIERCX_SLOT_CTL_STAT2"
+#define busnum_BDK_PCIERCX_SLOT_CTL_STAT2(a) (a)
+#define arguments_BDK_PCIERCX_SLOT_CTL_STAT2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_sn_base
+ *
+ * Device Serial Number Extended Capability Header Register
+ * Device Serial Number Extended Capability Header
+ */
+union bdk_pciercx_sn_base
+{
+    uint32_t u; /**< Entire register as a raw 32-bit word. */
+    struct bdk_pciercx_sn_base_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+        uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+        uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+                                 Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_pciercx_sn_base_s cn; */
+};
+typedef union bdk_pciercx_sn_base bdk_pciercx_sn_base_t;
+
+/* Byte offset of PCIERC()_SN_BASE in PCICONFIGRC space for RC controller 'a'.
+   Valid only on CN9XXX with a <= 3; any other input reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SN_BASE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_SN_BASE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x148ll + 0x100000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("PCIERCX_SN_BASE", 1, a, 0, 0, 0);
+}
+
+/* Name-keyed register metadata; consumed by BDK's generic CSR macros (defined elsewhere). */
+#define typedef_BDK_PCIERCX_SN_BASE(a) bdk_pciercx_sn_base_t
+#define bustype_BDK_PCIERCX_SN_BASE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SN_BASE(a) "PCIERCX_SN_BASE"
+#define busnum_BDK_PCIERCX_SN_BASE(a) (a)
+#define arguments_BDK_PCIERCX_SN_BASE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_symb_timer
+ *
+ * PCIe RC Symbol Timer/Filter Mask Register 1
+ */
+union bdk_pciercx_symb_timer
+{
+ uint32_t u;
+ struct bdk_pciercx_symb_timer_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t m_cfg0_filt : 1; /**< [ 31: 31](R/W) Mask filtering of received configuration requests (RC mode only). */
+ uint32_t m_io_filt : 1; /**< [ 30: 30](R/W) Mask filtering of received I/O requests (RC mode only). */
+ uint32_t msg_ctrl : 1; /**< [ 29: 29](R/W) Message control. The application must not change this field. */
+ uint32_t m_cpl_ecrc_filt : 1; /**< [ 28: 28](R/W) Mask ECRC error filtering for completions. */
+ uint32_t m_ecrc_filt : 1; /**< [ 27: 27](R/W) Mask ECRC error filtering. */
+ uint32_t m_cpl_len_err : 1; /**< [ 26: 26](R/W) Mask length mismatch error for received completions. */
+ uint32_t m_cpl_attr_err : 1; /**< [ 25: 25](R/W) Mask attributes mismatch error for received completions. */
+ uint32_t m_cpl_tc_err : 1; /**< [ 24: 24](R/W) Mask traffic class mismatch error for received completions. */
+ uint32_t m_cpl_fun_err : 1; /**< [ 23: 23](R/W) Mask function mismatch error for received completions. */
+ uint32_t m_cpl_rid_err : 1; /**< [ 22: 22](R/W) Mask requester ID mismatch error for received completions. */
+ uint32_t m_cpl_tag_err : 1; /**< [ 21: 21](R/W) Mask tag error rules for received completions. */
+ uint32_t m_lk_filt : 1; /**< [ 20: 20](R/W) Mask locked request filtering. */
+ uint32_t m_cfg1_filt : 1; /**< [ 19: 19](R/W) Mask type 1 configuration request filtering. */
+ uint32_t m_bar_match : 1; /**< [ 18: 18](R/W) Mask BAR match filtering. */
+ uint32_t m_pois_filt : 1; /**< [ 17: 17](R/W) Mask poisoned TLP filtering. */
+ uint32_t m_fun : 1; /**< [ 16: 16](R/W) Mask function. */
+ uint32_t dfcwt : 1; /**< [ 15: 15](R/W) Disable FC watchdog timer. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t skpiv : 11; /**< [ 10: 0](R/W) SKP interval value. The number of symbol times to wait
+ between transmitting SKP ordered sets. Note that the
+ controller actually waits the number of symbol times in this
+ register plus one between transmitting SKP ordered sets.
+
+ This value is not used at Gen3 speed; the skip interval
+ is hardcoded to 370 blocks. */
+#else /* Word 0 - Little Endian */
+ uint32_t skpiv : 11; /**< [ 10: 0](R/W) SKP interval value. The number of symbol times to wait
+ between transmitting SKP ordered sets. Note that the
+ controller actually waits the number of symbol times in this
+ register plus one between transmitting SKP ordered sets.
+
+ This value is not used at Gen3 speed; the skip interval
+ is hardcoded to 370 blocks. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t dfcwt : 1; /**< [ 15: 15](R/W) Disable FC watchdog timer. */
+ uint32_t m_fun : 1; /**< [ 16: 16](R/W) Mask function. */
+ uint32_t m_pois_filt : 1; /**< [ 17: 17](R/W) Mask poisoned TLP filtering. */
+ uint32_t m_bar_match : 1; /**< [ 18: 18](R/W) Mask BAR match filtering. */
+ uint32_t m_cfg1_filt : 1; /**< [ 19: 19](R/W) Mask type 1 configuration request filtering. */
+ uint32_t m_lk_filt : 1; /**< [ 20: 20](R/W) Mask locked request filtering. */
+ uint32_t m_cpl_tag_err : 1; /**< [ 21: 21](R/W) Mask tag error rules for received completions. */
+ uint32_t m_cpl_rid_err : 1; /**< [ 22: 22](R/W) Mask requester ID mismatch error for received completions. */
+ uint32_t m_cpl_fun_err : 1; /**< [ 23: 23](R/W) Mask function mismatch error for received completions. */
+ uint32_t m_cpl_tc_err : 1; /**< [ 24: 24](R/W) Mask traffic class mismatch error for received completions. */
+ uint32_t m_cpl_attr_err : 1; /**< [ 25: 25](R/W) Mask attributes mismatch error for received completions. */
+ uint32_t m_cpl_len_err : 1; /**< [ 26: 26](R/W) Mask length mismatch error for received completions. */
+ uint32_t m_ecrc_filt : 1; /**< [ 27: 27](R/W) Mask ECRC error filtering. */
+ uint32_t m_cpl_ecrc_filt : 1; /**< [ 28: 28](R/W) Mask ECRC error filtering for completions. */
+ uint32_t msg_ctrl : 1; /**< [ 29: 29](R/W) Message control. The application must not change this field. */
+ uint32_t m_io_filt : 1; /**< [ 30: 30](R/W) Mask filtering of received I/O requests (RC mode only). */
+ uint32_t m_cfg0_filt : 1; /**< [ 31: 31](R/W) Mask filtering of received configuration requests (RC mode only). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_symb_timer_s cn; */
+};
+typedef union bdk_pciercx_symb_timer bdk_pciercx_symb_timer_t;
+
+static inline uint64_t BDK_PCIERCX_SYMB_TIMER(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_SYMB_TIMER; only CN9XXX indices 0..3 are valid,
+   any other combination reports a fatal CSR error. */
+static inline uint64_t BDK_PCIERCX_SYMB_TIMER(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x71cll;
+    }
+    __bdk_csr_fatal("PCIERCX_SYMB_TIMER", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_SYMB_TIMER(a) bdk_pciercx_symb_timer_t
+#define bustype_BDK_PCIERCX_SYMB_TIMER(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_SYMB_TIMER(a) "PCIERCX_SYMB_TIMER"
+#define busnum_BDK_PCIERCX_SYMB_TIMER(a) (a)
+#define arguments_BDK_PCIERCX_SYMB_TIMER(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_timer_ctl
+ *
+ * PCIe RC PF Timer Control and Max Function Number Register
+ *
+ * Note: the two preprocessor branches below declare the identical bit
+ * assignments, merely in opposite declaration order for big- vs.
+ * little-endian hosts.
+ */
+union bdk_pciercx_timer_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_timer_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t flmsf : 2; /**< [ 30: 29](R/W) Fast link timer scaling factor. Sets the scaling factor of
+ LTSSM timer when PCIERC_PORT_CTL[FLM] is set.
+ 0x0 = Scaling factor is 1024 (1 ms is 1 us).
+ 0x1 = Scaling factor is 256 (1 ms is 4 us).
+ 0x2 = Scaling factor is 64 (1 ms is 16 us).
+ 0x3 = Scaling factor is 16 (1 ms is 64 us). */
+ uint32_t updft : 5; /**< [ 28: 24](R/W) Update frequency timer. This is an internally reserved field, do not use. */
+ uint32_t tmanlt : 5; /**< [ 23: 19](R/W) Timer modifier for ACK/NAK latency timer. Increases the timer value for the ACK/NAK
+ latency timer, in increments of 64 clock cycles. */
+ uint32_t tmrt : 5; /**< [ 18: 14](R/W/H) Timer modifier for replay timer. Increases the timer value for the replay timer, in
+ increments of 64 clock cycles. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t mfuncn : 8; /**< [ 7: 0](R/W/H) Max number of functions supported.
+
+ Reset values:
+ _ UPEM: 0xf.
+ _ BPEM: 0x1. */
+#else /* Word 0 - Little Endian */
+ uint32_t mfuncn : 8; /**< [ 7: 0](R/W/H) Max number of functions supported.
+
+ Reset values:
+ _ UPEM: 0xf.
+ _ BPEM: 0x1. */
+ uint32_t reserved_8_13 : 6;
+ uint32_t tmrt : 5; /**< [ 18: 14](R/W/H) Timer modifier for replay timer. Increases the timer value for the replay timer, in
+ increments of 64 clock cycles. */
+ uint32_t tmanlt : 5; /**< [ 23: 19](R/W) Timer modifier for ACK/NAK latency timer. Increases the timer value for the ACK/NAK
+ latency timer, in increments of 64 clock cycles. */
+ uint32_t updft : 5; /**< [ 28: 24](R/W) Update frequency timer. This is an internally reserved field, do not use. */
+ uint32_t flmsf : 2; /**< [ 30: 29](R/W) Fast link timer scaling factor. Sets the scaling factor of
+ LTSSM timer when PCIERC_PORT_CTL[FLM] is set.
+ 0x0 = Scaling factor is 1024 (1 ms is 1 us).
+ 0x1 = Scaling factor is 256 (1 ms is 4 us).
+ 0x2 = Scaling factor is 64 (1 ms is 16 us).
+ 0x3 = Scaling factor is 16 (1 ms is 64 us). */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_timer_ctl_s cn; */
+};
+typedef union bdk_pciercx_timer_ctl bdk_pciercx_timer_ctl_t;
+
+static inline uint64_t BDK_PCIERCX_TIMER_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TIMER_CTL; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TIMER_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x718ll;
+    }
+    __bdk_csr_fatal("PCIERCX_TIMER_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TIMER_CTL(a) bdk_pciercx_timer_ctl_t
+#define bustype_BDK_PCIERCX_TIMER_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TIMER_CTL(a) "PCIERCX_TIMER_CTL"
+#define busnum_BDK_PCIERCX_TIMER_CTL(a) (a)
+#define arguments_BDK_PCIERCX_TIMER_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_tlp_plog1
+ *
+ * PCIe RC TLP Prefix Log Register 1
+ * Layout is a single read-only 32-bit field, identical in both endian branches.
+ */
+union bdk_pciercx_tlp_plog1
+{
+ uint32_t u;
+ struct bdk_pciercx_tlp_plog1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dword1 : 32; /**< [ 31: 0](RO) TLP Prefix log register (first DWORD). */
+#else /* Word 0 - Little Endian */
+ uint32_t dword1 : 32; /**< [ 31: 0](RO) TLP Prefix log register (first DWORD). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_tlp_plog1_s cn; */
+};
+typedef union bdk_pciercx_tlp_plog1 bdk_pciercx_tlp_plog1_t;
+
+static inline uint64_t BDK_PCIERCX_TLP_PLOG1(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TLP_PLOG1; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TLP_PLOG1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x138ll;
+    }
+    __bdk_csr_fatal("PCIERCX_TLP_PLOG1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TLP_PLOG1(a) bdk_pciercx_tlp_plog1_t
+#define bustype_BDK_PCIERCX_TLP_PLOG1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TLP_PLOG1(a) "PCIERCX_TLP_PLOG1"
+#define busnum_BDK_PCIERCX_TLP_PLOG1(a) (a)
+#define arguments_BDK_PCIERCX_TLP_PLOG1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_tlp_plog2
+ *
+ * PCIe RC TLP Prefix Log Register 2
+ * Layout is a single read-only 32-bit field, identical in both endian branches.
+ */
+union bdk_pciercx_tlp_plog2
+{
+ uint32_t u;
+ struct bdk_pciercx_tlp_plog2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dword2 : 32; /**< [ 31: 0](RO) TLP Prefix log register (second DWORD). */
+#else /* Word 0 - Little Endian */
+ uint32_t dword2 : 32; /**< [ 31: 0](RO) TLP Prefix log register (second DWORD). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_tlp_plog2_s cn; */
+};
+typedef union bdk_pciercx_tlp_plog2 bdk_pciercx_tlp_plog2_t;
+
+static inline uint64_t BDK_PCIERCX_TLP_PLOG2(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TLP_PLOG2; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TLP_PLOG2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x13cll;
+    }
+    __bdk_csr_fatal("PCIERCX_TLP_PLOG2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TLP_PLOG2(a) bdk_pciercx_tlp_plog2_t
+#define bustype_BDK_PCIERCX_TLP_PLOG2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TLP_PLOG2(a) "PCIERCX_TLP_PLOG2"
+#define busnum_BDK_PCIERCX_TLP_PLOG2(a) (a)
+#define arguments_BDK_PCIERCX_TLP_PLOG2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_tlp_plog3
+ *
+ * PCIe RC TLP Prefix Log Register 3
+ * Layout is a single read-only 32-bit field, identical in both endian branches.
+ */
+union bdk_pciercx_tlp_plog3
+{
+ uint32_t u;
+ struct bdk_pciercx_tlp_plog3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dword3 : 32; /**< [ 31: 0](RO) TLP Prefix log register (third DWORD). */
+#else /* Word 0 - Little Endian */
+ uint32_t dword3 : 32; /**< [ 31: 0](RO) TLP Prefix log register (third DWORD). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_tlp_plog3_s cn; */
+};
+typedef union bdk_pciercx_tlp_plog3 bdk_pciercx_tlp_plog3_t;
+
+static inline uint64_t BDK_PCIERCX_TLP_PLOG3(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TLP_PLOG3; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TLP_PLOG3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x140ll;
+    }
+    __bdk_csr_fatal("PCIERCX_TLP_PLOG3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TLP_PLOG3(a) bdk_pciercx_tlp_plog3_t
+#define bustype_BDK_PCIERCX_TLP_PLOG3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TLP_PLOG3(a) "PCIERCX_TLP_PLOG3"
+#define busnum_BDK_PCIERCX_TLP_PLOG3(a) (a)
+#define arguments_BDK_PCIERCX_TLP_PLOG3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_tlp_plog4
+ *
+ * PCIe RC TLP Prefix Log Register 4
+ * Layout is a single read-only 32-bit field, identical in both endian branches.
+ */
+union bdk_pciercx_tlp_plog4
+{
+ uint32_t u;
+ struct bdk_pciercx_tlp_plog4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dword4 : 32; /**< [ 31: 0](RO) TLP Prefix log register (fourth DWORD). */
+#else /* Word 0 - Little Endian */
+ uint32_t dword4 : 32; /**< [ 31: 0](RO) TLP Prefix log register (fourth DWORD). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_tlp_plog4_s cn; */
+};
+typedef union bdk_pciercx_tlp_plog4 bdk_pciercx_tlp_plog4_t;
+
+static inline uint64_t BDK_PCIERCX_TLP_PLOG4(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TLP_PLOG4; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TLP_PLOG4(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x144ll;
+    }
+    __bdk_csr_fatal("PCIERCX_TLP_PLOG4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TLP_PLOG4(a) bdk_pciercx_tlp_plog4_t
+#define bustype_BDK_PCIERCX_TLP_PLOG4(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TLP_PLOG4(a) "PCIERCX_TLP_PLOG4"
+#define busnum_BDK_PCIERCX_TLP_PLOG4(a) (a)
+#define arguments_BDK_PCIERCX_TLP_PLOG4(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_tph_ext_cap_hdr
+ *
+ * PCIe RC TPH Extended Capability Header Register
+ *
+ * Note: the two preprocessor branches below declare the identical bit
+ * assignments, merely in opposite declaration order for big- vs.
+ * little-endian hosts.
+ */
+union bdk_pciercx_tph_ext_cap_hdr
+{
+ uint32_t u;
+ struct bdk_pciercx_tph_ext_cap_hdr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 16; /**< [ 15: 0](RO/WRSL) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t cv : 4; /**< [ 19: 16](RO/WRSL) Capability version.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 12; /**< [ 31: 20](RO/WRSL) Next capability offset.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_tph_ext_cap_hdr_s cn; */
+};
+typedef union bdk_pciercx_tph_ext_cap_hdr bdk_pciercx_tph_ext_cap_hdr_t;
+
+static inline uint64_t BDK_PCIERCX_TPH_EXT_CAP_HDR(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TPH_EXT_CAP_HDR; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TPH_EXT_CAP_HDR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x260ll;
+    }
+    __bdk_csr_fatal("PCIERCX_TPH_EXT_CAP_HDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TPH_EXT_CAP_HDR(a) bdk_pciercx_tph_ext_cap_hdr_t
+#define bustype_BDK_PCIERCX_TPH_EXT_CAP_HDR(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TPH_EXT_CAP_HDR(a) "PCIERCX_TPH_EXT_CAP_HDR"
+#define busnum_BDK_PCIERCX_TPH_EXT_CAP_HDR(a) (a)
+#define arguments_BDK_PCIERCX_TPH_EXT_CAP_HDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_tph_req_cap
+ *
+ * PCIe RC TPH Requestor Capability Register
+ *
+ * Note: the two preprocessor branches below declare the identical bit
+ * assignments, merely in opposite declaration order for big- vs.
+ * little-endian hosts.
+ */
+union bdk_pciercx_tph_req_cap
+{
+ uint32_t u;
+ struct bdk_pciercx_tph_req_cap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_27_31 : 5;
+ uint32_t st_tbl_size : 11; /**< [ 26: 16](RO/WRSL) ST table size. */
+ uint32_t reserved_11_15 : 5;
+ uint32_t st_tbl_l1 : 1; /**< [ 10: 10](RO/WRSL) Steering tag table bit 1. */
+ uint32_t st_tbl_l0 : 1; /**< [ 9: 9](RO/WRSL) Steering tag table bit 0. */
+ uint32_t ext : 1; /**< [ 8: 8](RO/WRSL) Extended TPH requester supported.
+ This field is writable through PEM()_CFG_WR. However, Extended TPH requester
+ is not supported. Therefore, the application must not write any value
+ other than 0x0 to this field. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t dev_sms : 1; /**< [ 2: 2](RO/WRSL) Device specific mode supported. */
+ uint32_t ivms : 1; /**< [ 1: 1](RO/WRSL) Interrupt vector mode supported. */
+ uint32_t nsms : 1; /**< [ 0: 0](RO) No ST mode supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t nsms : 1; /**< [ 0: 0](RO) No ST mode supported. */
+ uint32_t ivms : 1; /**< [ 1: 1](RO/WRSL) Interrupt vector mode supported. */
+ uint32_t dev_sms : 1; /**< [ 2: 2](RO/WRSL) Device specific mode supported. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t ext : 1; /**< [ 8: 8](RO/WRSL) Extended TPH requester supported.
+ This field is writable through PEM()_CFG_WR. However, Extended TPH requester
+ is not supported. Therefore, the application must not write any value
+ other than 0x0 to this field. */
+ uint32_t st_tbl_l0 : 1; /**< [ 9: 9](RO/WRSL) Steering tag table bit 0. */
+ uint32_t st_tbl_l1 : 1; /**< [ 10: 10](RO/WRSL) Steering tag table bit 1. */
+ uint32_t reserved_11_15 : 5;
+ uint32_t st_tbl_size : 11; /**< [ 26: 16](RO/WRSL) ST table size. */
+ uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_tph_req_cap_s cn; */
+};
+typedef union bdk_pciercx_tph_req_cap bdk_pciercx_tph_req_cap_t;
+
+static inline uint64_t BDK_PCIERCX_TPH_REQ_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TPH_REQ_CAP; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TPH_REQ_CAP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x264ll;
+    }
+    __bdk_csr_fatal("PCIERCX_TPH_REQ_CAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TPH_REQ_CAP(a) bdk_pciercx_tph_req_cap_t
+#define bustype_BDK_PCIERCX_TPH_REQ_CAP(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TPH_REQ_CAP(a) "PCIERCX_TPH_REQ_CAP"
+#define busnum_BDK_PCIERCX_TPH_REQ_CAP(a) (a)
+#define arguments_BDK_PCIERCX_TPH_REQ_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_tph_req_ctl
+ *
+ * PCIe RC TPH Requestor Control Register
+ *
+ * Note: the two preprocessor branches below declare the identical bit
+ * assignments, merely in opposite declaration order for big- vs.
+ * little-endian hosts.
+ */
+union bdk_pciercx_tph_req_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_tph_req_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_10_31 : 22;
+ uint32_t en : 2; /**< [ 9: 8](R/W) TPH requestor enable. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t msel : 3; /**< [ 2: 0](R/W) ST mode select. */
+#else /* Word 0 - Little Endian */
+ uint32_t msel : 3; /**< [ 2: 0](R/W) ST mode select. */
+ uint32_t reserved_3_7 : 5;
+ uint32_t en : 2; /**< [ 9: 8](R/W) TPH requestor enable. */
+ uint32_t reserved_10_31 : 22;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_tph_req_ctl_s cn; */
+};
+typedef union bdk_pciercx_tph_req_ctl bdk_pciercx_tph_req_ctl_t;
+
+static inline uint64_t BDK_PCIERCX_TPH_REQ_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TPH_REQ_CTL; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TPH_REQ_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x268ll;
+    }
+    __bdk_csr_fatal("PCIERCX_TPH_REQ_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TPH_REQ_CTL(a) bdk_pciercx_tph_req_ctl_t
+#define bustype_BDK_PCIERCX_TPH_REQ_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TPH_REQ_CTL(a) "PCIERCX_TPH_REQ_CTL"
+#define busnum_BDK_PCIERCX_TPH_REQ_CTL(a) (a)
+#define arguments_BDK_PCIERCX_TPH_REQ_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_tph_st_table0
+ *
+ * PCIe RC TPH St Table Register 0
+ *
+ * Note: the two preprocessor branches below declare the identical bit
+ * assignments, merely in opposite declaration order for big- vs.
+ * little-endian hosts.
+ */
+union bdk_pciercx_tph_st_table0
+{
+ uint32_t u;
+ struct bdk_pciercx_tph_st_table0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t ubyte : 8; /**< [ 15: 8](RO) ST table 0 upper byte. */
+ uint32_t lbyte : 8; /**< [ 7: 0](RO) ST table 0 lower byte. */
+#else /* Word 0 - Little Endian */
+ uint32_t lbyte : 8; /**< [ 7: 0](RO) ST table 0 lower byte. */
+ uint32_t ubyte : 8; /**< [ 15: 8](RO) ST table 0 upper byte. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_tph_st_table0_s cn; */
+};
+typedef union bdk_pciercx_tph_st_table0 bdk_pciercx_tph_st_table0_t;
+
+static inline uint64_t BDK_PCIERCX_TPH_ST_TABLE0(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TPH_ST_TABLE0; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TPH_ST_TABLE0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x26cll;
+    }
+    __bdk_csr_fatal("PCIERCX_TPH_ST_TABLE0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TPH_ST_TABLE0(a) bdk_pciercx_tph_st_table0_t
+#define bustype_BDK_PCIERCX_TPH_ST_TABLE0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TPH_ST_TABLE0(a) "PCIERCX_TPH_ST_TABLE0"
+#define busnum_BDK_PCIERCX_TPH_ST_TABLE0(a) (a)
+#define arguments_BDK_PCIERCX_TPH_ST_TABLE0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_trgt_cpl_lut_del_ent
+ *
+ * PCIe RC TRGT_CPL_LUT Delete Entry Control Register
+ *
+ * Note: the two preprocessor branches below declare the identical bit
+ * assignments, merely in opposite declaration order for big- vs.
+ * little-endian hosts.
+ */
+union bdk_pciercx_trgt_cpl_lut_del_ent
+{
+ uint32_t u;
+ struct bdk_pciercx_trgt_cpl_lut_del_ent_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t del_en : 1; /**< [ 31: 31](R/W1C/H) This is a one-shot bit. Writing a one triggers the deletion of the target
+ completion LUT entry that is specified in [LKUP_ID]. This is a self-clearing
+ register field. Reading from this register field always returns a zero. */
+ uint32_t lkup_id : 31; /**< [ 30: 0](R/W) This number selects one entry to delete from the target completion LUT. */
+#else /* Word 0 - Little Endian */
+ uint32_t lkup_id : 31; /**< [ 30: 0](R/W) This number selects one entry to delete from the target completion LUT. */
+ uint32_t del_en : 1; /**< [ 31: 31](R/W1C/H) This is a one-shot bit. Writing a one triggers the deletion of the target
+ completion LUT entry that is specified in [LKUP_ID]. This is a self-clearing
+ register field. Reading from this register field always returns a zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_trgt_cpl_lut_del_ent_s cn; */
+};
+typedef union bdk_pciercx_trgt_cpl_lut_del_ent bdk_pciercx_trgt_cpl_lut_del_ent_t;
+
+static inline uint64_t BDK_PCIERCX_TRGT_CPL_LUT_DEL_ENT(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TRGT_CPL_LUT_DEL_ENT; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TRGT_CPL_LUT_DEL_ENT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x8c8ll;
+    }
+    __bdk_csr_fatal("PCIERCX_TRGT_CPL_LUT_DEL_ENT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TRGT_CPL_LUT_DEL_ENT(a) bdk_pciercx_trgt_cpl_lut_del_ent_t
+#define bustype_BDK_PCIERCX_TRGT_CPL_LUT_DEL_ENT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TRGT_CPL_LUT_DEL_ENT(a) "PCIERCX_TRGT_CPL_LUT_DEL_ENT"
+#define busnum_BDK_PCIERCX_TRGT_CPL_LUT_DEL_ENT(a) (a)
+#define arguments_BDK_PCIERCX_TRGT_CPL_LUT_DEL_ENT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_trgt_map_ctl
+ *
+ * PCIe RC Programmable Target Map Control Register
+ *
+ * Note: the two preprocessor branches below declare the identical bit
+ * assignments, merely in opposite declaration order for big- vs.
+ * little-endian hosts.
+ */
+union bdk_pciercx_trgt_map_ctl
+{
+ uint32_t u;
+ struct bdk_pciercx_trgt_map_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_21_31 : 11;
+ uint32_t map_idx : 5; /**< [ 20: 16](R/W/H) The number of the PF function on which target values are set. This register does
+ not respect the Byte Enable setting. any write will affect all register bits. */
+ uint32_t reserved_13_15 : 3;
+ uint32_t map_vf : 6; /**< [ 12: 7](R/W/H) Target values for each BAR on the VF Function selected by the index number. This register
+ does not respect the Byte Enable setting. any write will affect all register bits. */
+ uint32_t map_rom : 1; /**< [ 6: 6](R/W/H) Target values for the ROM page of the PF Function selected by the index number. This
+ register
+ does not respect the Byte Enable setting. any write will affect all register bits. */
+ uint32_t map_pf : 6; /**< [ 5: 0](R/W/H) Target values for each BAR on the PF Function selected by the index number. This register
+ does not respect the Byte Enable setting. any write will affect all register bits. */
+#else /* Word 0 - Little Endian */
+ uint32_t map_pf : 6; /**< [ 5: 0](R/W/H) Target values for each BAR on the PF Function selected by the index number. This register
+ does not respect the Byte Enable setting. any write will affect all register bits. */
+ uint32_t map_rom : 1; /**< [ 6: 6](R/W/H) Target values for the ROM page of the PF Function selected by the index number. This
+ register
+ does not respect the Byte Enable setting. any write will affect all register bits. */
+ uint32_t map_vf : 6; /**< [ 12: 7](R/W/H) Target values for each BAR on the VF Function selected by the index number. This register
+ does not respect the Byte Enable setting. any write will affect all register bits. */
+ uint32_t reserved_13_15 : 3;
+ uint32_t map_idx : 5; /**< [ 20: 16](R/W/H) The number of the PF function on which target values are set. This register does
+ not respect the Byte Enable setting. any write will affect all register bits. */
+ uint32_t reserved_21_31 : 11;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_trgt_map_ctl_s cn; */
+};
+typedef union bdk_pciercx_trgt_map_ctl bdk_pciercx_trgt_map_ctl_t;
+
+static inline uint64_t BDK_PCIERCX_TRGT_MAP_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_TRGT_MAP_CTL; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_TRGT_MAP_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x81cll;
+    }
+    __bdk_csr_fatal("PCIERCX_TRGT_MAP_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_TRGT_MAP_CTL(a) bdk_pciercx_trgt_map_ctl_t
+#define bustype_BDK_PCIERCX_TRGT_MAP_CTL(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_TRGT_MAP_CTL(a) "PCIERCX_TRGT_MAP_CTL"
+#define busnum_BDK_PCIERCX_TRGT_MAP_CTL(a) (a)
+#define arguments_BDK_PCIERCX_TRGT_MAP_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ucor_err_msk
+ *
+ * PCIe RC Uncorrectable Error Mask Register
+ *
+ * Note: the generic layout [s] and the chip-specific layout [cn] assign the
+ * same named bits; they differ only in how the reserved low bits [3:0] are
+ * subdivided into reserved fields. Each layout repeats its fields in
+ * big-endian and little-endian declaration order.
+ */
+union bdk_pciercx_ucor_err_msk
+{
+ uint32_t u;
+ struct bdk_pciercx_ucor_err_msk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbem : 1; /**< [ 25: 25](R/W) TLP prefix blocked error mask. */
+ uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked mask. */
+ uint32_t reserved_23 : 1;
+ uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+ uint32_t avm : 1; /**< [ 21: 21](R/W) ACS violation mask. */
+ uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+ uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+ uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+ uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+ uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+ uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+ uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+ uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+ uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdem : 1; /**< [ 5: 5](R/W) Surprise down error mask. Writeable when PCIERC_LINK_CAP[SDERC] is set.
+ When PCIERC_LINK_CAP[SDERC] is clear, will always read as clear. */
+ uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+ uint32_t sdem : 1; /**< [ 5: 5](R/W) Surprise down error mask. Writeable when PCIERC_LINK_CAP[SDERC] is set.
+ When PCIERC_LINK_CAP[SDERC] is clear, will always read as clear. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+ uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+ uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+ uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+ uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+ uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+ uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+ uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+ uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+ uint32_t avm : 1; /**< [ 21: 21](R/W) ACS violation mask. */
+ uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+ uint32_t reserved_23 : 1;
+ uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked mask. */
+ uint32_t tpbem : 1; /**< [ 25: 25](R/W) TLP prefix blocked error mask. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_ucor_err_msk_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbem : 1; /**< [ 25: 25](R/W) TLP prefix blocked error mask. */
+ uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked mask. */
+ uint32_t reserved_23 : 1;
+ uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+ uint32_t avm : 1; /**< [ 21: 21](R/W) ACS violation mask. */
+ uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+ uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+ uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+ uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+ uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+ uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+ uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+ uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+ uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdem : 1; /**< [ 5: 5](R/W) Surprise down error mask. Writeable when PCIERC_LINK_CAP[SDERC] is set.
+ When PCIERC_LINK_CAP[SDERC] is clear, will always read as clear. */
+ uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t dlpem : 1; /**< [ 4: 4](R/W) Data link protocol error mask. */
+ uint32_t sdem : 1; /**< [ 5: 5](R/W) Surprise down error mask. Writeable when PCIERC_LINK_CAP[SDERC] is set.
+ When PCIERC_LINK_CAP[SDERC] is clear, will always read as clear. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlpm : 1; /**< [ 12: 12](R/W) Poisoned TLP mask. */
+ uint32_t fcpem : 1; /**< [ 13: 13](R/W) Flow control protocol error mask. */
+ uint32_t ctm : 1; /**< [ 14: 14](R/W) Completion timeout mask. */
+ uint32_t cam : 1; /**< [ 15: 15](R/W) Completer abort mask. */
+ uint32_t ucm : 1; /**< [ 16: 16](R/W) Unexpected completion mask. */
+ uint32_t rom : 1; /**< [ 17: 17](R/W) Receiver overflow mask. */
+ uint32_t mtlpm : 1; /**< [ 18: 18](R/W) Malformed TLP mask. */
+ uint32_t ecrcem : 1; /**< [ 19: 19](R/W) ECRC error mask. */
+ uint32_t urem : 1; /**< [ 20: 20](R/W) Unsupported request error mask. */
+ uint32_t avm : 1; /**< [ 21: 21](R/W) ACS violation mask. */
+ uint32_t uciem : 1; /**< [ 22: 22](R/W) Uncorrectable internal error mask. */
+ uint32_t reserved_23 : 1;
+ uint32_t uatombm : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked mask. */
+ uint32_t tpbem : 1; /**< [ 25: 25](R/W) TLP prefix blocked error mask. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_pciercx_ucor_err_msk bdk_pciercx_ucor_err_msk_t;
+
+static inline uint64_t BDK_PCIERCX_UCOR_ERR_MSK(unsigned long a) __attribute__ ((pure, always_inline));
+/* CSR offset of PCIERC(a)_UCOR_ERR_MSK; only CN9XXX indices 0..3 are valid. */
+static inline uint64_t BDK_PCIERCX_UCOR_ERR_MSK(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3)) {
+        /* Register instances are spaced 0x100000000 bytes apart. */
+        return 0x100000000ll * (a & 0x3) + 0x108ll;
+    }
+    __bdk_csr_fatal("PCIERCX_UCOR_ERR_MSK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UCOR_ERR_MSK(a) bdk_pciercx_ucor_err_msk_t
+#define bustype_BDK_PCIERCX_UCOR_ERR_MSK(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UCOR_ERR_MSK(a) "PCIERCX_UCOR_ERR_MSK"
+#define busnum_BDK_PCIERCX_UCOR_ERR_MSK(a) (a)
+#define arguments_BDK_PCIERCX_UCOR_ERR_MSK(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ucor_err_sev
+ *
+ * PCIe RC Uncorrectable Error Severity Register
+ */
+/* NOTE(review): auto-generated CSR model. The big- and little-endian branches
+ below are mirror images of the same 32-bit register layout; [cn] differs from
+ [s] only in splitting reserved_0_3 into reserved_0 and reserved_1_3. */
+union bdk_pciercx_ucor_err_sev
+{
+ uint32_t u;
+ struct bdk_pciercx_ucor_err_sev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbes : 1; /**< [ 25: 25](R/W) TLP prefix blocked error severity. */
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t reserved_23 : 1;
+ uint32_t ies : 1; /**< [ 22: 22](R/W) Uncorrectable internal error severity. */
+ uint32_t avs : 1; /**< [ 21: 21](R/W) AVCS violation severity. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< [ 5: 5](R/W) Surprise down error severity. Writable when PCIERC_LINK_CAP[SDERC] is set.
+ When PCIERC_LINK_CAP[SDERC] is clear, will always read as set. */
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t sdes : 1; /**< [ 5: 5](R/W) Surprise down error severity. Writable when PCIERC_LINK_CAP[SDERC] is set.
+ When PCIERC_LINK_CAP[SDERC] is clear, will always read as set. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t avs : 1; /**< [ 21: 21](R/W) AVCS violation severity. */
+ uint32_t ies : 1; /**< [ 22: 22](R/W) Uncorrectable internal error severity. */
+ uint32_t reserved_23 : 1;
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t tpbes : 1; /**< [ 25: 25](R/W) TLP prefix blocked error severity. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_ucor_err_sev_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbes : 1; /**< [ 25: 25](R/W) TLP prefix blocked error severity. */
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t reserved_23 : 1;
+ uint32_t ies : 1; /**< [ 22: 22](R/W) Uncorrectable internal error severity. */
+ uint32_t avs : 1; /**< [ 21: 21](R/W) AVCS violation severity. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< [ 5: 5](R/W) Surprise down error severity. Writable when PCIERC_LINK_CAP[SDERC] is set.
+ When PCIERC_LINK_CAP[SDERC] is clear, will always read as set. */
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W) Data link protocol error severity. */
+ uint32_t sdes : 1; /**< [ 5: 5](R/W) Surprise down error severity. Writable when PCIERC_LINK_CAP[SDERC] is set.
+ When PCIERC_LINK_CAP[SDERC] is clear, will always read as set. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W) Poisoned TLP severity. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W) Flow control protocol error severity. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W) Completion timeout severity. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W) Completer abort severity. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W) Unexpected completion severity. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W) Receiver overflow severity. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W) Malformed TLP severity. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W) ECRC error severity. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W) Unsupported request error severity. */
+ uint32_t avs : 1; /**< [ 21: 21](R/W) AVCS violation severity. */
+ uint32_t ies : 1; /**< [ 22: 22](R/W) Uncorrectable internal error severity. */
+ uint32_t reserved_23 : 1;
+ uint32_t uatombs : 1; /**< [ 24: 24](R/W) Unsupported AtomicOp egress blocked severity. */
+ uint32_t tpbes : 1; /**< [ 25: 25](R/W) TLP prefix blocked error severity. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_pciercx_ucor_err_sev bdk_pciercx_ucor_err_sev_t;
+
+/* Returns the config-space address token for PEM [a]: offset 0x10c with the
+ controller index encoded in bits 32+. Valid only for CN9XXX, a = 0..3. */
+static inline uint64_t BDK_PCIERCX_UCOR_ERR_SEV(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UCOR_ERR_SEV(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x10cll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn; otherwise control
+ falls off the end of a non-void function -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UCOR_ERR_SEV", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UCOR_ERR_SEV(a) bdk_pciercx_ucor_err_sev_t
+#define bustype_BDK_PCIERCX_UCOR_ERR_SEV(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UCOR_ERR_SEV(a) "PCIERCX_UCOR_ERR_SEV"
+#define busnum_BDK_PCIERCX_UCOR_ERR_SEV(a) (a)
+#define arguments_BDK_PCIERCX_UCOR_ERR_SEV(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ucor_err_stat
+ *
+ * PCIe RC Uncorrectable Error Status Register
+ */
+/* NOTE(review): auto-generated CSR model. The endian branches mirror one
+ 32-bit layout; [cn] differs from [s] only in splitting reserved_0_3. */
+union bdk_pciercx_ucor_err_stat
+{
+ uint32_t u;
+ struct bdk_pciercx_ucor_err_stat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+ uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+ uint32_t reserved_23 : 1;
+ uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+ uint32_t avs : 1; /**< [ 21: 21](R/W1C) ACS violation status. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< [ 5: 5](R/W1C/H) Surprise link down error status. */
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+ uint32_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_3 : 4;
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+ uint32_t sdes : 1; /**< [ 5: 5](R/W1C/H) Surprise link down error status. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+ uint32_t avs : 1; /**< [ 21: 21](R/W1C) ACS violation status. */
+ uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+ uint32_t reserved_23 : 1;
+ uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_pciercx_ucor_err_stat_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_26_31 : 6;
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+ uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+ uint32_t reserved_23 : 1;
+ uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+ uint32_t avs : 1; /**< [ 21: 21](R/W1C) ACS violation status. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t sdes : 1; /**< [ 5: 5](R/W1C/H) Surprise link down error status. */
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t reserved_1_3 : 3;
+ uint32_t dlpes : 1; /**< [ 4: 4](R/W1C/H) Data link protocol error status. */
+ uint32_t sdes : 1; /**< [ 5: 5](R/W1C/H) Surprise link down error status. */
+ uint32_t reserved_6_11 : 6;
+ uint32_t ptlps : 1; /**< [ 12: 12](R/W1C/H) Poisoned TLP status. */
+ uint32_t fcpes : 1; /**< [ 13: 13](R/W1C/H) Flow control protocol error status. */
+ uint32_t cts : 1; /**< [ 14: 14](R/W1C/H) Completion timeout status. */
+ uint32_t cas : 1; /**< [ 15: 15](R/W1C/H) Completer abort status. */
+ uint32_t ucs : 1; /**< [ 16: 16](R/W1C/H) Unexpected completion status. */
+ uint32_t ros : 1; /**< [ 17: 17](R/W1C/H) Receiver overflow status. */
+ uint32_t mtlps : 1; /**< [ 18: 18](R/W1C/H) Malformed TLP status. */
+ uint32_t ecrces : 1; /**< [ 19: 19](R/W1C/H) ECRC error status. */
+ uint32_t ures : 1; /**< [ 20: 20](R/W1C/H) Unsupported request error status. */
+ uint32_t avs : 1; /**< [ 21: 21](R/W1C) ACS violation status. */
+ uint32_t ucies : 1; /**< [ 22: 22](R/W1C/H) Uncorrectable internal error status. */
+ uint32_t reserved_23 : 1;
+ uint32_t uatombs : 1; /**< [ 24: 24](RO) Unsupported AtomicOp egress blocked status. */
+ uint32_t tpbes : 1; /**< [ 25: 25](RO) Unsupported TLP prefix blocked error status. */
+ uint32_t reserved_26_31 : 6;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_pciercx_ucor_err_stat bdk_pciercx_ucor_err_stat_t;
+
+/* Returns the config-space address token for PEM [a]: offset 0x104 with the
+ controller index encoded in bits 32+. Valid only for CN9XXX, a = 0..3. */
+static inline uint64_t BDK_PCIERCX_UCOR_ERR_STAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UCOR_ERR_STAT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x104ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UCOR_ERR_STAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UCOR_ERR_STAT(a) bdk_pciercx_ucor_err_stat_t
+#define bustype_BDK_PCIERCX_UCOR_ERR_STAT(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UCOR_ERR_STAT(a) "PCIERCX_UCOR_ERR_STAT"
+#define busnum_BDK_PCIERCX_UCOR_ERR_STAT(a) (a)
+#define arguments_BDK_PCIERCX_UCOR_ERR_STAT(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap0
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap0
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap0_s cn; */
+};
+typedef union bdk_pciercx_unused_cap0 bdk_pciercx_unused_cap0_t;
+
+/* Address token: offset 0xbc on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xbcll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP0(a) bdk_pciercx_unused_cap0_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP0(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP0(a) "PCIERCX_UNUSED_CAP0"
+#define busnum_BDK_PCIERCX_UNUSED_CAP0(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP0(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap1
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap1
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap1_s cn; */
+};
+typedef union bdk_pciercx_unused_cap1 bdk_pciercx_unused_cap1_t;
+
+/* Address token: offset 0xc0 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xc0ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP1(a) bdk_pciercx_unused_cap1_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP1(a) "PCIERCX_UNUSED_CAP1"
+#define busnum_BDK_PCIERCX_UNUSED_CAP1(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap10
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap10
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap10_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap10_s cn; */
+};
+typedef union bdk_pciercx_unused_cap10 bdk_pciercx_unused_cap10_t;
+
+/* Address token: offset 0xe4 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP10(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP10(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xe4ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP10", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP10(a) bdk_pciercx_unused_cap10_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP10(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP10(a) "PCIERCX_UNUSED_CAP10"
+#define busnum_BDK_PCIERCX_UNUSED_CAP10(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP10(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap11
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap11
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap11_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap11_s cn; */
+};
+typedef union bdk_pciercx_unused_cap11 bdk_pciercx_unused_cap11_t;
+
+/* Address token: offset 0xe8 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP11(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP11(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xe8ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP11", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP11(a) bdk_pciercx_unused_cap11_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP11(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP11(a) "PCIERCX_UNUSED_CAP11"
+#define busnum_BDK_PCIERCX_UNUSED_CAP11(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP11(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap12
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap12
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap12_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap12_s cn; */
+};
+typedef union bdk_pciercx_unused_cap12 bdk_pciercx_unused_cap12_t;
+
+/* Address token: offset 0xec on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP12(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP12(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xecll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP12", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP12(a) bdk_pciercx_unused_cap12_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP12(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP12(a) "PCIERCX_UNUSED_CAP12"
+#define busnum_BDK_PCIERCX_UNUSED_CAP12(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP12(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap13
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap13
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap13_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap13_s cn; */
+};
+typedef union bdk_pciercx_unused_cap13 bdk_pciercx_unused_cap13_t;
+
+/* Address token: offset 0xf0 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP13(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP13(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xf0ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP13", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP13(a) bdk_pciercx_unused_cap13_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP13(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP13(a) "PCIERCX_UNUSED_CAP13"
+#define busnum_BDK_PCIERCX_UNUSED_CAP13(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP13(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap14
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap14
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap14_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap14_s cn; */
+};
+typedef union bdk_pciercx_unused_cap14 bdk_pciercx_unused_cap14_t;
+
+/* Address token: offset 0xf4 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP14(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP14(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xf4ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP14", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP14(a) bdk_pciercx_unused_cap14_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP14(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP14(a) "PCIERCX_UNUSED_CAP14"
+#define busnum_BDK_PCIERCX_UNUSED_CAP14(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP14(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap15
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap15
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap15_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap15_s cn; */
+};
+typedef union bdk_pciercx_unused_cap15 bdk_pciercx_unused_cap15_t;
+
+/* Address token: offset 0xf8 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP15(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP15(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xf8ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP15", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP15(a) bdk_pciercx_unused_cap15_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP15(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP15(a) "PCIERCX_UNUSED_CAP15"
+#define busnum_BDK_PCIERCX_UNUSED_CAP15(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP15(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap16
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap16
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap16_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap16_s cn; */
+};
+typedef union bdk_pciercx_unused_cap16 bdk_pciercx_unused_cap16_t;
+
+/* Address token: offset 0xfc on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP16(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP16(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xfcll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP16", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP16(a) bdk_pciercx_unused_cap16_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP16(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP16(a) "PCIERCX_UNUSED_CAP16"
+#define busnum_BDK_PCIERCX_UNUSED_CAP16(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP16(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap2
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap2
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap2_s cn; */
+};
+typedef union bdk_pciercx_unused_cap2 bdk_pciercx_unused_cap2_t;
+
+/* Address token: offset 0xc4 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xc4ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP2(a) bdk_pciercx_unused_cap2_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP2(a) "PCIERCX_UNUSED_CAP2"
+#define busnum_BDK_PCIERCX_UNUSED_CAP2(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP2(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap3
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap3
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap3_s cn; */
+};
+typedef union bdk_pciercx_unused_cap3 bdk_pciercx_unused_cap3_t;
+
+/* Address token: offset 0xc8 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xc8ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP3(a) bdk_pciercx_unused_cap3_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP3(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP3(a) "PCIERCX_UNUSED_CAP3"
+#define busnum_BDK_PCIERCX_UNUSED_CAP3(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP3(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap4
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap4
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap4_s cn; */
+};
+typedef union bdk_pciercx_unused_cap4 bdk_pciercx_unused_cap4_t;
+
+/* Address token: offset 0xcc on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xccll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP4(a) bdk_pciercx_unused_cap4_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP4(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP4(a) "PCIERCX_UNUSED_CAP4"
+#define busnum_BDK_PCIERCX_UNUSED_CAP4(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP4(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap7
+ *
+ * PCIe RC Unused Capability Registers
+ */
+/* NOTE(review): opaque 32-bit software-defined word; both endian branches
+ declare the same single field, so the layout is endian-invariant. */
+union bdk_pciercx_unused_cap7
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap7_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap7_s cn; */
+};
+typedef union bdk_pciercx_unused_cap7 bdk_pciercx_unused_cap7_t;
+
+/* Address token: offset 0xd8 on CN9XXX PEM a (0..3); fatal otherwise. */
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP7(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP7(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xd8ll + 0x100000000ll * ((a) & 0x3);
+ /* NOTE(review): presumed __bdk_csr_fatal() is noreturn -- confirm. */
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP7", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP7(a) bdk_pciercx_unused_cap7_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP7(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP7(a) "PCIERCX_UNUSED_CAP7"
+#define busnum_BDK_PCIERCX_UNUSED_CAP7(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP7(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap8
+ *
+ * PCIe RC Unused Capability Registers
+ */
+union bdk_pciercx_unused_cap8
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap8_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap8_s cn; */
+};
+typedef union bdk_pciercx_unused_cap8 bdk_pciercx_unused_cap8_t;
+
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP8(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP8(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xdcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP8", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP8(a) bdk_pciercx_unused_cap8_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP8(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP8(a) "PCIERCX_UNUSED_CAP8"
+#define busnum_BDK_PCIERCX_UNUSED_CAP8(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP8(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_unused_cap9
+ *
+ * PCIe RC Unused Capability Registers
+ */
+union bdk_pciercx_unused_cap9
+{
+ uint32_t u;
+ struct bdk_pciercx_unused_cap9_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t sw_hdr : 32; /**< [ 31: 0](RO/WRSL) Software headers. This configuration area is opaque to PCIERC hardware. It is available
+ for software to add additional configuration capabilities.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_unused_cap9_s cn; */
+};
+typedef union bdk_pciercx_unused_cap9 bdk_pciercx_unused_cap9_t;
+
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP9(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UNUSED_CAP9(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xe0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_UNUSED_CAP9", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UNUSED_CAP9(a) bdk_pciercx_unused_cap9_t
+#define bustype_BDK_PCIERCX_UNUSED_CAP9(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UNUSED_CAP9(a) "PCIERCX_UNUSED_CAP9"
+#define busnum_BDK_PCIERCX_UNUSED_CAP9(a) (a)
+#define arguments_BDK_PCIERCX_UNUSED_CAP9(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_upconfig
+ *
+ * PCIe RC UpConfigure Multi-lane Control Register
+ */
+union bdk_pciercx_upconfig
+{
+ uint32_t u;
+ struct bdk_pciercx_upconfig_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t upc_supp : 1; /**< [ 7: 7](R/W) Upconfigure support.
+ The core sends this value to the link upconfigure capability in TS2 ordered
+ sets in Configuration.Complete state. */
+ uint32_t dir_lnk_wdth_chg : 1; /**< [ 6: 6](R/W/H) Directed link width change.
+ The core always moves to configuration state through recovery state
+ when this bit is set.
+
+ If PCIERC_RAS_EINJ_CTL6PE[LTSSM_VAR] is set and PCIERC_LINK_CTL2[HASD]
+ is zero, the core starts upconfigure or autonomous width
+ downsizing (to the [TRGT_LNK_WDTH] value) in the configuration
+ state.
+
+ If [TRGT_LNK_WDTH] is 0x0, the core does not start upconfigure or autonomous
+ width downsizing in the configuration state.
+
+ The core self-clears this field when the core accepts this
+ request. */
+ uint32_t trgt_lnk_wdth : 6; /**< [ 5: 0](R/W/H) Target link width.
+ 0x0 = Core does not start upconfigure or autonomous width downsizing in configuration
+ state.
+ 0x1 = x1.
+ 0x2 = x2.
+ 0x4 = x4.
+ 0x8 = x8.
+ 0x10 = x16.
+ 0x20 = x32 (Not supported). */
+#else /* Word 0 - Little Endian */
+ uint32_t trgt_lnk_wdth : 6; /**< [ 5: 0](R/W/H) Target link width.
+ 0x0 = Core does not start upconfigure or autonomous width downsizing in configuration
+ state.
+ 0x1 = x1.
+ 0x2 = x2.
+ 0x4 = x4.
+ 0x8 = x8.
+ 0x10 = x16.
+ 0x20 = x32 (Not supported). */
+ uint32_t dir_lnk_wdth_chg : 1; /**< [ 6: 6](R/W/H) Directed link width change.
+ The core always moves to configuration state through recovery state
+ when this bit is set.
+
+ If PCIERC_RAS_EINJ_CTL6PE[LTSSM_VAR] is set and PCIERC_LINK_CTL2[HASD]
+ is zero, the core starts upconfigure or autonomous width
+ downsizing (to the [TRGT_LNK_WDTH] value) in the configuration
+ state.
+
+ If [TRGT_LNK_WDTH] is 0x0, the core does not start upconfigure or autonomous
+ width downsizing in the configuration state.
+
+ The core self-clears this field when the core accepts this
+ request. */
+ uint32_t upc_supp : 1; /**< [ 7: 7](R/W) Upconfigure support.
+ The core sends this value to the link upconfigure capability in TS2 ordered
+ sets in Configuration.Complete state. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_upconfig_s cn; */
+};
+typedef union bdk_pciercx_upconfig bdk_pciercx_upconfig_t;
+
+static inline uint64_t BDK_PCIERCX_UPCONFIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_UPCONFIG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8c0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_UPCONFIG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_UPCONFIG(a) bdk_pciercx_upconfig_t
+#define bustype_BDK_PCIERCX_UPCONFIG(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_UPCONFIG(a) "PCIERCX_UPCONFIG"
+#define busnum_BDK_PCIERCX_UPCONFIG(a) (a)
+#define arguments_BDK_PCIERCX_UPCONFIG(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ver_num
+ *
+ * PCIe RC Controller IIP Release Version Number Register
+ */
+union bdk_pciercx_ver_num
+{
+ uint32_t u;
+ struct bdk_pciercx_ver_num_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vn : 32; /**< [ 31: 0](RO) Version number. Convert hex characters to ASCII to interpret. */
+#else /* Word 0 - Little Endian */
+ uint32_t vn : 32; /**< [ 31: 0](RO) Version number. Convert hex characters to ASCII to interpret. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ver_num_s cn; */
+};
+typedef union bdk_pciercx_ver_num bdk_pciercx_ver_num_t;
+
+static inline uint64_t BDK_PCIERCX_VER_NUM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_VER_NUM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8f8ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_VER_NUM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_VER_NUM(a) bdk_pciercx_ver_num_t
+#define bustype_BDK_PCIERCX_VER_NUM(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_VER_NUM(a) "PCIERCX_VER_NUM"
+#define busnum_BDK_PCIERCX_VER_NUM(a) (a)
+#define arguments_BDK_PCIERCX_VER_NUM(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_ver_type
+ *
+ * PCIe RC Contorller IIP Release Version Type Register
+ */
+union bdk_pciercx_ver_type
+{
+ uint32_t u;
+ struct bdk_pciercx_ver_type_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t vt : 32; /**< [ 31: 0](RO) Version type. Convert hex characters to ASCII to interpret. */
+#else /* Word 0 - Little Endian */
+ uint32_t vt : 32; /**< [ 31: 0](RO) Version type. Convert hex characters to ASCII to interpret. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_ver_type_s cn; */
+};
+typedef union bdk_pciercx_ver_type bdk_pciercx_ver_type_t;
+
+static inline uint64_t BDK_PCIERCX_VER_TYPE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_VER_TYPE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8fcll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_VER_TYPE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_VER_TYPE(a) bdk_pciercx_ver_type_t
+#define bustype_BDK_PCIERCX_VER_TYPE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_VER_TYPE(a) "PCIERCX_VER_TYPE"
+#define busnum_BDK_PCIERCX_VER_TYPE(a) (a)
+#define arguments_BDK_PCIERCX_VER_TYPE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_vpd_base
+ *
+ * PCIe RC PCI Express VPD Control and Capabilities Register
+ * Internal:
+ * All 32 bits are writable through PEM()_CFG_WR, so that software may replace VPD
+ * capability with another desired capablility as a PCIERC_UNUSED_CAP5, (e.g. Enhanced
+ * Allocation) if desired.
+ */
+union bdk_pciercx_vpd_base
+{
+ uint32_t u;
+ struct bdk_pciercx_vpd_base_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t flag : 1; /**< [ 31: 31](R/W) VPD flag. */
+ uint32_t addr : 15; /**< [ 30: 16](R/W) VPD address. */
+ uint32_t nco : 8; /**< [ 15: 8](RO/WRSL) Next capability offset. End of list.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t pcieec : 8; /**< [ 7: 0](RO) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+#else /* Word 0 - Little Endian */
+ uint32_t pcieec : 8; /**< [ 7: 0](RO) PCI Express extended capability.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t nco : 8; /**< [ 15: 8](RO/WRSL) Next capability offset. End of list.
+ Writable through PEM()_CFG_WR. However, the application must not change this field. */
+ uint32_t addr : 15; /**< [ 30: 16](R/W) VPD address. */
+ uint32_t flag : 1; /**< [ 31: 31](R/W) VPD flag. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_vpd_base_s cn; */
+};
+typedef union bdk_pciercx_vpd_base bdk_pciercx_vpd_base_t;
+
+static inline uint64_t BDK_PCIERCX_VPD_BASE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_VPD_BASE(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xd0ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_VPD_BASE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_VPD_BASE(a) bdk_pciercx_vpd_base_t
+#define bustype_BDK_PCIERCX_VPD_BASE(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_VPD_BASE(a) "PCIERCX_VPD_BASE"
+#define busnum_BDK_PCIERCX_VPD_BASE(a) (a)
+#define arguments_BDK_PCIERCX_VPD_BASE(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_vpd_data
+ *
+ * PCIe RC PCI Express VPD Data Register
+ * Internal:
+ * All 32 bits are writable through PEM()_CFG_WR, so that software may replace VPD
+ * capability with another desired capablility as a PCIERC_UNUSED_CAP6, (e.g. Enhanced
+ * Allocation) if desired.
+ */
+union bdk_pciercx_vpd_data
+{
+ uint32_t u;
+ struct bdk_pciercx_vpd_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) VPD data. */
+#else /* Word 0 - Little Endian */
+ uint32_t data : 32; /**< [ 31: 0](R/W) VPD data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_vpd_data_s cn; */
+};
+typedef union bdk_pciercx_vpd_data bdk_pciercx_vpd_data_t;
+
+static inline uint64_t BDK_PCIERCX_VPD_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_VPD_DATA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0xd4ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_VPD_DATA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_VPD_DATA(a) bdk_pciercx_vpd_data_t
+#define bustype_BDK_PCIERCX_VPD_DATA(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_VPD_DATA(a) "PCIERCX_VPD_DATA"
+#define busnum_BDK_PCIERCX_VPD_DATA(a) (a)
+#define arguments_BDK_PCIERCX_VPD_DATA(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_xmit_arb1
+ *
+ * PCIe RC VC Transmit Arbitration Register 1
+ */
+union bdk_pciercx_xmit_arb1
+{
+ uint32_t u;
+ struct bdk_pciercx_xmit_arb1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wrr_vc3 : 8; /**< [ 31: 24](RO) WRR weight for VC3. */
+ uint32_t wrr_vc2 : 8; /**< [ 23: 16](RO) WRR weight for VC2. */
+ uint32_t wrr_vc1 : 8; /**< [ 15: 8](RO) WRR weight for VC1. */
+ uint32_t wrr_vc0 : 8; /**< [ 7: 0](RO) WRR weight for VC0. */
+#else /* Word 0 - Little Endian */
+ uint32_t wrr_vc0 : 8; /**< [ 7: 0](RO) WRR weight for VC0. */
+ uint32_t wrr_vc1 : 8; /**< [ 15: 8](RO) WRR weight for VC1. */
+ uint32_t wrr_vc2 : 8; /**< [ 23: 16](RO) WRR weight for VC2. */
+ uint32_t wrr_vc3 : 8; /**< [ 31: 24](RO) WRR weight for VC3. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_xmit_arb1_s cn; */
+};
+typedef union bdk_pciercx_xmit_arb1 bdk_pciercx_xmit_arb1_t;
+
+static inline uint64_t BDK_PCIERCX_XMIT_ARB1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_XMIT_ARB1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x740ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_XMIT_ARB1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_XMIT_ARB1(a) bdk_pciercx_xmit_arb1_t
+#define bustype_BDK_PCIERCX_XMIT_ARB1(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_XMIT_ARB1(a) "PCIERCX_XMIT_ARB1"
+#define busnum_BDK_PCIERCX_XMIT_ARB1(a) (a)
+#define arguments_BDK_PCIERCX_XMIT_ARB1(a) (a),-1,-1,-1
+
+/**
+ * Register (PCICONFIGRC) pcierc#_xmit_arb2
+ *
+ * PCIe RC VC Transmit Arbitration Register 2
+ */
+union bdk_pciercx_xmit_arb2
+{
+ uint32_t u;
+ struct bdk_pciercx_xmit_arb2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wrr_vc7 : 8; /**< [ 31: 24](RO) WRR weight for VC7. */
+ uint32_t wrr_vc6 : 8; /**< [ 23: 16](RO) WRR weight for VC6. */
+ uint32_t wrr_vc5 : 8; /**< [ 15: 8](RO) WRR weight for VC5. */
+ uint32_t wrr_vc4 : 8; /**< [ 7: 0](RO) WRR weight for VC4. */
+#else /* Word 0 - Little Endian */
+ uint32_t wrr_vc4 : 8; /**< [ 7: 0](RO) WRR weight for VC4. */
+ uint32_t wrr_vc5 : 8; /**< [ 15: 8](RO) WRR weight for VC5. */
+ uint32_t wrr_vc6 : 8; /**< [ 23: 16](RO) WRR weight for VC6. */
+ uint32_t wrr_vc7 : 8; /**< [ 31: 24](RO) WRR weight for VC7. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_pciercx_xmit_arb2_s cn; */
+};
+typedef union bdk_pciercx_xmit_arb2 bdk_pciercx_xmit_arb2_t;
+
+static inline uint64_t BDK_PCIERCX_XMIT_ARB2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_PCIERCX_XMIT_ARB2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x744ll + 0x100000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("PCIERCX_XMIT_ARB2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_PCIERCX_XMIT_ARB2(a) bdk_pciercx_xmit_arb2_t
+#define bustype_BDK_PCIERCX_XMIT_ARB2(a) BDK_CSR_TYPE_PCICONFIGRC
+#define basename_BDK_PCIERCX_XMIT_ARB2(a) "PCIERCX_XMIT_ARB2"
+#define busnum_BDK_PCIERCX_XMIT_ARB2(a) (a)
+#define arguments_BDK_PCIERCX_XMIT_ARB2(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_PCIERC_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h
index 86f0358a96..05c2534b1e 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rst.h
@@ -41,6 +41,8 @@
* PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <bdk-minimal.h>
+#include <libbdk-arch/bdk-csr.h>
/**
* @file
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rvu.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rvu.h
new file mode 100644
index 0000000000..4a0c33d521
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-rvu.h
@@ -0,0 +1,4167 @@
+#ifndef __BDK_CSRS_RVU_H__
+#define __BDK_CSRS_RVU_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium RVU.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration rvu_af_int_vec_e
+ *
+ * RVU Admin Function Interrupt Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ * Internal:
+ * RVU maintains the state of these vectors internally, and generates GIB
+ * messages for it without accessing the MSI-X table region in LLC/DRAM.
+ */
+#define BDK_RVU_AF_INT_VEC_E_GEN (3)
+#define BDK_RVU_AF_INT_VEC_E_MBOX (4)
+#define BDK_RVU_AF_INT_VEC_E_PFFLR (1)
+#define BDK_RVU_AF_INT_VEC_E_PFME (2)
+#define BDK_RVU_AF_INT_VEC_E_POISON (0)
+
+/**
+ * Enumeration rvu_bar_e
+ *
+ * RVU Base Address Register Enumeration
+ * Enumerates the base address registers.
+ * Internal:
+ * For documentation only.
+ */
+#define BDK_RVU_BAR_E_RVU_PFX_BAR0(a) (0x850000000000ll + 0x1000000000ll * (a))
+#define BDK_RVU_BAR_E_RVU_PFX_BAR0_SIZE 0x200000000ull
+#define BDK_RVU_BAR_E_RVU_PFX_FUNCX_BAR2(a,b) (0x850200000000ll + 0x1000000000ll * (a) + 0x2000000ll * (b))
+#define BDK_RVU_BAR_E_RVU_PFX_FUNCX_BAR2_SIZE 0x2000000ull
+
+/**
+ * Enumeration rvu_block_addr_e
+ *
+ * RVU Block Address Enumeration
+ * Enumerates addressing of RVU resource blocks within each RVU BAR, i.e. values
+ * of RVU_FUNC_ADDR_S[BLOCK] and RVU_AF_ADDR_S[BLOCK].
+ */
+#define BDK_RVU_BLOCK_ADDR_E_CPTX(a) (0xa + (a))
+#define BDK_RVU_BLOCK_ADDR_E_LMT (1)
+#define BDK_RVU_BLOCK_ADDR_E_MSIX (2)
+#define BDK_RVU_BLOCK_ADDR_E_NDCX(a) (0xc + (a))
+#define BDK_RVU_BLOCK_ADDR_E_NIXX(a) (4 + (a))
+#define BDK_RVU_BLOCK_ADDR_E_NPA (3)
+#define BDK_RVU_BLOCK_ADDR_E_NPC (6)
+#define BDK_RVU_BLOCK_ADDR_E_RX(a) (0 + (a))
+#define BDK_RVU_BLOCK_ADDR_E_RVUM (0)
+#define BDK_RVU_BLOCK_ADDR_E_SSO (7)
+#define BDK_RVU_BLOCK_ADDR_E_SSOW (8)
+#define BDK_RVU_BLOCK_ADDR_E_TIM (9)
+
+/**
+ * Enumeration rvu_block_type_e
+ *
+ * RVU Block Type Enumeration
+ * Enumerates values of RVU_PF/RVU_VF_BLOCK_ADDR()_DISC[BTYPE].
+ */
+#define BDK_RVU_BLOCK_TYPE_E_CPT (9)
+#define BDK_RVU_BLOCK_TYPE_E_DDF (0xb)
+#define BDK_RVU_BLOCK_TYPE_E_DFA (0xe)
+#define BDK_RVU_BLOCK_TYPE_E_HNA (0xf)
+#define BDK_RVU_BLOCK_TYPE_E_LMT (2)
+#define BDK_RVU_BLOCK_TYPE_E_MSIX (1)
+#define BDK_RVU_BLOCK_TYPE_E_NDC (0xa)
+#define BDK_RVU_BLOCK_TYPE_E_NIX (3)
+#define BDK_RVU_BLOCK_TYPE_E_NPA (4)
+#define BDK_RVU_BLOCK_TYPE_E_NPC (5)
+#define BDK_RVU_BLOCK_TYPE_E_RAD (0xd)
+#define BDK_RVU_BLOCK_TYPE_E_RVUM (0)
+#define BDK_RVU_BLOCK_TYPE_E_SSO (6)
+#define BDK_RVU_BLOCK_TYPE_E_SSOW (7)
+#define BDK_RVU_BLOCK_TYPE_E_TIM (8)
+#define BDK_RVU_BLOCK_TYPE_E_ZIP (0xc)
+
+/**
+ * Enumeration rvu_bus_lf_e
+ *
+ * INTERNAL: RVU Bus LF Range Enumeration
+ *
+ * Enumerates the LF range for the RVU bus.
+ * Internal:
+ * This is an enum used in csr3 virtual equations.
+ */
+#define BDK_RVU_BUS_LF_E_RVU_BUS_LFX(a) (0 + 0x2000000 * (a))
+
+/**
+ * Enumeration rvu_bus_pf_e
+ *
+ * INTERNAL: RVU Bus PF Range Enumeration
+ *
+ * Enumerates the PF range for the RVU bus.
+ * Internal:
+ * This is an enum used in csr3 virtual equations.
+ */
+#define BDK_RVU_BUS_PF_E_RVU_BUS_PFX(a) (0ll + 0x1000000000ll * (a))
+
+/**
+ * Enumeration rvu_bus_pfvf_e
+ *
+ * INTERNAL: RVU Bus PFVF Range Enumeration
+ *
+ * Enumerates the PF and VF ranges for the RVU bus.
+ * Internal:
+ * This is an enum used in csr3 virtual equations.
+ */
+#define BDK_RVU_BUS_PFVF_E_RVU_BUS_PFX(a) (0 + 0x2000000 * (a))
+#define BDK_RVU_BUS_PFVF_E_RVU_BUS_VFX(a) (0 + 0x2000000 * (a))
+
+/**
+ * Enumeration rvu_busbar_e
+ *
+ * INTERNAL: RVU Bus Base Address Region Enumeration
+ *
+ * Enumerates the base address region for the RVU bus.
+ * Internal:
+ * This is an enum used in csr3 virtual equations.
+ */
+#define BDK_RVU_BUSBAR_E_RVU_BUSBAR0 (0)
+#define BDK_RVU_BUSBAR_E_RVU_BUSBAR2 (0x200000000ll)
+
+/**
+ * Enumeration rvu_busdid_e
+ *
+ * INTERNAL: RVU Bus DID Enumeration
+ *
+ * Enumerates the DID offset for the RVU bus.
+ * Internal:
+ * This is an enum used in csr3 virtual equations.
+ */
+#define BDK_RVU_BUSDID_E_RVU_BUSDID (0x850000000000ll)
+
+/**
+ * Enumeration rvu_ndc_idx_e
+ *
+ * RVU NDC Index Enumeration
+ * Enumerates NDC instances and index of RVU_BLOCK_ADDR_E::NDC().
+ */
+#define BDK_RVU_NDC_IDX_E_NIXX_RX(a) (0 + 4 * (a))
+#define BDK_RVU_NDC_IDX_E_NIXX_TX(a) (1 + 4 * (a))
+#define BDK_RVU_NDC_IDX_E_NPA_UX(a) (2 + 0 * (a))
+
+/**
+ * Enumeration rvu_pf_int_vec_e
+ *
+ * RVU PF Interrupt Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_RVU_PF_INT_VEC_E_AFPF_MBOX (0xc)
+#define BDK_RVU_PF_INT_VEC_E_VFFLRX(a) (0 + (a))
+#define BDK_RVU_PF_INT_VEC_E_VFMEX(a) (4 + (a))
+#define BDK_RVU_PF_INT_VEC_E_VFPF_MBOXX(a) (8 + (a))
+
+/**
+ * Enumeration rvu_vf_int_vec_e
+ *
+ * RVU VF Interrupt Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_RVU_VF_INT_VEC_E_MBOX (0)
+
+/**
+ * Structure rvu_af_addr_s
+ *
+ * RVU Admin Function Register Address Structure
+ * Address format for accessing shared Admin Function (AF) registers in
+ * RVU PF BAR0. These registers may be accessed by all RVU PFs whose
+ * RVU_PRIV_PF()_CFG[AF_ENA] bit is set.
+ */
+union bdk_rvu_af_addr_s
+{
+ uint64_t u;
+ struct bdk_rvu_af_addr_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t block : 5; /**< [ 32: 28] Resource block enumerated by RVU_BLOCK_ADDR_E. */
+ uint64_t addr : 28; /**< [ 27: 0] Register address within [BLOCK]. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 28; /**< [ 27: 0] Register address within [BLOCK]. */
+ uint64_t block : 5; /**< [ 32: 28] Resource block enumerated by RVU_BLOCK_ADDR_E. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_addr_s_s cn; */
+};
+
+/**
+ * Structure rvu_func_addr_s
+ *
+ * RVU Function-unique Address Structure
+ * Address format for accessing function-unique registers in RVU PF/FUNC BAR2.
+ */
+union bdk_rvu_func_addr_s
+{
+ uint32_t u;
+ struct bdk_rvu_func_addr_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_25_31 : 7;
+ uint32_t block : 5; /**< [ 24: 20] Resource block enumerated by RVU_BLOCK_ADDR_E. */
+ uint32_t lf_slot : 8; /**< [ 19: 12] Local function slot, or extended register address within the block's LF
+ slot 0, depending on [BLOCK]. */
+ uint32_t addr : 12; /**< [ 11: 0] Register address within the block and LF slot. */
+#else /* Word 0 - Little Endian */
+ uint32_t addr : 12; /**< [ 11: 0] Register address within the block and LF slot. */
+ uint32_t lf_slot : 8; /**< [ 19: 12] Local function slot, or extended register address within the block's LF
+ slot 0, depending on [BLOCK]. */
+ uint32_t block : 5; /**< [ 24: 20] Resource block enumerated by RVU_BLOCK_ADDR_E. */
+ uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_func_addr_s_s cn; */
+};
+
+/**
+ * Structure rvu_msix_vec_s
+ *
+ * RVU MSI-X Vector Structure
+ * Format of entries in the RVU MSI-X table region in LLC/DRAM. See
+ * RVU_PRIV_PF()_MSIX_CFG.
+ */
+union bdk_rvu_msix_vec_s
+{
+ uint64_t u[2];
+ struct bdk_rvu_msix_vec_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 64; /**< [ 63: 0] IOVA to use for MSI-X delivery of this vector. Bits \<63:53\> are reserved.
+ Bit \<1:0\> are reserved for alignment. */
+#else /* Word 0 - Little Endian */
+ uint64_t addr : 64; /**< [ 63: 0] IOVA to use for MSI-X delivery of this vector. Bits \<63:53\> are reserved.
+ Bit \<1:0\> are reserved for alignment. */
+#endif /* Word 0 - End */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 1 - Big Endian */
+ uint64_t reserved_98_127 : 30;
+ uint64_t pend : 1; /**< [ 97: 97] Vector's pending bit in the MSI-X PBA. */
+ uint64_t mask : 1; /**< [ 96: 96] When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t data : 32; /**< [ 95: 64] Data to use for MSI-X delivery of this vector. */
+#else /* Word 1 - Little Endian */
+ uint64_t data : 32; /**< [ 95: 64] Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 96: 96] When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t pend : 1; /**< [ 97: 97] Vector's pending bit in the MSI-X PBA. */
+ uint64_t reserved_98_127 : 30;
+#endif /* Word 1 - End */
+ } s;
+ /* struct bdk_rvu_msix_vec_s_s cn; */
+};
+
+/**
+ * Structure rvu_pf_func_s
+ *
+ * RVU PF Function Identification Structure
+ * Identifies an RVU PF/VF, and format of *_PRIV_LF()_CFG[PF_FUNC] in RVU
+ * resource blocks, e.g. NPA_PRIV_LF()_CFG[PF_FUNC].
+ *
+ * Internal:
+ * Also used for PF/VF identification on inter-coprocessor hardware
+ * interfaces (NPA, SSO, CPT, ...).
+ */
+union bdk_rvu_pf_func_s
+{
+ uint32_t u;
+ struct bdk_rvu_pf_func_s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t pf : 6; /**< [ 15: 10] RVU PF number. */
+ uint32_t func : 10; /**< [ 9: 0] Function within [PF]; 0 for the PF itself, else VF number plus 1. */
+#else /* Word 0 - Little Endian */
+ uint32_t func : 10; /**< [ 9: 0] Function within [PF]; 0 for the PF itself, else VF number plus 1. */
+ uint32_t pf : 6; /**< [ 15: 10] RVU PF number. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_func_s_s cn; */
+};
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_afpf#_mbox#
+ *
+ * RVU Admin Function AF/PF Mailbox Registers
+ */
+union bdk_rvu_af_afpfx_mboxx
+{
+ uint64_t u;
+ struct bdk_rvu_af_afpfx_mboxx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Mailbox data. These AF registers access the 16-byte-per-PF PF/AF
+ mailbox. Each corresponding PF may access the same storage using
+ RVU_PF_PFAF_MBOX(). MBOX(0) is typically used for AF to PF
+ signaling, MBOX(1) for PF to AF.
+ Writing RVU_AF_AFPF()_MBOX(0) (but not RVU_PF_PFAF_MBOX(0)) will
+ set the corresponding
+ RVU_PF_INT[MBOX] which if appropriately enabled will send an
+ interrupt to the PF. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Mailbox data. These AF registers access the 16-byte-per-PF PF/AF
+ mailbox. Each corresponding PF may access the same storage using
+ RVU_PF_PFAF_MBOX(). MBOX(0) is typically used for AF to PF
+ signaling, MBOX(1) for PF to AF.
+ Writing RVU_AF_AFPF()_MBOX(0) (but not RVU_PF_PFAF_MBOX(0)) will
+ set the corresponding
+ RVU_PF_INT[MBOX] which if appropriately enabled will send an
+ interrupt to the PF. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_afpfx_mboxx_s cn; */
+};
+typedef union bdk_rvu_af_afpfx_mboxx bdk_rvu_af_afpfx_mboxx_t;
+
+static inline uint64_t BDK_RVU_AF_AFPFX_MBOXX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_AFPFX_MBOXX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=15) && (b<=1)))
+ return 0x850000002000ll + 0x10ll * ((a) & 0xf) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("RVU_AF_AFPFX_MBOXX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_AFPFX_MBOXX(a,b) bdk_rvu_af_afpfx_mboxx_t
+#define bustype_BDK_RVU_AF_AFPFX_MBOXX(a,b) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_AFPFX_MBOXX(a,b) "RVU_AF_AFPFX_MBOXX"
+#define device_bar_BDK_RVU_AF_AFPFX_MBOXX(a,b) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_AFPFX_MBOXX(a,b) (a)
+#define arguments_BDK_RVU_AF_AFPFX_MBOXX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_blk_rst
+ *
+ * RVU Master Admin Function Block Reset Register
+ */
+union bdk_rvu_af_blk_rst
+{
+ uint64_t u;
+ struct bdk_rvu_af_blk_rst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t busy : 1; /**< [ 1: 1](RO/H) When one, RVUM is busy completing reset. No access except the reading of this
+ bit should occur to RVUM until this is clear. */
+ uint64_t rst : 1; /**< [ 0: 0](WO) Write one to reset RVUM, except for privileged AF registers (RVU_PRIV_*).
+ Software must ensure that all RVUM activity is quiesced before writing one. */
+#else /* Word 0 - Little Endian */
+ uint64_t rst : 1; /**< [ 0: 0](WO) Write one to reset RVUM, except for privileged AF registers (RVU_PRIV_*).
+ Software must ensure that all RVUM activity is quiesced before writing one. */
+ uint64_t busy : 1; /**< [ 1: 1](RO/H) When one, RVUM is busy completing reset. No access except the reading of this
+ bit should occur to RVUM until this is clear. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_blk_rst_s cn; */
+};
+typedef union bdk_rvu_af_blk_rst bdk_rvu_af_blk_rst_t;
+
+#define BDK_RVU_AF_BLK_RST BDK_RVU_AF_BLK_RST_FUNC()
+/* Address accessor: this CSR exists on CN9XXX parts only. NOTE(review):
+   __bdk_csr_fatal() is presumed noreturn -- otherwise control would fall off
+   the end of this non-void function; confirm against bdk-csr.h. */
+static inline uint64_t BDK_RVU_AF_BLK_RST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_BLK_RST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000030ll;
+ __bdk_csr_fatal("RVU_AF_BLK_RST", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_BLK_RST bdk_rvu_af_blk_rst_t
+#define bustype_BDK_RVU_AF_BLK_RST BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_BLK_RST "RVU_AF_BLK_RST"
+#define device_bar_BDK_RVU_AF_BLK_RST 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_BLK_RST 0
+#define arguments_BDK_RVU_AF_BLK_RST -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_eco
+ *
+ * INTERNAL: RVU Admin Function ECO Register
+ */
+union bdk_rvu_af_eco
+{
+ uint64_t u;
+ struct bdk_rvu_af_eco_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) Internal:
+ Reserved for ECO usage. */
+#else /* Word 0 - Little Endian */
+ uint64_t eco_rw : 32; /**< [ 31: 0](R/W) Internal:
+ Reserved for ECO usage. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_eco_s cn; */
+};
+typedef union bdk_rvu_af_eco bdk_rvu_af_eco_t;
+
+#define BDK_RVU_AF_ECO BDK_RVU_AF_ECO_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_ECO_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_ECO_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000020ll;
+ __bdk_csr_fatal("RVU_AF_ECO", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_ECO bdk_rvu_af_eco_t
+#define bustype_BDK_RVU_AF_ECO BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_ECO "RVU_AF_ECO"
+#define device_bar_BDK_RVU_AF_ECO 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_ECO 0
+#define arguments_BDK_RVU_AF_ECO -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_gen_int
+ *
+ * RVU Admin Function General Interrupt Register
+ * This register contains General interrupt summary bits.
+ */
+union bdk_rvu_af_gen_int
+{
+ uint64_t u;
+ struct bdk_rvu_af_gen_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t msix_fault : 1; /**< [ 1: 1](R/W1C/H) Received MSI-X table read response with fault data */
+ uint64_t unmapped : 1; /**< [ 0: 0](R/W1C/H) Received a register read or write request to an unmapped or disabled PF or
+ VF. Specifically:
+ * A PF/VF BAR2 access in a PF whose RVU_PRIV_PF()_CFG[ENA] is
+ clear.
+ * A VF BAR2 access to a VF number that is greater than or equal to the
+ associated PF's RVU_PRIV_PF()_CFG[NVF]. */
+#else /* Word 0 - Little Endian */
+ uint64_t unmapped : 1; /**< [ 0: 0](R/W1C/H) Received a register read or write request to an unmapped or disabled PF or
+ VF. Specifically:
+ * A PF/VF BAR2 access in a PF whose RVU_PRIV_PF()_CFG[ENA] is
+ clear.
+ * A VF BAR2 access to a VF number that is greater than or equal to the
+ associated PF's RVU_PRIV_PF()_CFG[NVF]. */
+ uint64_t msix_fault : 1; /**< [ 1: 1](R/W1C/H) Received MSI-X table read response with fault data */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_gen_int_s cn; */
+};
+typedef union bdk_rvu_af_gen_int bdk_rvu_af_gen_int_t;
+
+#define BDK_RVU_AF_GEN_INT BDK_RVU_AF_GEN_INT_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_GEN_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_GEN_INT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000120ll;
+ __bdk_csr_fatal("RVU_AF_GEN_INT", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_GEN_INT bdk_rvu_af_gen_int_t
+#define bustype_BDK_RVU_AF_GEN_INT BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_GEN_INT "RVU_AF_GEN_INT"
+#define device_bar_BDK_RVU_AF_GEN_INT 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_GEN_INT 0
+#define arguments_BDK_RVU_AF_GEN_INT -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_gen_int_ena_w1c
+ *
+ * RVU Admin Function General Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_af_gen_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_rvu_af_gen_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t msix_fault : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for RVU_AF_GEN_INT[MSIX_FAULT]. */
+ uint64_t unmapped : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for RVU_AF_GEN_INT[UNMAPPED]. */
+#else /* Word 0 - Little Endian */
+ uint64_t unmapped : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for RVU_AF_GEN_INT[UNMAPPED]. */
+ uint64_t msix_fault : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for RVU_AF_GEN_INT[MSIX_FAULT]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_gen_int_ena_w1c_s cn; */
+};
+typedef union bdk_rvu_af_gen_int_ena_w1c bdk_rvu_af_gen_int_ena_w1c_t;
+
+#define BDK_RVU_AF_GEN_INT_ENA_W1C BDK_RVU_AF_GEN_INT_ENA_W1C_FUNC()
+static inline uint64_t BDK_RVU_AF_GEN_INT_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_GEN_INT_ENA_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000138ll;
+ __bdk_csr_fatal("RVU_AF_GEN_INT_ENA_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_GEN_INT_ENA_W1C bdk_rvu_af_gen_int_ena_w1c_t
+#define bustype_BDK_RVU_AF_GEN_INT_ENA_W1C BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_GEN_INT_ENA_W1C "RVU_AF_GEN_INT_ENA_W1C"
+#define device_bar_BDK_RVU_AF_GEN_INT_ENA_W1C 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_GEN_INT_ENA_W1C 0
+#define arguments_BDK_RVU_AF_GEN_INT_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_gen_int_ena_w1s
+ *
+ * RVU Admin Function General Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_af_gen_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_gen_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t msix_fault : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for RVU_AF_GEN_INT[MSIX_FAULT]. */
+ uint64_t unmapped : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for RVU_AF_GEN_INT[UNMAPPED]. */
+#else /* Word 0 - Little Endian */
+ uint64_t unmapped : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for RVU_AF_GEN_INT[UNMAPPED]. */
+ uint64_t msix_fault : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for RVU_AF_GEN_INT[MSIX_FAULT]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_gen_int_ena_w1s_s cn; */
+};
+typedef union bdk_rvu_af_gen_int_ena_w1s bdk_rvu_af_gen_int_ena_w1s_t;
+
+#define BDK_RVU_AF_GEN_INT_ENA_W1S BDK_RVU_AF_GEN_INT_ENA_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_GEN_INT_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_GEN_INT_ENA_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000130ll;
+ __bdk_csr_fatal("RVU_AF_GEN_INT_ENA_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_GEN_INT_ENA_W1S bdk_rvu_af_gen_int_ena_w1s_t
+#define bustype_BDK_RVU_AF_GEN_INT_ENA_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_GEN_INT_ENA_W1S "RVU_AF_GEN_INT_ENA_W1S"
+#define device_bar_BDK_RVU_AF_GEN_INT_ENA_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_GEN_INT_ENA_W1S 0
+#define arguments_BDK_RVU_AF_GEN_INT_ENA_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_gen_int_w1s
+ *
+ * RVU Admin Function General Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_af_gen_int_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_gen_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_2_63 : 62;
+ uint64_t msix_fault : 1; /**< [ 1: 1](R/W1S/H) Reads or sets RVU_AF_GEN_INT[MSIX_FAULT]. */
+ uint64_t unmapped : 1; /**< [ 0: 0](R/W1S/H) Reads or sets RVU_AF_GEN_INT[UNMAPPED]. */
+#else /* Word 0 - Little Endian */
+ uint64_t unmapped : 1; /**< [ 0: 0](R/W1S/H) Reads or sets RVU_AF_GEN_INT[UNMAPPED]. */
+ uint64_t msix_fault : 1; /**< [ 1: 1](R/W1S/H) Reads or sets RVU_AF_GEN_INT[MSIX_FAULT]. */
+ uint64_t reserved_2_63 : 62;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_gen_int_w1s_s cn; */
+};
+typedef union bdk_rvu_af_gen_int_w1s bdk_rvu_af_gen_int_w1s_t;
+
+#define BDK_RVU_AF_GEN_INT_W1S BDK_RVU_AF_GEN_INT_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_GEN_INT_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_GEN_INT_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000128ll;
+ __bdk_csr_fatal("RVU_AF_GEN_INT_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_GEN_INT_W1S bdk_rvu_af_gen_int_w1s_t
+#define bustype_BDK_RVU_AF_GEN_INT_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_GEN_INT_W1S "RVU_AF_GEN_INT_W1S"
+#define device_bar_BDK_RVU_AF_GEN_INT_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_GEN_INT_W1S 0
+#define arguments_BDK_RVU_AF_GEN_INT_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_hwvf_rst
+ *
+ * RVU Admin Function Hardware VF Soft Reset Register
+ */
+union bdk_rvu_af_hwvf_rst
+{
+ uint64_t u;
+ struct bdk_rvu_af_hwvf_rst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t exec : 1; /**< [ 12: 12](R/W1S/H) Execute HWVF soft reset. When software writes a one to set this bit, hardware
+ resets the RVUM resources of the hardware VF selected by [HWVF] and the
+ associated MSI-X table in LLC/DRAM specified by
+ RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_OFFSET,VF_MSIXT_SIZEM1].
+ Hardware clears this bit when done. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t hwvf : 8; /**< [ 7: 0](R/W) Hardware VF that is reset when [EXEC] is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t hwvf : 8; /**< [ 7: 0](R/W) Hardware VF that is reset when [EXEC] is set. */
+ uint64_t reserved_8_11 : 4;
+ uint64_t exec : 1; /**< [ 12: 12](R/W1S/H) Execute HWVF soft reset. When software writes a one to set this bit, hardware
+ resets the RVUM resources of the hardware VF selected by [HWVF] and the
+ associated MSI-X table in LLC/DRAM specified by
+ RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_OFFSET,VF_MSIXT_SIZEM1].
+ Hardware clears this bit when done. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_hwvf_rst_s cn; */
+};
+typedef union bdk_rvu_af_hwvf_rst bdk_rvu_af_hwvf_rst_t;
+
+#define BDK_RVU_AF_HWVF_RST BDK_RVU_AF_HWVF_RST_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_HWVF_RST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_HWVF_RST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002850ll;
+ __bdk_csr_fatal("RVU_AF_HWVF_RST", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_HWVF_RST bdk_rvu_af_hwvf_rst_t
+#define bustype_BDK_RVU_AF_HWVF_RST BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_HWVF_RST "RVU_AF_HWVF_RST"
+#define device_bar_BDK_RVU_AF_HWVF_RST 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_HWVF_RST 0
+#define arguments_BDK_RVU_AF_HWVF_RST -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_msixtr_base
+ *
+ * RVU Admin Function MSI-X Table Region Base-Address Register
+ */
+union bdk_rvu_af_msixtr_base
+{
+ uint64_t u;
+ struct bdk_rvu_af_msixtr_base_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 46; /**< [ 52: 7](R/W) Base IOVA of MSI-X table region in LLC/DRAM. IOVA bits \<6:0\> are always zero.
+ See RVU_PRIV_PF()_MSIX_CFG. */
+ uint64_t reserved_0_6 : 7;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_6 : 7;
+ uint64_t addr : 46; /**< [ 52: 7](R/W) Base IOVA of MSI-X table region in LLC/DRAM. IOVA bits \<6:0\> are always zero.
+ See RVU_PRIV_PF()_MSIX_CFG. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_msixtr_base_s cn; */
+};
+typedef union bdk_rvu_af_msixtr_base bdk_rvu_af_msixtr_base_t;
+
+#define BDK_RVU_AF_MSIXTR_BASE BDK_RVU_AF_MSIXTR_BASE_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_MSIXTR_BASE_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_MSIXTR_BASE_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000010ll;
+ __bdk_csr_fatal("RVU_AF_MSIXTR_BASE", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_MSIXTR_BASE bdk_rvu_af_msixtr_base_t
+#define bustype_BDK_RVU_AF_MSIXTR_BASE BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_MSIXTR_BASE "RVU_AF_MSIXTR_BASE"
+#define device_bar_BDK_RVU_AF_MSIXTR_BASE 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_MSIXTR_BASE 0
+#define arguments_BDK_RVU_AF_MSIXTR_BASE -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pf_bar4_addr
+ *
+ * RVU Admin Function PF BAR4 Address Registers
+ */
+union bdk_rvu_af_pf_bar4_addr
+{
+ uint64_t u;
+ struct bdk_rvu_af_pf_bar4_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 48; /**< [ 63: 16](R/W) Programmable base address of up to 16 consecutive 64 KB
+ pages in DRAM (one per PF). May be used as PF/AF mailbox memory in addition to
+ RVU_AF_AFPF()_MBOX()/RVU_PF_PFAF_MBOX().
+ Provides PCC_EA_ENTRY_S[BASEH,BASEL] value advertised by PF BAR4's entry in
+ PCCPF_XXX_EA_ENTRY(). */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t addr : 48; /**< [ 63: 16](R/W) Programmable base address of up to 16 consecutive 64 KB
+ pages in DRAM (one per PF). May be used as PF/AF mailbox memory in addition to
+ RVU_AF_AFPF()_MBOX()/RVU_PF_PFAF_MBOX().
+ Provides PCC_EA_ENTRY_S[BASEH,BASEL] value advertised by PF BAR4's entry in
+ PCCPF_XXX_EA_ENTRY(). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pf_bar4_addr_s cn; */
+};
+typedef union bdk_rvu_af_pf_bar4_addr bdk_rvu_af_pf_bar4_addr_t;
+
+#define BDK_RVU_AF_PF_BAR4_ADDR BDK_RVU_AF_PF_BAR4_ADDR_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_PF_BAR4_ADDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PF_BAR4_ADDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000040ll;
+ __bdk_csr_fatal("RVU_AF_PF_BAR4_ADDR", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_PF_BAR4_ADDR bdk_rvu_af_pf_bar4_addr_t
+#define bustype_BDK_RVU_AF_PF_BAR4_ADDR BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PF_BAR4_ADDR "RVU_AF_PF_BAR4_ADDR"
+#define device_bar_BDK_RVU_AF_PF_BAR4_ADDR 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PF_BAR4_ADDR 0
+#define arguments_BDK_RVU_AF_PF_BAR4_ADDR -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pf_rst
+ *
+ * RVU Admin Function PF Soft Reset Register
+ */
+union bdk_rvu_af_pf_rst
+{
+ uint64_t u;
+ struct bdk_rvu_af_pf_rst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_13_63 : 51;
+ uint64_t exec : 1; /**< [ 12: 12](R/W1S/H) Execute PF soft reset. When software writes a one to set this bit, hardware
+ resets the RVUM resources of the physical function selected by [PF] and the
+ associated MSI-X table in LLC/DRAM specified by
+ RVU_PRIV_PF()_MSIX_CFG[PF_MSIXT_OFFSET,PF_MSIXT_SIZEM1].
+ Hardware clears this bit when done.
+ Note this does not reset HWVFs which are mapped to the PF. */
+ uint64_t reserved_4_11 : 8;
+ uint64_t pf : 4; /**< [ 3: 0](R/W) Physical function that is reset when [EXEC] is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t pf : 4; /**< [ 3: 0](R/W) Physical function that is reset when [EXEC] is set. */
+ uint64_t reserved_4_11 : 8;
+ uint64_t exec : 1; /**< [ 12: 12](R/W1S/H) Execute PF soft reset. When software writes a one to set this bit, hardware
+ resets the RVUM resources of the physical function selected by [PF] and the
+ associated MSI-X table in LLC/DRAM specified by
+ RVU_PRIV_PF()_MSIX_CFG[PF_MSIXT_OFFSET,PF_MSIXT_SIZEM1].
+ Hardware clears this bit when done.
+ Note this does not reset HWVFs which are mapped to the PF. */
+ uint64_t reserved_13_63 : 51;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pf_rst_s cn; */
+};
+typedef union bdk_rvu_af_pf_rst bdk_rvu_af_pf_rst_t;
+
+#define BDK_RVU_AF_PF_RST BDK_RVU_AF_PF_RST_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_PF_RST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PF_RST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002840ll;
+ __bdk_csr_fatal("RVU_AF_PF_RST", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_PF_RST bdk_rvu_af_pf_rst_t
+#define bustype_BDK_RVU_AF_PF_RST BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PF_RST "RVU_AF_PF_RST"
+#define device_bar_BDK_RVU_AF_PF_RST 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PF_RST 0
+#define arguments_BDK_RVU_AF_PF_RST -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfaf_mbox_int
+ *
+ * RVU Admin Function PF to AF Mailbox Interrupt Registers
+ */
+union bdk_rvu_af_pfaf_mbox_int
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfaf_mbox_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1C/H) Mailbox interrupt bit per PF.
+ Each bit is set when the PF writes to the corresponding
+ RVU_PF_PFAF_MBOX(1) register. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1C/H) Mailbox interrupt bit per PF.
+ Each bit is set when the PF writes to the corresponding
+ RVU_PF_PFAF_MBOX(1) register. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfaf_mbox_int_s cn; */
+};
+typedef union bdk_rvu_af_pfaf_mbox_int bdk_rvu_af_pfaf_mbox_int_t;
+
+#define BDK_RVU_AF_PFAF_MBOX_INT BDK_RVU_AF_PFAF_MBOX_INT_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_PFAF_MBOX_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFAF_MBOX_INT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002880ll;
+ __bdk_csr_fatal("RVU_AF_PFAF_MBOX_INT", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_PFAF_MBOX_INT bdk_rvu_af_pfaf_mbox_int_t
+#define bustype_BDK_RVU_AF_PFAF_MBOX_INT BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFAF_MBOX_INT "RVU_AF_PFAF_MBOX_INT"
+#define device_bar_BDK_RVU_AF_PFAF_MBOX_INT 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFAF_MBOX_INT 0
+#define arguments_BDK_RVU_AF_PFAF_MBOX_INT -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfaf_mbox_int_ena_w1c
+ *
+ * RVU Admin Function PF to AF Mailbox Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_af_pfaf_mbox_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfaf_mbox_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_AF_PFAF_MBOX_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_AF_PFAF_MBOX_INT[MBOX]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfaf_mbox_int_ena_w1c_s cn; */
+};
+typedef union bdk_rvu_af_pfaf_mbox_int_ena_w1c bdk_rvu_af_pfaf_mbox_int_ena_w1c_t;
+
+#define BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C_FUNC()
+static inline uint64_t BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002898ll;
+ __bdk_csr_fatal("RVU_AF_PFAF_MBOX_INT_ENA_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C bdk_rvu_af_pfaf_mbox_int_ena_w1c_t
+#define bustype_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C "RVU_AF_PFAF_MBOX_INT_ENA_W1C"
+#define device_bar_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C 0
+#define arguments_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfaf_mbox_int_ena_w1s
+ *
+ * RVU Admin Function PF to AF Mailbox Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_af_pfaf_mbox_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfaf_mbox_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_AF_PFAF_MBOX_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_AF_PFAF_MBOX_INT[MBOX]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfaf_mbox_int_ena_w1s_s cn; */
+};
+typedef union bdk_rvu_af_pfaf_mbox_int_ena_w1s bdk_rvu_af_pfaf_mbox_int_ena_w1s_t;
+
+#define BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002890ll;
+ __bdk_csr_fatal("RVU_AF_PFAF_MBOX_INT_ENA_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S bdk_rvu_af_pfaf_mbox_int_ena_w1s_t
+#define bustype_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S "RVU_AF_PFAF_MBOX_INT_ENA_W1S"
+#define device_bar_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S 0
+#define arguments_BDK_RVU_AF_PFAF_MBOX_INT_ENA_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfaf_mbox_int_w1s
+ *
+ * RVU Admin Function PF to AF Mailbox Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_af_pfaf_mbox_int_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfaf_mbox_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_AF_PFAF_MBOX_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_AF_PFAF_MBOX_INT[MBOX]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfaf_mbox_int_w1s_s cn; */
+};
+typedef union bdk_rvu_af_pfaf_mbox_int_w1s bdk_rvu_af_pfaf_mbox_int_w1s_t;
+
+#define BDK_RVU_AF_PFAF_MBOX_INT_W1S BDK_RVU_AF_PFAF_MBOX_INT_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_PFAF_MBOX_INT_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFAF_MBOX_INT_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002888ll;
+ __bdk_csr_fatal("RVU_AF_PFAF_MBOX_INT_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFAF_MBOX_INT_W1S bdk_rvu_af_pfaf_mbox_int_w1s_t
+#define bustype_BDK_RVU_AF_PFAF_MBOX_INT_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFAF_MBOX_INT_W1S "RVU_AF_PFAF_MBOX_INT_W1S"
+#define device_bar_BDK_RVU_AF_PFAF_MBOX_INT_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFAF_MBOX_INT_W1S 0
+#define arguments_BDK_RVU_AF_PFAF_MBOX_INT_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfflr_int
+ *
+ * RVU Admin Function PF Function Level Reset Interrupt Registers
+ */
+union bdk_rvu_af_pfflr_int
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfflr_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1C/H) FLR interrupt bit per PF.
+
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, each bit is set along with
+ the corresponding bit in RVU_AF_PFTRPEND when function level reset is
+ initiated for the associated PF, i.e. a one is written to
+ PCCPF_XXX_E_DEV_CTL[BCR_FLR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1C/H) FLR interrupt bit per PF.
+
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, each bit is set along with
+ the corresponding bit in RVU_AF_PFTRPEND when function level reset is
+ initiated for the associated PF, i.e. a one is written to
+ PCCPF_XXX_E_DEV_CTL[BCR_FLR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfflr_int_s cn; */
+};
+typedef union bdk_rvu_af_pfflr_int bdk_rvu_af_pfflr_int_t;
+
+#define BDK_RVU_AF_PFFLR_INT BDK_RVU_AF_PFFLR_INT_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_PFFLR_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFFLR_INT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8500000028a0ll;
+ __bdk_csr_fatal("RVU_AF_PFFLR_INT", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_PFFLR_INT bdk_rvu_af_pfflr_int_t
+#define bustype_BDK_RVU_AF_PFFLR_INT BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFFLR_INT "RVU_AF_PFFLR_INT"
+#define device_bar_BDK_RVU_AF_PFFLR_INT 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFFLR_INT 0
+#define arguments_BDK_RVU_AF_PFFLR_INT -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfflr_int_ena_w1c
+ *
+ * RVU Admin Function PF Function Level Reset Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_af_pfflr_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfflr_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_AF_PFFLR_INT[FLR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_AF_PFFLR_INT[FLR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfflr_int_ena_w1c_s cn; */
+};
+typedef union bdk_rvu_af_pfflr_int_ena_w1c bdk_rvu_af_pfflr_int_ena_w1c_t;
+
+#define BDK_RVU_AF_PFFLR_INT_ENA_W1C BDK_RVU_AF_PFFLR_INT_ENA_W1C_FUNC()
+static inline uint64_t BDK_RVU_AF_PFFLR_INT_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFFLR_INT_ENA_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8500000028b8ll;
+ __bdk_csr_fatal("RVU_AF_PFFLR_INT_ENA_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFFLR_INT_ENA_W1C bdk_rvu_af_pfflr_int_ena_w1c_t
+#define bustype_BDK_RVU_AF_PFFLR_INT_ENA_W1C BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFFLR_INT_ENA_W1C "RVU_AF_PFFLR_INT_ENA_W1C"
+#define device_bar_BDK_RVU_AF_PFFLR_INT_ENA_W1C 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFFLR_INT_ENA_W1C 0
+#define arguments_BDK_RVU_AF_PFFLR_INT_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfflr_int_ena_w1s
+ *
+ * RVU Admin Function PF Function Level Reset Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_af_pfflr_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfflr_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_AF_PFFLR_INT[FLR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_AF_PFFLR_INT[FLR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfflr_int_ena_w1s_s cn; */
+};
+typedef union bdk_rvu_af_pfflr_int_ena_w1s bdk_rvu_af_pfflr_int_ena_w1s_t;
+
+#define BDK_RVU_AF_PFFLR_INT_ENA_W1S BDK_RVU_AF_PFFLR_INT_ENA_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_PFFLR_INT_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFFLR_INT_ENA_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8500000028b0ll;
+ __bdk_csr_fatal("RVU_AF_PFFLR_INT_ENA_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFFLR_INT_ENA_W1S bdk_rvu_af_pfflr_int_ena_w1s_t
+#define bustype_BDK_RVU_AF_PFFLR_INT_ENA_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFFLR_INT_ENA_W1S "RVU_AF_PFFLR_INT_ENA_W1S"
+#define device_bar_BDK_RVU_AF_PFFLR_INT_ENA_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFFLR_INT_ENA_W1S 0
+#define arguments_BDK_RVU_AF_PFFLR_INT_ENA_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfflr_int_w1s
+ *
+ * RVU Admin Function PF Function Level Reset Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_af_pfflr_int_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfflr_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_AF_PFFLR_INT[FLR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_AF_PFFLR_INT[FLR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfflr_int_w1s_s cn; */
+};
+typedef union bdk_rvu_af_pfflr_int_w1s bdk_rvu_af_pfflr_int_w1s_t;
+
+#define BDK_RVU_AF_PFFLR_INT_W1S BDK_RVU_AF_PFFLR_INT_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_PFFLR_INT_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFFLR_INT_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8500000028a8ll;
+ __bdk_csr_fatal("RVU_AF_PFFLR_INT_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFFLR_INT_W1S bdk_rvu_af_pfflr_int_w1s_t
+#define bustype_BDK_RVU_AF_PFFLR_INT_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFFLR_INT_W1S "RVU_AF_PFFLR_INT_W1S"
+#define device_bar_BDK_RVU_AF_PFFLR_INT_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFFLR_INT_W1S 0
+#define arguments_BDK_RVU_AF_PFFLR_INT_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfme_int
+ *
+ * RVU Admin Function PF Bus Master Enable Interrupt Registers
+ */
+union bdk_rvu_af_pfme_int
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfme_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1C/H) Master enable interrupt bit per PF.
+ A device-dependent AF driver typically uses these bits to handle state
+ changes to PCCPF_XXX_CMD[ME], which are typically modified by
+ non-device-dependent software only.
+
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, each bit is set when the
+ corresponding PCCPF_XXX_CMD[ME] bit is either set or cleared for the
+ associated PF. The corresponding bit in RVU_AF_PFME_STATUS returns the
+ current value of PCCPF_XXX_CMD[ME].
+
+ Note that if RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, the corresponding
+ bit in RVU_AF_PFTRPEND is also set when PCCPF_XXX_CMD[ME] is set, but not
+ when PCCPF_XXX_CMD[ME] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1C/H) Master enable interrupt bit per PF.
+ A device-dependent AF driver typically uses these bits to handle state
+ changes to PCCPF_XXX_CMD[ME], which are typically modified by
+ non-device-dependent software only.
+
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, each bit is set when the
+ corresponding PCCPF_XXX_CMD[ME] bit is either set or cleared for the
+ associated PF. The corresponding bit in RVU_AF_PFME_STATUS returns the
+ current value of PCCPF_XXX_CMD[ME].
+
+ Note that if RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, the corresponding
+ bit in RVU_AF_PFTRPEND is also set when PCCPF_XXX_CMD[ME] is set, but not
+ when PCCPF_XXX_CMD[ME] is cleared. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfme_int_s cn; */
+};
+typedef union bdk_rvu_af_pfme_int bdk_rvu_af_pfme_int_t;
+
+#define BDK_RVU_AF_PFME_INT BDK_RVU_AF_PFME_INT_FUNC()
+/* Address accessor: CSR exists on CN9XXX only; __bdk_csr_fatal() is presumed
+   noreturn for other models (no value is returned after the call). */
+static inline uint64_t BDK_RVU_AF_PFME_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFME_INT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8500000028c0ll;
+ __bdk_csr_fatal("RVU_AF_PFME_INT", 0, 0, 0, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* register access macros. */
+#define typedef_BDK_RVU_AF_PFME_INT bdk_rvu_af_pfme_int_t
+#define bustype_BDK_RVU_AF_PFME_INT BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFME_INT "RVU_AF_PFME_INT"
+#define device_bar_BDK_RVU_AF_PFME_INT 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFME_INT 0
+#define arguments_BDK_RVU_AF_PFME_INT -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfme_int_ena_w1c
+ *
+ * RVU Admin Function PF Bus Master Enable Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_af_pfme_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfme_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_AF_PFME_INT[ME]. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_AF_PFME_INT[ME]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfme_int_ena_w1c_s cn; */
+};
+typedef union bdk_rvu_af_pfme_int_ena_w1c bdk_rvu_af_pfme_int_ena_w1c_t;
+
+#define BDK_RVU_AF_PFME_INT_ENA_W1C BDK_RVU_AF_PFME_INT_ENA_W1C_FUNC()
+static inline uint64_t BDK_RVU_AF_PFME_INT_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFME_INT_ENA_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8500000028d8ll;
+ __bdk_csr_fatal("RVU_AF_PFME_INT_ENA_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFME_INT_ENA_W1C bdk_rvu_af_pfme_int_ena_w1c_t
+#define bustype_BDK_RVU_AF_PFME_INT_ENA_W1C BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFME_INT_ENA_W1C "RVU_AF_PFME_INT_ENA_W1C"
+#define device_bar_BDK_RVU_AF_PFME_INT_ENA_W1C 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFME_INT_ENA_W1C 0
+#define arguments_BDK_RVU_AF_PFME_INT_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfme_int_ena_w1s
+ *
+ * RVU Admin Function PF Bus Master Enable Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_af_pfme_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfme_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_AF_PFME_INT[ME]. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_AF_PFME_INT[ME]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfme_int_ena_w1s_s cn; */
+};
+typedef union bdk_rvu_af_pfme_int_ena_w1s bdk_rvu_af_pfme_int_ena_w1s_t;
+
+#define BDK_RVU_AF_PFME_INT_ENA_W1S BDK_RVU_AF_PFME_INT_ENA_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_PFME_INT_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFME_INT_ENA_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8500000028d0ll;
+ __bdk_csr_fatal("RVU_AF_PFME_INT_ENA_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFME_INT_ENA_W1S bdk_rvu_af_pfme_int_ena_w1s_t
+#define bustype_BDK_RVU_AF_PFME_INT_ENA_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFME_INT_ENA_W1S "RVU_AF_PFME_INT_ENA_W1S"
+#define device_bar_BDK_RVU_AF_PFME_INT_ENA_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFME_INT_ENA_W1S 0
+#define arguments_BDK_RVU_AF_PFME_INT_ENA_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfme_int_w1s
+ *
+ * RVU Admin Function PF Bus Master Enable Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_af_pfme_int_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfme_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_AF_PFME_INT[ME]. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_AF_PFME_INT[ME]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfme_int_w1s_s cn; */
+};
+typedef union bdk_rvu_af_pfme_int_w1s bdk_rvu_af_pfme_int_w1s_t;
+
+#define BDK_RVU_AF_PFME_INT_W1S BDK_RVU_AF_PFME_INT_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_PFME_INT_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFME_INT_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x8500000028c8ll;
+ __bdk_csr_fatal("RVU_AF_PFME_INT_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFME_INT_W1S bdk_rvu_af_pfme_int_w1s_t
+#define bustype_BDK_RVU_AF_PFME_INT_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFME_INT_W1S "RVU_AF_PFME_INT_W1S"
+#define device_bar_BDK_RVU_AF_PFME_INT_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFME_INT_W1S 0
+#define arguments_BDK_RVU_AF_PFME_INT_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pfme_status
+ *
+ * RVU Admin Function PF Bus Master Enable Status Registers
+ */
+union bdk_rvu_af_pfme_status
+{
+ uint64_t u;
+ struct bdk_rvu_af_pfme_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](RO/H) Bus master enable bit per PF. Each bit returns the PF's
+ PCCPF_XXX_CMD[ME] value. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](RO/H) Bus master enable bit per PF. Each bit returns the PF's
+ PCCPF_XXX_CMD[ME] value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pfme_status_s cn; */
+};
+typedef union bdk_rvu_af_pfme_status bdk_rvu_af_pfme_status_t;
+
+#define BDK_RVU_AF_PFME_STATUS BDK_RVU_AF_PFME_STATUS_FUNC()
+static inline uint64_t BDK_RVU_AF_PFME_STATUS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFME_STATUS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002800ll;
+ __bdk_csr_fatal("RVU_AF_PFME_STATUS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFME_STATUS bdk_rvu_af_pfme_status_t
+#define bustype_BDK_RVU_AF_PFME_STATUS BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFME_STATUS "RVU_AF_PFME_STATUS"
+#define device_bar_BDK_RVU_AF_PFME_STATUS 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFME_STATUS 0
+#define arguments_BDK_RVU_AF_PFME_STATUS -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pftrpend
+ *
+ * RVU Admin Function PF Transaction Pending Registers
+ */
+union bdk_rvu_af_pftrpend
+{
+ uint64_t u;
+ struct bdk_rvu_af_pftrpend_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trpend : 64; /**< [ 63: 0](R/W1C/H) Transaction pending bit per PF.
+
+ A PF's bit is set when RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set and:
+ * A one is written to the corresponding PCCPF_XXX_E_DEV_CTL[BCR_FLR], or
+ * PCCPF_XXX_CMD[ME] is set or cleared.
+
+ When a PF's bit is set, forces the corresponding
+ PCCPF_XXX_E_DEV_CTL[TRPEND] to be set.
+
+ Software (typically a device-dependent AF driver) can clear the bit by
+ writing a 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t trpend : 64; /**< [ 63: 0](R/W1C/H) Transaction pending bit per PF.
+
+ A PF's bit is set when RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set and:
+ * A one is written to the corresponding PCCPF_XXX_E_DEV_CTL[BCR_FLR], or
+ * PCCPF_XXX_CMD[ME] is set or cleared.
+
+ When a PF's bit is set, forces the corresponding
+ PCCPF_XXX_E_DEV_CTL[TRPEND] to be set.
+
+ Software (typically a device-dependent AF driver) can clear the bit by
+ writing a 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pftrpend_s cn; */
+};
+typedef union bdk_rvu_af_pftrpend bdk_rvu_af_pftrpend_t;
+
+#define BDK_RVU_AF_PFTRPEND BDK_RVU_AF_PFTRPEND_FUNC()
+static inline uint64_t BDK_RVU_AF_PFTRPEND_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFTRPEND_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002810ll;
+ __bdk_csr_fatal("RVU_AF_PFTRPEND", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFTRPEND bdk_rvu_af_pftrpend_t
+#define bustype_BDK_RVU_AF_PFTRPEND BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFTRPEND "RVU_AF_PFTRPEND"
+#define device_bar_BDK_RVU_AF_PFTRPEND 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFTRPEND 0
+#define arguments_BDK_RVU_AF_PFTRPEND -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_pftrpend_w1s
+ *
+ * RVU Admin Function PF Transaction Pending Set Registers
+ * This register reads or sets bits.
+ */
+union bdk_rvu_af_pftrpend_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_pftrpend_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trpend : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_AF_PFTRPEND[TRPEND]. */
+#else /* Word 0 - Little Endian */
+ uint64_t trpend : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_AF_PFTRPEND[TRPEND]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_pftrpend_w1s_s cn; */
+};
+typedef union bdk_rvu_af_pftrpend_w1s bdk_rvu_af_pftrpend_w1s_t;
+
+#define BDK_RVU_AF_PFTRPEND_W1S BDK_RVU_AF_PFTRPEND_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_PFTRPEND_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_PFTRPEND_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000002820ll;
+ __bdk_csr_fatal("RVU_AF_PFTRPEND_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_PFTRPEND_W1S bdk_rvu_af_pftrpend_w1s_t
+#define bustype_BDK_RVU_AF_PFTRPEND_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_PFTRPEND_W1S "RVU_AF_PFTRPEND_W1S"
+#define device_bar_BDK_RVU_AF_PFTRPEND_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_PFTRPEND_W1S 0
+#define arguments_BDK_RVU_AF_PFTRPEND_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_ras
+ *
+ * RVU Admin Function RAS Interrupt Register
+ * This register is intended for delivery of RAS events to the SCP, so should be
+ * ignored by OS drivers.
+ */
+union bdk_rvu_af_ras
+{
+ uint64_t u;
+ struct bdk_rvu_af_ras_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t msix_poison : 1; /**< [ 0: 0](R/W1C/H) Received MSI-X table read response with poisoned data. */
+#else /* Word 0 - Little Endian */
+ uint64_t msix_poison : 1; /**< [ 0: 0](R/W1C/H) Received MSI-X table read response with poisoned data. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_ras_s cn; */
+};
+typedef union bdk_rvu_af_ras bdk_rvu_af_ras_t;
+
+#define BDK_RVU_AF_RAS BDK_RVU_AF_RAS_FUNC()
+static inline uint64_t BDK_RVU_AF_RAS_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_RAS_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000100ll;
+ __bdk_csr_fatal("RVU_AF_RAS", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_RAS bdk_rvu_af_ras_t
+#define bustype_BDK_RVU_AF_RAS BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_RAS "RVU_AF_RAS"
+#define device_bar_BDK_RVU_AF_RAS 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_RAS 0
+#define arguments_BDK_RVU_AF_RAS -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_ras_ena_w1c
+ *
+ * RVU Admin Function RAS Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_af_ras_ena_w1c
+{
+ uint64_t u;
+ struct bdk_rvu_af_ras_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t msix_poison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for RVU_AF_RAS[MSIX_POISON]. */
+#else /* Word 0 - Little Endian */
+ uint64_t msix_poison : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for RVU_AF_RAS[MSIX_POISON]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_ras_ena_w1c_s cn; */
+};
+typedef union bdk_rvu_af_ras_ena_w1c bdk_rvu_af_ras_ena_w1c_t;
+
+#define BDK_RVU_AF_RAS_ENA_W1C BDK_RVU_AF_RAS_ENA_W1C_FUNC()
+static inline uint64_t BDK_RVU_AF_RAS_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_RAS_ENA_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000118ll;
+ __bdk_csr_fatal("RVU_AF_RAS_ENA_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_RAS_ENA_W1C bdk_rvu_af_ras_ena_w1c_t
+#define bustype_BDK_RVU_AF_RAS_ENA_W1C BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_RAS_ENA_W1C "RVU_AF_RAS_ENA_W1C"
+#define device_bar_BDK_RVU_AF_RAS_ENA_W1C 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_RAS_ENA_W1C 0
+#define arguments_BDK_RVU_AF_RAS_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_ras_ena_w1s
+ *
+ * RVU Admin Function RAS Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_af_ras_ena_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_ras_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t msix_poison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for RVU_AF_RAS[MSIX_POISON]. */
+#else /* Word 0 - Little Endian */
+ uint64_t msix_poison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for RVU_AF_RAS[MSIX_POISON]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_ras_ena_w1s_s cn; */
+};
+typedef union bdk_rvu_af_ras_ena_w1s bdk_rvu_af_ras_ena_w1s_t;
+
+#define BDK_RVU_AF_RAS_ENA_W1S BDK_RVU_AF_RAS_ENA_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_RAS_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_RAS_ENA_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000110ll;
+ __bdk_csr_fatal("RVU_AF_RAS_ENA_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_RAS_ENA_W1S bdk_rvu_af_ras_ena_w1s_t
+#define bustype_BDK_RVU_AF_RAS_ENA_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_RAS_ENA_W1S "RVU_AF_RAS_ENA_W1S"
+#define device_bar_BDK_RVU_AF_RAS_ENA_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_RAS_ENA_W1S 0
+#define arguments_BDK_RVU_AF_RAS_ENA_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_af_ras_w1s
+ *
+ * RVU Admin Function RAS Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_af_ras_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_af_ras_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t msix_poison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets RVU_AF_RAS[MSIX_POISON]. */
+#else /* Word 0 - Little Endian */
+ uint64_t msix_poison : 1; /**< [ 0: 0](R/W1S/H) Reads or sets RVU_AF_RAS[MSIX_POISON]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_af_ras_w1s_s cn; */
+};
+typedef union bdk_rvu_af_ras_w1s bdk_rvu_af_ras_w1s_t;
+
+#define BDK_RVU_AF_RAS_W1S BDK_RVU_AF_RAS_W1S_FUNC()
+static inline uint64_t BDK_RVU_AF_RAS_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_AF_RAS_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850000000108ll;
+ __bdk_csr_fatal("RVU_AF_RAS_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_AF_RAS_W1S bdk_rvu_af_ras_w1s_t
+#define bustype_BDK_RVU_AF_RAS_W1S BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_AF_RAS_W1S "RVU_AF_RAS_W1S"
+#define device_bar_BDK_RVU_AF_RAS_W1S 0x0 /* BAR0 */
+#define busnum_BDK_RVU_AF_RAS_W1S 0
+#define arguments_BDK_RVU_AF_RAS_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_block_addr#_disc
+ *
+ * RVU PF Block Address Discovery Registers
+ * These registers allow each PF driver to discover block resources that are
+ * provisioned to its PF. The register's block address index is enumerated by
+ * RVU_BLOCK_ADDR_E.
+ */
+union bdk_rvu_pf_block_addrx_disc
+{
+ uint64_t u;
+ struct bdk_rvu_pf_block_addrx_disc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t btype : 8; /**< [ 27: 20](RO/H) Block type enumerated by RVU_BLOCK_TYPE_E. */
+ uint64_t rid : 8; /**< [ 19: 12](RO/H) Revision ID of the block from RVU_PRIV_BLOCK_TYPE()_REV[RID]. */
+ uint64_t imp : 1; /**< [ 11: 11](RO/H) Implemented. When set, a block is present at this block address index as
+ enumerated by RVU_BLOCK_ADDR_E. When clear, a block is not present and the
+ remaining fields in the register are RAZ.
+
+ Internal:
+ Returns zero if the block is implemented but fused out. */
+ uint64_t reserved_9_10 : 2;
+ uint64_t num_lfs : 9; /**< [ 8: 0](RO/H) Number of local functions from the block that are provisioned to the VF/PF.
+ When non-zero, the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in
+ the the block.
+ Returns 0 for block types that do not have local functions, 0 or 1 for
+ single-slot blocks; see RVU_BLOCK_TYPE_E. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](RO/H) Number of local functions from the block that are provisioned to the VF/PF.
+ When non-zero, the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in
+ the the block.
+ Returns 0 for block types that do not have local functions, 0 or 1 for
+ single-slot blocks; see RVU_BLOCK_TYPE_E. */
+ uint64_t reserved_9_10 : 2;
+ uint64_t imp : 1; /**< [ 11: 11](RO/H) Implemented. When set, a block is present at this block address index as
+ enumerated by RVU_BLOCK_ADDR_E. When clear, a block is not present and the
+ remaining fields in the register are RAZ.
+
+ Internal:
+ Returns zero if the block is implemented but fused out. */
+ uint64_t rid : 8; /**< [ 19: 12](RO/H) Revision ID of the block from RVU_PRIV_BLOCK_TYPE()_REV[RID]. */
+ uint64_t btype : 8; /**< [ 27: 20](RO/H) Block type enumerated by RVU_BLOCK_TYPE_E. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_block_addrx_disc_s cn; */
+};
+typedef union bdk_rvu_pf_block_addrx_disc bdk_rvu_pf_block_addrx_disc_t;
+
+static inline uint64_t BDK_RVU_PF_BLOCK_ADDRX_DISC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_BLOCK_ADDRX_DISC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=31))
+ return 0x850200000200ll + 8ll * ((a) & 0x1f);
+ __bdk_csr_fatal("RVU_PF_BLOCK_ADDRX_DISC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_BLOCK_ADDRX_DISC(a) bdk_rvu_pf_block_addrx_disc_t
+#define bustype_BDK_RVU_PF_BLOCK_ADDRX_DISC(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_BLOCK_ADDRX_DISC(a) "RVU_PF_BLOCK_ADDRX_DISC"
+#define device_bar_BDK_RVU_PF_BLOCK_ADDRX_DISC(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_BLOCK_ADDRX_DISC(a) (a)
+#define arguments_BDK_RVU_PF_BLOCK_ADDRX_DISC(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_int
+ *
+ * RVU PF Interrupt Registers
+ */
+union bdk_rvu_pf_int
+{
+ uint64_t u;
+ struct bdk_rvu_pf_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1C/H) AF to PF mailbox interrupt. Set when RVU_AF_AFPF()_MBOX(0) is written. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1C/H) AF to PF mailbox interrupt. Set when RVU_AF_AFPF()_MBOX(0) is written. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_int_s cn; */
+};
+typedef union bdk_rvu_pf_int bdk_rvu_pf_int_t;
+
+#define BDK_RVU_PF_INT BDK_RVU_PF_INT_FUNC()
+static inline uint64_t BDK_RVU_PF_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_INT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000c20ll;
+ __bdk_csr_fatal("RVU_PF_INT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_INT bdk_rvu_pf_int_t
+#define bustype_BDK_RVU_PF_INT BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_INT "RVU_PF_INT"
+#define device_bar_BDK_RVU_PF_INT 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_INT 0
+#define arguments_BDK_RVU_PF_INT -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_int_ena_w1c
+ *
+ * RVU PF Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_pf_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_rvu_pf_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for RVU_PF_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for RVU_PF_INT[MBOX]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_int_ena_w1c_s cn; */
+};
+typedef union bdk_rvu_pf_int_ena_w1c bdk_rvu_pf_int_ena_w1c_t;
+
+#define BDK_RVU_PF_INT_ENA_W1C BDK_RVU_PF_INT_ENA_W1C_FUNC()
+static inline uint64_t BDK_RVU_PF_INT_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_INT_ENA_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000c38ll;
+ __bdk_csr_fatal("RVU_PF_INT_ENA_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_INT_ENA_W1C bdk_rvu_pf_int_ena_w1c_t
+#define bustype_BDK_RVU_PF_INT_ENA_W1C BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_INT_ENA_W1C "RVU_PF_INT_ENA_W1C"
+#define device_bar_BDK_RVU_PF_INT_ENA_W1C 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_INT_ENA_W1C 0
+#define arguments_BDK_RVU_PF_INT_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_int_ena_w1s
+ *
+ * RVU PF Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_pf_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_pf_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for RVU_PF_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for RVU_PF_INT[MBOX]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_int_ena_w1s_s cn; */
+};
+typedef union bdk_rvu_pf_int_ena_w1s bdk_rvu_pf_int_ena_w1s_t;
+
+#define BDK_RVU_PF_INT_ENA_W1S BDK_RVU_PF_INT_ENA_W1S_FUNC()
+static inline uint64_t BDK_RVU_PF_INT_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_INT_ENA_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000c30ll;
+ __bdk_csr_fatal("RVU_PF_INT_ENA_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_INT_ENA_W1S bdk_rvu_pf_int_ena_w1s_t
+#define bustype_BDK_RVU_PF_INT_ENA_W1S BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_INT_ENA_W1S "RVU_PF_INT_ENA_W1S"
+#define device_bar_BDK_RVU_PF_INT_ENA_W1S 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_INT_ENA_W1S 0
+#define arguments_BDK_RVU_PF_INT_ENA_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_int_w1s
+ *
+ * RVU PF Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_pf_int_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_pf_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1S/H) Reads or sets RVU_PF_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1S/H) Reads or sets RVU_PF_INT[MBOX]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_int_w1s_s cn; */
+};
+typedef union bdk_rvu_pf_int_w1s bdk_rvu_pf_int_w1s_t;
+
+#define BDK_RVU_PF_INT_W1S BDK_RVU_PF_INT_W1S_FUNC()
+static inline uint64_t BDK_RVU_PF_INT_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_INT_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000c28ll;
+ __bdk_csr_fatal("RVU_PF_INT_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_INT_W1S bdk_rvu_pf_int_w1s_t
+#define bustype_BDK_RVU_PF_INT_W1S BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_INT_W1S "RVU_PF_INT_W1S"
+#define device_bar_BDK_RVU_PF_INT_W1S 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_INT_W1S 0
+#define arguments_BDK_RVU_PF_INT_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_msix_pba#
+ *
+ * RVU PF MSI-X Pending-Bit-Array Registers
+ * This register is the MSI-X PF PBA table.
+ */
+union bdk_rvu_pf_msix_pbax
+{
+ uint64_t u;
+ struct bdk_rvu_pf_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message bit for each MSI-X vector, i.e. one bit per
+ RVU_PF_MSIX_VEC()_CTL register.
+ The total number of bits for a given PF (and thus the number of PBA
+ registers) is determined by RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_SIZEM1]
+ (plus 1). */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message bit for each MSI-X vector, i.e. one bit per
+ RVU_PF_MSIX_VEC()_CTL register.
+ The total number of bits for a given PF (and thus the number of PBA
+ registers) is determined by RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_SIZEM1]
+ (plus 1). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_msix_pbax_s cn; */
+};
+typedef union bdk_rvu_pf_msix_pbax bdk_rvu_pf_msix_pbax_t;
+
+static inline uint64_t BDK_RVU_PF_MSIX_PBAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_MSIX_PBAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x8502002f0000ll + 8ll * ((a) & 0x0);
+ __bdk_csr_fatal("RVU_PF_MSIX_PBAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_MSIX_PBAX(a) bdk_rvu_pf_msix_pbax_t
+#define bustype_BDK_RVU_PF_MSIX_PBAX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_MSIX_PBAX(a) "RVU_PF_MSIX_PBAX"
+#define device_bar_BDK_RVU_PF_MSIX_PBAX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_MSIX_PBAX(a) (a)
+#define arguments_BDK_RVU_PF_MSIX_PBAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_msix_vec#_addr
+ *
+ * RVU PF MSI-X Vector-Table Address Registers
+ * These registers and RVU_PF_MSIX_VEC()_CTL form the PF MSI-X vector table.
+ * The number of MSI-X vectors for a given PF is specified by
+ * RVU_PRIV_PF()_MSIX_CFG[PF_MSIXT_SIZEM1] (plus 1).
+ *
+ * Internal:
+ * PF vector count of 256 is sized to allow up to 120 for AF, 4 for PF/VF
+ * mailboxes, and 128 for LF resources from various blocks that are directly
+ * provisioned to the PF.
+ */
+union bdk_rvu_pf_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_rvu_pf_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's RVU_PF_MSIX_VEC()_ADDR, RVU_PF_MSIX_VEC()_CTL, and
+ corresponding bit of RVU_PF_MSIX_PBA() are RAZ/WI and does not cause a
+ fault when accessed by the nonsecure world.
+
+ If PCCPF_RVU_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors of the function are
+ secure as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's RVU_PF_MSIX_VEC()_ADDR, RVU_PF_MSIX_VEC()_CTL, and
+ corresponding bit of RVU_PF_MSIX_PBA() are RAZ/WI and does not cause a
+ fault when accessed by the nonsecure world.
+
+ If PCCPF_RVU_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is set, all vectors of the function are
+ secure as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_msix_vecx_addr_s cn; */
+};
+typedef union bdk_rvu_pf_msix_vecx_addr bdk_rvu_pf_msix_vecx_addr_t;
+
+static inline uint64_t BDK_RVU_PF_MSIX_VECX_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_MSIX_VECX_ADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x850200200000ll + 0x10ll * ((a) & 0x0);
+ __bdk_csr_fatal("RVU_PF_MSIX_VECX_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_MSIX_VECX_ADDR(a) bdk_rvu_pf_msix_vecx_addr_t
+#define bustype_BDK_RVU_PF_MSIX_VECX_ADDR(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_MSIX_VECX_ADDR(a) "RVU_PF_MSIX_VECX_ADDR"
+#define device_bar_BDK_RVU_PF_MSIX_VECX_ADDR(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_MSIX_VECX_ADDR(a) (a)
+#define arguments_BDK_RVU_PF_MSIX_VECX_ADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_msix_vec#_ctl
+ *
+ * RVU PF MSI-X Vector-Table Control and Data Registers
+ * These registers and RVU_PF_MSIX_VEC()_ADDR form the PF MSI-X vector table.
+ */
+union bdk_rvu_pf_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_rvu_pf_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_msix_vecx_ctl_s cn; */
+};
+typedef union bdk_rvu_pf_msix_vecx_ctl bdk_rvu_pf_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_RVU_PF_MSIX_VECX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_MSIX_VECX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x850200200008ll + 0x10ll * ((a) & 0x0);
+ __bdk_csr_fatal("RVU_PF_MSIX_VECX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_MSIX_VECX_CTL(a) bdk_rvu_pf_msix_vecx_ctl_t
+#define bustype_BDK_RVU_PF_MSIX_VECX_CTL(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_MSIX_VECX_CTL(a) "RVU_PF_MSIX_VECX_CTL"
+#define device_bar_BDK_RVU_PF_MSIX_VECX_CTL(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_MSIX_VECX_CTL(a) (a)
+#define arguments_BDK_RVU_PF_MSIX_VECX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_pfaf_mbox#
+ *
+ * RVU PF/AF Mailbox Registers
+ */
+union bdk_rvu_pf_pfaf_mboxx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_pfaf_mboxx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Mailbox data. These PF registers access the 16-byte-per-PF PF/AF
+ mailbox. The AF may access the same storage using
+ RVU_AF_AFPF()_MBOX(). MBOX(0) is typically used for AF to PF
+ signaling, MBOX(1) for PF to AF.
+ Writing RVU_PF_PFAF_MBOX(1) (but not RVU_AF_AFPF()_MBOX(1))
+ will set the corresponding RVU_AF_PFAF_MBOX_INT bit, which if appropriately
+ enabled will send an interrupt to the AF. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Mailbox data. These PF registers access the 16-byte-per-PF PF/AF
+ mailbox. The AF may access the same storage using
+ RVU_AF_AFPF()_MBOX(). MBOX(0) is typically used for AF to PF
+ signaling, MBOX(1) for PF to AF.
+ Writing RVU_PF_PFAF_MBOX(1) (but not RVU_AF_AFPF()_MBOX(1))
+ will set the corresponding RVU_AF_PFAF_MBOX_INT bit, which if appropriately
+ enabled will send an interrupt to the AF. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_pfaf_mboxx_s cn; */
+};
+typedef union bdk_rvu_pf_pfaf_mboxx bdk_rvu_pf_pfaf_mboxx_t;
+
+static inline uint64_t BDK_RVU_PF_PFAF_MBOXX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_PFAF_MBOXX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x850200000c00ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("RVU_PF_PFAF_MBOXX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PF_PFAF_MBOXX(a) bdk_rvu_pf_pfaf_mboxx_t
+#define bustype_BDK_RVU_PF_PFAF_MBOXX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_PFAF_MBOXX(a) "RVU_PF_PFAF_MBOXX"
+#define device_bar_BDK_RVU_PF_PFAF_MBOXX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_PFAF_MBOXX(a) (a)
+#define arguments_BDK_RVU_PF_PFAF_MBOXX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vf#_pfvf_mbox#
+ *
+ * RVU PF/VF Mailbox Registers
+ */
+union bdk_rvu_pf_vfx_pfvf_mboxx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfx_pfvf_mboxx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Mailbox data. These PF registers access the 16-byte-per-VF VF/PF mailbox
+ RAM. Each corresponding VF may access the same storage using
+ RVU_VF_VFPF_MBOX(). MBOX(0) is typically used for PF to VF
+ signaling, MBOX(1) for VF to PF. Writing RVU_PF_VF()_PFVF_MBOX(0) (but
+ not RVU_VF_VFPF_MBOX(0)) will set the corresponding
+ RVU_VF_INT[MBOX] which if appropriately enabled will send an
+ interrupt to the VF. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Mailbox data. These PF registers access the 16-byte-per-VF VF/PF mailbox
+ RAM. Each corresponding VF may access the same storage using
+ RVU_VF_VFPF_MBOX(). MBOX(0) is typically used for PF to VF
+ signaling, MBOX(1) for VF to PF. Writing RVU_PF_VF()_PFVF_MBOX(0) (but
+ not RVU_VF_VFPF_MBOX(0)) will set the corresponding
+ RVU_VF_INT[MBOX] which if appropriately enabled will send an
+ interrupt to the VF. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfx_pfvf_mboxx_s cn; */
+};
+typedef union bdk_rvu_pf_vfx_pfvf_mboxx bdk_rvu_pf_vfx_pfvf_mboxx_t;
+
+/* CSR address of RVU_PF_VF(a)_PFVF_MBOX(b): CN9XXX only, a <= 127, b <= 1;
+   other inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFX_PFVF_MBOXX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFX_PFVF_MBOXX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=127) && (b<=1)))
+ return 0x850200000000ll + 0x1000ll * ((a) & 0x7f) + 8ll * ((b) & 0x1);
+ __bdk_csr_fatal("RVU_PF_VFX_PFVF_MBOXX", 2, a, b, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFX_PFVF_MBOXX(a,b) bdk_rvu_pf_vfx_pfvf_mboxx_t
+#define bustype_BDK_RVU_PF_VFX_PFVF_MBOXX(a,b) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFX_PFVF_MBOXX(a,b) "RVU_PF_VFX_PFVF_MBOXX"
+#define device_bar_BDK_RVU_PF_VFX_PFVF_MBOXX(a,b) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFX_PFVF_MBOXX(a,b) (a)
+#define arguments_BDK_RVU_PF_VFX_PFVF_MBOXX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vf_bar4_addr
+ *
+ * RVU PF VF BAR4 Address Registers
+ */
+union bdk_rvu_pf_vf_bar4_addr
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vf_bar4_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t addr : 48; /**< [ 63: 16](R/W) Programmable base address of RVU_PRIV_PF()_CFG[NVF] consecutive 64 KB
+ pages in DRAM. May be used as VF/PF mailbox memory in addition to
+ RVU_PF_VF()_PFVF_MBOX()/RVU_VF_VFPF_MBOX().
+ Provides PCC_EA_ENTRY_S[BASEH,BASEL] value advertised by VF BAR4's entry in
+ PCCPF_XXX_EA_ENTRY(). */
+ uint64_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_15 : 16;
+ uint64_t addr : 48; /**< [ 63: 16](R/W) Programmable base address of RVU_PRIV_PF()_CFG[NVF] consecutive 64 KB
+ pages in DRAM. May be used as VF/PF mailbox memory in addition to
+ RVU_PF_VF()_PFVF_MBOX()/RVU_VF_VFPF_MBOX().
+ Provides PCC_EA_ENTRY_S[BASEH,BASEL] value advertised by VF BAR4's entry in
+ PCCPF_XXX_EA_ENTRY(). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vf_bar4_addr_s cn; */
+};
+typedef union bdk_rvu_pf_vf_bar4_addr bdk_rvu_pf_vf_bar4_addr_t;
+
+/* CSR address of RVU_PF_VF_BAR4_ADDR: CN9XXX only; other models are reported
+   through __bdk_csr_fatal(). */
+#define BDK_RVU_PF_VF_BAR4_ADDR BDK_RVU_PF_VF_BAR4_ADDR_FUNC()
+static inline uint64_t BDK_RVU_PF_VF_BAR4_ADDR_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VF_BAR4_ADDR_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000010ll;
+ __bdk_csr_fatal("RVU_PF_VF_BAR4_ADDR", 0, 0, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VF_BAR4_ADDR bdk_rvu_pf_vf_bar4_addr_t
+#define bustype_BDK_RVU_PF_VF_BAR4_ADDR BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VF_BAR4_ADDR "RVU_PF_VF_BAR4_ADDR"
+#define device_bar_BDK_RVU_PF_VF_BAR4_ADDR 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PF_VF_BAR4_ADDR 0
+#define arguments_BDK_RVU_PF_VF_BAR4_ADDR -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfflr_int#
+ *
+ * RVU PF VF Function Level Reset Interrupt Registers
+ */
+union bdk_rvu_pf_vfflr_intx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfflr_intx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1C/H) FLR interrupt bit per VF (RVU_PF_VFFLR_INT({a})[FLR]\<{b}\> for VF
+ number 64*{a} + {b}).
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, each bit is set along with
+ the corresponding bit in RVU_PF_VFTRPEND() when function level reset is
+ initiated for the associated VF, i.e. a one is written to
+ PCCVF_XXX_E_DEV_CTL[BCR_FLR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1C/H) FLR interrupt bit per VF (RVU_PF_VFFLR_INT({a})[FLR]\<{b}\> for VF
+ number 64*{a} + {b}).
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, each bit is set along with
+ the corresponding bit in RVU_PF_VFTRPEND() when function level reset is
+ initiated for the associated VF, i.e. a one is written to
+ PCCVF_XXX_E_DEV_CTL[BCR_FLR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfflr_intx_s cn; */
+};
+typedef union bdk_rvu_pf_vfflr_intx bdk_rvu_pf_vfflr_intx_t;
+
+/* CSR address of RVU_PF_VFFLR_INT(a): CN9XXX only, a <= 3; other inputs are
+   reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFFLR_INTX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFFLR_INTX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000900ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFFLR_INTX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFFLR_INTX(a) bdk_rvu_pf_vfflr_intx_t
+#define bustype_BDK_RVU_PF_VFFLR_INTX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFFLR_INTX(a) "RVU_PF_VFFLR_INTX"
+#define device_bar_BDK_RVU_PF_VFFLR_INTX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFFLR_INTX(a) (a)
+#define arguments_BDK_RVU_PF_VFFLR_INTX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfflr_int_ena_w1c#
+ *
+ * RVU PF VF Function Level Reset Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_pf_vfflr_int_ena_w1cx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfflr_int_ena_w1cx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_PF_VFFLR_INT(0..3)[FLR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_PF_VFFLR_INT(0..3)[FLR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfflr_int_ena_w1cx_s cn; */
+};
+typedef union bdk_rvu_pf_vfflr_int_ena_w1cx bdk_rvu_pf_vfflr_int_ena_w1cx_t;
+
+/* CSR address of RVU_PF_VFFLR_INT_ENA_W1C(a): CN9XXX only, a <= 3; other
+   inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFFLR_INT_ENA_W1CX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFFLR_INT_ENA_W1CX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000960ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFFLR_INT_ENA_W1CX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFFLR_INT_ENA_W1CX(a) bdk_rvu_pf_vfflr_int_ena_w1cx_t
+#define bustype_BDK_RVU_PF_VFFLR_INT_ENA_W1CX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFFLR_INT_ENA_W1CX(a) "RVU_PF_VFFLR_INT_ENA_W1CX"
+#define device_bar_BDK_RVU_PF_VFFLR_INT_ENA_W1CX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFFLR_INT_ENA_W1CX(a) (a)
+#define arguments_BDK_RVU_PF_VFFLR_INT_ENA_W1CX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfflr_int_ena_w1s#
+ *
+ * RVU PF VF Function Level Reset Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_pf_vfflr_int_ena_w1sx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfflr_int_ena_w1sx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_PF_VFFLR_INT(0..3)[FLR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_PF_VFFLR_INT(0..3)[FLR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfflr_int_ena_w1sx_s cn; */
+};
+typedef union bdk_rvu_pf_vfflr_int_ena_w1sx bdk_rvu_pf_vfflr_int_ena_w1sx_t;
+
+/* CSR address of RVU_PF_VFFLR_INT_ENA_W1S(a): CN9XXX only, a <= 3; other
+   inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFFLR_INT_ENA_W1SX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFFLR_INT_ENA_W1SX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000940ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFFLR_INT_ENA_W1SX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFFLR_INT_ENA_W1SX(a) bdk_rvu_pf_vfflr_int_ena_w1sx_t
+#define bustype_BDK_RVU_PF_VFFLR_INT_ENA_W1SX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFFLR_INT_ENA_W1SX(a) "RVU_PF_VFFLR_INT_ENA_W1SX"
+#define device_bar_BDK_RVU_PF_VFFLR_INT_ENA_W1SX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFFLR_INT_ENA_W1SX(a) (a)
+#define arguments_BDK_RVU_PF_VFFLR_INT_ENA_W1SX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfflr_int_w1s#
+ *
+ * RVU PF VF Function Level Reset Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_pf_vfflr_int_w1sx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfflr_int_w1sx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_PF_VFFLR_INT(0..3)[FLR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t flr : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_PF_VFFLR_INT(0..3)[FLR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfflr_int_w1sx_s cn; */
+};
+typedef union bdk_rvu_pf_vfflr_int_w1sx bdk_rvu_pf_vfflr_int_w1sx_t;
+
+/* CSR address of RVU_PF_VFFLR_INT_W1S(a): CN9XXX only, a <= 3; other inputs
+   are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFFLR_INT_W1SX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFFLR_INT_W1SX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000920ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFFLR_INT_W1SX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFFLR_INT_W1SX(a) bdk_rvu_pf_vfflr_int_w1sx_t
+#define bustype_BDK_RVU_PF_VFFLR_INT_W1SX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFFLR_INT_W1SX(a) "RVU_PF_VFFLR_INT_W1SX"
+#define device_bar_BDK_RVU_PF_VFFLR_INT_W1SX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFFLR_INT_W1SX(a) (a)
+#define arguments_BDK_RVU_PF_VFFLR_INT_W1SX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfme_int#
+ *
+ * RVU PF VF Bus Master Enable Interrupt Registers
+ */
+union bdk_rvu_pf_vfme_intx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfme_intx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1C/H) Master enable interrupt bit per VF (RVU_PF_VFME_INT({a})[ME]\<{b}\> for VF
+ number 64*{a} + {b}).
+ A device-dependent PF driver typically uses these bits to handle state
+ changes to PCCPF_XXX_CMD[ME], which are typically modified by
+ non-device-dependent software only.
+
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, each bit is set when the
+ corresponding PCCVF_XXX_CMD[ME] bit is either set or cleared for the
+ associated PF. The corresponding bit in RVU_PF_VFME_STATUS() returns the
+ current value of PCCVF_XXX_CMD[ME].
+
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, the corresponding bit in
+ RVU_PF_VFTRPEND() is also set when PCCVF_XXX_CMD[ME] is set, but not
+ when PCCVF_XXX_CMD[ME] is cleared. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1C/H) Master enable interrupt bit per VF (RVU_PF_VFME_INT({a})[ME]\<{b}\> for VF
+ number 64*{a} + {b}).
+ A device-dependent PF driver typically uses these bits to handle state
+ changes to PCCPF_XXX_CMD[ME], which are typically modified by
+ non-device-dependent software only.
+
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, each bit is set when the
+ corresponding PCCVF_XXX_CMD[ME] bit is either set or cleared for the
+ associated PF. The corresponding bit in RVU_PF_VFME_STATUS() returns the
+ current value of PCCVF_XXX_CMD[ME].
+
+ If RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set, the corresponding bit in
+ RVU_PF_VFTRPEND() is also set when PCCVF_XXX_CMD[ME] is set, but not
+ when PCCVF_XXX_CMD[ME] is cleared. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfme_intx_s cn; */
+};
+typedef union bdk_rvu_pf_vfme_intx bdk_rvu_pf_vfme_intx_t;
+
+/* CSR address of RVU_PF_VFME_INT(a): CN9XXX only, a <= 3; other inputs are
+   reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFME_INTX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFME_INTX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000980ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFME_INTX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFME_INTX(a) bdk_rvu_pf_vfme_intx_t
+#define bustype_BDK_RVU_PF_VFME_INTX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFME_INTX(a) "RVU_PF_VFME_INTX"
+#define device_bar_BDK_RVU_PF_VFME_INTX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFME_INTX(a) (a)
+#define arguments_BDK_RVU_PF_VFME_INTX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfme_int_ena_w1c#
+ *
+ * RVU PF VF Bus Master Enable Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_pf_vfme_int_ena_w1cx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfme_int_ena_w1cx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_PF_VFME_INT(0..3)[ME]. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_PF_VFME_INT(0..3)[ME]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfme_int_ena_w1cx_s cn; */
+};
+typedef union bdk_rvu_pf_vfme_int_ena_w1cx bdk_rvu_pf_vfme_int_ena_w1cx_t;
+
+/* CSR address of RVU_PF_VFME_INT_ENA_W1C(a): CN9XXX only, a <= 3; other
+   inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFME_INT_ENA_W1CX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFME_INT_ENA_W1CX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8502000009e0ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFME_INT_ENA_W1CX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFME_INT_ENA_W1CX(a) bdk_rvu_pf_vfme_int_ena_w1cx_t
+#define bustype_BDK_RVU_PF_VFME_INT_ENA_W1CX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFME_INT_ENA_W1CX(a) "RVU_PF_VFME_INT_ENA_W1CX"
+#define device_bar_BDK_RVU_PF_VFME_INT_ENA_W1CX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFME_INT_ENA_W1CX(a) (a)
+#define arguments_BDK_RVU_PF_VFME_INT_ENA_W1CX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfme_int_ena_w1s#
+ *
+ * RVU PF VF Bus Master Enable Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_pf_vfme_int_ena_w1sx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfme_int_ena_w1sx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_PF_VFME_INT(0..3)[ME]. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_PF_VFME_INT(0..3)[ME]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfme_int_ena_w1sx_s cn; */
+};
+typedef union bdk_rvu_pf_vfme_int_ena_w1sx bdk_rvu_pf_vfme_int_ena_w1sx_t;
+
+/* CSR address of RVU_PF_VFME_INT_ENA_W1S(a): CN9XXX only, a <= 3; other
+   inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFME_INT_ENA_W1SX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFME_INT_ENA_W1SX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8502000009c0ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFME_INT_ENA_W1SX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFME_INT_ENA_W1SX(a) bdk_rvu_pf_vfme_int_ena_w1sx_t
+#define bustype_BDK_RVU_PF_VFME_INT_ENA_W1SX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFME_INT_ENA_W1SX(a) "RVU_PF_VFME_INT_ENA_W1SX"
+#define device_bar_BDK_RVU_PF_VFME_INT_ENA_W1SX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFME_INT_ENA_W1SX(a) (a)
+#define arguments_BDK_RVU_PF_VFME_INT_ENA_W1SX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfme_int_w1s#
+ *
+ * RVU PF VF Bus Master Enable Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_pf_vfme_int_w1sx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfme_int_w1sx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_PF_VFME_INT(0..3)[ME]. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_PF_VFME_INT(0..3)[ME]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfme_int_w1sx_s cn; */
+};
+typedef union bdk_rvu_pf_vfme_int_w1sx bdk_rvu_pf_vfme_int_w1sx_t;
+
+/* CSR address of RVU_PF_VFME_INT_W1S(a): CN9XXX only, a <= 3; other inputs
+   are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFME_INT_W1SX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFME_INT_W1SX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8502000009a0ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFME_INT_W1SX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFME_INT_W1SX(a) bdk_rvu_pf_vfme_int_w1sx_t
+#define bustype_BDK_RVU_PF_VFME_INT_W1SX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFME_INT_W1SX(a) "RVU_PF_VFME_INT_W1SX"
+#define device_bar_BDK_RVU_PF_VFME_INT_W1SX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFME_INT_W1SX(a) (a)
+#define arguments_BDK_RVU_PF_VFME_INT_W1SX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfme_status#
+ *
+ * RVU PF VF Bus Master Enable Status Registers
+ */
+union bdk_rvu_pf_vfme_statusx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfme_statusx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t me : 64; /**< [ 63: 0](RO/H) Bus master enable bit per VF (RVU_PF_VFME_STATUS({a})[ME]\<{b}\> for VF
+ number 64*{a} + {b}).
+ Each bit returns the VF's PCCVF_XXX_CMD[ME] value. */
+#else /* Word 0 - Little Endian */
+ uint64_t me : 64; /**< [ 63: 0](RO/H) Bus master enable bit per VF (RVU_PF_VFME_STATUS({a})[ME]\<{b}\> for VF
+ number 64*{a} + {b}).
+ Each bit returns the VF's PCCVF_XXX_CMD[ME] value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfme_statusx_s cn; */
+};
+typedef union bdk_rvu_pf_vfme_statusx bdk_rvu_pf_vfme_statusx_t;
+
+/* CSR address of RVU_PF_VFME_STATUS(a): CN9XXX only, a <= 3; other inputs
+   are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFME_STATUSX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFME_STATUSX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000800ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFME_STATUSX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFME_STATUSX(a) bdk_rvu_pf_vfme_statusx_t
+#define bustype_BDK_RVU_PF_VFME_STATUSX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFME_STATUSX(a) "RVU_PF_VFME_STATUSX"
+#define device_bar_BDK_RVU_PF_VFME_STATUSX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFME_STATUSX(a) (a)
+#define arguments_BDK_RVU_PF_VFME_STATUSX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfpf_mbox_int#
+ *
+ * RVU VF to PF Mailbox Interrupt Registers
+ */
+union bdk_rvu_pf_vfpf_mbox_intx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfpf_mbox_intx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1C/H) Mailbox interrupt bit per VF (RVU_PF_VFPF_MBOX_INT({a})[MBOX]\<{b}\> for VF
+ number 64*{a} + {b}).
+ Each bit is set when the VF writes to the corresponding
+ RVU_VF_VFPF_MBOX(1) register. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1C/H) Mailbox interrupt bit per VF (RVU_PF_VFPF_MBOX_INT({a})[MBOX]\<{b}\> for VF
+ number 64*{a} + {b}).
+ Each bit is set when the VF writes to the corresponding
+ RVU_VF_VFPF_MBOX(1) register. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfpf_mbox_intx_s cn; */
+};
+typedef union bdk_rvu_pf_vfpf_mbox_intx bdk_rvu_pf_vfpf_mbox_intx_t;
+
+/* CSR address of RVU_PF_VFPF_MBOX_INT(a): CN9XXX only, a <= 3; other inputs
+   are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFPF_MBOX_INTX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFPF_MBOX_INTX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000880ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFPF_MBOX_INTX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFPF_MBOX_INTX(a) bdk_rvu_pf_vfpf_mbox_intx_t
+#define bustype_BDK_RVU_PF_VFPF_MBOX_INTX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFPF_MBOX_INTX(a) "RVU_PF_VFPF_MBOX_INTX"
+#define device_bar_BDK_RVU_PF_VFPF_MBOX_INTX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFPF_MBOX_INTX(a) (a)
+#define arguments_BDK_RVU_PF_VFPF_MBOX_INTX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfpf_mbox_int_ena_w1c#
+ *
+ * RVU VF to PF Mailbox Interrupt Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_pf_vfpf_mbox_int_ena_w1cx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfpf_mbox_int_ena_w1cx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_PF_VFPF_MBOX_INT(0..3)[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for RVU_PF_VFPF_MBOX_INT(0..3)[MBOX]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfpf_mbox_int_ena_w1cx_s cn; */
+};
+typedef union bdk_rvu_pf_vfpf_mbox_int_ena_w1cx bdk_rvu_pf_vfpf_mbox_int_ena_w1cx_t;
+
+/* CSR address of RVU_PF_VFPF_MBOX_INT_ENA_W1C(a): CN9XXX only, a <= 3; other
+   inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1CX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1CX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8502000008e0ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFPF_MBOX_INT_ENA_W1CX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) bdk_rvu_pf_vfpf_mbox_int_ena_w1cx_t
+#define bustype_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) "RVU_PF_VFPF_MBOX_INT_ENA_W1CX"
+#define device_bar_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (a)
+#define arguments_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1CX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfpf_mbox_int_ena_w1s#
+ *
+ * RVU VF to PF Mailbox Interrupt Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_pf_vfpf_mbox_int_ena_w1sx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfpf_mbox_int_ena_w1sx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_PF_VFPF_MBOX_INT(0..3)[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for RVU_PF_VFPF_MBOX_INT(0..3)[MBOX]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfpf_mbox_int_ena_w1sx_s cn; */
+};
+typedef union bdk_rvu_pf_vfpf_mbox_int_ena_w1sx bdk_rvu_pf_vfpf_mbox_int_ena_w1sx_t;
+
+/* CSR address of RVU_PF_VFPF_MBOX_INT_ENA_W1S(a): CN9XXX only, a <= 3; other
+   inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1SX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1SX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8502000008c0ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFPF_MBOX_INT_ENA_W1SX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) bdk_rvu_pf_vfpf_mbox_int_ena_w1sx_t
+#define bustype_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) "RVU_PF_VFPF_MBOX_INT_ENA_W1SX"
+#define device_bar_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (a)
+#define arguments_BDK_RVU_PF_VFPF_MBOX_INT_ENA_W1SX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vfpf_mbox_int_w1s#
+ *
+ * RVU VF to PF Mailbox Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_pf_vfpf_mbox_int_w1sx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vfpf_mbox_int_w1sx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_PF_VFPF_MBOX_INT(0..3)[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_PF_VFPF_MBOX_INT(0..3)[MBOX]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vfpf_mbox_int_w1sx_s cn; */
+};
+typedef union bdk_rvu_pf_vfpf_mbox_int_w1sx bdk_rvu_pf_vfpf_mbox_int_w1sx_t;
+
+/* CSR address of RVU_PF_VFPF_MBOX_INT_W1S(a): CN9XXX only, a <= 3; other
+   inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFPF_MBOX_INT_W1SX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFPF_MBOX_INT_W1SX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8502000008a0ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFPF_MBOX_INT_W1SX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFPF_MBOX_INT_W1SX(a) bdk_rvu_pf_vfpf_mbox_int_w1sx_t
+#define bustype_BDK_RVU_PF_VFPF_MBOX_INT_W1SX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFPF_MBOX_INT_W1SX(a) "RVU_PF_VFPF_MBOX_INT_W1SX"
+#define device_bar_BDK_RVU_PF_VFPF_MBOX_INT_W1SX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFPF_MBOX_INT_W1SX(a) (a)
+#define arguments_BDK_RVU_PF_VFPF_MBOX_INT_W1SX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vftrpend#
+ *
+ * RVU PF VF Transaction Pending Registers
+ */
+union bdk_rvu_pf_vftrpendx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vftrpendx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trpend : 64; /**< [ 63: 0](R/W1C/H) Transaction pending bit per VF (RVU_PF_VFTRPEND({a})[TRPEND]\<{b}\> for VF
+ number 64*{a} + {b}).
+
+ A VF's bit is set when RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set and:
+ * A one is written to the corresponding PCCVF_XXX_E_DEV_CTL[BCR_FLR], or
+ * PCCVF_XXX_CMD[ME] is set or cleared.
+
+ When a VF's bit is set, forces the corresponding
+ PCCVF_XXX_E_DEV_CTL[TRPEND] to be set.
+
+ Software (typically a device-dependent PF driver) can clear the bit by
+ writing a 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t trpend : 64; /**< [ 63: 0](R/W1C/H) Transaction pending bit per VF (RVU_PF_VFTRPEND({a})[TRPEND]\<{b}\> for VF
+ number 64*{a} + {b}).
+
+ A VF's bit is set when RVU_PRIV_PF()_CFG[ME_FLR_ENA] is set and:
+ * A one is written to the corresponding PCCVF_XXX_E_DEV_CTL[BCR_FLR], or
+ * PCCVF_XXX_CMD[ME] is set or cleared.
+
+ When a VF's bit is set, forces the corresponding
+ PCCVF_XXX_E_DEV_CTL[TRPEND] to be set.
+
+ Software (typically a device-dependent PF driver) can clear the bit by
+ writing a 1. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vftrpendx_s cn; */
+};
+typedef union bdk_rvu_pf_vftrpendx bdk_rvu_pf_vftrpendx_t;
+
+/* CSR address of RVU_PF_VFTRPEND(a): CN9XXX only, a <= 3; other inputs are
+   reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFTRPENDX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFTRPENDX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000820ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFTRPENDX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFTRPENDX(a) bdk_rvu_pf_vftrpendx_t
+#define bustype_BDK_RVU_PF_VFTRPENDX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFTRPENDX(a) "RVU_PF_VFTRPENDX"
+#define device_bar_BDK_RVU_PF_VFTRPENDX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFTRPENDX(a) (a)
+#define arguments_BDK_RVU_PF_VFTRPENDX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR2) rvu_pf_vftrpend_w1s#
+ *
+ * RVU PF VF Transaction Pending Set Registers
+ * This register reads or sets bits.
+ */
+union bdk_rvu_pf_vftrpend_w1sx
+{
+ uint64_t u;
+ struct bdk_rvu_pf_vftrpend_w1sx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t trpend : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_PF_VFTRPEND(0..3)[TRPEND]. */
+#else /* Word 0 - Little Endian */
+ uint64_t trpend : 64; /**< [ 63: 0](R/W1S/H) Reads or sets RVU_PF_VFTRPEND(0..3)[TRPEND]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_pf_vftrpend_w1sx_s cn; */
+};
+typedef union bdk_rvu_pf_vftrpend_w1sx bdk_rvu_pf_vftrpend_w1sx_t;
+
+/* CSR address of RVU_PF_VFTRPEND_W1S(a): CN9XXX only, a <= 3; other inputs
+   are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PF_VFTRPEND_W1SX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PF_VFTRPEND_W1SX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x850200000840ll + 8ll * ((a) & 0x3);
+ __bdk_csr_fatal("RVU_PF_VFTRPEND_W1SX", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PF_VFTRPEND_W1SX(a) bdk_rvu_pf_vftrpend_w1sx_t
+#define bustype_BDK_RVU_PF_VFTRPEND_W1SX(a) BDK_CSR_TYPE_RVU_PF_BAR2
+#define basename_BDK_RVU_PF_VFTRPEND_W1SX(a) "RVU_PF_VFTRPEND_W1SX"
+#define device_bar_BDK_RVU_PF_VFTRPEND_W1SX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_PF_VFTRPEND_W1SX(a) (a)
+#define arguments_BDK_RVU_PF_VFTRPEND_W1SX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_active_pc
+ *
+ * RVU Active Program Counter Register
+ */
+union bdk_rvu_priv_active_pc
+{
+ uint64_t u;
+ struct bdk_rvu_priv_active_pc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t active_pc : 64; /**< [ 63: 0](R/W/H) This register increments on every coprocessor-clock cycle that the RVU conditional clocks
+ are enabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t active_pc : 64; /**< [ 63: 0](R/W/H) This register increments on every coprocessor-clock cycle that the RVU conditional clocks
+ are enabled. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_active_pc_s cn; */
+};
+typedef union bdk_rvu_priv_active_pc bdk_rvu_priv_active_pc_t;
+
+/* CSR address of RVU_PRIV_ACTIVE_PC: CN9XXX only; other models are reported
+   through __bdk_csr_fatal(). */
+#define BDK_RVU_PRIV_ACTIVE_PC BDK_RVU_PRIV_ACTIVE_PC_FUNC()
+static inline uint64_t BDK_RVU_PRIV_ACTIVE_PC_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_ACTIVE_PC_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850008000030ll;
+ __bdk_csr_fatal("RVU_PRIV_ACTIVE_PC", 0, 0, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PRIV_ACTIVE_PC bdk_rvu_priv_active_pc_t
+#define bustype_BDK_RVU_PRIV_ACTIVE_PC BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_ACTIVE_PC "RVU_PRIV_ACTIVE_PC"
+#define device_bar_BDK_RVU_PRIV_ACTIVE_PC 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_ACTIVE_PC 0
+#define arguments_BDK_RVU_PRIV_ACTIVE_PC -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_block_type#_rev
+ *
+ * RVU Privileged Block Type Revision Registers
+ * These registers are used by configuration software to specify the revision ID
+ * of each block type enumerated by RVU_BLOCK_TYPE_E, to assist VF/PF software
+ * discovery.
+ */
+union bdk_rvu_priv_block_typex_rev
+{
+ uint64_t u;
+ struct bdk_rvu_priv_block_typex_rev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t rid : 8; /**< [ 7: 0](R/W) Revision ID of the block. This is the read value returned by
+ RVU_VF_BLOCK_ADDR()_DISC[RID]. */
+#else /* Word 0 - Little Endian */
+ uint64_t rid : 8; /**< [ 7: 0](R/W) Revision ID of the block. This is the read value returned by
+ RVU_VF_BLOCK_ADDR()_DISC[RID]. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_block_typex_rev_s cn; */
+};
+typedef union bdk_rvu_priv_block_typex_rev bdk_rvu_priv_block_typex_rev_t;
+
+/* CSR address of RVU_PRIV_BLOCK_TYPE(a)_REV: CN9XXX only, a <= 15; other
+   inputs are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_BLOCK_TYPEX_REV(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_BLOCK_TYPEX_REV(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000400ll + 8ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_BLOCK_TYPEX_REV", 1, a, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PRIV_BLOCK_TYPEX_REV(a) bdk_rvu_priv_block_typex_rev_t
+#define bustype_BDK_RVU_PRIV_BLOCK_TYPEX_REV(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_BLOCK_TYPEX_REV(a) "RVU_PRIV_BLOCK_TYPEX_REV"
+#define device_bar_BDK_RVU_PRIV_BLOCK_TYPEX_REV(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_BLOCK_TYPEX_REV(a) (a)
+#define arguments_BDK_RVU_PRIV_BLOCK_TYPEX_REV(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_clk_cfg
+ *
+ * RVU Privileged General Configuration Register
+ */
+union bdk_rvu_priv_clk_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_clk_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t clk_ena : 1; /**< [ 0: 0](R/W) Force conditional clock to always be enabled. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t clk_ena : 1; /**< [ 0: 0](R/W) Force conditional clock to always be enabled. For diagnostic use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_clk_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_clk_cfg bdk_rvu_priv_clk_cfg_t;
+
+/* CSR address of RVU_PRIV_CLK_CFG: CN9XXX only; other models are reported
+   through __bdk_csr_fatal(). */
+#define BDK_RVU_PRIV_CLK_CFG BDK_RVU_PRIV_CLK_CFG_FUNC()
+static inline uint64_t BDK_RVU_PRIV_CLK_CFG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_CLK_CFG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850008000020ll;
+ __bdk_csr_fatal("RVU_PRIV_CLK_CFG", 0, 0, 0, 0, 0);
+}
+
+/* Generated metadata consumed by the generic BDK CSR accessor macros. */
+#define typedef_BDK_RVU_PRIV_CLK_CFG bdk_rvu_priv_clk_cfg_t
+#define bustype_BDK_RVU_PRIV_CLK_CFG BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_CLK_CFG "RVU_PRIV_CLK_CFG"
+#define device_bar_BDK_RVU_PRIV_CLK_CFG 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_CLK_CFG 0
+#define arguments_BDK_RVU_PRIV_CLK_CFG -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_const
+ *
+ * RVU Privileged Constants Register
+ * This register contains constants for software discovery.
+ */
+union bdk_rvu_priv_const
+{
+ uint64_t u;
+ struct bdk_rvu_priv_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t max_vfs_per_pf : 8; /**< [ 47: 40](RO) Maximum number of VFs per RVU PF. */
+ uint64_t pfs : 8; /**< [ 39: 32](RO) Number of RVU PFs. */
+ uint64_t hwvfs : 12; /**< [ 31: 20](RO) Number of RVU hardware VFs (HWVFs). */
+ uint64_t max_msix : 20; /**< [ 19: 0](RO) Combined maximum number of MSI-X vectors that may be provisioned to the RVU
+ PFs and VFs. Also the maximum number of 16-byte RVU_MSIX_VEC_S structures
+ in RVU's MSI-X table region in LLC/DRAM. See RVU_PRIV_PF()_MSIX_CFG.
+
+ Internal:
+ Also, size of RVU's internal PBA memory.
+
+ Sized as follows:
+ \<pre\>
+ AP cores 24
+ Vectors per LF:
+ NIX CINT 32
+ NIX QINT 32
+ NIX GINT 1
+ NPA QINT 32
+ NPA GINT 1
+ SSO 1
+ TIM 1
+ CPT 1
+ RVU 1
+ Total per LF: \<128
+ Num LFs 256
+ Total LF vectors \<32K
+ Total AF vectors 64 (budget 16 blocks * 4)
+ Total vectors budget 32K
+ \</pre\> */
+#else /* Word 0 - Little Endian */
+ uint64_t max_msix : 20; /**< [ 19: 0](RO) Combined maximum number of MSI-X vectors that may be provisioned to the RVU
+ PFs and VFs. Also the maximum number of 16-byte RVU_MSIX_VEC_S structures
+ in RVU's MSI-X table region in LLC/DRAM. See RVU_PRIV_PF()_MSIX_CFG.
+
+ Internal:
+ Also, size of RVU's internal PBA memory.
+
+ Sized as follows:
+ \<pre\>
+ AP cores 24
+ Vectors per LF:
+ NIX CINT 32
+ NIX QINT 32
+ NIX GINT 1
+ NPA QINT 32
+ NPA GINT 1
+ SSO 1
+ TIM 1
+ CPT 1
+ RVU 1
+ Total per LF: \<128
+ Num LFs 256
+ Total LF vectors \<32K
+ Total AF vectors 64 (budget 16 blocks * 4)
+ Total vectors budget 32K
+ \</pre\> */
+ uint64_t hwvfs : 12; /**< [ 31: 20](RO) Number of RVU hardware VFs (HWVFs). */
+ uint64_t pfs : 8; /**< [ 39: 32](RO) Number of RVU PFs. */
+ uint64_t max_vfs_per_pf : 8; /**< [ 47: 40](RO) Maximum number of VFs per RVU PF. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_const_s cn; */
+};
+typedef union bdk_rvu_priv_const bdk_rvu_priv_const_t;
+
+#define BDK_RVU_PRIV_CONST BDK_RVU_PRIV_CONST_FUNC()
+/* Returns the CSR's physical address; present only on CN9XXX. Other models are
+   reported via __bdk_csr_fatal(), which presumably does not return. */
+static inline uint64_t BDK_RVU_PRIV_CONST_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_CONST_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850008000000ll;
+ __bdk_csr_fatal("RVU_PRIV_CONST", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_CONST bdk_rvu_priv_const_t
+#define bustype_BDK_RVU_PRIV_CONST BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_CONST "RVU_PRIV_CONST"
+#define device_bar_BDK_RVU_PRIV_CONST 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_CONST 0
+#define arguments_BDK_RVU_PRIV_CONST -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_gen_cfg
+ *
+ * RVU Privileged General Configuration Register
+ */
+union bdk_rvu_priv_gen_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_gen_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t lock : 1; /**< [ 0: 0](R/W1S) Lock privileged registers. When set, all privileged registers in RVU and
+ its resource blocks are locked down and cannot be modified. Writing a 1
+ sets this bit; once set, the bit can only be cleared by core reset. */
+#else /* Word 0 - Little Endian */
+ uint64_t lock : 1; /**< [ 0: 0](R/W1S) Lock privileged registers. When set, all privileged registers in RVU and
+ its resource blocks are locked down and cannot be modified. Writing a 1
+ sets this bit; once set, the bit can only be cleared by core reset. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_gen_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_gen_cfg bdk_rvu_priv_gen_cfg_t;
+
+#define BDK_RVU_PRIV_GEN_CFG BDK_RVU_PRIV_GEN_CFG_FUNC()
+/* Returns the CSR's physical address; present only on CN9XXX. Other models are
+   reported via __bdk_csr_fatal(), which presumably does not return. */
+static inline uint64_t BDK_RVU_PRIV_GEN_CFG_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_GEN_CFG_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850008000010ll;
+ __bdk_csr_fatal("RVU_PRIV_GEN_CFG", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_GEN_CFG bdk_rvu_priv_gen_cfg_t
+#define bustype_BDK_RVU_PRIV_GEN_CFG BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_GEN_CFG "RVU_PRIV_GEN_CFG"
+#define device_bar_BDK_RVU_PRIV_GEN_CFG 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_GEN_CFG 0
+#define arguments_BDK_RVU_PRIV_GEN_CFG -1,-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_hwvf#_cpt#_cfg
+ *
+ * RVU Privileged Hardware VF CPT Configuration Registers
+ * Similar to RVU_PRIV_HWVF()_NIX()_CFG, but for CPT({a}) block.
+ */
+union bdk_rvu_priv_hwvfx_cptx_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_hwvfx_cptx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_hwvfx_cptx_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_hwvfx_cptx_cfg bdk_rvu_priv_hwvfx_cptx_cfg_t;
+
+/* Returns the CSR address for HWVF (a), CPT (b); only CN9XXX with a<=255, b==0 is
+   valid. Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_HWVFX_CPTX_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_HWVFX_CPTX_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=255) && (b==0)))
+ return 0x850008001350ll + 0x10000ll * ((a) & 0xff) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("RVU_PRIV_HWVFX_CPTX_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_HWVFX_CPTX_CFG(a,b) bdk_rvu_priv_hwvfx_cptx_cfg_t
+#define bustype_BDK_RVU_PRIV_HWVFX_CPTX_CFG(a,b) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_HWVFX_CPTX_CFG(a,b) "RVU_PRIV_HWVFX_CPTX_CFG"
+#define device_bar_BDK_RVU_PRIV_HWVFX_CPTX_CFG(a,b) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_HWVFX_CPTX_CFG(a,b) (a)
+#define arguments_BDK_RVU_PRIV_HWVFX_CPTX_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_hwvf#_int_cfg
+ *
+ * RVU Privileged Hardware VF Interrupt Configuration Registers
+ */
+union bdk_rvu_priv_hwvfx_int_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_hwvfx_int_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t msix_size : 8; /**< [ 19: 12](RO) Number of interrupt vectors enumerated by RVU_VF_INT_VEC_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t msix_offset : 11; /**< [ 10: 0](R/W) MSI-X offset. Offset of VF interrupt vectors enumerated by RVU_VF_INT_VEC_E
+ in the HWVF's MSI-X table. This is added to each enumerated value to obtain
+ the corresponding MSI-X vector index.
+ The highest enumerated value plus [MSIX_OFFSET] must be less than or equal
+ to RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_SIZEM1]. */
+#else /* Word 0 - Little Endian */
+ uint64_t msix_offset : 11; /**< [ 10: 0](R/W) MSI-X offset. Offset of VF interrupt vectors enumerated by RVU_VF_INT_VEC_E
+ in the HWVF's MSI-X table. This is added to each enumerated value to obtain
+ the corresponding MSI-X vector index.
+ The highest enumerated value plus [MSIX_OFFSET] must be less than or equal
+ to RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_SIZEM1]. */
+ uint64_t reserved_11 : 1;
+ uint64_t msix_size : 8; /**< [ 19: 12](RO) Number of interrupt vectors enumerated by RVU_VF_INT_VEC_E. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_hwvfx_int_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_hwvfx_int_cfg bdk_rvu_priv_hwvfx_int_cfg_t;
+
+/* Returns the CSR address for HWVF (a); only CN9XXX with a<=255 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_HWVFX_INT_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_HWVFX_INT_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=255))
+ return 0x850008001280ll + 0x10000ll * ((a) & 0xff);
+ __bdk_csr_fatal("RVU_PRIV_HWVFX_INT_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_HWVFX_INT_CFG(a) bdk_rvu_priv_hwvfx_int_cfg_t
+#define bustype_BDK_RVU_PRIV_HWVFX_INT_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_HWVFX_INT_CFG(a) "RVU_PRIV_HWVFX_INT_CFG"
+#define device_bar_BDK_RVU_PRIV_HWVFX_INT_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_HWVFX_INT_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_HWVFX_INT_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_hwvf#_nix#_cfg
+ *
+ * RVU Privileged Hardware VF NIX Configuration Registers
+ * These registers are used to assist VF software discovery. For each HWVF, if the
+ * HWVF is mapped to a VF by RVU_PRIV_PF()_CFG[FIRST_HWVF,NVF], software
+ * writes NIX block's resource configuration for the VF in this register. The VF
+ * driver can read RVU_VF_BLOCK_ADDR()_DISC to discover the configuration.
+ */
+union bdk_rvu_priv_hwvfx_nixx_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_hwvfx_nixx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t has_lf : 1; /**< [ 0: 0](R/W) Set when an LF from the block is provisioned to the VF, clear otherwise. */
+#else /* Word 0 - Little Endian */
+ uint64_t has_lf : 1; /**< [ 0: 0](R/W) Set when an LF from the block is provisioned to the VF, clear otherwise. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_hwvfx_nixx_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_hwvfx_nixx_cfg bdk_rvu_priv_hwvfx_nixx_cfg_t;
+
+/* Returns the CSR address for HWVF (a), NIX (b); only CN9XXX with a<=255, b==0 is
+   valid. Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_HWVFX_NIXX_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_HWVFX_NIXX_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=255) && (b==0)))
+ return 0x850008001300ll + 0x10000ll * ((a) & 0xff) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("RVU_PRIV_HWVFX_NIXX_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_HWVFX_NIXX_CFG(a,b) bdk_rvu_priv_hwvfx_nixx_cfg_t
+#define bustype_BDK_RVU_PRIV_HWVFX_NIXX_CFG(a,b) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_HWVFX_NIXX_CFG(a,b) "RVU_PRIV_HWVFX_NIXX_CFG"
+#define device_bar_BDK_RVU_PRIV_HWVFX_NIXX_CFG(a,b) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_HWVFX_NIXX_CFG(a,b) (a)
+#define arguments_BDK_RVU_PRIV_HWVFX_NIXX_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_hwvf#_npa_cfg
+ *
+ * RVU Privileged Hardware VF NPA Configuration Registers
+ * Similar to RVU_PRIV_HWVF()_NIX()_CFG, but for NPA block.
+ */
+union bdk_rvu_priv_hwvfx_npa_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_hwvfx_npa_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t has_lf : 1; /**< [ 0: 0](R/W) Set when an LF from the block is provisioned to the VF, clear otherwise. */
+#else /* Word 0 - Little Endian */
+ uint64_t has_lf : 1; /**< [ 0: 0](R/W) Set when an LF from the block is provisioned to the VF, clear otherwise. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_hwvfx_npa_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_hwvfx_npa_cfg bdk_rvu_priv_hwvfx_npa_cfg_t;
+
+/* Returns the CSR address for HWVF (a); only CN9XXX with a<=255 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_HWVFX_NPA_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_HWVFX_NPA_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=255))
+ return 0x850008001310ll + 0x10000ll * ((a) & 0xff);
+ __bdk_csr_fatal("RVU_PRIV_HWVFX_NPA_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_HWVFX_NPA_CFG(a) bdk_rvu_priv_hwvfx_npa_cfg_t
+#define bustype_BDK_RVU_PRIV_HWVFX_NPA_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_HWVFX_NPA_CFG(a) "RVU_PRIV_HWVFX_NPA_CFG"
+#define device_bar_BDK_RVU_PRIV_HWVFX_NPA_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_HWVFX_NPA_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_HWVFX_NPA_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_hwvf#_sso_cfg
+ *
+ * RVU Privileged Hardware VF SSO Configuration Registers
+ * Similar to RVU_PRIV_HWVF()_NIX()_CFG, but for SSO block.
+ */
+union bdk_rvu_priv_hwvfx_sso_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_hwvfx_sso_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_hwvfx_sso_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_hwvfx_sso_cfg bdk_rvu_priv_hwvfx_sso_cfg_t;
+
+/* Returns the CSR address for HWVF (a); only CN9XXX with a<=255 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_HWVFX_SSO_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_HWVFX_SSO_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=255))
+ return 0x850008001320ll + 0x10000ll * ((a) & 0xff);
+ __bdk_csr_fatal("RVU_PRIV_HWVFX_SSO_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_HWVFX_SSO_CFG(a) bdk_rvu_priv_hwvfx_sso_cfg_t
+#define bustype_BDK_RVU_PRIV_HWVFX_SSO_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_HWVFX_SSO_CFG(a) "RVU_PRIV_HWVFX_SSO_CFG"
+#define device_bar_BDK_RVU_PRIV_HWVFX_SSO_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_HWVFX_SSO_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_HWVFX_SSO_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_hwvf#_ssow_cfg
+ *
+ * RVU Privileged Hardware VF SSO Work Slot Configuration Registers
+ * Similar to RVU_PRIV_HWVF()_NIX()_CFG, but for SSOW block.
+ */
+union bdk_rvu_priv_hwvfx_ssow_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_hwvfx_ssow_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_hwvfx_ssow_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_hwvfx_ssow_cfg bdk_rvu_priv_hwvfx_ssow_cfg_t;
+
+/* Returns the CSR address for HWVF (a); only CN9XXX with a<=255 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_HWVFX_SSOW_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_HWVFX_SSOW_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=255))
+ return 0x850008001330ll + 0x10000ll * ((a) & 0xff);
+ __bdk_csr_fatal("RVU_PRIV_HWVFX_SSOW_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_HWVFX_SSOW_CFG(a) bdk_rvu_priv_hwvfx_ssow_cfg_t
+#define bustype_BDK_RVU_PRIV_HWVFX_SSOW_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_HWVFX_SSOW_CFG(a) "RVU_PRIV_HWVFX_SSOW_CFG"
+#define device_bar_BDK_RVU_PRIV_HWVFX_SSOW_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_HWVFX_SSOW_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_HWVFX_SSOW_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_hwvf#_tim_cfg
+ *
+ * RVU Privileged Hardware VF TIM Configuration Registers
+ * (Title corrected from "SSO Work Slot" — copy/paste artifact in the generated text.)
+ * Similar to RVU_PRIV_HWVF()_NIX()_CFG, but for TIM block.
+ */
+union bdk_rvu_priv_hwvfx_tim_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_hwvfx_tim_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_hwvfx_tim_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_hwvfx_tim_cfg bdk_rvu_priv_hwvfx_tim_cfg_t;
+
+/* Returns the CSR address for HWVF (a); only CN9XXX with a<=255 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_HWVFX_TIM_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_HWVFX_TIM_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=255))
+ return 0x850008001340ll + 0x10000ll * ((a) & 0xff);
+ __bdk_csr_fatal("RVU_PRIV_HWVFX_TIM_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_HWVFX_TIM_CFG(a) bdk_rvu_priv_hwvfx_tim_cfg_t
+#define bustype_BDK_RVU_PRIV_HWVFX_TIM_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_HWVFX_TIM_CFG(a) "RVU_PRIV_HWVFX_TIM_CFG"
+#define device_bar_BDK_RVU_PRIV_HWVFX_TIM_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_HWVFX_TIM_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_HWVFX_TIM_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_cfg
+ *
+ * RVU Privileged PF Configuration Registers
+ */
+union bdk_rvu_priv_pfx_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_23_63 : 41;
+ uint64_t me_flr_ena : 1; /**< [ 22: 22](R/W) Bus master enable (ME) and function level reset (FLR) enable. This bit
+ should be set when the PF is configured and associated PF and/or AF drivers
+ that manage VF and/or PF ME/FLR are loaded.
+
+ When clear, PCCPF/PCCVF_XXX_CMD[ME] state changes are ignored, and
+ PCCPF/PCCVF_XXX_E_DEV_CTL[BCR_FLR] reset the PF/VF configuration space and
+ MSI-X tables only.
+
+ When set, hardware updates to the following registers in response to ME/FLR
+ events are additionally enabled:
+ RVU_PF_VFTRPEND(), RVU_PF_VFFLR_INT(), RVU_PF_VFME_INT(),
+ RVU_AF_PFTRPEND, RVU_AF_PFFLR_INT, and RVU_AF_PFME_INT. */
+ uint64_t af_ena : 1; /**< [ 21: 21](R/W) Admin function enable. When set, the PF is allowed to access AF
+ (RVU PF BAR0) registers in all RVU blocks. When clear, the PF is not
+ allowed to access AF registers. Must be clear when [ENA] is clear.
+
+ Software should keep this bit set for PF(0) when RVU is used. */
+ uint64_t ena : 1; /**< [ 20: 20](R/W) Enable the PF. When clear, the PF is unused and hidden in the PCI config
+ space, and access to the PF's MSI-X tables in RVU PF/FUNC BAR2 is
+ disabled.
+ When set, the PF is enabled and remaining fields in this register are
+ valid.
+
+ Software should keep this bit set for PF(0) when RVU is used. Hardware
+ delivers all AF interrupts to PF(0). */
+ uint64_t nvf : 8; /**< [ 19: 12](R/W) Number of VFs in the PF. Must be less than or equal to
+ RVU_PRIV_CONST[MAX_VFS_PER_PF]. */
+ uint64_t first_hwvf : 12; /**< [ 11: 0](R/W) HWVF index of the PF's first VF. Valid when [NVF] is non-zero. The HWVF
+ index range for the PF is [FIRST_HWVF] to [FIRST_HWVF]+[NVF]-1, inclusive.
+ Different PFs must have non-overlapping HWVF ranges, and the maximum HWVF
+ index in any range must be less than RVU_PRIV_CONST[HWVFS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t first_hwvf : 12; /**< [ 11: 0](R/W) HWVF index of the PF's first VF. Valid when [NVF] is non-zero. The HWVF
+ index range for the PF is [FIRST_HWVF] to [FIRST_HWVF]+[NVF]-1, inclusive.
+ Different PFs must have non-overlapping HWVF ranges, and the maximum HWVF
+ index in any range must be less than RVU_PRIV_CONST[HWVFS]. */
+ uint64_t nvf : 8; /**< [ 19: 12](R/W) Number of VFs in the PF. Must be less than or equal to
+ RVU_PRIV_CONST[MAX_VFS_PER_PF]. */
+ uint64_t ena : 1; /**< [ 20: 20](R/W) Enable the PF. When clear, the PF is unused and hidden in the PCI config
+ space, and access to the PF's MSI-X tables in RVU PF/FUNC BAR2 is
+ disabled.
+ When set, the PF is enabled and remaining fields in this register are
+ valid.
+
+ Software should keep this bit set for PF(0) when RVU is used. Hardware
+ delivers all AF interrupts to PF(0). */
+ uint64_t af_ena : 1; /**< [ 21: 21](R/W) Admin function enable. When set, the PF is allowed to access AF
+ (RVU PF BAR0) registers in all RVU blocks. When clear, the PF is not
+ allowed to access AF registers. Must be clear when [ENA] is clear.
+
+ Software should keep this bit set for PF(0) when RVU is used. */
+ uint64_t me_flr_ena : 1; /**< [ 22: 22](R/W) Bus master enable (ME) and function level reset (FLR) enable. This bit
+ should be set when the PF is configured and associated PF and/or AF drivers
+ that manage VF and/or PF ME/FLR are loaded.
+
+ When clear, PCCPF/PCCVF_XXX_CMD[ME] state changes are ignored, and
+ PCCPF/PCCVF_XXX_E_DEV_CTL[BCR_FLR] reset the PF/VF configuration space and
+ MSI-X tables only.
+
+ When set, hardware updates to the following registers in response to ME/FLR
+ events are additionally enabled:
+ RVU_PF_VFTRPEND(), RVU_PF_VFFLR_INT(), RVU_PF_VFME_INT(),
+ RVU_AF_PFTRPEND, RVU_AF_PFFLR_INT, and RVU_AF_PFME_INT. */
+ uint64_t reserved_23_63 : 41;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_cfg bdk_rvu_priv_pfx_cfg_t;
+
+/* Returns the CSR address for PF (a); only CN9XXX with a<=15 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_PFX_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000100ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_PFX_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_CFG(a) bdk_rvu_priv_pfx_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_CFG(a) "RVU_PRIV_PFX_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_PFX_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_cpt#_cfg
+ *
+ * RVU Privileged PF CPT Configuration Registers
+ * Similar to RVU_PRIV_PF()_NIX()_CFG, but for CPT({a}) block.
+ */
+union bdk_rvu_priv_pfx_cptx_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_cptx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_cptx_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_cptx_cfg bdk_rvu_priv_pfx_cptx_cfg_t;
+
+/* Returns the CSR address for PF (a), CPT (b); only CN9XXX with a<=15, b==0 is
+   valid. Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_PFX_CPTX_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_CPTX_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=15) && (b==0)))
+ return 0x850008000350ll + 0x10000ll * ((a) & 0xf) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("RVU_PRIV_PFX_CPTX_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_CPTX_CFG(a,b) bdk_rvu_priv_pfx_cptx_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_CPTX_CFG(a,b) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_CPTX_CFG(a,b) "RVU_PRIV_PFX_CPTX_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_CPTX_CFG(a,b) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_CPTX_CFG(a,b) (a)
+#define arguments_BDK_RVU_PRIV_PFX_CPTX_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_id_cfg
+ *
+ * RVU Privileged PF ID Configuration Registers
+ */
+union bdk_rvu_priv_pfx_id_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_id_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t class_code : 24; /**< [ 39: 16](R/W) Class code to be presented in PCCPF_XXX_REV[BCC,SC,PI] and
+ PCCVF_XXX_REV[BCC,SC,PI]. Format specified by PCC_CLASS_CODE_S.
+ Resets to PCC_DEV_IDL_E::RVU's class code. */
+ uint64_t vf_devid : 8; /**< [ 15: 8](R/W) Lower bits of VF device ID to be presented in PCCPF_XXX_SRIOV_DEV[VFDEV]\<7:0\>.
+ Resets to PCC_DEV_IDL_E::RVU_VF. */
+ uint64_t pf_devid : 8; /**< [ 7: 0](R/W) Lower bits of PF device ID to be presented in PCCPF_XXX_ID[DEVID]\<7:0\>.
+ Resets to PCC_DEV_IDL_E::RVU_AF for PF(0), PCC_DEV_IDL_E::RVU for other
+ PFs. */
+#else /* Word 0 - Little Endian */
+ uint64_t pf_devid : 8; /**< [ 7: 0](R/W) Lower bits of PF device ID to be presented in PCCPF_XXX_ID[DEVID]\<7:0\>.
+ Resets to PCC_DEV_IDL_E::RVU_AF for PF(0), PCC_DEV_IDL_E::RVU for other
+ PFs. */
+ uint64_t vf_devid : 8; /**< [ 15: 8](R/W) Lower bits of VF device ID to be presented in PCCPF_XXX_SRIOV_DEV[VFDEV]\<7:0\>.
+ Resets to PCC_DEV_IDL_E::RVU_VF. */
+ uint64_t class_code : 24; /**< [ 39: 16](R/W) Class code to be presented in PCCPF_XXX_REV[BCC,SC,PI] and
+ PCCVF_XXX_REV[BCC,SC,PI]. Format specified by PCC_CLASS_CODE_S.
+ Resets to PCC_DEV_IDL_E::RVU's class code. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_id_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_id_cfg bdk_rvu_priv_pfx_id_cfg_t;
+
+/* Returns the CSR address for PF (a); only CN9XXX with a<=15 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_PFX_ID_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_ID_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000120ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_PFX_ID_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_ID_CFG(a) bdk_rvu_priv_pfx_id_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_ID_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_ID_CFG(a) "RVU_PRIV_PFX_ID_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_ID_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_ID_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_PFX_ID_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_int_cfg
+ *
+ * RVU Privileged PF Interrupt Configuration Registers
+ */
+union bdk_rvu_priv_pfx_int_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_int_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t msix_size : 8; /**< [ 19: 12](RO) Number of interrupt vectors enumerated by RVU_PF_INT_VEC_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t msix_offset : 11; /**< [ 10: 0](R/W) MSI-X offset. Offset of PF interrupt vectors enumerated by RVU_PF_INT_VEC_E
+ in the PF's MSI-X table. This is added to each enumerated value to obtain
+ the corresponding MSI-X vector index.
+ The highest enumerated value plus [MSIX_OFFSET] must be less than or equal
+ to RVU_PRIV_PF()_MSIX_CFG[PF_MSIXT_SIZEM1].
+
+ Note that the AF interrupt vectors enumerated by RVU_AF_INT_VEC_E have a
+ fixed starting offset of 0 in RVU PF(0)'s MSI-X table. Other PF
+ interrupt vectors should not be mapped at the offsets used by RVU_AF_INT_VEC_E. */
+#else /* Word 0 - Little Endian */
+ uint64_t msix_offset : 11; /**< [ 10: 0](R/W) MSI-X offset. Offset of PF interrupt vectors enumerated by RVU_PF_INT_VEC_E
+ in the PF's MSI-X table. This is added to each enumerated value to obtain
+ the corresponding MSI-X vector index.
+ The highest enumerated value plus [MSIX_OFFSET] must be less than or equal
+ to RVU_PRIV_PF()_MSIX_CFG[PF_MSIXT_SIZEM1].
+
+ Note that the AF interrupt vectors enumerated by RVU_AF_INT_VEC_E have a
+ fixed starting offset of 0 in RVU PF(0)'s MSI-X table. Other PF
+ interrupt vectors should not be mapped at the offsets used by RVU_AF_INT_VEC_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t msix_size : 8; /**< [ 19: 12](RO) Number of interrupt vectors enumerated by RVU_PF_INT_VEC_E. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_int_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_int_cfg bdk_rvu_priv_pfx_int_cfg_t;
+
+/* Returns the CSR address for PF (a); only CN9XXX with a<=15 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_PFX_INT_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_INT_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000200ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_PFX_INT_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_INT_CFG(a) bdk_rvu_priv_pfx_int_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_INT_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_INT_CFG(a) "RVU_PRIV_PFX_INT_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_INT_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_INT_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_PFX_INT_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_msix_cfg
+ *
+ * RVU Privileged PF MSI-X Configuration Registers
+ * These registers specify MSI-X table sizes and locations for RVU PFs and
+ * associated VFs. Hardware maintains all RVU MSI-X tables in a contiguous memory
+ * region in LLC/DRAM called the MSI-X table region. The table region's base IOVA
+ * is specified by RVU_AF_MSIXTR_BASE, and its size as a multiple of
+ * 16-byte RVU_MSIX_VEC_S structures must be less than or equal to
+ * RVU_PRIV_CONST[MAX_MSIX].
+ *
+ * A PF's MSI-X table consists of the following range of RVU_MSIX_VEC_S structures
+ * in the table region:
+ * * First index: [PF_MSIXT_OFFSET].
+ * * Last index: [PF_MSIXT_OFFSET] + [PF_MSIXT_SIZEM1].
+ *
+ * If a PF has enabled VFs (associated RVU_PRIV_PF()_CFG[NVF] is nonzero),
+ * then each VF's MSI-X table consumes the following range of RVU_MSIX_VEC_S structures:
+ * * First index: [VF_MSIXT_OFFSET] + N*([VF_MSIXT_SIZEM1] + 1).
+ * * Last index: [VF_MSIXT_OFFSET] + N*([VF_MSIXT_SIZEM1] + 1) + [VF_MSIXT_SIZEM1].
+ *
+ * N=0 for the first VF, N=1 for the second VF, etc.
+ *
+ * Different PFs and VFs must have non-overlapping vector ranges, and the last
+ * index of any range must be less than RVU_PRIV_CONST[MAX_MSIX].
+ */
+union bdk_rvu_priv_pfx_msix_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_msix_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pf_msixt_offset : 20; /**< [ 63: 44](R/W) Starting offset of PF's MSI-X table in the RVU MSI-X table region.
+ Internal:
+ Also, bit offset of the PF's PBA table in RVU's internal PBA memory. */
+ uint64_t pf_msixt_sizem1 : 12; /**< [ 43: 32](R/W) PF's MSI-X table size (number of MSI-X vectors) minus one. */
+ uint64_t vf_msixt_offset : 20; /**< [ 31: 12](R/W) Starting offset of first VF's MSI-X table in the RVU MSI-X table region.
+ Valid when RVU_PRIV_PF()_CFG[NVF] is nonzero.
+
+ Internal:
+ Also, bit offset of the first VF's PBA table in RVU's internal PBA memory. */
+ uint64_t vf_msixt_sizem1 : 12; /**< [ 11: 0](R/W) Each VF's MSI-X table size (number of MSI-X vectors) minus one.
+ Valid when RVU_PRIV_PF()_CFG[NVF] is nonzero. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_msixt_sizem1 : 12; /**< [ 11: 0](R/W) Each VF's MSI-X table size (number of MSI-X vectors) minus one.
+ Valid when RVU_PRIV_PF()_CFG[NVF] is nonzero. */
+ uint64_t vf_msixt_offset : 20; /**< [ 31: 12](R/W) Starting offset of first VF's MSI-X table in the RVU MSI-X table region.
+ Valid when RVU_PRIV_PF()_CFG[NVF] is nonzero.
+
+ Internal:
+ Also, bit offset of the first VF's PBA table in RVU's internal PBA memory. */
+ uint64_t pf_msixt_sizem1 : 12; /**< [ 43: 32](R/W) PF's MSI-X table size (number of MSI-X vectors) minus one. */
+ uint64_t pf_msixt_offset : 20; /**< [ 63: 44](R/W) Starting offset of PF's MSI-X table in the RVU MSI-X table region.
+ Internal:
+ Also, bit offset of the PF's PBA table in RVU's internal PBA memory. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_msix_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_msix_cfg bdk_rvu_priv_pfx_msix_cfg_t;
+
+/* Returns the CSR address for PF (a); only CN9XXX with a<=15 is valid.
+   Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_RVU_PRIV_PFX_MSIX_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_MSIX_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000110ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_PFX_MSIX_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_MSIX_CFG(a) bdk_rvu_priv_pfx_msix_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_MSIX_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_MSIX_CFG(a) "RVU_PRIV_PFX_MSIX_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_MSIX_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_MSIX_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_PFX_MSIX_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_nix#_cfg
+ *
+ * RVU Privileged PF NIX Configuration Registers
+ * These registers are used to assist PF software discovery. For each enabled RVU
+ * PF, software writes the block's resource configuration for the PF in this
+ * register. The PF driver can read RVU_PF_BLOCK_ADDR()_DISC to discover the
+ * configuration.
+ */
+union bdk_rvu_priv_pfx_nixx_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_nixx_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t has_lf : 1; /**< [ 0: 0](R/W) Set when an LF from the block is provisioned to the VF, clear otherwise. */
+#else /* Word 0 - Little Endian */
+ uint64_t has_lf : 1; /**< [ 0: 0](R/W) Set when an LF from the block is provisioned to the VF, clear otherwise. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_nixx_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_nixx_cfg bdk_rvu_priv_pfx_nixx_cfg_t;
+
+static inline uint64_t BDK_RVU_PRIV_PFX_NIXX_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_NIXX_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=15) && (b==0)))
+ return 0x850008000300ll + 0x10000ll * ((a) & 0xf) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("RVU_PRIV_PFX_NIXX_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_NIXX_CFG(a,b) bdk_rvu_priv_pfx_nixx_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_NIXX_CFG(a,b) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_NIXX_CFG(a,b) "RVU_PRIV_PFX_NIXX_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_NIXX_CFG(a,b) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_NIXX_CFG(a,b) (a)
+#define arguments_BDK_RVU_PRIV_PFX_NIXX_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_npa_cfg
+ *
+ * RVU Privileged PF NPA Configuration Registers
+ * Similar to RVU_PRIV_PF()_NIX()_CFG, but for NPA block.
+ */
+union bdk_rvu_priv_pfx_npa_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_npa_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t has_lf : 1; /**< [ 0: 0](R/W) Set when an LF from the block is provisioned to the VF, clear otherwise. */
+#else /* Word 0 - Little Endian */
+ uint64_t has_lf : 1; /**< [ 0: 0](R/W) Set when an LF from the block is provisioned to the VF, clear otherwise. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_npa_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_npa_cfg bdk_rvu_priv_pfx_npa_cfg_t;
+
+static inline uint64_t BDK_RVU_PRIV_PFX_NPA_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_NPA_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000310ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_PFX_NPA_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_NPA_CFG(a) bdk_rvu_priv_pfx_npa_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_NPA_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_NPA_CFG(a) "RVU_PRIV_PFX_NPA_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_NPA_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_NPA_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_PFX_NPA_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_sso_cfg
+ *
+ * RVU Privileged PF SSO Configuration Registers
+ * Similar to RVU_PRIV_PF()_NIX()_CFG, but for SSO block.
+ */
+union bdk_rvu_priv_pfx_sso_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_sso_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_sso_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_sso_cfg bdk_rvu_priv_pfx_sso_cfg_t;
+
+static inline uint64_t BDK_RVU_PRIV_PFX_SSO_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_SSO_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000320ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_PFX_SSO_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_SSO_CFG(a) bdk_rvu_priv_pfx_sso_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_SSO_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_SSO_CFG(a) "RVU_PRIV_PFX_SSO_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_SSO_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_SSO_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_PFX_SSO_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_ssow_cfg
+ *
+ * RVU Privileged PF SSO Work Slot Configuration Registers
+ * Similar to RVU_PRIV_PF()_NIX()_CFG, but for SSOW block.
+ */
+union bdk_rvu_priv_pfx_ssow_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_ssow_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_ssow_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_ssow_cfg bdk_rvu_priv_pfx_ssow_cfg_t;
+
+static inline uint64_t BDK_RVU_PRIV_PFX_SSOW_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_SSOW_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000330ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_PFX_SSOW_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_SSOW_CFG(a) bdk_rvu_priv_pfx_ssow_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_SSOW_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_SSOW_CFG(a) "RVU_PRIV_PFX_SSOW_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_SSOW_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_SSOW_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_PFX_SSOW_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_PF_BAR0) rvu_priv_pf#_tim_cfg
+ *
+ * RVU Privileged PF TIM Configuration Registers
+ * Similar to RVU_PRIV_PF()_NIX()_CFG, but for TIM block.
+ */
+union bdk_rvu_priv_pfx_tim_cfg
+{
+ uint64_t u;
+ struct bdk_rvu_priv_pfx_tim_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_9_63 : 55;
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](R/W) Number of LFs from the block that are provisioned to the PF/VF. When non-zero,
+ the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in the block. */
+ uint64_t reserved_9_63 : 55;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_priv_pfx_tim_cfg_s cn; */
+};
+typedef union bdk_rvu_priv_pfx_tim_cfg bdk_rvu_priv_pfx_tim_cfg_t;
+
+static inline uint64_t BDK_RVU_PRIV_PFX_TIM_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_PRIV_PFX_TIM_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=15))
+ return 0x850008000340ll + 0x10000ll * ((a) & 0xf);
+ __bdk_csr_fatal("RVU_PRIV_PFX_TIM_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_PRIV_PFX_TIM_CFG(a) bdk_rvu_priv_pfx_tim_cfg_t
+#define bustype_BDK_RVU_PRIV_PFX_TIM_CFG(a) BDK_CSR_TYPE_RVU_PF_BAR0
+#define basename_BDK_RVU_PRIV_PFX_TIM_CFG(a) "RVU_PRIV_PFX_TIM_CFG"
+#define device_bar_BDK_RVU_PRIV_PFX_TIM_CFG(a) 0x0 /* BAR0 */
+#define busnum_BDK_RVU_PRIV_PFX_TIM_CFG(a) (a)
+#define arguments_BDK_RVU_PRIV_PFX_TIM_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_block_addr#_disc
+ *
+ * RVU VF Block Address Discovery Registers
+ * These registers allow each VF driver to discover block resources that are
+ * provisioned to its VF. The register's block address index is enumerated by
+ * RVU_BLOCK_ADDR_E.
+ */
+union bdk_rvu_vf_block_addrx_disc
+{
+ uint64_t u;
+ struct bdk_rvu_vf_block_addrx_disc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_28_63 : 36;
+ uint64_t btype : 8; /**< [ 27: 20](RO/H) Block type enumerated by RVU_BLOCK_TYPE_E. */
+ uint64_t rid : 8; /**< [ 19: 12](RO/H) Revision ID of the block from RVU_PRIV_BLOCK_TYPE()_REV[RID]. */
+ uint64_t imp : 1; /**< [ 11: 11](RO/H) Implemented. When set, a block is present at this block address index as
+ enumerated by RVU_BLOCK_ADDR_E. When clear, a block is not present and the
+ remaining fields in the register are RAZ.
+
+ Internal:
+ Returns zero if the block is implemented but fused out. */
+ uint64_t reserved_9_10 : 2;
+ uint64_t num_lfs : 9; /**< [ 8: 0](RO/H) Number of local functions from the block that are provisioned to the VF/PF.
+ When non-zero, the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in
+ the block.
+ Returns 0 for block types that do not have local functions, 0 or 1 for
+ single-slot blocks; see RVU_BLOCK_TYPE_E. */
+#else /* Word 0 - Little Endian */
+ uint64_t num_lfs : 9; /**< [ 8: 0](RO/H) Number of local functions from the block that are provisioned to the VF/PF.
+ When non-zero, the provisioned LFs are mapped to slots 0 to [NUM_LFS]-1 in
+ the block.
+ Returns 0 for block types that do not have local functions, 0 or 1 for
+ single-slot blocks; see RVU_BLOCK_TYPE_E. */
+ uint64_t reserved_9_10 : 2;
+ uint64_t imp : 1; /**< [ 11: 11](RO/H) Implemented. When set, a block is present at this block address index as
+ enumerated by RVU_BLOCK_ADDR_E. When clear, a block is not present and the
+ remaining fields in the register are RAZ.
+
+ Internal:
+ Returns zero if the block is implemented but fused out. */
+ uint64_t rid : 8; /**< [ 19: 12](RO/H) Revision ID of the block from RVU_PRIV_BLOCK_TYPE()_REV[RID]. */
+ uint64_t btype : 8; /**< [ 27: 20](RO/H) Block type enumerated by RVU_BLOCK_TYPE_E. */
+ uint64_t reserved_28_63 : 36;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_block_addrx_disc_s cn; */
+};
+typedef union bdk_rvu_vf_block_addrx_disc bdk_rvu_vf_block_addrx_disc_t;
+
+static inline uint64_t BDK_RVU_VF_BLOCK_ADDRX_DISC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_BLOCK_ADDRX_DISC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=31))
+ return 0x850200000200ll + 8ll * ((a) & 0x1f);
+ __bdk_csr_fatal("RVU_VF_BLOCK_ADDRX_DISC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_BLOCK_ADDRX_DISC(a) bdk_rvu_vf_block_addrx_disc_t
+#define bustype_BDK_RVU_VF_BLOCK_ADDRX_DISC(a) BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_BLOCK_ADDRX_DISC(a) "RVU_VF_BLOCK_ADDRX_DISC"
+#define device_bar_BDK_RVU_VF_BLOCK_ADDRX_DISC(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_BLOCK_ADDRX_DISC(a) (a)
+#define arguments_BDK_RVU_VF_BLOCK_ADDRX_DISC(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_int
+ *
+ * RVU VF Interrupt Registers
+ */
+union bdk_rvu_vf_int
+{
+ uint64_t u;
+ struct bdk_rvu_vf_int_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1C/H) PF to VF mailbox interrupt. Set when RVU_PF_VF()_PFVF_MBOX(0) is written. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1C/H) PF to VF mailbox interrupt. Set when RVU_PF_VF()_PFVF_MBOX(0) is written. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_int_s cn; */
+};
+typedef union bdk_rvu_vf_int bdk_rvu_vf_int_t;
+
+#define BDK_RVU_VF_INT BDK_RVU_VF_INT_FUNC()
+static inline uint64_t BDK_RVU_VF_INT_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_INT_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000020ll;
+ __bdk_csr_fatal("RVU_VF_INT", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_INT bdk_rvu_vf_int_t
+#define bustype_BDK_RVU_VF_INT BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_INT "RVU_VF_INT"
+#define device_bar_BDK_RVU_VF_INT 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_INT 0
+#define arguments_BDK_RVU_VF_INT -1,-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_int_ena_w1c
+ *
+ * RVU VF Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_rvu_vf_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_rvu_vf_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for RVU_VF_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for RVU_VF_INT[MBOX]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_int_ena_w1c_s cn; */
+};
+typedef union bdk_rvu_vf_int_ena_w1c bdk_rvu_vf_int_ena_w1c_t;
+
+#define BDK_RVU_VF_INT_ENA_W1C BDK_RVU_VF_INT_ENA_W1C_FUNC()
+static inline uint64_t BDK_RVU_VF_INT_ENA_W1C_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_INT_ENA_W1C_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000038ll;
+ __bdk_csr_fatal("RVU_VF_INT_ENA_W1C", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_INT_ENA_W1C bdk_rvu_vf_int_ena_w1c_t
+#define bustype_BDK_RVU_VF_INT_ENA_W1C BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_INT_ENA_W1C "RVU_VF_INT_ENA_W1C"
+#define device_bar_BDK_RVU_VF_INT_ENA_W1C 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_INT_ENA_W1C 0
+#define arguments_BDK_RVU_VF_INT_ENA_W1C -1,-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_int_ena_w1s
+ *
+ * RVU VF Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_rvu_vf_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_vf_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for RVU_VF_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for RVU_VF_INT[MBOX]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_int_ena_w1s_s cn; */
+};
+typedef union bdk_rvu_vf_int_ena_w1s bdk_rvu_vf_int_ena_w1s_t;
+
+#define BDK_RVU_VF_INT_ENA_W1S BDK_RVU_VF_INT_ENA_W1S_FUNC()
+static inline uint64_t BDK_RVU_VF_INT_ENA_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_INT_ENA_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000030ll;
+ __bdk_csr_fatal("RVU_VF_INT_ENA_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_INT_ENA_W1S bdk_rvu_vf_int_ena_w1s_t
+#define bustype_BDK_RVU_VF_INT_ENA_W1S BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_INT_ENA_W1S "RVU_VF_INT_ENA_W1S"
+#define device_bar_BDK_RVU_VF_INT_ENA_W1S 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_INT_ENA_W1S 0
+#define arguments_BDK_RVU_VF_INT_ENA_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_int_w1s
+ *
+ * RVU VF Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_rvu_vf_int_w1s
+{
+ uint64_t u;
+ struct bdk_rvu_vf_int_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1S/H) Reads or sets RVU_VF_INT[MBOX]. */
+#else /* Word 0 - Little Endian */
+ uint64_t mbox : 1; /**< [ 0: 0](R/W1S/H) Reads or sets RVU_VF_INT[MBOX]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_int_w1s_s cn; */
+};
+typedef union bdk_rvu_vf_int_w1s bdk_rvu_vf_int_w1s_t;
+
+#define BDK_RVU_VF_INT_W1S BDK_RVU_VF_INT_W1S_FUNC()
+static inline uint64_t BDK_RVU_VF_INT_W1S_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_INT_W1S_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x850200000028ll;
+ __bdk_csr_fatal("RVU_VF_INT_W1S", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_INT_W1S bdk_rvu_vf_int_w1s_t
+#define bustype_BDK_RVU_VF_INT_W1S BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_INT_W1S "RVU_VF_INT_W1S"
+#define device_bar_BDK_RVU_VF_INT_W1S 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_INT_W1S 0
+#define arguments_BDK_RVU_VF_INT_W1S -1,-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_msix_pba#
+ *
+ * RVU VF MSI-X Pending-Bit-Array Registers
+ * This register is the MSI-X VF PBA table.
+ */
+union bdk_rvu_vf_msix_pbax
+{
+ uint64_t u;
+ struct bdk_rvu_vf_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message bit for each MSI-X vector, i.e. one bit per
+ RVU_VF_MSIX_VEC()_CTL register.
+ The total number of bits for a given VF (and thus the number of PBA
+ registers) is determined by RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_SIZEM1]
+ (plus 1). */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message bit for each MSI-X vector, i.e. one bit per
+ RVU_VF_MSIX_VEC()_CTL register.
+ The total number of bits for a given VF (and thus the number of PBA
+ registers) is determined by RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_SIZEM1]
+ (plus 1). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_msix_pbax_s cn; */
+};
+typedef union bdk_rvu_vf_msix_pbax bdk_rvu_vf_msix_pbax_t;
+
+static inline uint64_t BDK_RVU_VF_MSIX_PBAX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_MSIX_PBAX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x8502002f0000ll + 8ll * ((a) & 0x0);
+ __bdk_csr_fatal("RVU_VF_MSIX_PBAX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_MSIX_PBAX(a) bdk_rvu_vf_msix_pbax_t
+#define bustype_BDK_RVU_VF_MSIX_PBAX(a) BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_MSIX_PBAX(a) "RVU_VF_MSIX_PBAX"
+#define device_bar_BDK_RVU_VF_MSIX_PBAX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_MSIX_PBAX(a) (a)
+#define arguments_BDK_RVU_VF_MSIX_PBAX(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_msix_vec#_addr
+ *
+ * RVU VF MSI-X Vector-Table Address Registers
+ * These registers and RVU_VF_MSIX_VEC()_CTL form the VF MSI-X vector table.
+ * The number of MSI-X vectors for a given VF is specified by
+ * RVU_PRIV_PF()_MSIX_CFG[VF_MSIXT_SIZEM1] (plus 1).
+ *
+ * Internal:
+ * VF vector count of 128 allows up to that number to be provisioned to the VF
+ * from LF resources of various blocks.
+ */
+union bdk_rvu_vf_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_rvu_vf_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](RAZ) Secure vector. Zero as not supported for RVU vectors. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](RAZ) Secure vector. Zero as not supported for RVU vectors. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_msix_vecx_addr_s cn; */
+};
+typedef union bdk_rvu_vf_msix_vecx_addr bdk_rvu_vf_msix_vecx_addr_t;
+
+static inline uint64_t BDK_RVU_VF_MSIX_VECX_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_MSIX_VECX_ADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x850200200000ll + 0x10ll * ((a) & 0x0);
+ __bdk_csr_fatal("RVU_VF_MSIX_VECX_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_MSIX_VECX_ADDR(a) bdk_rvu_vf_msix_vecx_addr_t
+#define bustype_BDK_RVU_VF_MSIX_VECX_ADDR(a) BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_MSIX_VECX_ADDR(a) "RVU_VF_MSIX_VECX_ADDR"
+#define device_bar_BDK_RVU_VF_MSIX_VECX_ADDR(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_MSIX_VECX_ADDR(a) (a)
+#define arguments_BDK_RVU_VF_MSIX_VECX_ADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_msix_vec#_ctl
+ *
+ * RVU VF MSI-X Vector-Table Control and Data Registers
+ * These registers and RVU_VF_MSIX_VEC()_ADDR form the VF MSI-X vector table.
+ */
+union bdk_rvu_vf_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_rvu_vf_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_msix_vecx_ctl_s cn; */
+};
+typedef union bdk_rvu_vf_msix_vecx_ctl bdk_rvu_vf_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_RVU_VF_MSIX_VECX_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_MSIX_VECX_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a==0))
+ return 0x850200200008ll + 0x10ll * ((a) & 0x0);
+ __bdk_csr_fatal("RVU_VF_MSIX_VECX_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_MSIX_VECX_CTL(a) bdk_rvu_vf_msix_vecx_ctl_t
+#define bustype_BDK_RVU_VF_MSIX_VECX_CTL(a) BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_MSIX_VECX_CTL(a) "RVU_VF_MSIX_VECX_CTL"
+#define device_bar_BDK_RVU_VF_MSIX_VECX_CTL(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_MSIX_VECX_CTL(a) (a)
+#define arguments_BDK_RVU_VF_MSIX_VECX_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (RVU_VF_BAR2) rvu_vf_vfpf_mbox#
+ *
+ * RVU VF/PF Mailbox Registers
+ */
+union bdk_rvu_vf_vfpf_mboxx
+{
+ uint64_t u;
+ struct bdk_rvu_vf_vfpf_mboxx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Mailbox data. These VF registers access the 16-byte-per-VF VF/PF mailbox
+ RAM. The PF may access the same storage using RVU_PF_VF()_PFVF_MBOX().
+ MBOX(0) is typically used for PF to VF signaling, MBOX(1) for VF to PF.
+ Writing RVU_VF_VFPF_MBOX(1) (but not RVU_PF_VF()_PFVF_MBOX(1))
+ will set the corresponding RVU_PF_VFPF_MBOX_INT() bit, which if appropriately
+ enabled will send an interrupt to the PF. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W/H) Mailbox data. These VF registers access the 16-byte-per-VF VF/PF mailbox
+ RAM. The PF may access the same storage using RVU_PF_VF()_PFVF_MBOX().
+ MBOX(0) is typically used for PF to VF signaling, MBOX(1) for VF to PF.
+ Writing RVU_VF_VFPF_MBOX(1) (but not RVU_PF_VF()_PFVF_MBOX(1))
+ will set the corresponding RVU_PF_VFPF_MBOX_INT() bit, which if appropriately
+ enabled will send an interrupt to the PF. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_rvu_vf_vfpf_mboxx_s cn; */
+};
+typedef union bdk_rvu_vf_vfpf_mboxx bdk_rvu_vf_vfpf_mboxx_t;
+
+static inline uint64_t BDK_RVU_VF_VFPF_MBOXX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_RVU_VF_VFPF_MBOXX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x850200000000ll + 8ll * ((a) & 0x1);
+ __bdk_csr_fatal("RVU_VF_VFPF_MBOXX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_RVU_VF_VFPF_MBOXX(a) bdk_rvu_vf_vfpf_mboxx_t
+#define bustype_BDK_RVU_VF_VFPF_MBOXX(a) BDK_CSR_TYPE_RVU_VF_BAR2
+#define basename_BDK_RVU_VF_VFPF_MBOXX(a) "RVU_VF_VFPF_MBOXX"
+#define device_bar_BDK_RVU_VF_VFPF_MBOXX(a) 0x2 /* BAR2 */
+#define busnum_BDK_RVU_VF_VFPF_MBOXX(a) (a)
+#define arguments_BDK_RVU_VF_VFPF_MBOXX(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_RVU_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sata.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sata.h
new file mode 100644
index 0000000000..262cf0c5b3
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sata.h
@@ -0,0 +1,4896 @@
+#ifndef __BDK_CSRS_SATA_H__
+#define __BDK_CSRS_SATA_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium SATA.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration sata_bar_e
+ *
+ * SATA Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_SATA_BAR_E_SATAX_PF_BAR0(a) (0x810000000000ll + 0x1000000000ll * (a))
+#define BDK_SATA_BAR_E_SATAX_PF_BAR0_SIZE 0x200000ull
+#define BDK_SATA_BAR_E_SATAX_PF_BAR2(a) (0x810000200000ll + 0x1000000000ll * (a))
+#define BDK_SATA_BAR_E_SATAX_PF_BAR2_SIZE 0x100000ull
+#define BDK_SATA_BAR_E_SATAX_PF_BAR4_CN8(a) (0x810000200000ll + 0x1000000000ll * (a))
+#define BDK_SATA_BAR_E_SATAX_PF_BAR4_CN8_SIZE 0x100000ull
+#define BDK_SATA_BAR_E_SATAX_PF_BAR4_CN9(a) (0x810000000000ll + 0x1000000000ll * (a))
+#define BDK_SATA_BAR_E_SATAX_PF_BAR4_CN9_SIZE 0x200000ull
+
+/**
+ * Enumeration sata_int_vec_e
+ *
+ * SATA MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_SATA_INT_VEC_E_UAHC_INTRQ_IP (0)
+#define BDK_SATA_INT_VEC_E_UAHC_INTRQ_IP_CLEAR (1)
+#define BDK_SATA_INT_VEC_E_UAHC_PME_REQ_IP (2)
+#define BDK_SATA_INT_VEC_E_UAHC_PME_REQ_IP_CLEAR (3)
+#define BDK_SATA_INT_VEC_E_UCTL_INTSTAT_CN88XXP1 (4)
+#define BDK_SATA_INT_VEC_E_UCTL_INTSTAT_CN9 (1)
+#define BDK_SATA_INT_VEC_E_UCTL_INTSTAT_CN81XX (1)
+#define BDK_SATA_INT_VEC_E_UCTL_INTSTAT_CN83XX (1)
+#define BDK_SATA_INT_VEC_E_UCTL_INTSTAT_CN88XXP2 (1)
+#define BDK_SATA_INT_VEC_E_UCTL_RAS (4)
+
+/**
+ * Enumeration sata_uctl_dma_read_cmd_e
+ *
+ * SATA UCTL DMA Read Command Enumeration
+ * Enumerates NCB inbound command selections for DMA read operations.
+ */
+#define BDK_SATA_UCTL_DMA_READ_CMD_E_LDI (0)
+#define BDK_SATA_UCTL_DMA_READ_CMD_E_LDT (1)
+#define BDK_SATA_UCTL_DMA_READ_CMD_E_LDY (2)
+
+/**
+ * Enumeration sata_uctl_dma_write_cmd_e
+ *
+ * SATA UCTL DMA Write Command Enumeration
+ * Enumerate NCB inbound command selections for DMA writes.
+ */
+#define BDK_SATA_UCTL_DMA_WRITE_CMD_E_RSTP (1)
+#define BDK_SATA_UCTL_DMA_WRITE_CMD_E_STP (0)
+
+/**
+ * Enumeration sata_uctl_ecc_err_source_e
+ *
+ * SATA UCTL ECC Error Source Enumeration
+ * Enumerate sources of ECC error log information.
+ */
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_FB_DBE (0xf)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_FB_SBE (7)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_NONE (0)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_RX_DBE (0xd)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_RX_SBE (5)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_TX_DBE (0xe)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_TX_SBE (6)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_XM_R_DBE (0xa)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_XM_R_SBE (2)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_XM_W_DBE (9)
+#define BDK_SATA_UCTL_ECC_ERR_SOURCE_E_XM_W_SBE (1)
+
+/**
+ * Enumeration sata_uctl_xm_bad_dma_type_e
+ *
+ * SATA UCTL XM Bad DMA Type Enumeration
+ * Enumerates the type of DMA error seen.
+ */
+#define BDK_SATA_UCTL_XM_BAD_DMA_TYPE_E_ADDR_OOB (1)
+#define BDK_SATA_UCTL_XM_BAD_DMA_TYPE_E_LEN_GT_8 (2)
+#define BDK_SATA_UCTL_XM_BAD_DMA_TYPE_E_MULTIBEAT_BYTE (3)
+#define BDK_SATA_UCTL_XM_BAD_DMA_TYPE_E_MULTIBEAT_HALFWORD (4)
+#define BDK_SATA_UCTL_XM_BAD_DMA_TYPE_E_MULTIBEAT_QWORD (6)
+#define BDK_SATA_UCTL_XM_BAD_DMA_TYPE_E_MULTIBEAT_WORD (5)
+#define BDK_SATA_UCTL_XM_BAD_DMA_TYPE_E_NONE (0)
+
+/**
+ * Register (NCB) sata#_msix_pba#
+ *
+ * SATA MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table, the bit number is indexed by the SATA_INT_VEC_E enumeration.
+ */
+union bdk_satax_msix_pbax
+{
+    uint64_t u;
+    struct bdk_satax_msix_pbax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t pend                  : 64; /**< [ 63:  0](RO/H) Pending message for the associated SATA()_MSIX_VEC()_CTL, enumerated by SATA_INT_VEC_E.
+                                                                 Bits that have no associated SATA_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+        uint64_t pend                  : 64; /**< [ 63:  0](RO/H) Pending message for the associated SATA()_MSIX_VEC()_CTL, enumerated by SATA_INT_VEC_E.
+                                                                 Bits that have no associated SATA_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_msix_pbax_s cn; */
+};
+typedef union bdk_satax_msix_pbax bdk_satax_msix_pbax_t;
+
+/* Returns the NCB CSR address of SATA(a)_MSIX_PBA(b) for the chip model
+   detected at run time; out-of-range indexes fall through to
+   __bdk_csr_fatal().  Every model accepts only b==0, so the "8ll * (b & 0x0)"
+   term is always zero -- kept as emitted by the CSR generator. */
+static inline uint64_t BDK_SATAX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+        return 0x8100002f0000ll + 0x1000000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=5) && (b==0)))
+        return 0x8100002f0000ll + 0x1000000000ll * ((a) & 0x7) + 8ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=15) && (b==0)))
+        return 0x8100002f0000ll + 0x1000000000ll * ((a) & 0xf) + 8ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b==0)))
+        return 0x8100002f0000ll + 0x1000000000ll * ((a) & 0x3) + 8ll * ((b) & 0x0);
+    __bdk_csr_fatal("SATAX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the generic BDK_CSR_* accessor
+   macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_SATAX_MSIX_PBAX(a,b) bdk_satax_msix_pbax_t
+#define bustype_BDK_SATAX_MSIX_PBAX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_MSIX_PBAX(a,b) "SATAX_MSIX_PBAX"
+#define device_bar_BDK_SATAX_MSIX_PBAX(a,b) 0x2 /* PF_BAR2 */
+#define busnum_BDK_SATAX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_SATAX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sata#_msix_vec#_addr
+ *
+ * SATA MSI-X Vector Table Address Registers
+ * This register is the MSI-X vector table, indexed by the SATA_INT_VEC_E enumeration.
+ */
+/* NOTE(review): the per-model variants below differ only in the [ADDR] field
+   width (51 bits on CN9, 47 bits on the CN8xxx parts) and in the SATA index
+   range quoted inside the [SECVEC] description text; the bit layout is
+   otherwise identical. */
+union bdk_satax_msix_vecx_addr
+{
+    uint64_t u;
+    struct bdk_satax_msix_vecx_addr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_53_63        : 11;
+        uint64_t addr                  : 51; /**< [ 52:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1            : 1;
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA(0..15)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA(0..15)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1            : 1;
+        uint64_t addr                  : 51; /**< [ 52:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_53_63        : 11;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_satax_msix_vecx_addr_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_53_63        : 11;
+        uint64_t addr                  : 51; /**< [ 52:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1            : 1;
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1            : 1;
+        uint64_t addr                  : 51; /**< [ 52:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_53_63        : 11;
+#endif /* Word 0 - End */
+    } cn9;
+    struct bdk_satax_msix_vecx_addr_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63        : 15;
+        uint64_t addr                  : 47; /**< [ 48:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1            : 1;
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA(0..1)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA(0..1)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1            : 1;
+        uint64_t addr                  : 47; /**< [ 48:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_49_63        : 15;
+#endif /* Word 0 - End */
+    } cn81xx;
+    struct bdk_satax_msix_vecx_addr_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63        : 15;
+        uint64_t addr                  : 47; /**< [ 48:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1            : 1;
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA(0..15)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA(0..15)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1            : 1;
+        uint64_t addr                  : 47; /**< [ 48:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_49_63        : 15;
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_satax_msix_vecx_addr_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63        : 15;
+        uint64_t addr                  : 47; /**< [ 48:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1            : 1;
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA(0..5)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec                : 1;  /**< [  0:  0](SR/W) Secure vector.
+                                                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                                                 1 = This vector's SATA()_MSIX_VEC()_ADDR, SATA()_MSIX_VEC()_CTL, and
+                                                                 corresponding
+                                                                 bit of SATA()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                                                 by the nonsecure world.
+
+                                                                 If PCCPF_SATA(0..5)_VSEC_SCTL[MSIX_SEC] (for documentation, see
+                                                                 PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                                                 set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1            : 1;
+        uint64_t addr                  : 47; /**< [ 48:  2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_49_63        : 15;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_satax_msix_vecx_addr bdk_satax_msix_vecx_addr_t;
+
+/* Returns the NCB CSR address of SATA(a)_MSIX_VEC(b)_ADDR for the chip model
+   detected at run time; indexes outside the per-model bounds fall through to
+   __bdk_csr_fatal().  CN88XX pass 1 allows one more vector (b<=4) than
+   pass 2 (b<=3). */
+static inline uint64_t BDK_SATAX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x810000200000ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=5) && (b<=3)))
+        return 0x810000200000ll + 0x1000000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && ((a<=15) && (b<=4)))
+        return 0x810000200000ll + 0x1000000000ll * ((a) & 0xf) + 0x10ll * ((b) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=15) && (b<=3)))
+        return 0x810000200000ll + 0x1000000000ll * ((a) & 0xf) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=4)))
+        return 0x810000200000ll + 0x1000000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x7);
+    __bdk_csr_fatal("SATAX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the generic BDK_CSR_* accessor
+   macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_SATAX_MSIX_VECX_ADDR(a,b) bdk_satax_msix_vecx_addr_t
+#define bustype_BDK_SATAX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_MSIX_VECX_ADDR(a,b) "SATAX_MSIX_VECX_ADDR"
+#define device_bar_BDK_SATAX_MSIX_VECX_ADDR(a,b) 0x2 /* PF_BAR2 */
+#define busnum_BDK_SATAX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_SATAX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sata#_msix_vec#_ctl
+ *
+ * SATA MSI-X Vector Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the SATA_INT_VEC_E enumeration.
+ */
+/* NOTE(review): the cn8 variant narrows [DATA] to 20 bits (bits 20..31
+   reserved); the generic/cn9 layout exposes the full 32-bit [DATA] field. */
+union bdk_satax_msix_vecx_ctl
+{
+    uint64_t u;
+    struct bdk_satax_msix_vecx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63        : 31;
+        uint64_t mask                  : 1;  /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+        uint64_t data                  : 32; /**< [ 31:  0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+        uint64_t data                  : 32; /**< [ 31:  0](R/W) Data to use for MSI-X delivery of this vector. */
+        uint64_t mask                  : 1;  /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+        uint64_t reserved_33_63        : 31;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_satax_msix_vecx_ctl_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63        : 31;
+        uint64_t mask                  : 1;  /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+        uint64_t reserved_20_31        : 12;
+        uint64_t data                  : 20; /**< [ 19:  0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+        uint64_t data                  : 20; /**< [ 19:  0](R/W) Data to use for MSI-X delivery of this vector. */
+        uint64_t reserved_20_31        : 12;
+        uint64_t mask                  : 1;  /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+        uint64_t reserved_33_63        : 31;
+#endif /* Word 0 - End */
+    } cn8;
+    /* struct bdk_satax_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_satax_msix_vecx_ctl bdk_satax_msix_vecx_ctl_t;
+
+/* Returns the NCB CSR address of SATA(a)_MSIX_VEC(b)_CTL for the running
+   chip model (offset 8 from the matching _ADDR entry); out-of-range indexes
+   fall through to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+        return 0x810000200008ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=5) && (b<=3)))
+        return 0x810000200008ll + 0x1000000000ll * ((a) & 0x7) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X) && ((a<=15) && (b<=4)))
+        return 0x810000200008ll + 0x1000000000ll * ((a) & 0xf) + 0x10ll * ((b) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && ((a<=15) && (b<=3)))
+        return 0x810000200008ll + 0x1000000000ll * ((a) & 0xf) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=3) && (b<=4)))
+        return 0x810000200008ll + 0x1000000000ll * ((a) & 0x3) + 0x10ll * ((b) & 0x7);
+    __bdk_csr_fatal("SATAX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the generic BDK_CSR_* accessor
+   macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_SATAX_MSIX_VECX_CTL(a,b) bdk_satax_msix_vecx_ctl_t
+#define bustype_BDK_SATAX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_MSIX_VECX_CTL(a,b) "SATAX_MSIX_VECX_CTL"
+#define device_bar_BDK_SATAX_MSIX_VECX_CTL(a,b) 0x2 /* PF_BAR2 */
+#define busnum_BDK_SATAX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_SATAX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_bistafr
+ *
+ * SATA UAHC BIST Activate FIS Register
+ * This register is shared between SATA ports. Before accessing this
+ * register, first select the required port by writing the port number
+ * to the SATA()_UAHC_GBL_TESTR[PSEL] field.
+ *
+ * This register contains the pattern definition (bits 23:16 of the
+ * first DWORD) and the data pattern (bits 7:0 of the second DWORD)
+ * fields of the received BIST activate FIS.
+ *
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+/* Both fields are read-only snapshots of the received BIST activate FIS. */
+union bdk_satax_uahc_gbl_bistafr
+{
+    uint32_t u;
+    struct bdk_satax_uahc_gbl_bistafr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t ncp                   : 8;  /**< [ 15:  8](RO) Bits 7:0 of the second DWORD of BIST activate FIS.
+                                                                 0xF1 = Low transition density pattern (LTDP).
+                                                                 0xB5 = High transition density pattern (HTDP).
+                                                                 0xAB = Low frequency spectral component pattern (LFSCP).
+                                                                 0x7F = Simultaneous switching outputs pattern (SSOP).
+                                                                 0x78 = Mid frequency test pattern (MFTP).
+                                                                 0x4A = High frequency test pattern (HFTP).
+                                                                 0x7E = Low frequency test pattern (LFTP).
+                                                                 else = Lone bit pattern (LBP). */
+        uint32_t pd                    : 8;  /**< [  7:  0](RO) Bits 23:16 of the first DWORD of the BIST activate FIS. Only the following values are
+                                                                 supported:
+                                                                 0x10 = Far-end retimed.
+                                                                 0xC0 = Far-end transmit only.
+                                                                 0xE0 = Far-end transmit only with scrambler bypassed. */
+#else /* Word 0 - Little Endian */
+        uint32_t pd                    : 8;  /**< [  7:  0](RO) Bits 23:16 of the first DWORD of the BIST activate FIS. Only the following values are
+                                                                 supported:
+                                                                 0x10 = Far-end retimed.
+                                                                 0xC0 = Far-end transmit only.
+                                                                 0xE0 = Far-end transmit only with scrambler bypassed. */
+        uint32_t ncp                   : 8;  /**< [ 15:  8](RO) Bits 7:0 of the second DWORD of BIST activate FIS.
+                                                                 0xF1 = Low transition density pattern (LTDP).
+                                                                 0xB5 = High transition density pattern (HTDP).
+                                                                 0xAB = Low frequency spectral component pattern (LFSCP).
+                                                                 0x7F = Simultaneous switching outputs pattern (SSOP).
+                                                                 0x78 = Mid frequency test pattern (MFTP).
+                                                                 0x4A = High frequency test pattern (HFTP).
+                                                                 0x7E = Low frequency test pattern (LFTP).
+                                                                 else = Lone bit pattern (LBP). */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_gbl_bistafr_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_bistafr bdk_satax_uahc_gbl_bistafr_t;
+
+/* Returns the CSR address of SATA(a)_UAHC_GBL_BISTAFR for the running chip
+   model; an out-of-range controller index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTAFR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTAFR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x8100000000a0ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x8100000000a0ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x8100000000a0ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8100000000a0ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_GBL_BISTAFR", 1, a, 0, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the generic BDK_CSR_* accessor
+   macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_SATAX_UAHC_GBL_BISTAFR(a) bdk_satax_uahc_gbl_bistafr_t
+#define bustype_BDK_SATAX_UAHC_GBL_BISTAFR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_BISTAFR(a) "SATAX_UAHC_GBL_BISTAFR"
+#define device_bar_BDK_SATAX_UAHC_GBL_BISTAFR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_BISTAFR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_BISTAFR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_bistcr
+ *
+ * SATA UAHC BIST Control Register
+ * This register is shared between SATA ports. Before accessing this
+ * register, first select the required port by writing the port number
+ * to the SATA()_UAHC_GBL_TESTR[PSEL] field.
+ *
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+/* NOTE(review): the field name 'rsvd_1rsvd_11' (bit 11) looks like a CSR
+   generator artifact ('rsvd_1'/'rsvd_11' concatenated); renaming it would
+   change the public struct layout names, so it is left as emitted.
+   The cn9 variant differs from the generic layout only in the access type
+   of [PATTERN] (R/W instead of RO). */
+union bdk_satax_uahc_gbl_bistcr
+{
+    uint32_t u;
+    struct bdk_satax_uahc_gbl_bistcr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31        : 6;
+        uint32_t old_phy_ready         : 1;  /**< [ 25: 25](R/W) Old phy_ready. Do not change the value of this bit. */
+        uint32_t late_phy_ready        : 1;  /**< [ 24: 24](R/W) Late phy_ready. */
+        uint32_t reserved_21_23        : 3;
+        uint32_t ferlib                : 1;  /**< [ 20: 20](WO) Far-end retimed loopback. */
+        uint32_t reserved_19           : 1;
+        uint32_t txo                   : 1;  /**< [ 18: 18](WO) Transmit only. */
+        uint32_t cntclr                : 1;  /**< [ 17: 17](WO) Counter clear. */
+        uint32_t nealb                 : 1;  /**< [ 16: 16](WO) Near-end analog loopback. */
+        uint32_t llb                   : 1;  /**< [ 15: 15](R/W) Lab loopback mode. */
+        uint32_t reserved_14           : 1;
+        uint32_t errlossen             : 1;  /**< [ 13: 13](R/W) Error loss detect enable. */
+        uint32_t sdfe                  : 1;  /**< [ 12: 12](R/W) Signal detect feature enable. */
+        uint32_t rsvd_1rsvd_11         : 1;  /**< [ 11: 11](R/W) Reserved. */
+        uint32_t llc                   : 3;  /**< [ 10:  8](R/W) Link layer control.
+                                                                 \<10\> = RPD - repeat primitive drop enable.
+                                                                 \<9\> = DESCRAM - descrambler enable.
+                                                                 \<8\> = SCRAM - scrambler enable. */
+        uint32_t reserved_7            : 1;
+        uint32_t erren                 : 1;  /**< [  6:  6](R/W) Error enable. */
+        uint32_t flip                  : 1;  /**< [  5:  5](R/W) Flip disparity. */
+        uint32_t pv                    : 1;  /**< [  4:  4](R/W) Pattern version. */
+        uint32_t pattern               : 4;  /**< [  3:  0](RO) SATA compliant pattern selection. */
+#else /* Word 0 - Little Endian */
+        uint32_t pattern               : 4;  /**< [  3:  0](RO) SATA compliant pattern selection. */
+        uint32_t pv                    : 1;  /**< [  4:  4](R/W) Pattern version. */
+        uint32_t flip                  : 1;  /**< [  5:  5](R/W) Flip disparity. */
+        uint32_t erren                 : 1;  /**< [  6:  6](R/W) Error enable. */
+        uint32_t reserved_7            : 1;
+        uint32_t llc                   : 3;  /**< [ 10:  8](R/W) Link layer control.
+                                                                 \<10\> = RPD - repeat primitive drop enable.
+                                                                 \<9\> = DESCRAM - descrambler enable.
+                                                                 \<8\> = SCRAM - scrambler enable. */
+        uint32_t rsvd_1rsvd_11         : 1;  /**< [ 11: 11](R/W) Reserved. */
+        uint32_t sdfe                  : 1;  /**< [ 12: 12](R/W) Signal detect feature enable. */
+        uint32_t errlossen             : 1;  /**< [ 13: 13](R/W) Error loss detect enable. */
+        uint32_t reserved_14           : 1;
+        uint32_t llb                   : 1;  /**< [ 15: 15](R/W) Lab loopback mode. */
+        uint32_t nealb                 : 1;  /**< [ 16: 16](WO) Near-end analog loopback. */
+        uint32_t cntclr                : 1;  /**< [ 17: 17](WO) Counter clear. */
+        uint32_t txo                   : 1;  /**< [ 18: 18](WO) Transmit only. */
+        uint32_t reserved_19           : 1;
+        uint32_t ferlib                : 1;  /**< [ 20: 20](WO) Far-end retimed loopback. */
+        uint32_t reserved_21_23        : 3;
+        uint32_t late_phy_ready        : 1;  /**< [ 24: 24](R/W) Late phy_ready. */
+        uint32_t old_phy_ready         : 1;  /**< [ 25: 25](R/W) Old phy_ready. Do not change the value of this bit. */
+        uint32_t reserved_26_31        : 6;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_gbl_bistcr_s cn8; */
+    struct bdk_satax_uahc_gbl_bistcr_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_26_31        : 6;
+        uint32_t old_phy_ready         : 1;  /**< [ 25: 25](R/W) Old phy_ready. Do not change the value of this bit. */
+        uint32_t late_phy_ready        : 1;  /**< [ 24: 24](R/W) Late phy_ready. */
+        uint32_t reserved_21_23        : 3;
+        uint32_t ferlib                : 1;  /**< [ 20: 20](WO) Far-end retimed loopback. */
+        uint32_t reserved_19           : 1;
+        uint32_t txo                   : 1;  /**< [ 18: 18](WO) Transmit only. */
+        uint32_t cntclr                : 1;  /**< [ 17: 17](WO) Counter clear. */
+        uint32_t nealb                 : 1;  /**< [ 16: 16](WO) Near-end analog loopback. */
+        uint32_t llb                   : 1;  /**< [ 15: 15](R/W) Lab loopback mode. */
+        uint32_t reserved_14           : 1;
+        uint32_t errlossen             : 1;  /**< [ 13: 13](R/W) Error loss detect enable. */
+        uint32_t sdfe                  : 1;  /**< [ 12: 12](R/W) Signal detect feature enable. */
+        uint32_t rsvd_1rsvd_11         : 1;  /**< [ 11: 11](R/W) Reserved. */
+        uint32_t llc                   : 3;  /**< [ 10:  8](R/W) Link layer control.
+                                                                 \<10\> = RPD - repeat primitive drop enable.
+                                                                 \<9\> = DESCRAM - descrambler enable.
+                                                                 \<8\> = SCRAM - scrambler enable. */
+        uint32_t reserved_7            : 1;
+        uint32_t erren                 : 1;  /**< [  6:  6](R/W) Error enable. */
+        uint32_t flip                  : 1;  /**< [  5:  5](R/W) Flip disparity. */
+        uint32_t pv                    : 1;  /**< [  4:  4](R/W) Pattern version. */
+        uint32_t pattern               : 4;  /**< [  3:  0](R/W) SATA compliant pattern selection. */
+#else /* Word 0 - Little Endian */
+        uint32_t pattern               : 4;  /**< [  3:  0](R/W) SATA compliant pattern selection. */
+        uint32_t pv                    : 1;  /**< [  4:  4](R/W) Pattern version. */
+        uint32_t flip                  : 1;  /**< [  5:  5](R/W) Flip disparity. */
+        uint32_t erren                 : 1;  /**< [  6:  6](R/W) Error enable. */
+        uint32_t reserved_7            : 1;
+        uint32_t llc                   : 3;  /**< [ 10:  8](R/W) Link layer control.
+                                                                 \<10\> = RPD - repeat primitive drop enable.
+                                                                 \<9\> = DESCRAM - descrambler enable.
+                                                                 \<8\> = SCRAM - scrambler enable. */
+        uint32_t rsvd_1rsvd_11         : 1;  /**< [ 11: 11](R/W) Reserved. */
+        uint32_t sdfe                  : 1;  /**< [ 12: 12](R/W) Signal detect feature enable. */
+        uint32_t errlossen             : 1;  /**< [ 13: 13](R/W) Error loss detect enable. */
+        uint32_t reserved_14           : 1;
+        uint32_t llb                   : 1;  /**< [ 15: 15](R/W) Lab loopback mode. */
+        uint32_t nealb                 : 1;  /**< [ 16: 16](WO) Near-end analog loopback. */
+        uint32_t cntclr                : 1;  /**< [ 17: 17](WO) Counter clear. */
+        uint32_t txo                   : 1;  /**< [ 18: 18](WO) Transmit only. */
+        uint32_t reserved_19           : 1;
+        uint32_t ferlib                : 1;  /**< [ 20: 20](WO) Far-end retimed loopback. */
+        uint32_t reserved_21_23        : 3;
+        uint32_t late_phy_ready        : 1;  /**< [ 24: 24](R/W) Late phy_ready. */
+        uint32_t old_phy_ready         : 1;  /**< [ 25: 25](R/W) Old phy_ready. Do not change the value of this bit. */
+        uint32_t reserved_26_31        : 6;
+#endif /* Word 0 - End */
+    } cn9;
+};
+typedef union bdk_satax_uahc_gbl_bistcr bdk_satax_uahc_gbl_bistcr_t;
+
+/* Returns the CSR address of SATA(a)_UAHC_GBL_BISTCR for the running chip
+   model; an out-of-range controller index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTCR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTCR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x8100000000a4ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x8100000000a4ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x8100000000a4ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8100000000a4ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_GBL_BISTCR", 1, a, 0, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the generic BDK_CSR_* accessor
+   macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_SATAX_UAHC_GBL_BISTCR(a) bdk_satax_uahc_gbl_bistcr_t
+#define bustype_BDK_SATAX_UAHC_GBL_BISTCR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_BISTCR(a) "SATAX_UAHC_GBL_BISTCR"
+#define device_bar_BDK_SATAX_UAHC_GBL_BISTCR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_BISTCR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_BISTCR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_bistdecr
+ *
+ * SATA UAHC BIST DWORD Error Count Register
+ * This register is shared between SATA ports. Before accessing this
+ * register, first select the required port by writing the port number
+ * to the SATA()_UAHC_GBL_TESTR[PSEL] field.
+ * Access to the register is disabled on power-on (system reset) or global
+ * SATA block reset, and when the TESTR.BSEL is set to 0.
+ *
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+/* Single read-only 32-bit DWORD error counter; no per-model variants. */
+union bdk_satax_uahc_gbl_bistdecr
+{
+    uint32_t u;
+    struct bdk_satax_uahc_gbl_bistdecr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dwerr                 : 32; /**< [ 31:  0](RO) DWORD error count. */
+#else /* Word 0 - Little Endian */
+        uint32_t dwerr                 : 32; /**< [ 31:  0](RO) DWORD error count. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_gbl_bistdecr_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_bistdecr bdk_satax_uahc_gbl_bistdecr_t;
+
+/* Returns the CSR address of SATA(a)_UAHC_GBL_BISTDECR for the running chip
+   model; an out-of-range controller index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTDECR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTDECR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x8100000000b0ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x8100000000b0ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x8100000000b0ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8100000000b0ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_GBL_BISTDECR", 1, a, 0, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the generic BDK_CSR_* accessor
+   macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_SATAX_UAHC_GBL_BISTDECR(a) bdk_satax_uahc_gbl_bistdecr_t
+#define bustype_BDK_SATAX_UAHC_GBL_BISTDECR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_BISTDECR(a) "SATAX_UAHC_GBL_BISTDECR"
+#define device_bar_BDK_SATAX_UAHC_GBL_BISTDECR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_BISTDECR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_BISTDECR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_bistfctr
+ *
+ * SATA UAHC BIST FIS Count Register
+ * This register is shared between SATA ports. Before accessing this
+ * register, first select the required port by writing the port number
+ * to the SATA()_UAHC_GBL_TESTR[PSEL] field.
+ *
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+/* Single read-only 32-bit received-BIST-FIS counter; no per-model variants. */
+union bdk_satax_uahc_gbl_bistfctr
+{
+    uint32_t u;
+    struct bdk_satax_uahc_gbl_bistfctr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t count                 : 32; /**< [ 31:  0](RO) Received BIST FIS count. */
+#else /* Word 0 - Little Endian */
+        uint32_t count                 : 32; /**< [ 31:  0](RO) Received BIST FIS count. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_gbl_bistfctr_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_bistfctr bdk_satax_uahc_gbl_bistfctr_t;
+
+/* Returns the CSR address of SATA(a)_UAHC_GBL_BISTFCTR for the running chip
+   model; an out-of-range controller index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTFCTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTFCTR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x8100000000a8ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x8100000000a8ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x8100000000a8ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8100000000a8ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_GBL_BISTFCTR", 1, a, 0, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the generic BDK_CSR_* accessor
+   macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_SATAX_UAHC_GBL_BISTFCTR(a) bdk_satax_uahc_gbl_bistfctr_t
+#define bustype_BDK_SATAX_UAHC_GBL_BISTFCTR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_BISTFCTR(a) "SATAX_UAHC_GBL_BISTFCTR"
+#define device_bar_BDK_SATAX_UAHC_GBL_BISTFCTR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_BISTFCTR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_BISTFCTR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_bistsr
+ *
+ * INTERNAL: SATA UAHC BIST Status Register
+ *
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+/* Read-only error counters: 16-bit frame error count, 8-bit burst error
+   count; no per-model variants. */
+union bdk_satax_uahc_gbl_bistsr
+{
+    uint32_t u;
+    struct bdk_satax_uahc_gbl_bistsr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_24_31        : 8;
+        uint32_t brsterr               : 8;  /**< [ 23: 16](RO) Burst error. */
+        uint32_t framerr               : 16; /**< [ 15:  0](RO) Frame error. */
+#else /* Word 0 - Little Endian */
+        uint32_t framerr               : 16; /**< [ 15:  0](RO) Frame error. */
+        uint32_t brsterr               : 8;  /**< [ 23: 16](RO) Burst error. */
+        uint32_t reserved_24_31        : 8;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_gbl_bistsr_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_bistsr bdk_satax_uahc_gbl_bistsr_t;
+
+/* Returns the CSR address of SATA(a)_UAHC_GBL_BISTSR for the running chip
+   model; an out-of-range controller index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTSR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_BISTSR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x8100000000acll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x8100000000acll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x8100000000acll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8100000000acll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_GBL_BISTSR", 1, a, 0, 0, 0);
+}
+
+/* Register metadata; presumably consumed by the generic BDK_CSR_* accessor
+   macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_SATAX_UAHC_GBL_BISTSR(a) bdk_satax_uahc_gbl_bistsr_t
+#define bustype_BDK_SATAX_UAHC_GBL_BISTSR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_BISTSR(a) "SATAX_UAHC_GBL_BISTSR"
+#define device_bar_BDK_SATAX_UAHC_GBL_BISTSR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_BISTSR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_BISTSR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_cap
+ *
+ * SATA AHCI HBA Capabilities Register
+ * This register indicates basic capabilities of the SATA core to software.
+ */
+union bdk_satax_uahc_gbl_cap
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_cap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t s64a : 1; /**< [ 31: 31](RO) Supports 64-bit addressing. */
+ uint32_t sncq : 1; /**< [ 30: 30](RO) Supports native command queuing. */
+ uint32_t ssntf : 1; /**< [ 29: 29](RO) Supports SNotification register. */
+ uint32_t smps : 1; /**< [ 28: 28](R/W) Supports mechanical presence switch. */
+ uint32_t sss : 1; /**< [ 27: 27](R/W) Supports staggered spin-up. */
+ uint32_t salp : 1; /**< [ 26: 26](RO) Supports aggressive link power management. */
+ uint32_t sal : 1; /**< [ 25: 25](RO) Supports activity LED. */
+ uint32_t sclo : 1; /**< [ 24: 24](RO) Supports command list override. */
+ uint32_t iss : 4; /**< [ 23: 20](RO) Interface speed support. */
+ uint32_t snzo : 1; /**< [ 19: 19](RO) Supports nonzero DMA offsets. */
+ uint32_t sam : 1; /**< [ 18: 18](RO) Supports AHCI mode only. */
+ uint32_t spm : 1; /**< [ 17: 17](RO) Supports port multiplier. */
+ uint32_t fbss : 1; /**< [ 16: 16](RO) Supports FIS-based switching. */
+ uint32_t pmd : 1; /**< [ 15: 15](RO) PIO multiple DRQ block. */
+ uint32_t ssc : 1; /**< [ 14: 14](RO) Slumber state capable. */
+ uint32_t psc : 1; /**< [ 13: 13](RO) Partial state capable. */
+ uint32_t ncs : 5; /**< [ 12: 8](RO) Number of command slots. */
+ uint32_t cccs : 1; /**< [ 7: 7](RO) Command completion coalescing support. */
+ uint32_t ems : 1; /**< [ 6: 6](RO) Enclosure management support. */
+ uint32_t sxs : 1; /**< [ 5: 5](RO) Supports external SATA. */
+ uint32_t np : 5; /**< [ 4: 0](RO) Number of ports. 0x0 = 1 port. */
+#else /* Word 0 - Little Endian */
+ uint32_t np : 5; /**< [ 4: 0](RO) Number of ports. 0x0 = 1 port. */
+ uint32_t sxs : 1; /**< [ 5: 5](RO) Supports external SATA. */
+ uint32_t ems : 1; /**< [ 6: 6](RO) Enclosure management support. */
+ uint32_t cccs : 1; /**< [ 7: 7](RO) Command completion coalescing support. */
+ uint32_t ncs : 5; /**< [ 12: 8](RO) Number of command slots. */
+ uint32_t psc : 1; /**< [ 13: 13](RO) Partial state capable. */
+ uint32_t ssc : 1; /**< [ 14: 14](RO) Slumber state capable. */
+ uint32_t pmd : 1; /**< [ 15: 15](RO) PIO multiple DRQ block. */
+ uint32_t fbss : 1; /**< [ 16: 16](RO) Supports FIS-based switching. */
+ uint32_t spm : 1; /**< [ 17: 17](RO) Supports port multiplier. */
+ uint32_t sam : 1; /**< [ 18: 18](RO) Supports AHCI mode only. */
+ uint32_t snzo : 1; /**< [ 19: 19](RO) Supports nonzero DMA offsets. */
+ uint32_t iss : 4; /**< [ 23: 20](RO) Interface speed support. */
+ uint32_t sclo : 1; /**< [ 24: 24](RO) Supports command list override. */
+ uint32_t sal : 1; /**< [ 25: 25](RO) Supports activity LED. */
+ uint32_t salp : 1; /**< [ 26: 26](RO) Supports aggressive link power management. */
+ uint32_t sss : 1; /**< [ 27: 27](R/W) Supports staggered spin-up. */
+ uint32_t smps : 1; /**< [ 28: 28](R/W) Supports mechanical presence switch. */
+ uint32_t ssntf : 1; /**< [ 29: 29](RO) Supports SNotification register. */
+ uint32_t sncq : 1; /**< [ 30: 30](RO) Supports native command queuing. */
+ uint32_t s64a : 1; /**< [ 31: 31](RO) Supports 64-bit addressing. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_cap_s cn8; */
+ struct bdk_satax_uahc_gbl_cap_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t s64a : 1; /**< [ 31: 31](RO) Supports 64-bit addressing. */
+ uint32_t sncq : 1; /**< [ 30: 30](RO) Supports native command queuing. */
+ uint32_t ssntf : 1; /**< [ 29: 29](RO) Supports SNotification register. */
+ uint32_t smps : 1; /**< [ 28: 28](R/W) Supports mechanical presence switch. */
+ uint32_t sss : 1; /**< [ 27: 27](R/W) Supports staggered spin-up. */
+ uint32_t salp : 1; /**< [ 26: 26](RO) Supports aggressive link power management. */
+ uint32_t sal : 1; /**< [ 25: 25](RO) Supports activity LED. */
+ uint32_t sclo : 1; /**< [ 24: 24](RO) Supports command list override. */
+ uint32_t iss : 4; /**< [ 23: 20](RO) Interface speed support. */
+ uint32_t snzo : 1; /**< [ 19: 19](RO) Supports nonzero DMA offsets. */
+ uint32_t sam : 1; /**< [ 18: 18](RO) Supports AHCI mode only. */
+ uint32_t spm : 1; /**< [ 17: 17](RO) Supports port multiplier. */
+ uint32_t fbss : 1; /**< [ 16: 16](RO) Supports FIS-based switching. */
+ uint32_t pmd : 1; /**< [ 15: 15](RO) PIO multiple DRQ block. */
+ uint32_t ssc : 1; /**< [ 14: 14](RO) Slumber state capable. */
+ uint32_t psc : 1; /**< [ 13: 13](RO) Partial state capable. */
+ uint32_t ncs : 5; /**< [ 12: 8](RO) Number of command slots. */
+ uint32_t cccs : 1; /**< [ 7: 7](RO) Command completion coalescing support. */
+ uint32_t ems : 1; /**< [ 6: 6](RO) Enclosure management support, as in termination of commands.
+ CNXXXX does not terminate enclosure management commands, but does support
+ passing enclosure management commands through to downstream controllers. */
+ uint32_t sxs : 1; /**< [ 5: 5](RO) Supports external SATA. */
+ uint32_t np : 5; /**< [ 4: 0](RO) Number of ports. 0x0 = 1 port. */
+#else /* Word 0 - Little Endian */
+ uint32_t np : 5; /**< [ 4: 0](RO) Number of ports. 0x0 = 1 port. */
+ uint32_t sxs : 1; /**< [ 5: 5](RO) Supports external SATA. */
+ uint32_t ems : 1; /**< [ 6: 6](RO) Enclosure management support, as in termination of commands.
+ CNXXXX does not terminate enclosure management commands, but does support
+ passing enclosure management commands through to downstream controllers. */
+ uint32_t cccs : 1; /**< [ 7: 7](RO) Command completion coalescing support. */
+ uint32_t ncs : 5; /**< [ 12: 8](RO) Number of command slots. */
+ uint32_t psc : 1; /**< [ 13: 13](RO) Partial state capable. */
+ uint32_t ssc : 1; /**< [ 14: 14](RO) Slumber state capable. */
+ uint32_t pmd : 1; /**< [ 15: 15](RO) PIO multiple DRQ block. */
+ uint32_t fbss : 1; /**< [ 16: 16](RO) Supports FIS-based switching. */
+ uint32_t spm : 1; /**< [ 17: 17](RO) Supports port multiplier. */
+ uint32_t sam : 1; /**< [ 18: 18](RO) Supports AHCI mode only. */
+ uint32_t snzo : 1; /**< [ 19: 19](RO) Supports nonzero DMA offsets. */
+ uint32_t iss : 4; /**< [ 23: 20](RO) Interface speed support. */
+ uint32_t sclo : 1; /**< [ 24: 24](RO) Supports command list override. */
+ uint32_t sal : 1; /**< [ 25: 25](RO) Supports activity LED. */
+ uint32_t salp : 1; /**< [ 26: 26](RO) Supports aggressive link power management. */
+ uint32_t sss : 1; /**< [ 27: 27](R/W) Supports staggered spin-up. */
+ uint32_t smps : 1; /**< [ 28: 28](R/W) Supports mechanical presence switch. */
+ uint32_t ssntf : 1; /**< [ 29: 29](RO) Supports SNotification register. */
+ uint32_t sncq : 1; /**< [ 30: 30](RO) Supports native command queuing. */
+ uint32_t s64a : 1; /**< [ 31: 31](RO) Supports 64-bit addressing. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_satax_uahc_gbl_cap bdk_satax_uahc_gbl_cap_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_CAP for SATA controller index
+   'a'.  The number of SATA controllers (and hence the valid range of 'a')
+   depends on the detected chip model; any other index reports a fatal CSR
+   error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UAHC_GBL_CAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_CAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000000ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000000ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000000ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000000ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_CAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_CAP(a) bdk_satax_uahc_gbl_cap_t
+#define bustype_BDK_SATAX_UAHC_GBL_CAP(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_CAP(a) "SATAX_UAHC_GBL_CAP"
+#define device_bar_BDK_SATAX_UAHC_GBL_CAP(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_CAP(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_CAP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_cap2
+ *
+ * SATA AHCI HBA Capabilities Extended Register
+ * This register indicates capabilities of the SATA core to software.
+ */
+union bdk_satax_uahc_gbl_cap2
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_cap2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_6_31 : 26;
+ uint32_t deso : 1; /**< [ 5: 5](RO) Device sleep entrance from slumber only. */
+ uint32_t sadm : 1; /**< [ 4: 4](RO) Supports aggressive device sleep management. */
+ uint32_t sds : 1; /**< [ 3: 3](RO) Supports device sleep. */
+ uint32_t apst : 1; /**< [ 2: 2](RO) Automatic partial to slumber transitions. */
+ uint32_t nvmp : 1; /**< [ 1: 1](RO) NVMHCI present. */
+ uint32_t boh : 1; /**< [ 0: 0](RO) Supports BIOS/OS handoff. */
+#else /* Word 0 - Little Endian */
+ uint32_t boh : 1; /**< [ 0: 0](RO) Supports BIOS/OS handoff. */
+ uint32_t nvmp : 1; /**< [ 1: 1](RO) NVMHCI present. */
+ uint32_t apst : 1; /**< [ 2: 2](RO) Automatic partial to slumber transitions. */
+ uint32_t sds : 1; /**< [ 3: 3](RO) Supports device sleep. */
+ uint32_t sadm : 1; /**< [ 4: 4](RO) Supports aggressive device sleep management. */
+ uint32_t deso : 1; /**< [ 5: 5](RO) Device sleep entrance from slumber only. */
+ uint32_t reserved_6_31 : 26;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_cap2_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_cap2 bdk_satax_uahc_gbl_cap2_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_CAP2.  The valid range of 'a'
+   depends on the chip model; out-of-range indices report a fatal CSR error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_CAP2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_CAP2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000024ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000024ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000024ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000024ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_CAP2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_CAP2(a) bdk_satax_uahc_gbl_cap2_t
+#define bustype_BDK_SATAX_UAHC_GBL_CAP2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_CAP2(a) "SATAX_UAHC_GBL_CAP2"
+#define device_bar_BDK_SATAX_UAHC_GBL_CAP2(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_CAP2(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_CAP2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_ccc_ctl
+ *
+ * SATA AHCI CCC Control Register
+ * This register is used to configure the command completion coalescing (CCC) feature for the
+ * SATA core. It is reset on global reset.
+ */
+union bdk_satax_uahc_gbl_ccc_ctl
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_ccc_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t tv : 16; /**< [ 31: 16](R/W) Time-out value. Writable only when [EN] = 0. */
+ uint32_t cc : 8; /**< [ 15: 8](R/W) Command completions. Writable only when [EN] = 0. */
+ uint32_t intr : 5; /**< [ 7: 3](RO) Specifies the port interrupt used by the CCC feature. */
+ uint32_t reserved_1_2 : 2;
+ uint32_t en : 1; /**< [ 0: 0](R/W) CCC enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t en : 1; /**< [ 0: 0](R/W) CCC enable. */
+ uint32_t reserved_1_2 : 2;
+ uint32_t intr : 5; /**< [ 7: 3](RO) Specifies the port interrupt used by the CCC feature. */
+ uint32_t cc : 8; /**< [ 15: 8](R/W) Command completions. Writable only when [EN] = 0. */
+ uint32_t tv : 16; /**< [ 31: 16](R/W) Time-out value. Writable only when [EN] = 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_ccc_ctl_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_ccc_ctl bdk_satax_uahc_gbl_ccc_ctl_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_CCC_CTL.  The valid range of 'a'
+   depends on the chip model; out-of-range indices report a fatal CSR error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_CCC_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_CCC_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000014ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000014ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000014ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000014ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_CCC_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_CCC_CTL(a) bdk_satax_uahc_gbl_ccc_ctl_t
+#define bustype_BDK_SATAX_UAHC_GBL_CCC_CTL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_CCC_CTL(a) "SATAX_UAHC_GBL_CCC_CTL"
+#define device_bar_BDK_SATAX_UAHC_GBL_CCC_CTL(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_CCC_CTL(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_CCC_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_ccc_ports
+ *
+ * SATA AHCI CCC Ports Register
+ * This register specifies the ports that are coalesced as part of the command completion
+ * coalescing
+ * (CCC) feature when SATA()_UAHC_GBL_CCC_CTL[EN]=1. It is reset on global reset.
+ */
+union bdk_satax_uahc_gbl_ccc_ports
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_ccc_ports_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t prt : 1; /**< [ 0: 0](R/W) Per port CCC enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t prt : 1; /**< [ 0: 0](R/W) Per port CCC enable. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_ccc_ports_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_ccc_ports bdk_satax_uahc_gbl_ccc_ports_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_CCC_PORTS.  The valid range of
+   'a' depends on the chip model; out-of-range indices report a fatal CSR
+   error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_CCC_PORTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_CCC_PORTS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000018ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000018ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000018ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000018ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_CCC_PORTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_CCC_PORTS(a) bdk_satax_uahc_gbl_ccc_ports_t
+#define bustype_BDK_SATAX_UAHC_GBL_CCC_PORTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_CCC_PORTS(a) "SATAX_UAHC_GBL_CCC_PORTS"
+#define device_bar_BDK_SATAX_UAHC_GBL_CCC_PORTS(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_CCC_PORTS(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_CCC_PORTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_diagnr3
+ *
+ * SATA UAHC DIAGNR3 Register
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_diagnr3
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_diagnr3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t fbcsw_cnt : 32; /**< [ 31: 0](R/W1C) FIS-based context switching counter. Any 32-bit write to this location clears the counter. */
+#else /* Word 0 - Little Endian */
+ uint32_t fbcsw_cnt : 32; /**< [ 31: 0](R/W1C) FIS-based context switching counter. Any 32-bit write to this location clears the counter. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_diagnr3_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_diagnr3 bdk_satax_uahc_gbl_diagnr3_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_DIAGNR3.  This register exists
+   only on CN9XXX parts; any other model/index reports a fatal CSR error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_DIAGNR3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_DIAGNR3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000c4ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_DIAGNR3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_DIAGNR3(a) bdk_satax_uahc_gbl_diagnr3_t
+#define bustype_BDK_SATAX_UAHC_GBL_DIAGNR3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_DIAGNR3(a) "SATAX_UAHC_GBL_DIAGNR3"
+#define device_bar_BDK_SATAX_UAHC_GBL_DIAGNR3(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_DIAGNR3(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_DIAGNR3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_ghc
+ *
+ * SATA AHCI Global HBA Control Register
+ * This register controls various global actions of the SATA core.
+ */
+union bdk_satax_uahc_gbl_ghc
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_ghc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ae : 1; /**< [ 31: 31](RO) AHCI enable. */
+ uint32_t reserved_2_30 : 29;
+ uint32_t ie : 1; /**< [ 1: 1](R/W) Interrupt enable. */
+ uint32_t hr : 1; /**< [ 0: 0](R/W1/H) HBA reset. Writing a 1 resets the UAHC. Hardware clears this bit once reset is complete. */
+#else /* Word 0 - Little Endian */
+ uint32_t hr : 1; /**< [ 0: 0](R/W1/H) HBA reset. Writing a 1 resets the UAHC. Hardware clears this bit once reset is complete. */
+ uint32_t ie : 1; /**< [ 1: 1](R/W) Interrupt enable. */
+ uint32_t reserved_2_30 : 29;
+ uint32_t ae : 1; /**< [ 31: 31](RO) AHCI enable. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_ghc_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_ghc bdk_satax_uahc_gbl_ghc_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_GHC.  The valid range of 'a'
+   depends on the chip model; out-of-range indices report a fatal CSR error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_GHC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_GHC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000004ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000004ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000004ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000004ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_GHC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_GHC(a) bdk_satax_uahc_gbl_ghc_t
+#define bustype_BDK_SATAX_UAHC_GBL_GHC(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_GHC(a) "SATAX_UAHC_GBL_GHC"
+#define device_bar_BDK_SATAX_UAHC_GBL_GHC(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_GHC(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_GHC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_gparam1r
+ *
+ * SATA UAHC Global Parameter Register 1
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_gparam1r
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_gparam1r_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t align_m : 1; /**< [ 31: 31](RO) RX data alignment mode (ALIGN_MODE). */
+ uint32_t rx_buffer : 1; /**< [ 30: 30](RO) RX data buffer mode (RX_BUFFER_MODE). */
+ uint32_t phy_data : 2; /**< [ 29: 28](RO) PHY data width (PHY_DATA_WIDTH). */
+ uint32_t phy_rst : 1; /**< [ 27: 27](RO) PHY reset mode (PHY_RST_MODE). */
+ uint32_t phy_ctrl : 6; /**< [ 26: 21](RO) PHY control width (PHY_CTRL_W). */
+ uint32_t phy_stat : 6; /**< [ 20: 15](RO) PHY status width (PHY_STAT_W). */
+ uint32_t latch_m : 1; /**< [ 14: 14](RO) Latch mode (LATCH_MODE). */
+ uint32_t phy_type : 3; /**< [ 13: 11](RO) PHY interface type (PHY_INTERFACE_TYPE). */
+ uint32_t return_err : 1; /**< [ 10: 10](RO) AMBA error response (RETURN_ERR_RESP). */
+ uint32_t ahb_endian : 2; /**< [ 9: 8](RO) AHB bus endianness (AHB_ENDIANNESS). */
+ uint32_t s_haddr : 1; /**< [ 7: 7](RO) AMBA slave address bus width (S_HADDR_WIDTH). */
+ uint32_t m_haddr : 1; /**< [ 6: 6](RO) AMBA master address bus width (M_HADDR_WIDTH). */
+ uint32_t s_hdata : 3; /**< [ 5: 3](RO) AMBA slave data width (S_HDATA_WIDTH). */
+ uint32_t m_hdata : 3; /**< [ 2: 0](RO) AMBA master data width (M_HDATA_WIDTH). */
+#else /* Word 0 - Little Endian */
+ uint32_t m_hdata : 3; /**< [ 2: 0](RO) AMBA master data width (M_HDATA_WIDTH). */
+ uint32_t s_hdata : 3; /**< [ 5: 3](RO) AMBA slave data width (S_HDATA_WIDTH). */
+ uint32_t m_haddr : 1; /**< [ 6: 6](RO) AMBA master address bus width (M_HADDR_WIDTH). */
+ uint32_t s_haddr : 1; /**< [ 7: 7](RO) AMBA slave address bus width (S_HADDR_WIDTH). */
+ uint32_t ahb_endian : 2; /**< [ 9: 8](RO) AHB bus endianness (AHB_ENDIANNESS). */
+ uint32_t return_err : 1; /**< [ 10: 10](RO) AMBA error response (RETURN_ERR_RESP). */
+ uint32_t phy_type : 3; /**< [ 13: 11](RO) PHY interface type (PHY_INTERFACE_TYPE). */
+ uint32_t latch_m : 1; /**< [ 14: 14](RO) Latch mode (LATCH_MODE). */
+ uint32_t phy_stat : 6; /**< [ 20: 15](RO) PHY status width (PHY_STAT_W). */
+ uint32_t phy_ctrl : 6; /**< [ 26: 21](RO) PHY control width (PHY_CTRL_W). */
+ uint32_t phy_rst : 1; /**< [ 27: 27](RO) PHY reset mode (PHY_RST_MODE). */
+ uint32_t phy_data : 2; /**< [ 29: 28](RO) PHY data width (PHY_DATA_WIDTH). */
+ uint32_t rx_buffer : 1; /**< [ 30: 30](RO) RX data buffer mode (RX_BUFFER_MODE). */
+ uint32_t align_m : 1; /**< [ 31: 31](RO) RX data alignment mode (ALIGN_MODE). */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_gparam1r_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_gparam1r bdk_satax_uahc_gbl_gparam1r_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_GPARAM1R.  The valid range of
+   'a' depends on the chip model; out-of-range indices report a fatal CSR
+   error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_GPARAM1R(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_GPARAM1R(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100000000e8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100000000e8ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100000000e8ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000e8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_GPARAM1R", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_GPARAM1R(a) bdk_satax_uahc_gbl_gparam1r_t
+#define bustype_BDK_SATAX_UAHC_GBL_GPARAM1R(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_GPARAM1R(a) "SATAX_UAHC_GBL_GPARAM1R"
+#define device_bar_BDK_SATAX_UAHC_GBL_GPARAM1R(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_GPARAM1R(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_GPARAM1R(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_gparam2r
+ *
+ * SATA UAHC Global Parameter Register 2
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_gparam2r
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_gparam2r_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t fbs_mem_mode : 1; /**< [ 31: 31](RO) Selects FBS memory read port type. */
+ uint32_t rxoob_clk_units : 1; /**< [ 30: 30](RO) RX OOB clock frequency units. */
+ uint32_t rxoob_clk_upper : 10; /**< [ 29: 20](RO) Upper bits of RX OOB clock frequency. */
+ uint32_t bist_m : 1; /**< [ 19: 19](RO) BIST loopback checking depth (BIST_MODE). */
+ uint32_t fbs_mem_s : 1; /**< [ 18: 18](RO) Context RAM memory location. */
+ uint32_t fbs_pmpn : 2; /**< [ 17: 16](RO) Maximum number of port multiplier ports (FBS_PMPN_MAX). */
+ uint32_t fbs_support : 1; /**< [ 15: 15](RO) FIS-based switching support (FBS_SUPPORT). */
+ uint32_t dev_cp : 1; /**< [ 14: 14](RO) Cold presence detect (DEV_CP_DET). */
+ uint32_t dev_mp : 1; /**< [ 13: 13](RO) Mechanical presence switch (DEV_MP_SWITCH). */
+ uint32_t encode_m : 1; /**< [ 12: 12](RO) 8/10 bit encoding/decoding (ENCODE_MODE). */
+ uint32_t rxoob_clk_m : 1; /**< [ 11: 11](RO) RX OOB clock mode (RXOOB_CLK_MODE). */
+ uint32_t rx_oob_m : 1; /**< [ 10: 10](RO) RX OOB mode (RX_OOB_MODE). */
+ uint32_t tx_oob_m : 1; /**< [ 9: 9](RO) TX OOB mode (TX_OOB_MODE). */
+ uint32_t rxoob_clk : 9; /**< [ 8: 0](RO) RX OOB clock frequency (RXOOB_CLK). */
+#else /* Word 0 - Little Endian */
+ uint32_t rxoob_clk : 9; /**< [ 8: 0](RO) RX OOB clock frequency (RXOOB_CLK). */
+ uint32_t tx_oob_m : 1; /**< [ 9: 9](RO) TX OOB mode (TX_OOB_MODE). */
+ uint32_t rx_oob_m : 1; /**< [ 10: 10](RO) RX OOB mode (RX_OOB_MODE). */
+ uint32_t rxoob_clk_m : 1; /**< [ 11: 11](RO) RX OOB clock mode (RXOOB_CLK_MODE). */
+ uint32_t encode_m : 1; /**< [ 12: 12](RO) 8/10 bit encoding/decoding (ENCODE_MODE). */
+ uint32_t dev_mp : 1; /**< [ 13: 13](RO) Mechanical presence switch (DEV_MP_SWITCH). */
+ uint32_t dev_cp : 1; /**< [ 14: 14](RO) Cold presence detect (DEV_CP_DET). */
+ uint32_t fbs_support : 1; /**< [ 15: 15](RO) FIS-based switching support (FBS_SUPPORT). */
+ uint32_t fbs_pmpn : 2; /**< [ 17: 16](RO) Maximum number of port multiplier ports (FBS_PMPN_MAX). */
+ uint32_t fbs_mem_s : 1; /**< [ 18: 18](RO) Context RAM memory location. */
+ uint32_t bist_m : 1; /**< [ 19: 19](RO) BIST loopback checking depth (BIST_MODE). */
+ uint32_t rxoob_clk_upper : 10; /**< [ 29: 20](RO) Upper bits of RX OOB clock frequency. */
+ uint32_t rxoob_clk_units : 1; /**< [ 30: 30](RO) RX OOB clock frequency units. */
+ uint32_t fbs_mem_mode : 1; /**< [ 31: 31](RO) Selects FBS memory read port type. */
+#endif /* Word 0 - End */
+ } s;
+ /* CN8XXX layout: bit 31 is reserved (no FBS_MEM_MODE field). */
+ struct bdk_satax_uahc_gbl_gparam2r_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t rxoob_clk_units : 1; /**< [ 30: 30](RO) RX OOB clock frequency units. */
+ uint32_t rxoob_clk_upper : 10; /**< [ 29: 20](RO) Upper bits of RX OOB clock frequency. */
+ uint32_t bist_m : 1; /**< [ 19: 19](RO) BIST loopback checking depth (BIST_MODE). */
+ uint32_t fbs_mem_s : 1; /**< [ 18: 18](RO) Context RAM memory location. */
+ uint32_t fbs_pmpn : 2; /**< [ 17: 16](RO) Maximum number of port multiplier ports (FBS_PMPN_MAX). */
+ uint32_t fbs_support : 1; /**< [ 15: 15](RO) FIS-based switching support (FBS_SUPPORT). */
+ uint32_t dev_cp : 1; /**< [ 14: 14](RO) Cold presence detect (DEV_CP_DET). */
+ uint32_t dev_mp : 1; /**< [ 13: 13](RO) Mechanical presence switch (DEV_MP_SWITCH). */
+ uint32_t encode_m : 1; /**< [ 12: 12](RO) 8/10 bit encoding/decoding (ENCODE_MODE). */
+ uint32_t rxoob_clk_m : 1; /**< [ 11: 11](RO) RX OOB clock mode (RXOOB_CLK_MODE). */
+ uint32_t rx_oob_m : 1; /**< [ 10: 10](RO) RX OOB mode (RX_OOB_MODE). */
+ uint32_t tx_oob_m : 1; /**< [ 9: 9](RO) TX OOB mode (TX_OOB_MODE). */
+ uint32_t rxoob_clk : 9; /**< [ 8: 0](RO) RX OOB clock frequency (RXOOB_CLK). */
+#else /* Word 0 - Little Endian */
+ uint32_t rxoob_clk : 9; /**< [ 8: 0](RO) RX OOB clock frequency (RXOOB_CLK). */
+ uint32_t tx_oob_m : 1; /**< [ 9: 9](RO) TX OOB mode (TX_OOB_MODE). */
+ uint32_t rx_oob_m : 1; /**< [ 10: 10](RO) RX OOB mode (RX_OOB_MODE). */
+ uint32_t rxoob_clk_m : 1; /**< [ 11: 11](RO) RX OOB clock mode (RXOOB_CLK_MODE). */
+ uint32_t encode_m : 1; /**< [ 12: 12](RO) 8/10 bit encoding/decoding (ENCODE_MODE). */
+ uint32_t dev_mp : 1; /**< [ 13: 13](RO) Mechanical presence switch (DEV_MP_SWITCH). */
+ uint32_t dev_cp : 1; /**< [ 14: 14](RO) Cold presence detect (DEV_CP_DET). */
+ uint32_t fbs_support : 1; /**< [ 15: 15](RO) FIS-based switching support (FBS_SUPPORT). */
+ uint32_t fbs_pmpn : 2; /**< [ 17: 16](RO) Maximum number of port multiplier ports (FBS_PMPN_MAX). */
+ uint32_t fbs_mem_s : 1; /**< [ 18: 18](RO) Context RAM memory location. */
+ uint32_t bist_m : 1; /**< [ 19: 19](RO) BIST loopback checking depth (BIST_MODE). */
+ uint32_t rxoob_clk_upper : 10; /**< [ 29: 20](RO) Upper bits of RX OOB clock frequency. */
+ uint32_t rxoob_clk_units : 1; /**< [ 30: 30](RO) RX OOB clock frequency units. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } cn8;
+ /* CN9XXX layout: bit 31 adds FBS_MEM_MODE; field semantics of [17:16]
+    and [8:0] are described differently in the CN9XXX documentation. */
+ struct bdk_satax_uahc_gbl_gparam2r_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t fbs_mem_mode : 1; /**< [ 31: 31](RO) Selects FBS memory read port type. */
+ uint32_t rxoob_clk_units : 1; /**< [ 30: 30](RO) RX OOB clock frequency units. */
+ uint32_t rxoob_clk_upper : 10; /**< [ 29: 20](RO) Upper bits of RX OOB clock frequency. */
+ uint32_t bist_m : 1; /**< [ 19: 19](RO) BIST loopback checking depth (BIST_MODE). */
+ uint32_t fbs_mem_s : 1; /**< [ 18: 18](RO) Context RAM memory location. */
+ uint32_t fbs_pmpn : 2; /**< [ 17: 16](RO) FBS RAM depth FBS_RAM_DEPTH. */
+ uint32_t fbs_support : 1; /**< [ 15: 15](RO) FIS-based switching support (FBS_SUPPORT). */
+ uint32_t dev_cp : 1; /**< [ 14: 14](RO) Cold presence detect (DEV_CP_DET). */
+ uint32_t dev_mp : 1; /**< [ 13: 13](RO) Mechanical presence switch (DEV_MP_SWITCH). */
+ uint32_t encode_m : 1; /**< [ 12: 12](RO) 8/10 bit encoding/decoding (ENCODE_MODE). */
+ uint32_t rxoob_clk_m : 1; /**< [ 11: 11](RO) RX OOB clock mode (RXOOB_CLK_MODE). */
+ uint32_t rx_oob_m : 1; /**< [ 10: 10](RO) RX OOB mode (RX_OOB_MODE). */
+ uint32_t tx_oob_m : 1; /**< [ 9: 9](RO) TX OOB mode (TX_OOB_MODE). */
+ uint32_t rxoob_clk : 9; /**< [ 8: 0](RO) RX OOB clock frequency (RXOOB_CLK_FREQ). */
+#else /* Word 0 - Little Endian */
+ uint32_t rxoob_clk : 9; /**< [ 8: 0](RO) RX OOB clock frequency (RXOOB_CLK_FREQ). */
+ uint32_t tx_oob_m : 1; /**< [ 9: 9](RO) TX OOB mode (TX_OOB_MODE). */
+ uint32_t rx_oob_m : 1; /**< [ 10: 10](RO) RX OOB mode (RX_OOB_MODE). */
+ uint32_t rxoob_clk_m : 1; /**< [ 11: 11](RO) RX OOB clock mode (RXOOB_CLK_MODE). */
+ uint32_t encode_m : 1; /**< [ 12: 12](RO) 8/10 bit encoding/decoding (ENCODE_MODE). */
+ uint32_t dev_mp : 1; /**< [ 13: 13](RO) Mechanical presence switch (DEV_MP_SWITCH). */
+ uint32_t dev_cp : 1; /**< [ 14: 14](RO) Cold presence detect (DEV_CP_DET). */
+ uint32_t fbs_support : 1; /**< [ 15: 15](RO) FIS-based switching support (FBS_SUPPORT). */
+ uint32_t fbs_pmpn : 2; /**< [ 17: 16](RO) FBS RAM depth FBS_RAM_DEPTH. */
+ uint32_t fbs_mem_s : 1; /**< [ 18: 18](RO) Context RAM memory location. */
+ uint32_t bist_m : 1; /**< [ 19: 19](RO) BIST loopback checking depth (BIST_MODE). */
+ uint32_t rxoob_clk_upper : 10; /**< [ 29: 20](RO) Upper bits of RX OOB clock frequency. */
+ uint32_t rxoob_clk_units : 1; /**< [ 30: 30](RO) RX OOB clock frequency units. */
+ uint32_t fbs_mem_mode : 1; /**< [ 31: 31](RO) Selects FBS memory read port type. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_satax_uahc_gbl_gparam2r bdk_satax_uahc_gbl_gparam2r_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_GPARAM2R.  The valid range of
+   'a' depends on the chip model; out-of-range indices report a fatal CSR
+   error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_GPARAM2R(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_GPARAM2R(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100000000ecll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100000000ecll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100000000ecll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000ecll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_GPARAM2R", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_GPARAM2R(a) bdk_satax_uahc_gbl_gparam2r_t
+#define bustype_BDK_SATAX_UAHC_GBL_GPARAM2R(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_GPARAM2R(a) "SATAX_UAHC_GBL_GPARAM2R"
+#define device_bar_BDK_SATAX_UAHC_GBL_GPARAM2R(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_GPARAM2R(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_GPARAM2R(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_gparam3
+ *
+ * SATA UAHC Global Parameter 3 Register
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_gparam3
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_gparam3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_9_31 : 23;
+ uint32_t mem_ap_support : 1; /**< [ 8: 8](RO) Enable address protection. */
+ uint32_t phy_type : 5; /**< [ 7: 3](RO) PHY interface type. */
+ uint32_t mem_ecc_cor_en : 1; /**< [ 2: 2](RO) Single-bit correction enable. */
+ uint32_t mem_dp_type : 1; /**< [ 1: 1](RO) Data protection type. */
+ uint32_t mem_dp_support : 1; /**< [ 0: 0](RO) Enable data protection. */
+#else /* Word 0 - Little Endian */
+ uint32_t mem_dp_support : 1; /**< [ 0: 0](RO) Enable data protection. */
+ uint32_t mem_dp_type : 1; /**< [ 1: 1](RO) Data protection type. */
+ uint32_t mem_ecc_cor_en : 1; /**< [ 2: 2](RO) Single-bit correction enable. */
+ uint32_t phy_type : 5; /**< [ 7: 3](RO) PHY interface type. */
+ uint32_t mem_ap_support : 1; /**< [ 8: 8](RO) Enable address protection. */
+ uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_gparam3_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_gparam3 bdk_satax_uahc_gbl_gparam3_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_GPARAM3.  This register exists
+   only on CN9XXX parts; any other model/index reports a fatal CSR error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_GPARAM3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_GPARAM3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000dcll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_GPARAM3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_GPARAM3(a) bdk_satax_uahc_gbl_gparam3_t
+#define bustype_BDK_SATAX_UAHC_GBL_GPARAM3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_GPARAM3(a) "SATAX_UAHC_GBL_GPARAM3"
+#define device_bar_BDK_SATAX_UAHC_GBL_GPARAM3(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_GPARAM3(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_GPARAM3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_idr
+ *
+ * SATA UAHC ID Register
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_idr
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_idr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t id : 32; /**< [ 31: 0](RO) Core ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t id : 32; /**< [ 31: 0](RO) Core ID. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_idr_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_idr bdk_satax_uahc_gbl_idr_t;
+
+/* Return the CSR address of SATA(a)_UAHC_GBL_IDR.  The valid range of 'a'
+   depends on the chip model; out-of-range indices report a fatal CSR error. */
+static inline uint64_t BDK_SATAX_UAHC_GBL_IDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_IDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100000000fcll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100000000fcll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100000000fcll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000fcll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_IDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_GBL_IDR(a) bdk_satax_uahc_gbl_idr_t
+#define bustype_BDK_SATAX_UAHC_GBL_IDR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_IDR(a) "SATAX_UAHC_GBL_IDR"
+#define device_bar_BDK_SATAX_UAHC_GBL_IDR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_IDR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_IDR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_is
+ *
+ * SATA AHCI Interrupt Status Register
+ * This register indicates which of the ports within the SATA core have an interrupt
+ * pending and require service. This register is reset on global reset (GHC.HR=1).
+ */
+union bdk_satax_uahc_gbl_is
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_is_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_2_31 : 30;
+ uint32_t ips : 2; /**< [ 1: 0](R/W1C/H) Interrupt pending status. */
+#else /* Word 0 - Little Endian */
+ uint32_t ips : 2; /**< [ 1: 0](R/W1C/H) Interrupt pending status. */
+ uint32_t reserved_2_31 : 30;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_is_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_is bdk_satax_uahc_gbl_is_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_GBL_IS for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_GBL_IS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_IS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000008ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000008ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000008ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000008ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_IS", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_GBL_IS(a) bdk_satax_uahc_gbl_is_t
+#define bustype_BDK_SATAX_UAHC_GBL_IS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_IS(a) "SATAX_UAHC_GBL_IS"
+#define device_bar_BDK_SATAX_UAHC_GBL_IS(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_IS(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_IS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_oobr
+ *
+ * SATA UAHC OOB Register
+ * This register is shared between SATA ports. Before accessing this
+ * register, first select the required port by writing the port number
+ * to the SATA()_UAHC_GBL_TESTR[PSEL] field.
+ *
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_oobr
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_oobr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t we : 1; /**< [ 31: 31](R/W/H) Write enable. */
+ uint32_t cwmin : 7; /**< [ 30: 24](R/W/H) COMWAKE minimum value. Writable only if WE is set. */
+ uint32_t cwmax : 8; /**< [ 23: 16](R/W/H) COMWAKE maximum value. Writable only if WE is set. */
+ uint32_t cimin : 8; /**< [ 15: 8](R/W/H) COMINIT minimum value. Writable only if WE is set. */
+ uint32_t cimax : 8; /**< [ 7: 0](R/W/H) COMINIT maximum value. Writable only if WE is set. */
+#else /* Word 0 - Little Endian */
+ uint32_t cimax : 8; /**< [ 7: 0](R/W/H) COMINIT maximum value. Writable only if WE is set. */
+ uint32_t cimin : 8; /**< [ 15: 8](R/W/H) COMINIT minimum value. Writable only if WE is set. */
+ uint32_t cwmax : 8; /**< [ 23: 16](R/W/H) COMWAKE maximum value. Writable only if WE is set. */
+ uint32_t cwmin : 7; /**< [ 30: 24](R/W/H) COMWAKE minimum value. Writable only if WE is set. */
+ uint32_t we : 1; /**< [ 31: 31](R/W/H) Write enable. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_oobr_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_oobr bdk_satax_uahc_gbl_oobr_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_GBL_OOBR for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_GBL_OOBR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_OOBR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100000000bcll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100000000bcll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100000000bcll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000bcll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_OOBR", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_GBL_OOBR(a) bdk_satax_uahc_gbl_oobr_t
+#define bustype_BDK_SATAX_UAHC_GBL_OOBR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_OOBR(a) "SATAX_UAHC_GBL_OOBR"
+#define device_bar_BDK_SATAX_UAHC_GBL_OOBR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_OOBR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_OOBR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_pi
+ *
+ * SATA AHCI Ports Implemented Register
+ * This register indicates which ports are exposed by the SATA core and are available
+ * for the software to use.
+ */
+union bdk_satax_uahc_gbl_pi
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_pi_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_1_31 : 31;
+ uint32_t pi : 1; /**< [ 0: 0](R/W) Number of ports implemented. This field is one-time writable, then becomes read-only. */
+#else /* Word 0 - Little Endian */
+ uint32_t pi : 1; /**< [ 0: 0](R/W) Number of ports implemented. This field is one-time writable, then becomes read-only. */
+ uint32_t reserved_1_31 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_pi_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_pi bdk_satax_uahc_gbl_pi_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_GBL_PI for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_GBL_PI(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_PI(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x81000000000cll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x81000000000cll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x81000000000cll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x81000000000cll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_PI", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_GBL_PI(a) bdk_satax_uahc_gbl_pi_t
+#define bustype_BDK_SATAX_UAHC_GBL_PI(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_PI(a) "SATAX_UAHC_GBL_PI"
+#define device_bar_BDK_SATAX_UAHC_GBL_PI(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_PI(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_PI(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_pparamr
+ *
+ * SATA UAHC Port Parameter Register
+ * Port is selected by the SATA()_UAHC_GBL_TESTR[PSEL] field.
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_pparamr
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_pparamr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t tx_mem_m : 1; /**< [ 11: 11](RO) TX FIFO memory read port type (Pn_TX_MEM_MODE). */
+ uint32_t tx_mem_s : 1; /**< [ 10: 10](RO) TX FIFO memory type (Pn_TX_MEM_SELECT). */
+ uint32_t rx_mem_m : 1; /**< [ 9: 9](RO) RX FIFO memory read port type (Pn_RX_MEM_MODE). */
+ uint32_t rx_mem_s : 1; /**< [ 8: 8](RO) RX FIFO memory type (Pn_RX_MEM_SELECT). */
+ uint32_t txfifo_depth : 4; /**< [ 7: 4](RO) TX FIFO depth in FIFO words. */
+ uint32_t rxfifo_depth : 4; /**< [ 3: 0](RO) RX FIFO depth in FIFO words. */
+#else /* Word 0 - Little Endian */
+ uint32_t rxfifo_depth : 4; /**< [ 3: 0](RO) RX FIFO depth in FIFO words. */
+ uint32_t txfifo_depth : 4; /**< [ 7: 4](RO) TX FIFO depth in FIFO words. */
+ uint32_t rx_mem_s : 1; /**< [ 8: 8](RO) RX FIFO memory type (Pn_RX_MEM_SELECT). */
+ uint32_t rx_mem_m : 1; /**< [ 9: 9](RO) RX FIFO memory read port type (Pn_RX_MEM_MODE). */
+ uint32_t tx_mem_s : 1; /**< [ 10: 10](RO) TX FIFO memory type (Pn_TX_MEM_SELECT). */
+ uint32_t tx_mem_m : 1; /**< [ 11: 11](RO) TX FIFO memory read port type (Pn_TX_MEM_MODE). */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_pparamr_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_pparamr bdk_satax_uahc_gbl_pparamr_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_GBL_PPARAMR for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_GBL_PPARAMR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_PPARAMR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100000000f0ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100000000f0ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100000000f0ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000f0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_PPARAMR", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_GBL_PPARAMR(a) bdk_satax_uahc_gbl_pparamr_t
+#define bustype_BDK_SATAX_UAHC_GBL_PPARAMR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_PPARAMR(a) "SATAX_UAHC_GBL_PPARAMR"
+#define device_bar_BDK_SATAX_UAHC_GBL_PPARAMR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_PPARAMR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_PPARAMR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_testr
+ *
+ * SATA UAHC Test Register
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_testr
+{
+ uint32_t u;
+ /* Layout for CN9XXX parts (adds [BSEL]); CN8XXX layout is in .cn8 below. */
+ struct bdk_satax_uahc_gbl_testr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_25_31 : 7;
+ uint32_t bsel : 1; /**< [ 24: 24](R/W) Bank select. Always select 0 for BIST registers. */
+ uint32_t reserved_19_23 : 5;
+ uint32_t psel : 3; /**< [ 18: 16](R/W) Port select. */
+ uint32_t reserved_1_15 : 15;
+ uint32_t test_if : 1; /**< [ 0: 0](R/W) Test interface. */
+#else /* Word 0 - Little Endian */
+ uint32_t test_if : 1; /**< [ 0: 0](R/W) Test interface. */
+ uint32_t reserved_1_15 : 15;
+ uint32_t psel : 3; /**< [ 18: 16](R/W) Port select. */
+ uint32_t reserved_19_23 : 5;
+ uint32_t bsel : 1; /**< [ 24: 24](R/W) Bank select. Always select 0 for BIST registers. */
+ uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* CN8XXX layout: no bank-select bit; bits [31:19] are reserved. */
+ struct bdk_satax_uahc_gbl_testr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_19_31 : 13;
+ uint32_t psel : 3; /**< [ 18: 16](R/W) Port select. */
+ uint32_t reserved_1_15 : 15;
+ uint32_t test_if : 1; /**< [ 0: 0](R/W) Test interface. */
+#else /* Word 0 - Little Endian */
+ uint32_t test_if : 1; /**< [ 0: 0](R/W) Test interface. */
+ uint32_t reserved_1_15 : 15;
+ uint32_t psel : 3; /**< [ 18: 16](R/W) Port select. */
+ uint32_t reserved_19_31 : 13;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_satax_uahc_gbl_testr_s cn9; */
+};
+typedef union bdk_satax_uahc_gbl_testr bdk_satax_uahc_gbl_testr_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_GBL_TESTR for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_GBL_TESTR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_TESTR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100000000f4ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100000000f4ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100000000f4ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000f4ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_TESTR", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_GBL_TESTR(a) bdk_satax_uahc_gbl_testr_t
+#define bustype_BDK_SATAX_UAHC_GBL_TESTR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_TESTR(a) "SATAX_UAHC_GBL_TESTR"
+#define device_bar_BDK_SATAX_UAHC_GBL_TESTR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_TESTR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_TESTR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_timer1ms
+ *
+ * SATA UAHC Timer 1ms Register
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_timer1ms
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_timer1ms_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_20_31 : 12;
+ uint32_t timv : 20; /**< [ 19: 0](R/W) 1ms timer value. Writable only when SATA()_UAHC_GBL_CCC_CTL[EN] = 0. */
+#else /* Word 0 - Little Endian */
+ uint32_t timv : 20; /**< [ 19: 0](R/W) 1ms timer value. Writable only when SATA()_UAHC_GBL_CCC_CTL[EN] = 0. */
+ uint32_t reserved_20_31 : 12;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_timer1ms_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_timer1ms bdk_satax_uahc_gbl_timer1ms_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_GBL_TIMER1MS for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_GBL_TIMER1MS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_TIMER1MS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100000000e0ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100000000e0ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100000000e0ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000e0ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_TIMER1MS", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_GBL_TIMER1MS(a) bdk_satax_uahc_gbl_timer1ms_t
+#define bustype_BDK_SATAX_UAHC_GBL_TIMER1MS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_TIMER1MS(a) "SATAX_UAHC_GBL_TIMER1MS"
+#define device_bar_BDK_SATAX_UAHC_GBL_TIMER1MS(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_TIMER1MS(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_TIMER1MS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_versionr
+ *
+ * SATA UAHC Version Register
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_gbl_versionr
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_versionr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ver : 32; /**< [ 31: 0](RO) SATA IP version number. */
+#else /* Word 0 - Little Endian */
+ uint32_t ver : 32; /**< [ 31: 0](RO) SATA IP version number. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_versionr_s cn; */
+};
+typedef union bdk_satax_uahc_gbl_versionr bdk_satax_uahc_gbl_versionr_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_GBL_VERSIONR for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_GBL_VERSIONR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_VERSIONR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100000000f8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100000000f8ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100000000f8ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100000000f8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_VERSIONR", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_GBL_VERSIONR(a) bdk_satax_uahc_gbl_versionr_t
+#define bustype_BDK_SATAX_UAHC_GBL_VERSIONR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_VERSIONR(a) "SATAX_UAHC_GBL_VERSIONR"
+#define device_bar_BDK_SATAX_UAHC_GBL_VERSIONR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_VERSIONR(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_VERSIONR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_gbl_vs
+ *
+ * SATA AHCI Version Register
+ * This register indicates the major and minor version of the AHCI specification that
+ * the SATA core supports.
+ */
+union bdk_satax_uahc_gbl_vs
+{
+ uint32_t u;
+ struct bdk_satax_uahc_gbl_vs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t mjr : 16; /**< [ 31: 16](RO) Major version number. */
+ uint32_t mnr : 16; /**< [ 15: 0](RO) Minor version number. No DevSleep support. */
+#else /* Word 0 - Little Endian */
+ uint32_t mnr : 16; /**< [ 15: 0](RO) Minor version number. No DevSleep support. */
+ uint32_t mjr : 16; /**< [ 31: 16](RO) Major version number. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_gbl_vs_s cn8; */
+ /* CN9XXX variant: identical bit layout, but the core reports DevSleep support. */
+ struct bdk_satax_uahc_gbl_vs_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t mjr : 16; /**< [ 31: 16](RO) Major version number. */
+ uint32_t mnr : 16; /**< [ 15: 0](RO) Minor version number. DevSleep is supported. */
+#else /* Word 0 - Little Endian */
+ uint32_t mnr : 16; /**< [ 15: 0](RO) Minor version number. DevSleep is supported. */
+ uint32_t mjr : 16; /**< [ 31: 16](RO) Major version number. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_satax_uahc_gbl_vs bdk_satax_uahc_gbl_vs_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_GBL_VS for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_GBL_VS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_GBL_VS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000010ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000010ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000010ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000010ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_GBL_VS", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_GBL_VS(a) bdk_satax_uahc_gbl_vs_t
+#define bustype_BDK_SATAX_UAHC_GBL_VS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_GBL_VS(a) "SATAX_UAHC_GBL_VS"
+#define device_bar_BDK_SATAX_UAHC_GBL_VS(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_GBL_VS(a) (a)
+#define arguments_BDK_SATAX_UAHC_GBL_VS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_ci
+ *
+ * SATA UAHC Command Issue Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_ci
+{
+ uint32_t u;
+ struct bdk_satax_uahc_p0_ci_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t ci : 32; /**< [ 31: 0](R/W1S/H) Command issued. */
+#else /* Word 0 - Little Endian */
+ uint32_t ci : 32; /**< [ 31: 0](R/W1S/H) Command issued. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_p0_ci_s cn; */
+};
+typedef union bdk_satax_uahc_p0_ci bdk_satax_uahc_p0_ci_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_P0_CI for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_P0_CI(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_CI(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000138ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000138ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000138ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000138ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_P0_CI", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_P0_CI(a) bdk_satax_uahc_p0_ci_t
+#define bustype_BDK_SATAX_UAHC_P0_CI(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_CI(a) "SATAX_UAHC_P0_CI"
+#define device_bar_BDK_SATAX_UAHC_P0_CI(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_CI(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_CI(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uahc_p0_clb
+ *
+ * SATA UAHC Command-List Base-Address Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_clb
+{
+ uint64_t u;
+ struct bdk_satax_uahc_p0_clb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clb : 54; /**< [ 63: 10](R/W) Command-list base address. */
+ uint64_t reserved_0_9 : 10;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_9 : 10;
+ uint64_t clb : 54; /**< [ 63: 10](R/W) Command-list base address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_p0_clb_s cn; */
+};
+typedef union bdk_satax_uahc_p0_clb bdk_satax_uahc_p0_clb_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_P0_CLB for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_P0_CLB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_CLB(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000100ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000100ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000100ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000100ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_P0_CLB", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_P0_CLB(a) bdk_satax_uahc_p0_clb_t
+#define bustype_BDK_SATAX_UAHC_P0_CLB(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UAHC_P0_CLB(a) "SATAX_UAHC_P0_CLB"
+#define device_bar_BDK_SATAX_UAHC_P0_CLB(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_CLB(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_CLB(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_cmd
+ *
+ * SATA UAHC Command Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_cmd
+{
+ uint32_t u;
+ struct bdk_satax_uahc_p0_cmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t icc : 4; /**< [ 31: 28](R/W) Interface communication control. */
+ uint32_t asp : 1; /**< [ 27: 27](R/W) Aggressive slumber/partial. */
+ uint32_t alpe : 1; /**< [ 26: 26](R/W) Aggressive link-power-management enable. */
+ uint32_t dlae : 1; /**< [ 25: 25](R/W) Drive LED on ATAPI enable. */
+ uint32_t atapi : 1; /**< [ 24: 24](R/W) Device is ATAPI. */
+ uint32_t apste : 1; /**< [ 23: 23](R/W) Automatic partial to slumber transitions enable. */
+ uint32_t fbscp : 1; /**< [ 22: 22](R/W) FIS-based switching capable port. Write-once. */
+ uint32_t esp : 1; /**< [ 21: 21](R/W) External SATA port. Write-once. */
+ uint32_t cpd : 1; /**< [ 20: 20](R/W) Cold-presence detection. Write-once. */
+ uint32_t mpsp : 1; /**< [ 19: 19](R/W) Mechanical presence switch attached to port. Write-once. */
+ uint32_t hpcp : 1; /**< [ 18: 18](R/W) Hot-plug-capable support. Write-once. */
+ uint32_t pma : 1; /**< [ 17: 17](R/W) Port multiplier attached. */
+ uint32_t cps : 1; /**< [ 16: 16](RO) Cold presence state. */
+ uint32_t cr : 1; /**< [ 15: 15](RO) Command list running. */
+ uint32_t fr : 1; /**< [ 14: 14](RO/H) FIS receive running. */
+ uint32_t mpss : 1; /**< [ 13: 13](RO) Mechanical presence switch state. */
+ uint32_t ccs : 5; /**< [ 12: 8](RO) Current-command slot. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t fre : 1; /**< [ 4: 4](R/W) FIS-receive enable. */
+ uint32_t clo : 1; /**< [ 3: 3](WO) Command-list override. */
+ uint32_t pod : 1; /**< [ 2: 2](R/W) Power-on device. R/W only if CPD = 1, else read only. */
+ uint32_t sud : 1; /**< [ 1: 1](R/W) Spin-up device. R/W only if SATA()_UAHC_GBL_CAP[SSS]=1, else read only.
+ Setting this bit triggers a COMRESET initialization sequence. */
+ uint32_t st : 1; /**< [ 0: 0](R/W) Start. */
+#else /* Word 0 - Little Endian */
+ uint32_t st : 1; /**< [ 0: 0](R/W) Start. */
+ uint32_t sud : 1; /**< [ 1: 1](R/W) Spin-up device. R/W only if SATA()_UAHC_GBL_CAP[SSS]=1, else read only.
+ Setting this bit triggers a COMRESET initialization sequence. */
+ uint32_t pod : 1; /**< [ 2: 2](R/W) Power-on device. R/W only if CPD = 1, else read only. */
+ uint32_t clo : 1; /**< [ 3: 3](WO) Command-list override. */
+ uint32_t fre : 1; /**< [ 4: 4](R/W) FIS-receive enable. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t ccs : 5; /**< [ 12: 8](RO) Current-command slot. */
+ uint32_t mpss : 1; /**< [ 13: 13](RO) Mechanical presence switch state. */
+ uint32_t fr : 1; /**< [ 14: 14](RO/H) FIS receive running. */
+ uint32_t cr : 1; /**< [ 15: 15](RO) Command list running. */
+ uint32_t cps : 1; /**< [ 16: 16](RO) Cold presence state. */
+ uint32_t pma : 1; /**< [ 17: 17](R/W) Port multiplier attached. */
+ uint32_t hpcp : 1; /**< [ 18: 18](R/W) Hot-plug-capable support. Write-once. */
+ uint32_t mpsp : 1; /**< [ 19: 19](R/W) Mechanical presence switch attached to port. Write-once. */
+ uint32_t cpd : 1; /**< [ 20: 20](R/W) Cold-presence detection. Write-once. */
+ uint32_t esp : 1; /**< [ 21: 21](R/W) External SATA port. Write-once. */
+ uint32_t fbscp : 1; /**< [ 22: 22](R/W) FIS-based switching capable port. Write-once. */
+ uint32_t apste : 1; /**< [ 23: 23](R/W) Automatic partial to slumber transitions enable. */
+ uint32_t atapi : 1; /**< [ 24: 24](R/W) Device is ATAPI. */
+ uint32_t dlae : 1; /**< [ 25: 25](R/W) Drive LED on ATAPI enable. */
+ uint32_t alpe : 1; /**< [ 26: 26](R/W) Aggressive link-power-management enable. */
+ uint32_t asp : 1; /**< [ 27: 27](R/W) Aggressive slumber/partial. */
+ uint32_t icc : 4; /**< [ 31: 28](R/W) Interface communication control. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_p0_cmd_s cn; */
+};
+typedef union bdk_satax_uahc_p0_cmd bdk_satax_uahc_p0_cmd_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_P0_CMD for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_P0_CMD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_CMD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000118ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000118ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000118ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000118ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_P0_CMD", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_P0_CMD(a) bdk_satax_uahc_p0_cmd_t
+#define bustype_BDK_SATAX_UAHC_P0_CMD(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_CMD(a) "SATAX_UAHC_P0_CMD"
+#define device_bar_BDK_SATAX_UAHC_P0_CMD(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_CMD(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_CMD(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_devslp
+ *
+ * SATA UAHC Device Sleep Register
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_devslp
+{
+ uint32_t u;
+ struct bdk_satax_uahc_p0_devslp_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_29_31 : 3;
+ uint32_t dm : 4; /**< [ 28: 25](R/W) DITO multiplier. Write once only. */
+ uint32_t dito : 10; /**< [ 24: 15](R/W) Device sleep idle timeout.
+ If [DSP]=0, then these bits are read-only zero and software should treat them as reserved.
+ If [DSP]=1, then these bits are read-write and reset to 0xA on powerup only. */
+ uint32_t mdat : 5; /**< [ 14: 10](R/W) Minimum device sleep assertion time.
+ If [DSP]=0, then these bits are read-only zero and software should treat them as reserved.
+ If [DSP]=1, then these bits are read-write and reset to 0xA on powerup only. */
+ uint32_t deto : 8; /**< [ 9: 2](R/W) Device sleep exit timeout.
+ If [DSP]=0, then these bits are read-only zero and software should treat them as reserved.
+ If [DSP]=1, then these bits are read-write and reset to 0x14 on powerup only. */
+ uint32_t dsp : 1; /**< [ 1: 1](R/W) Device sleep present. Write once only. */
+ uint32_t adse : 1; /**< [ 0: 0](R/W) Aggressive device sleep enable.
+ If [DSP]=0, then this bit is read-only zero and software should treat it as reserved.
+ If [DSP]=1, then this bit is read-write. */
+#else /* Word 0 - Little Endian */
+ uint32_t adse : 1; /**< [ 0: 0](R/W) Aggressive device sleep enable.
+ If [DSP]=0, then this bit is read-only zero and software should treat it as reserved.
+ If [DSP]=1, then this bit is read-write. */
+ uint32_t dsp : 1; /**< [ 1: 1](R/W) Device sleep present. Write once only. */
+ uint32_t deto : 8; /**< [ 9: 2](R/W) Device sleep exit timeout.
+ If [DSP]=0, then these bits are read-only zero and software should treat them as reserved.
+ If [DSP]=1, then these bits are read-write and reset to 0x14 on powerup only. */
+ uint32_t mdat : 5; /**< [ 14: 10](R/W) Minimum device sleep assertion time.
+ If [DSP]=0, then these bits are read-only zero and software should treat them as reserved.
+ If [DSP]=1, then these bits are read-write and reset to 0xA on powerup only. */
+ uint32_t dito : 10; /**< [ 24: 15](R/W) Device sleep idle timeout.
+ If [DSP]=0, then these bits are read-only zero and software should treat them as reserved.
+ If [DSP]=1, then these bits are read-write and reset to 0xA on powerup only. */
+ uint32_t dm : 4; /**< [ 28: 25](R/W) DITO multiplier. Write once only. */
+ uint32_t reserved_29_31 : 3;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_p0_devslp_s cn; */
+};
+typedef union bdk_satax_uahc_p0_devslp bdk_satax_uahc_p0_devslp_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_P0_DEVSLP. Present only on CN9XXX
+ * parts (DevSleep support); any other model ends in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_P0_DEVSLP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_DEVSLP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000144ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_P0_DEVSLP", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_P0_DEVSLP(a) bdk_satax_uahc_p0_devslp_t
+#define bustype_BDK_SATAX_UAHC_P0_DEVSLP(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_DEVSLP(a) "SATAX_UAHC_P0_DEVSLP"
+#define device_bar_BDK_SATAX_UAHC_P0_DEVSLP(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_DEVSLP(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_DEVSLP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_dmacr
+ *
+ * SATA UAHC DMA Control Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_dmacr
+{
+ uint32_t u;
+ struct bdk_satax_uahc_p0_dmacr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t rxts : 4; /**< [ 7: 4](R/W) Receive transaction size. This field is R/W when SATA()_UAHC_P0_CMD[ST] = 0
+ and read only when SATA()_UAHC_P0_CMD[ST] = 1. */
+ uint32_t txts : 4; /**< [ 3: 0](R/W) Transmit transaction size. This field is R/W when SATA()_UAHC_P0_CMD[ST] = 0
+ and read only when SATA()_UAHC_P0_CMD[ST] = 1. */
+#else /* Word 0 - Little Endian */
+ uint32_t txts : 4; /**< [ 3: 0](R/W) Transmit transaction size. This field is R/W when SATA()_UAHC_P0_CMD[ST] = 0
+ and read only when SATA()_UAHC_P0_CMD[ST] = 1. */
+ uint32_t rxts : 4; /**< [ 7: 4](R/W) Receive transaction size. This field is R/W when SATA()_UAHC_P0_CMD[ST] = 0
+ and read only when SATA()_UAHC_P0_CMD[ST] = 1. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_p0_dmacr_s cn; */
+};
+typedef union bdk_satax_uahc_p0_dmacr bdk_satax_uahc_p0_dmacr_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_P0_DMACR for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_P0_DMACR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_DMACR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000170ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000170ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000170ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000170ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_P0_DMACR", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_P0_DMACR(a) bdk_satax_uahc_p0_dmacr_t
+#define bustype_BDK_SATAX_UAHC_P0_DMACR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_DMACR(a) "SATAX_UAHC_P0_DMACR"
+#define device_bar_BDK_SATAX_UAHC_P0_DMACR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_DMACR(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_DMACR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uahc_p0_fb
+ *
+ * SATA UAHC FIS Base-Address Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_fb
+{
+ uint64_t u;
+ struct bdk_satax_uahc_p0_fb_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t fb : 56; /**< [ 63: 8](R/W) FIS base address. */
+ uint64_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_7 : 8;
+ uint64_t fb : 56; /**< [ 63: 8](R/W) FIS base address. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uahc_p0_fb_s cn; */
+};
+typedef union bdk_satax_uahc_p0_fb bdk_satax_uahc_p0_fb_t;
+
+/*
+ * Physical CSR address of SATA(a)_UAHC_P0_FB for the running chip model;
+ * invalid controller indices end in __bdk_csr_fatal().
+ */
+static inline uint64_t BDK_SATAX_UAHC_P0_FB(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_FB(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000000108ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000000108ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000000108ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000000108ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UAHC_P0_FB", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SATAX_UAHC_P0_FB(a) bdk_satax_uahc_p0_fb_t
+#define bustype_BDK_SATAX_UAHC_P0_FB(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UAHC_P0_FB(a) "SATAX_UAHC_P0_FB"
+#define device_bar_BDK_SATAX_UAHC_P0_FB(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_FB(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_FB(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_fbs
+ *
+ * SATA UAHC FIS-Based Switching Control Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_fbs
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_fbs_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_20_31        : 12;
+        uint32_t dwe                   : 4;  /**< [ 19: 16](RO) Device with error. */
+        uint32_t ado                   : 4;  /**< [ 15: 12](RO) Active device optimization. */
+        uint32_t dev                   : 4;  /**< [ 11:  8](R/W) Device to issue. */
+        uint32_t reserved_3_7          : 5;
+        uint32_t sde                   : 1;  /**< [  2:  2](RO) Single device error. */
+        uint32_t dec                   : 1;  /**< [  1:  1](R/W1C/H) Device error clear. */
+        uint32_t en                    : 1;  /**< [  0:  0](R/W) Enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t en                    : 1;  /**< [  0:  0](R/W) Enable. */
+        uint32_t dec                   : 1;  /**< [  1:  1](R/W1C/H) Device error clear. */
+        uint32_t sde                   : 1;  /**< [  2:  2](RO) Single device error. */
+        uint32_t reserved_3_7          : 5;
+        uint32_t dev                   : 4;  /**< [ 11:  8](R/W) Device to issue. */
+        uint32_t ado                   : 4;  /**< [ 15: 12](RO) Active device optimization. */
+        uint32_t dwe                   : 4;  /**< [ 19: 16](RO) Device with error. */
+        uint32_t reserved_20_31        : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_fbs_s cn; */
+};
+typedef union bdk_satax_uahc_p0_fbs bdk_satax_uahc_p0_fbs_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_FBS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_FBS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000140ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000140ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000140ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000140ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_FBS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_FBS(a) bdk_satax_uahc_p0_fbs_t
+#define bustype_BDK_SATAX_UAHC_P0_FBS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_FBS(a) "SATAX_UAHC_P0_FBS"
+#define device_bar_BDK_SATAX_UAHC_P0_FBS(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_FBS(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_FBS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_ie
+ *
+ * SATA UAHC Interrupt Enable Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_ie
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_ie_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t cpde                  : 1;  /**< [ 31: 31](R/W) Cold-port-detect enable. */
+        uint32_t tfee                  : 1;  /**< [ 30: 30](R/W) Task-file-error enable. */
+        uint32_t hbfe                  : 1;  /**< [ 29: 29](R/W) Host-bus fatal-error enable. */
+        uint32_t hbde                  : 1;  /**< [ 28: 28](R/W) Host-bus data-error enable. */
+        uint32_t ife                   : 1;  /**< [ 27: 27](R/W) Interface fatal-error enable. */
+        uint32_t infe                  : 1;  /**< [ 26: 26](R/W) Interface non-fatal-error enable. */
+        uint32_t reserved_25           : 1;
+        uint32_t ofe                   : 1;  /**< [ 24: 24](R/W) Overflow enable. */
+        uint32_t impe                  : 1;  /**< [ 23: 23](R/W) Incorrect port-multiplier enable. */
+        uint32_t prce                  : 1;  /**< [ 22: 22](R/W) PHY-ready-change enable. */
+        uint32_t reserved_8_21         : 14;
+        uint32_t dmpe                  : 1;  /**< [  7:  7](R/W) Device mechanical-presence enable. */
+        uint32_t pce                   : 1;  /**< [  6:  6](R/W) Port-connect-change enable. */
+        uint32_t dpe                   : 1;  /**< [  5:  5](R/W) Descriptor-processed enable. */
+        uint32_t ufe                   : 1;  /**< [  4:  4](R/W) Unknown-FIS-interrupt enable. */
+        uint32_t sdbe                  : 1;  /**< [  3:  3](R/W) Set device-bits-interrupt enable. */
+        uint32_t dse                   : 1;  /**< [  2:  2](R/W) DMA-setup FIS interrupt enable. */
+        uint32_t pse                   : 1;  /**< [  1:  1](R/W) PIO-setup FIS interrupt enable. */
+        uint32_t dhre                  : 1;  /**< [  0:  0](R/W) Device-to-host register FIS interrupt enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t dhre                  : 1;  /**< [  0:  0](R/W) Device-to-host register FIS interrupt enable. */
+        uint32_t pse                   : 1;  /**< [  1:  1](R/W) PIO-setup FIS interrupt enable. */
+        uint32_t dse                   : 1;  /**< [  2:  2](R/W) DMA-setup FIS interrupt enable. */
+        uint32_t sdbe                  : 1;  /**< [  3:  3](R/W) Set device-bits-interrupt enable. */
+        uint32_t ufe                   : 1;  /**< [  4:  4](R/W) Unknown-FIS-interrupt enable. */
+        uint32_t dpe                   : 1;  /**< [  5:  5](R/W) Descriptor-processed enable. */
+        uint32_t pce                   : 1;  /**< [  6:  6](R/W) Port-connect-change enable. */
+        uint32_t dmpe                  : 1;  /**< [  7:  7](R/W) Device mechanical-presence enable. */
+        uint32_t reserved_8_21         : 14;
+        uint32_t prce                  : 1;  /**< [ 22: 22](R/W) PHY-ready-change enable. */
+        uint32_t impe                  : 1;  /**< [ 23: 23](R/W) Incorrect port-multiplier enable. */
+        uint32_t ofe                   : 1;  /**< [ 24: 24](R/W) Overflow enable. */
+        uint32_t reserved_25           : 1;
+        uint32_t infe                  : 1;  /**< [ 26: 26](R/W) Interface non-fatal-error enable. */
+        uint32_t ife                   : 1;  /**< [ 27: 27](R/W) Interface fatal-error enable. */
+        uint32_t hbde                  : 1;  /**< [ 28: 28](R/W) Host-bus data-error enable. */
+        uint32_t hbfe                  : 1;  /**< [ 29: 29](R/W) Host-bus fatal-error enable. */
+        uint32_t tfee                  : 1;  /**< [ 30: 30](R/W) Task-file-error enable. */
+        uint32_t cpde                  : 1;  /**< [ 31: 31](R/W) Cold-port-detect enable. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_ie_s cn; */
+};
+typedef union bdk_satax_uahc_p0_ie bdk_satax_uahc_p0_ie_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_IE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_IE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000114ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000114ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000114ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000114ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_IE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_IE(a) bdk_satax_uahc_p0_ie_t
+#define bustype_BDK_SATAX_UAHC_P0_IE(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_IE(a) "SATAX_UAHC_P0_IE"
+#define device_bar_BDK_SATAX_UAHC_P0_IE(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_IE(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_IE(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_is
+ *
+ * SATA UAHC Interrupt Status Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_is
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_is_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t cpds                  : 1;  /**< [ 31: 31](R/W1C/H) Cold-port detect status. */
+        uint32_t tfes                  : 1;  /**< [ 30: 30](R/W1C/H) Task-file error status. */
+        uint32_t hbfs                  : 1;  /**< [ 29: 29](R/W1C/H) Host-bus fatal-error status. */
+        uint32_t hbds                  : 1;  /**< [ 28: 28](R/W1C/H) Host-bus data-error status. */
+        uint32_t ifs                   : 1;  /**< [ 27: 27](R/W1C/H) Interface fatal-error status. */
+        uint32_t infs                  : 1;  /**< [ 26: 26](R/W1C/H) Interface non-fatal-error status. */
+        uint32_t reserved_25           : 1;
+        uint32_t ofs                   : 1;  /**< [ 24: 24](R/W1C/H) Overflow status. */
+        uint32_t imps                  : 1;  /**< [ 23: 23](R/W1C/H) Incorrect port-multiplier status. */
+        uint32_t prcs                  : 1;  /**< [ 22: 22](RO/H) PHY-ready change status. */
+        uint32_t reserved_8_21         : 14;
+        uint32_t dmps                  : 1;  /**< [  7:  7](R/W1C/H) Device mechanical-presence status. */
+        uint32_t pcs                   : 1;  /**< [  6:  6](RO/H) Port-connect-change status. */
+        uint32_t dps                   : 1;  /**< [  5:  5](R/W1C/H) Descriptor processed. */
+        uint32_t ufs                   : 1;  /**< [  4:  4](RO) Unknown FIS interrupt. */
+        uint32_t sdbs                  : 1;  /**< [  3:  3](R/W1C/H) Set device bits interrupt. */
+        uint32_t dss                   : 1;  /**< [  2:  2](R/W1C/H) DMA setup FIS interrupt. */
+        uint32_t pss                   : 1;  /**< [  1:  1](R/W1C/H) PIO setup FIS interrupt. */
+        uint32_t dhrs                  : 1;  /**< [  0:  0](R/W1C/H) Device-to-host register FIS interrupt. */
+#else /* Word 0 - Little Endian */
+        uint32_t dhrs                  : 1;  /**< [  0:  0](R/W1C/H) Device-to-host register FIS interrupt. */
+        uint32_t pss                   : 1;  /**< [  1:  1](R/W1C/H) PIO setup FIS interrupt. */
+        uint32_t dss                   : 1;  /**< [  2:  2](R/W1C/H) DMA setup FIS interrupt. */
+        uint32_t sdbs                  : 1;  /**< [  3:  3](R/W1C/H) Set device bits interrupt. */
+        uint32_t ufs                   : 1;  /**< [  4:  4](RO) Unknown FIS interrupt. */
+        uint32_t dps                   : 1;  /**< [  5:  5](R/W1C/H) Descriptor processed. */
+        uint32_t pcs                   : 1;  /**< [  6:  6](RO/H) Port-connect-change status. */
+        uint32_t dmps                  : 1;  /**< [  7:  7](R/W1C/H) Device mechanical-presence status. */
+        uint32_t reserved_8_21         : 14;
+        uint32_t prcs                  : 1;  /**< [ 22: 22](RO/H) PHY-ready change status. */
+        uint32_t imps                  : 1;  /**< [ 23: 23](R/W1C/H) Incorrect port-multiplier status. */
+        uint32_t ofs                   : 1;  /**< [ 24: 24](R/W1C/H) Overflow status. */
+        uint32_t reserved_25           : 1;
+        uint32_t infs                  : 1;  /**< [ 26: 26](R/W1C/H) Interface non-fatal-error status. */
+        uint32_t ifs                   : 1;  /**< [ 27: 27](R/W1C/H) Interface fatal-error status. */
+        uint32_t hbds                  : 1;  /**< [ 28: 28](R/W1C/H) Host-bus data-error status. */
+        uint32_t hbfs                  : 1;  /**< [ 29: 29](R/W1C/H) Host-bus fatal-error status. */
+        uint32_t tfes                  : 1;  /**< [ 30: 30](R/W1C/H) Task-file error status. */
+        uint32_t cpds                  : 1;  /**< [ 31: 31](R/W1C/H) Cold-port detect status. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_is_s cn; */
+};
+typedef union bdk_satax_uahc_p0_is bdk_satax_uahc_p0_is_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_IS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_IS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000110ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000110ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000110ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000110ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_IS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_IS(a) bdk_satax_uahc_p0_is_t
+#define bustype_BDK_SATAX_UAHC_P0_IS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_IS(a) "SATAX_UAHC_P0_IS"
+#define device_bar_BDK_SATAX_UAHC_P0_IS(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_IS(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_IS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_phycr
+ *
+ * SATA UAHC PHY Control Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_phycr
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_phycr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ctrl                  : 32; /**< [ 31:  0](R/W) Port PHY control. */
+#else /* Word 0 - Little Endian */
+        uint32_t ctrl                  : 32; /**< [ 31:  0](R/W) Port PHY control. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_phycr_s cn; */
+};
+typedef union bdk_satax_uahc_p0_phycr bdk_satax_uahc_p0_phycr_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_PHYCR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_PHYCR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000178ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000178ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000178ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000178ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_PHYCR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_PHYCR(a) bdk_satax_uahc_p0_phycr_t
+#define bustype_BDK_SATAX_UAHC_P0_PHYCR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_PHYCR(a) "SATAX_UAHC_P0_PHYCR"
+#define device_bar_BDK_SATAX_UAHC_P0_PHYCR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_PHYCR(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_PHYCR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_physr
+ *
+ * SATA UAHC PHY Status Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_physr
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_physr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t stat                  : 32; /**< [ 31:  0](RO) Port PHY status. */
+#else /* Word 0 - Little Endian */
+        uint32_t stat                  : 32; /**< [ 31:  0](RO) Port PHY status. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_physr_s cn; */
+};
+typedef union bdk_satax_uahc_p0_physr bdk_satax_uahc_p0_physr_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_PHYSR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_PHYSR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x81000000017cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x81000000017cll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x81000000017cll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x81000000017cll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_PHYSR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_PHYSR(a) bdk_satax_uahc_p0_physr_t
+#define bustype_BDK_SATAX_UAHC_P0_PHYSR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_PHYSR(a) "SATAX_UAHC_P0_PHYSR"
+#define device_bar_BDK_SATAX_UAHC_P0_PHYSR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_PHYSR(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_PHYSR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_sact
+ *
+ * SATA UAHC SATA Active Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_sact
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_sact_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ds                    : 32; /**< [ 31:  0](R/W1S/H) Device status. */
+#else /* Word 0 - Little Endian */
+        uint32_t ds                    : 32; /**< [ 31:  0](R/W1S/H) Device status. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_sact_s cn; */
+};
+typedef union bdk_satax_uahc_p0_sact bdk_satax_uahc_p0_sact_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_SACT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_SACT(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000134ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000134ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000134ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000134ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_SACT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_SACT(a) bdk_satax_uahc_p0_sact_t
+#define bustype_BDK_SATAX_UAHC_P0_SACT(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_SACT(a) "SATAX_UAHC_P0_SACT"
+#define device_bar_BDK_SATAX_UAHC_P0_SACT(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_SACT(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_SACT(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_sctl
+ *
+ * SATA UAHC SATA Control Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_sctl
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_sctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_11_31        : 21;
+        uint32_t ipm                   : 3;  /**< [ 10:  8](R/W) Interface power-management transitions allowed. */
+        uint32_t reserved_6_7          : 2;
+        uint32_t spd                   : 2;  /**< [  5:  4](R/W) Speed allowed. */
+        uint32_t reserved_3            : 1;
+        uint32_t det                   : 3;  /**< [  2:  0](R/W) Device-detection initialization. */
+#else /* Word 0 - Little Endian */
+        uint32_t det                   : 3;  /**< [  2:  0](R/W) Device-detection initialization. */
+        uint32_t reserved_3            : 1;
+        uint32_t spd                   : 2;  /**< [  5:  4](R/W) Speed allowed. */
+        uint32_t reserved_6_7          : 2;
+        uint32_t ipm                   : 3;  /**< [ 10:  8](R/W) Interface power-management transitions allowed. */
+        uint32_t reserved_11_31        : 21;
+#endif /* Word 0 - End */
+    } s;
+    /* NOTE(review): on CN8 parts IPM is only 2 bits wide ([9:8]); CN9 widens it to 3 bits
+       ([10:8]) as in the generic 's' layout above. */
+    struct bdk_satax_uahc_p0_sctl_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_10_31        : 22;
+        uint32_t ipm                   : 2;  /**< [  9:  8](R/W) Interface power-management transitions allowed. */
+        uint32_t reserved_6_7          : 2;
+        uint32_t spd                   : 2;  /**< [  5:  4](R/W) Speed allowed. */
+        uint32_t reserved_3            : 1;
+        uint32_t det                   : 3;  /**< [  2:  0](R/W) Device-detection initialization. */
+#else /* Word 0 - Little Endian */
+        uint32_t det                   : 3;  /**< [  2:  0](R/W) Device-detection initialization. */
+        uint32_t reserved_3            : 1;
+        uint32_t spd                   : 2;  /**< [  5:  4](R/W) Speed allowed. */
+        uint32_t reserved_6_7          : 2;
+        uint32_t ipm                   : 2;  /**< [  9:  8](R/W) Interface power-management transitions allowed. */
+        uint32_t reserved_10_31        : 22;
+#endif /* Word 0 - End */
+    } cn8;
+    /* struct bdk_satax_uahc_p0_sctl_s cn9; */
+};
+typedef union bdk_satax_uahc_p0_sctl bdk_satax_uahc_p0_sctl_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_SCTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_SCTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x81000000012cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x81000000012cll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x81000000012cll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x81000000012cll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_SCTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_SCTL(a) bdk_satax_uahc_p0_sctl_t
+#define bustype_BDK_SATAX_UAHC_P0_SCTL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_SCTL(a) "SATAX_UAHC_P0_SCTL"
+#define device_bar_BDK_SATAX_UAHC_P0_SCTL(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_SCTL(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_SCTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_serr
+ *
+ * SATA UAHC SATA Error Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_serr
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_serr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_27_31        : 5;
+        uint32_t diag_x                : 1;  /**< [ 26: 26](R/W1C/H) Exchanged. */
+        uint32_t diag_f                : 1;  /**< [ 25: 25](R/W1C/H) Unknown FIS type. */
+        uint32_t diag_t                : 1;  /**< [ 24: 24](R/W1C/H) Transport state transition error. */
+        uint32_t diag_s                : 1;  /**< [ 23: 23](R/W1C/H) Link sequence error. */
+        uint32_t diag_h                : 1;  /**< [ 22: 22](R/W1C/H) Handshake error. */
+        uint32_t diag_c                : 1;  /**< [ 21: 21](R/W1C/H) CRC error. */
+        uint32_t diag_d                : 1;  /**< [ 20: 20](R/W1C/H) Disparity error. */
+        uint32_t diag_b                : 1;  /**< [ 19: 19](R/W1C/H) 10/8 bit decode error. */
+        uint32_t diag_w                : 1;  /**< [ 18: 18](R/W1C/H) COMWAKE detected. */
+        uint32_t diag_i                : 1;  /**< [ 17: 17](R/W1C/H) PHY internal error. */
+        uint32_t diag_n                : 1;  /**< [ 16: 16](R/W1C/H) PHY ready change. */
+        uint32_t reserved_12_15        : 4;
+        uint32_t err_e                 : 1;  /**< [ 11: 11](R/W1C/H) Internal error. */
+        uint32_t err_p                 : 1;  /**< [ 10: 10](R/W1C/H) Protocol error. */
+        uint32_t err_c                 : 1;  /**< [  9:  9](R/W1C/H) Non-recovered persistent communication error. */
+        uint32_t err_t                 : 1;  /**< [  8:  8](R/W1C/H) Non-recovered transient data integrity error. */
+        uint32_t reserved_2_7          : 6;
+        uint32_t err_m                 : 1;  /**< [  1:  1](R/W1C/H) Recovered communication error. */
+        uint32_t err_i                 : 1;  /**< [  0:  0](R/W1C/H) Recovered data integrity. */
+#else /* Word 0 - Little Endian */
+        uint32_t err_i                 : 1;  /**< [  0:  0](R/W1C/H) Recovered data integrity. */
+        uint32_t err_m                 : 1;  /**< [  1:  1](R/W1C/H) Recovered communication error. */
+        uint32_t reserved_2_7          : 6;
+        uint32_t err_t                 : 1;  /**< [  8:  8](R/W1C/H) Non-recovered transient data integrity error. */
+        uint32_t err_c                 : 1;  /**< [  9:  9](R/W1C/H) Non-recovered persistent communication error. */
+        uint32_t err_p                 : 1;  /**< [ 10: 10](R/W1C/H) Protocol error. */
+        uint32_t err_e                 : 1;  /**< [ 11: 11](R/W1C/H) Internal error. */
+        uint32_t reserved_12_15        : 4;
+        uint32_t diag_n                : 1;  /**< [ 16: 16](R/W1C/H) PHY ready change. */
+        uint32_t diag_i                : 1;  /**< [ 17: 17](R/W1C/H) PHY internal error. */
+        uint32_t diag_w                : 1;  /**< [ 18: 18](R/W1C/H) COMWAKE detected. */
+        uint32_t diag_b                : 1;  /**< [ 19: 19](R/W1C/H) 10/8 bit decode error. */
+        uint32_t diag_d                : 1;  /**< [ 20: 20](R/W1C/H) Disparity error. */
+        uint32_t diag_c                : 1;  /**< [ 21: 21](R/W1C/H) CRC error. */
+        uint32_t diag_h                : 1;  /**< [ 22: 22](R/W1C/H) Handshake error. */
+        uint32_t diag_s                : 1;  /**< [ 23: 23](R/W1C/H) Link sequence error. */
+        uint32_t diag_t                : 1;  /**< [ 24: 24](R/W1C/H) Transport state transition error. */
+        uint32_t diag_f                : 1;  /**< [ 25: 25](R/W1C/H) Unknown FIS type. */
+        uint32_t diag_x                : 1;  /**< [ 26: 26](R/W1C/H) Exchanged. */
+        uint32_t reserved_27_31        : 5;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_serr_s cn; */
+};
+typedef union bdk_satax_uahc_p0_serr bdk_satax_uahc_p0_serr_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_SERR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_SERR(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000130ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000130ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000130ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000130ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_SERR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_SERR(a) bdk_satax_uahc_p0_serr_t
+#define bustype_BDK_SATAX_UAHC_P0_SERR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_SERR(a) "SATAX_UAHC_P0_SERR"
+#define device_bar_BDK_SATAX_UAHC_P0_SERR(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_SERR(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_SERR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_sig
+ *
+ * SATA UAHC Signature Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_sig
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_sig_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t sig                   : 32; /**< [ 31:  0](RO/H) Signature. */
+#else /* Word 0 - Little Endian */
+        uint32_t sig                   : 32; /**< [ 31:  0](RO/H) Signature. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_sig_s cn; */
+};
+typedef union bdk_satax_uahc_p0_sig bdk_satax_uahc_p0_sig_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_SIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_SIG(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000124ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000124ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000124ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000124ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_SIG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_SIG(a) bdk_satax_uahc_p0_sig_t
+#define bustype_BDK_SATAX_UAHC_P0_SIG(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_SIG(a) "SATAX_UAHC_P0_SIG"
+#define device_bar_BDK_SATAX_UAHC_P0_SIG(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_SIG(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_SIG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_sntf
+ *
+ * SATA UAHC SATA Notification Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_sntf
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_sntf_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t pmn                   : 16; /**< [ 15:  0](R/W1C/H) PM notify. */
+#else /* Word 0 - Little Endian */
+        uint32_t pmn                   : 16; /**< [ 15:  0](R/W1C/H) PM notify. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_sntf_s cn; */
+};
+typedef union bdk_satax_uahc_p0_sntf bdk_satax_uahc_p0_sntf_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_SNTF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_SNTF(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x81000000013cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x81000000013cll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x81000000013cll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x81000000013cll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_SNTF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_SNTF(a) bdk_satax_uahc_p0_sntf_t
+#define bustype_BDK_SATAX_UAHC_P0_SNTF(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_SNTF(a) "SATAX_UAHC_P0_SNTF"
+#define device_bar_BDK_SATAX_UAHC_P0_SNTF(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_SNTF(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_SNTF(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_ssts
+ *
+ * SATA UAHC SATA Status Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_ssts
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_ssts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_12_31        : 20;
+        uint32_t ipm                   : 4;  /**< [ 11:  8](RO/H) Interface power management. */
+        uint32_t spd                   : 4;  /**< [  7:  4](RO/H) Current interface speed. */
+        uint32_t det                   : 4;  /**< [  3:  0](RO/H) Device detection. */
+#else /* Word 0 - Little Endian */
+        uint32_t det                   : 4;  /**< [  3:  0](RO/H) Device detection. */
+        uint32_t spd                   : 4;  /**< [  7:  4](RO/H) Current interface speed. */
+        uint32_t ipm                   : 4;  /**< [ 11:  8](RO/H) Interface power management. */
+        uint32_t reserved_12_31        : 20;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_ssts_s cn; */
+};
+typedef union bdk_satax_uahc_p0_ssts bdk_satax_uahc_p0_ssts_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_SSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_SSTS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000128ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000128ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000128ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000128ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_SSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_SSTS(a) bdk_satax_uahc_p0_ssts_t
+#define bustype_BDK_SATAX_UAHC_P0_SSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_SSTS(a) "SATAX_UAHC_P0_SSTS"
+#define device_bar_BDK_SATAX_UAHC_P0_SSTS(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_SSTS(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_SSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) sata#_uahc_p0_tfd
+ *
+ * SATA UAHC Task File Data Registers
+ * Internal:
+ * See DWC_ahsata databook v5.00.
+ */
+union bdk_satax_uahc_p0_tfd
+{
+    uint32_t u;
+    struct bdk_satax_uahc_p0_tfd_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t tferr                 : 8;  /**< [ 15:  8](RO) Copy of task-file error register. */
+        uint32_t sts                   : 8;  /**< [  7:  0](RO/H) Copy of task-file status register.
+                                                                 \<7\> = BSY: Indicates the interface is busy.
+                                                                 \<6:4\> = Command specific.
+                                                                 \<3\> = DRQ: Indicates a data transfer is requested.
+                                                                 \<2:1\> = Command specific.
+                                                                 \<0\> = ERR: Indicates an error during the transfer. */
+#else /* Word 0 - Little Endian */
+        uint32_t sts                   : 8;  /**< [  7:  0](RO/H) Copy of task-file status register.
+                                                                 \<7\> = BSY: Indicates the interface is busy.
+                                                                 \<6:4\> = Command specific.
+                                                                 \<3\> = DRQ: Indicates a data transfer is requested.
+                                                                 \<2:1\> = Command specific.
+                                                                 \<0\> = ERR: Indicates an error during the transfer. */
+        uint32_t tferr                 : 8;  /**< [ 15:  8](RO) Copy of task-file error register. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uahc_p0_tfd_s cn; */
+};
+typedef union bdk_satax_uahc_p0_tfd bdk_satax_uahc_p0_tfd_t;
+
+/* NOTE(review): generated address lookup; per-model range check makes the index mask
+   redundant safety -- do not hand-edit. */
+static inline uint64_t BDK_SATAX_UAHC_P0_TFD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UAHC_P0_TFD(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000000120ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000000120ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000000120ll + 0x1000000000ll * ((a) & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000000120ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UAHC_P0_TFD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UAHC_P0_TFD(a) bdk_satax_uahc_p0_tfd_t
+#define bustype_BDK_SATAX_UAHC_P0_TFD(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_SATAX_UAHC_P0_TFD(a) "SATAX_UAHC_P0_TFD"
+#define device_bar_BDK_SATAX_UAHC_P0_TFD(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UAHC_P0_TFD(a) (a)
+#define arguments_BDK_SATAX_UAHC_P0_TFD(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_bist_status
+ *
+ * SATA UCTL BIST Status Register
+ * Results from BIST runs of SATA's memories.
+ * Wait for NDONE==0, then look at defect indication.
+ *
+ * Accessible always.
+ *
+ * Reset by NCB reset.
+ */
+/* NOTE(review): the bit layout is declared twice below -- once for big-endian
+   and once for little-endian hosts -- and the two halves must remain exact
+   reverses of each other.  This file appears auto-generated; avoid editing
+   one endianness variant without the other. */
+union bdk_satax_uctl_bist_status
+{
+    uint64_t u;
+    struct bdk_satax_uctl_bist_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_42_63 : 22;
+        uint64_t uctl_xm_r_bist_ndone : 1; /**< [ 41: 41](RO/H) BIST is not complete for the UCTL AxiMaster read-data FIFO. */
+        uint64_t uctl_xm_w_bist_ndone : 1; /**< [ 40: 40](RO/H) BIST is not complete for the UCTL AxiMaster write-data FIFO. */
+        uint64_t reserved_36_39 : 4;
+        uint64_t uahc_p0_rxram_bist_ndone : 1;/**< [ 35: 35](RO/H) BIST is not complete for the UAHC Port 0 RxFIFO RAM. */
+        uint64_t reserved_34 : 1;
+        uint64_t uahc_p0_txram_bist_ndone : 1;/**< [ 33: 33](RO/H) BIST is not complete for the UAHC Port 0 TxFIFO RAM. */
+        uint64_t reserved_10_32 : 23;
+        uint64_t uctl_xm_r_bist_status : 1; /**< [  9:  9](RO/H) BIST status of the UCTL AxiMaster read-data FIFO. */
+        uint64_t uctl_xm_w_bist_status : 1; /**< [  8:  8](RO/H) BIST status of the UCTL AxiMaster write-data FIFO. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t uahc_p0_rxram_bist_status : 1;/**< [  3:  3](RO/H) BIST status of the UAHC Port0 RxFIFO RAM. */
+        uint64_t reserved_2 : 1;
+        uint64_t uahc_p0_txram_bist_status : 1;/**< [  1:  1](RO/H) BIST status of the UAHC Port0 TxFIFO RAM. */
+        uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0 : 1;
+        uint64_t uahc_p0_txram_bist_status : 1;/**< [  1:  1](RO/H) BIST status of the UAHC Port0 TxFIFO RAM. */
+        uint64_t reserved_2 : 1;
+        uint64_t uahc_p0_rxram_bist_status : 1;/**< [  3:  3](RO/H) BIST status of the UAHC Port0 RxFIFO RAM. */
+        uint64_t reserved_4_7 : 4;
+        uint64_t uctl_xm_w_bist_status : 1; /**< [  8:  8](RO/H) BIST status of the UCTL AxiMaster write-data FIFO. */
+        uint64_t uctl_xm_r_bist_status : 1; /**< [  9:  9](RO/H) BIST status of the UCTL AxiMaster read-data FIFO. */
+        uint64_t reserved_10_32 : 23;
+        uint64_t uahc_p0_txram_bist_ndone : 1;/**< [ 33: 33](RO/H) BIST is not complete for the UAHC Port 0 TxFIFO RAM. */
+        uint64_t reserved_34 : 1;
+        uint64_t uahc_p0_rxram_bist_ndone : 1;/**< [ 35: 35](RO/H) BIST is not complete for the UAHC Port 0 RxFIFO RAM. */
+        uint64_t reserved_36_39 : 4;
+        uint64_t uctl_xm_w_bist_ndone : 1; /**< [ 40: 40](RO/H) BIST is not complete for the UCTL AxiMaster write-data FIFO. */
+        uint64_t uctl_xm_r_bist_ndone : 1; /**< [ 41: 41](RO/H) BIST is not complete for the UCTL AxiMaster read-data FIFO. */
+        uint64_t reserved_42_63 : 22;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uctl_bist_status_s cn; */
+};
+typedef union bdk_satax_uctl_bist_status bdk_satax_uctl_bist_status_t;
+
+/* Return the CSR address of SATA(a)_UCTL_BIST_STATUS.  Valid index range is
+   model dependent (CN81XX: 0-1, CN83XX: 0-5, CN88XX: 0-15); the index is
+   masked to the next power of two, not clamped.  CN9XXX is intentionally
+   absent here, unlike most sibling accessors in this file.
+   NOTE(review): control falls off the end after __bdk_csr_fatal() --
+   presumably that helper never returns; confirm noreturn. */
+static inline uint64_t BDK_SATAX_UCTL_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_BIST_STATUS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x810000100008ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+        return 0x810000100008ll + 0x1000000000ll * ((a) & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+        return 0x810000100008ll + 0x1000000000ll * ((a) & 0xf);
+    __bdk_csr_fatal("SATAX_UCTL_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* helper macros. */
+#define typedef_BDK_SATAX_UCTL_BIST_STATUS(a) bdk_satax_uctl_bist_status_t
+#define bustype_BDK_SATAX_UCTL_BIST_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_BIST_STATUS(a) "SATAX_UCTL_BIST_STATUS"
+#define device_bar_BDK_SATAX_UCTL_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SATAX_UCTL_BIST_STATUS(a) (a)
+#define arguments_BDK_SATAX_UCTL_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_bp_test
+ *
+ * INTERNAL: SATA UCTL Backpressure Test Register
+ */
+/* NOTE(review): big- and little-endian field declarations below are exact
+   mirrors and must stay that way; auto-generated layout -- do not hand-edit
+   one variant without the other. */
+union bdk_satax_uctl_bp_test
+{
+    uint64_t u;
+    struct bdk_satax_uctl_bp_test_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+                                                                 Internal:
+                                                                 Once a bit is set, random backpressure is generated
+                                                                 at the corresponding point to allow for more frequent backpressure.
+                                                                 \<63\> = Reserved.
+                                                                 \<62\> = When set, disables popping of NCBO FIFO, also credits won't be returned.
+                                                                 \<61\> = When set, disables popping of NCBI FIFO, also credits won't be returned.
+                                                                 \<60\> = When set, enables backpressure on the FPA(XPD) interface. */
+        uint64_t reserved_24_59 : 36;
+        uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+                                                                 Internal:
+                                                                 There are 2 backpressure configuration bits per enable, with the two bits
+                                                                 defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+                                                                 0x3=25% of the time.
+                                                                 \<23:22\> = Reserved.
+                                                                 \<21:20\> = Config 2.
+                                                                 \<19:18\> = Config 1.
+                                                                 \<17:16\> = Config 0. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t lfsr_freq : 12; /**< [ 11:  0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+        uint64_t lfsr_freq : 12; /**< [ 11:  0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+        uint64_t reserved_12_15 : 4;
+        uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+                                                                 Internal:
+                                                                 There are 2 backpressure configuration bits per enable, with the two bits
+                                                                 defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+                                                                 0x3=25% of the time.
+                                                                 \<23:22\> = Reserved.
+                                                                 \<21:20\> = Config 2.
+                                                                 \<19:18\> = Config 1.
+                                                                 \<17:16\> = Config 0. */
+        uint64_t reserved_24_59 : 36;
+        uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+                                                                 Internal:
+                                                                 Once a bit is set, random backpressure is generated
+                                                                 at the corresponding point to allow for more frequent backpressure.
+                                                                 \<63\> = Reserved.
+                                                                 \<62\> = When set, disables popping of NCBO FIFO, also credits won't be returned.
+                                                                 \<61\> = When set, disables popping of NCBI FIFO, also credits won't be returned.
+                                                                 \<60\> = When set, enables backpressure on the FPA(XPD) interface. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uctl_bp_test_s cn; */
+};
+typedef union bdk_satax_uctl_bp_test bdk_satax_uctl_bp_test_t;
+
+/* Return the CSR address of SATA(a)_UCTL_BP_TEST.  This register exists only
+   on CN9XXX (valid 'a' 0-3); any other model, or an out-of-range index, hits
+   __bdk_csr_fatal().
+   NOTE(review): control falls off the end after __bdk_csr_fatal() --
+   presumably that helper never returns; confirm noreturn. */
+static inline uint64_t BDK_SATAX_UCTL_BP_TEST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_BP_TEST(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000100020ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UCTL_BP_TEST", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* helper macros. */
+#define typedef_BDK_SATAX_UCTL_BP_TEST(a) bdk_satax_uctl_bp_test_t
+#define bustype_BDK_SATAX_UCTL_BP_TEST(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_BP_TEST(a) "SATAX_UCTL_BP_TEST"
+#define device_bar_BDK_SATAX_UCTL_BP_TEST(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_BP_TEST(a) (a)
+#define arguments_BDK_SATAX_UCTL_BP_TEST(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_cap_cfg
+ *
+ * SATA UCTL Capability Configuration Register
+ * This register allows for overriding the advertised AHCI power management
+ * capabilities, configuration registers, and unplug notifications to work around
+ * hardware issues without modifying standard drivers. For diagnostic use only.
+ */
+/* NOTE(review): big- and little-endian field declarations below are exact
+   mirrors and must stay that way; auto-generated layout -- do not hand-edit
+   one variant without the other. */
+union bdk_satax_uctl_cap_cfg
+{
+    uint64_t u;
+    struct bdk_satax_uctl_cap_cfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t or_ahci_cap_en : 1; /**< [ 63: 63](R/W) Enable overriding advertised AHCI power management capabilities. */
+        uint64_t gbl_cap_salp : 1; /**< [ 62: 62](R/W) Override SATA()_UAHC_GBL_CAP[SALP]. */
+        uint64_t gbl_cap_ssc : 1; /**< [ 61: 61](R/W) Override SATA()_UAHC_GBL_CAP[SSC]. */
+        uint64_t gbl_cap2_sadm : 1; /**< [ 60: 60](R/W) Override SATA()_UAHC_GBL_CAP2[SADM]. */
+        uint64_t gbl_cap2_sds : 1; /**< [ 59: 59](R/W) Override SATA()_UAHC_GBL_CAP2[SDS]. */
+        uint64_t gbl_cap2_apst : 1; /**< [ 58: 58](R/W) Override SATA()_UAHC_GBL_CAP2[APST]. */
+        uint64_t reserved_56_57 : 2;
+        uint64_t or_ahci_pwr_en : 1; /**< [ 55: 55](R/W) Enable overriding programmed setting to AHCI power management config registers. */
+        uint64_t sctl_ipm : 3; /**< [ 54: 52](R/W) Override SATA()_UAHC_P0_SCTL[IPM]. */
+        uint64_t cmd_icc : 4; /**< [ 51: 48](R/W) Override SATA()_UAHC_P0_CMD[ICC]. */
+        uint64_t cmd_asp : 1; /**< [ 47: 47](R/W) Override SATA()_UAHC_P0_CMD[ASP]. */
+        uint64_t cmd_alpe : 1; /**< [ 46: 46](R/W) Override SATA()_UAHC_P0_CMD[ALPE]. */
+        uint64_t cmd_apste : 1; /**< [ 45: 45](R/W) Override SATA()_UAHC_P0_CMD[APSTE]. */
+        uint64_t reserved_40_44 : 5;
+        uint64_t or_uahc_int_en : 1; /**< [ 39: 39](R/W) Enable overriding notification of unplug event to force the interrupts. */
+        uint64_t p0_is_prcs : 1; /**< [ 38: 38](R/W) Override SATA()_UAHC_P0_IS[PRCS]. */
+        uint64_t p0_serr_diag_n : 1; /**< [ 37: 37](R/W) Override SATA()_UAHC_P0_SERR[DIAG_N]. */
+        uint64_t reserved_0_36 : 37;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_36 : 37;
+        uint64_t p0_serr_diag_n : 1; /**< [ 37: 37](R/W) Override SATA()_UAHC_P0_SERR[DIAG_N]. */
+        uint64_t p0_is_prcs : 1; /**< [ 38: 38](R/W) Override SATA()_UAHC_P0_IS[PRCS]. */
+        uint64_t or_uahc_int_en : 1; /**< [ 39: 39](R/W) Enable overriding notification of unplug event to force the interrupts. */
+        uint64_t reserved_40_44 : 5;
+        uint64_t cmd_apste : 1; /**< [ 45: 45](R/W) Override SATA()_UAHC_P0_CMD[APSTE]. */
+        uint64_t cmd_alpe : 1; /**< [ 46: 46](R/W) Override SATA()_UAHC_P0_CMD[ALPE]. */
+        uint64_t cmd_asp : 1; /**< [ 47: 47](R/W) Override SATA()_UAHC_P0_CMD[ASP]. */
+        uint64_t cmd_icc : 4; /**< [ 51: 48](R/W) Override SATA()_UAHC_P0_CMD[ICC]. */
+        uint64_t sctl_ipm : 3; /**< [ 54: 52](R/W) Override SATA()_UAHC_P0_SCTL[IPM]. */
+        uint64_t or_ahci_pwr_en : 1; /**< [ 55: 55](R/W) Enable overriding programmed setting to AHCI power management config registers. */
+        uint64_t reserved_56_57 : 2;
+        uint64_t gbl_cap2_apst : 1; /**< [ 58: 58](R/W) Override SATA()_UAHC_GBL_CAP2[APST]. */
+        uint64_t gbl_cap2_sds : 1; /**< [ 59: 59](R/W) Override SATA()_UAHC_GBL_CAP2[SDS]. */
+        uint64_t gbl_cap2_sadm : 1; /**< [ 60: 60](R/W) Override SATA()_UAHC_GBL_CAP2[SADM]. */
+        uint64_t gbl_cap_ssc : 1; /**< [ 61: 61](R/W) Override SATA()_UAHC_GBL_CAP[SSC]. */
+        uint64_t gbl_cap_salp : 1; /**< [ 62: 62](R/W) Override SATA()_UAHC_GBL_CAP[SALP]. */
+        uint64_t or_ahci_cap_en : 1; /**< [ 63: 63](R/W) Enable overriding advertised AHCI power management capabilities. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uctl_cap_cfg_s cn; */
+};
+typedef union bdk_satax_uctl_cap_cfg bdk_satax_uctl_cap_cfg_t;
+
+/* Return the CSR address of SATA(a)_UCTL_CAP_CFG.  CN9XXX only (valid 'a'
+   0-3); any other model or index hits __bdk_csr_fatal().
+   NOTE(review): control falls off the end after __bdk_csr_fatal() --
+   presumably that helper never returns; confirm noreturn. */
+static inline uint64_t BDK_SATAX_UCTL_CAP_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_CAP_CFG(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x8100001000e0ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UCTL_CAP_CFG", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* helper macros. */
+#define typedef_BDK_SATAX_UCTL_CAP_CFG(a) bdk_satax_uctl_cap_cfg_t
+#define bustype_BDK_SATAX_UCTL_CAP_CFG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_CAP_CFG(a) "SATAX_UCTL_CAP_CFG"
+#define device_bar_BDK_SATAX_UCTL_CAP_CFG(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_CAP_CFG(a) (a)
+#define arguments_BDK_SATAX_UCTL_CAP_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_const
+ *
+ * SATA UCTL Constants Register
+ * This register contains constants for software discovery.
+ */
+union bdk_satax_uctl_const
+{
+    uint64_t u;
+    struct bdk_satax_uctl_const_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        /* All 64 bits are currently reserved -- no discovery constants are
+           defined for this register in this header revision. */
+        uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uctl_const_s cn; */
+};
+typedef union bdk_satax_uctl_const bdk_satax_uctl_const_t;
+
+/* Return the CSR address of SATA(a)_UCTL_CONST.  CN9XXX only (valid 'a'
+   0-3); any other model or index hits __bdk_csr_fatal().
+   NOTE(review): control falls off the end after __bdk_csr_fatal() --
+   presumably that helper never returns; confirm noreturn. */
+static inline uint64_t BDK_SATAX_UCTL_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_CONST(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000100028ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UCTL_CONST", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* helper macros. */
+#define typedef_BDK_SATAX_UCTL_CONST(a) bdk_satax_uctl_const_t
+#define bustype_BDK_SATAX_UCTL_CONST(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_CONST(a) "SATAX_UCTL_CONST"
+#define device_bar_BDK_SATAX_UCTL_CONST(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_CONST(a) (a)
+#define arguments_BDK_SATAX_UCTL_CONST(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_csclk_active_pc
+ *
+ * SATA UCTL Conditional Sclk Clock Counter Register
+ * This register count csclk clock cycle.
+ * Reset by NCB reset.
+ */
+union bdk_satax_uctl_csclk_active_pc
+{
+    uint64_t u;
+    struct bdk_satax_uctl_csclk_active_pc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        /* Single full-width field, so big- and little-endian layouts are
+           identical. */
+        uint64_t count : 64; /**< [ 63:  0](R/W/H) Counts conditional clock active cycles since reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t count : 64; /**< [ 63:  0](R/W/H) Counts conditional clock active cycles since reset. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uctl_csclk_active_pc_s cn; */
+};
+typedef union bdk_satax_uctl_csclk_active_pc bdk_satax_uctl_csclk_active_pc_t;
+
+/* Return the CSR address of SATA(a)_UCTL_CSCLK_ACTIVE_PC.  CN9XXX only
+   (valid 'a' 0-3); any other model or index hits __bdk_csr_fatal().
+   NOTE(review): control falls off the end after __bdk_csr_fatal() --
+   presumably that helper never returns; confirm noreturn. */
+static inline uint64_t BDK_SATAX_UCTL_CSCLK_ACTIVE_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_CSCLK_ACTIVE_PC(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+        return 0x810000100018ll + 0x1000000000ll * ((a) & 0x3);
+    __bdk_csr_fatal("SATAX_UCTL_CSCLK_ACTIVE_PC", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* helper macros. */
+#define typedef_BDK_SATAX_UCTL_CSCLK_ACTIVE_PC(a) bdk_satax_uctl_csclk_active_pc_t
+#define bustype_BDK_SATAX_UCTL_CSCLK_ACTIVE_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_CSCLK_ACTIVE_PC(a) "SATAX_UCTL_CSCLK_ACTIVE_PC"
+#define device_bar_BDK_SATAX_UCTL_CSCLK_ACTIVE_PC(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_CSCLK_ACTIVE_PC(a) (a)
+#define arguments_BDK_SATAX_UCTL_CSCLK_ACTIVE_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_ctl
+ *
+ * SATA UCTL Control Register
+ * This register controls clocks, resets, power, and BIST for the SATA.
+ *
+ * Accessible always.
+ *
+ * Reset by NCB reset.
+ */
+union bdk_satax_uctl_ctl
+{
+ uint64_t u;
+ struct bdk_satax_uctl_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. There are two major modes of BIST: FULL and CLEAR.
+ 0 = FULL BIST is run by the BIST state machine.
+ 1 = CLEAR BIST is run by the BIST state machine. A clear-BIST run clears all entries in
+ SATA RAMs to 0x0.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts
+ [CLEAR_BIST] into the correct state and then perform another CSR write operation to set
+ [START_BIST] (keeping [CLEAR_BIST] constant). CLEAR BIST completion is indicated by
+ SATA()_UCTL_BIST_STATUS[NDONE*] clear.
+
+ A BIST clear operation takes almost 2,000 host-controller clock cycles for the largest
+ RAM. */
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Start BIST. The rising edge starts BIST on the memories in SATA. To run BIST, the host-
+ controller clock must be both configured and enabled, and should be configured to the
+ maximum available frequency given the available coprocessor clock and dividers.
+
+ Refer to Cold Reset for clock initialization procedures. BIST defect status can
+ be checked after FULL BIST completion, both of which are indicated in
+ SATA()_UCTL_BIST_STATUS. The FULL BIST run takes almost 80,000 host-controller
+ clock cycles for the largest RAM. */
+ uint64_t reserved_32_61 : 30;
+ uint64_t cmd_flr_en : 1; /**< [ 31: 31](R/W) Select an option for doing SATA FLR based on finishing existing commands or DMA transactions.
+ 0 = DMA-base FLR.
+ 1 = Command-base FLR.
+
+ Command-base option will require AHCI software to read SATA()_UAHC_P0_CI to make sure there is
+ no more command to process, then proceed FLR by negating PCC master enable signal.
+
+ This option has to be set before PCC master enable negates. Futher commands write to
+ SATA()_UAHC_P0_CI after this bit is set will not be executed.
+
+ To check if commands have finished, read SATA()_UCTL_CTL[CMD_FLR_DONE]. */
+ uint64_t a_clk_en : 1; /**< [ 30: 30](R/W) Host-controller clock enable. When set to one, the host-controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t a_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the host-controller clock divider.
+ 0 = Use the divided coprocessor clock from the [A_CLKDIV_SEL] divider.
+ 1 = use the bypass clock from the GPIO pins (generally bypass is only used for scan
+ purposes).
+
+ This signal is a multiplexer-select signal; it does not enable the host-controller clock.
+ You must set [A_CLK_EN] separately. [A_CLK_BYP_SEL] select should not be changed unless
+ [A_CLK_EN] is disabled. The bypass clock can be selected and running even if the host-
+ controller clock dividers are not running. */
+ uint64_t a_clkdiv_rst : 1; /**< [ 28: 28](R/W) Host-controller-clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t cmd_flr_done : 1; /**< [ 27: 27](RO/H) This bit tells you if commands set before SATA()_UCTL_CTL[CMD_FLR_EN] are finished or not.
+ This bit is only valid after SATA()_UCTL_CTL[CMD_FLR_EN] is set. */
+ uint64_t a_clkdiv_sel : 3; /**< [ 26: 24](R/W) The host-controller clock frequency is the coprocessor-clock frequency divided by
+ [A_CLKDIV_SEL]. The host-controller clock frequency must be at or below 333MHz.
+ This field can be changed only when [A_CLKDIV_RST] = 1. The divider values are the
+ following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 3.
+ 0x3 = divide by 4.
+ 0x4 = divide by 6.
+ 0x5 = divide by 8.
+ 0x6 = divide by 16.
+ 0x7 = divide by 24. */
+ uint64_t reserved_6_23 : 18;
+ uint64_t dma_psn_ign : 1; /**< [ 5: 5](R/W) Handling of poison indication on DMA read responses.
+ 0 = Treat poison data the same way as fault, sending an AXI error to the SATA
+ controller.
+ 1 = Ignore poison and proceed with the transaction as if no problems. */
+ uint64_t reserved_2_4 : 3;
+ uint64_t sata_uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t sata_uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high. Resets UAHC DMA and register shims and the UCTL
+ registers 0x10_0030-0x10_00F8.
+
+ It does not reset UCTL registers 0x10_0000-0x10_0028.
+
+ The UCTL registers starting from 0x10_0030 can be accessed only after the host-controller
+ clock is active and [SATA_UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and GIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high. Resets UAHC DMA and register shims and the UCTL
+ registers 0x10_0030-0x10_00F8.
+
+ It does not reset UCTL registers 0x10_0000-0x10_0028.
+
+ The UCTL registers starting from 0x10_0030 can be accessed only after the host-controller
+ clock is active and [SATA_UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and GIB protocols. */
+ uint64_t sata_uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t reserved_2_4 : 3;
+ uint64_t dma_psn_ign : 1; /**< [ 5: 5](R/W) Handling of poison indication on DMA read responses.
+ 0 = Treat poison data the same way as fault, sending an AXI error to the SATA
+ controller.
+ 1 = Ignore poison and proceed with the transaction as if no problems. */
+ uint64_t reserved_6_23 : 18;
+ uint64_t a_clkdiv_sel : 3; /**< [ 26: 24](R/W) The host-controller clock frequency is the coprocessor-clock frequency divided by
+ [A_CLKDIV_SEL]. The host-controller clock frequency must be at or below 333MHz.
+ This field can be changed only when [A_CLKDIV_RST] = 1. The divider values are the
+ following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 3.
+ 0x3 = divide by 4.
+ 0x4 = divide by 6.
+ 0x5 = divide by 8.
+ 0x6 = divide by 16.
+ 0x7 = divide by 24. */
+ uint64_t cmd_flr_done : 1; /**< [ 27: 27](RO/H) This bit tells you if commands set before SATA()_UCTL_CTL[CMD_FLR_EN] are finished or not.
+ This bit is only valid after SATA()_UCTL_CTL[CMD_FLR_EN] is set. */
+ uint64_t a_clkdiv_rst : 1; /**< [ 28: 28](R/W) Host-controller-clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t a_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the host-controller clock divider.
+ 0 = Use the divided coprocessor clock from the [A_CLKDIV_SEL] divider.
+ 1 = use the bypass clock from the GPIO pins (generally bypass is only used for scan
+ purposes).
+
+ This signal is a multiplexer-select signal; it does not enable the host-controller clock.
+ You must set [A_CLK_EN] separately. [A_CLK_BYP_SEL] select should not be changed unless
+ [A_CLK_EN] is disabled. The bypass clock can be selected and running even if the host-
+ controller clock dividers are not running. */
+ uint64_t a_clk_en : 1; /**< [ 30: 30](R/W) Host-controller clock enable. When set to one, the host-controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t cmd_flr_en : 1; /**< [ 31: 31](R/W) Select an option for doing SATA FLR based on finishing existing commands or DMA transactions.
+ 0 = DMA-base FLR.
+ 1 = Command-base FLR.
+
+ Command-base option will require AHCI software to read SATA()_UAHC_P0_CI to make sure there is
+ no more command to process, then proceed FLR by negating PCC master enable signal.
+
+ This option has to be set before PCC master enable negates. Futher commands write to
+ SATA()_UAHC_P0_CI after this bit is set will not be executed.
+
+ To check if commands have finished, read SATA()_UCTL_CTL[CMD_FLR_DONE]. */
+ uint64_t reserved_32_61 : 30;
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Start BIST. The rising edge starts BIST on the memories in SATA. To run BIST, the host-
+ controller clock must be both configured and enabled, and should be configured to the
+ maximum available frequency given the available coprocessor clock and dividers.
+
+ Refer to Cold Reset for clock initialization procedures. BIST defect status can
+ be checked after FULL BIST completion, both of which are indicated in
+ SATA()_UCTL_BIST_STATUS. The FULL BIST run takes almost 80,000 host-controller
+ clock cycles for the largest RAM. */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. There are two major modes of BIST: FULL and CLEAR.
+ 0 = FULL BIST is run by the BIST state machine.
+ 1 = CLEAR BIST is run by the BIST state machine. A clear-BIST run clears all entries in
+ SATA RAMs to 0x0.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts
+ [CLEAR_BIST] into the correct state and then perform another CSR write operation to set
+ [START_BIST] (keeping [CLEAR_BIST] constant). CLEAR BIST completion is indicated by
+ SATA()_UCTL_BIST_STATUS[NDONE*] clear.
+
+ A BIST clear operation takes almost 2,000 host-controller clock cycles for the largest
+ RAM. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_satax_uctl_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. There are two major modes of BIST: FULL and CLEAR.
+ 0 = FULL BIST is run by the BIST state machine.
+ 1 = CLEAR BIST is run by the BIST state machine. A clear-BIST run clears all entries in
+ SATA RAMs to 0x0.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts
+ [CLEAR_BIST] into the correct state and then perform another CSR write operation to set
+ [START_BIST] (keeping [CLEAR_BIST] constant). CLEAR BIST completion is indicated by
+ SATA()_UCTL_BIST_STATUS[NDONE*] clear.
+
+ A BIST clear operation takes almost 2,000 host-controller clock cycles for the largest
+ RAM. */
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Start BIST. The rising edge starts BIST on the memories in SATA. To run BIST, the host-
+ controller clock must be both configured and enabled, and should be configured to the
+ maximum available frequency given the available coprocessor clock and dividers.
+
+ Refer to Cold Reset for clock initialization procedures. BIST defect status can
+ be checked after FULL BIST completion, both of which are indicated in
+ SATA()_UCTL_BIST_STATUS. The FULL BIST run takes almost 80,000 host-controller
+ clock cycles for the largest RAM. */
+ uint64_t reserved_31_61 : 31;
+ uint64_t a_clk_en : 1; /**< [ 30: 30](R/W) Host-controller clock enable. When set to one, the host-controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t a_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the host-controller clock divider.
+ 0 = Use the divided coprocessor clock from the [A_CLKDIV_SEL] divider.
+ 1 = use the bypass clock from the GPIO pins (generally bypass is only used for scan
+ purposes).
+
+ This signal is a multiplexer-select signal; it does not enable the host-controller clock.
+ You must set [A_CLK_EN] separately. [A_CLK_BYP_SEL] select should not be changed unless
+ [A_CLK_EN] is disabled. The bypass clock can be selected and running even if the host-
+ controller clock dividers are not running. */
+ uint64_t a_clkdiv_rst : 1; /**< [ 28: 28](R/W) Host-controller-clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t reserved_27 : 1;
+ uint64_t a_clkdiv_sel : 3; /**< [ 26: 24](R/W) The host-controller clock frequency is the coprocessor-clock frequency divided by
+ [A_CLKDIV_SEL]. The host-controller clock frequency must be at or below 333MHz.
+ This field can be changed only when [A_CLKDIV_RST] = 1. The divider values are the
+ following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 3.
+ 0x3 = divide by 4.
+ 0x4 = divide by 6.
+ 0x5 = divide by 8.
+ 0x6 = divide by 16.
+ 0x7 = divide by 24. */
+ uint64_t reserved_5_23 : 19;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the SATA UCTL interface clock (coprocessor clock). This enables access to UAHC
+ registers via the NCB, as well as UCTL registers starting from 0x10_0030. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t sata_uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t sata_uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high. Resets UAHC DMA and register shims and the UCTL
+ registers 0x10_0030-0x10_00F8.
+
+ It does not reset UCTL registers 0x10_0000-0x10_0028.
+
+ The UCTL registers starting from 0x10_0030 can be accessed only after the host-controller
+ clock is active and [SATA_UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and GIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high. Resets UAHC DMA and register shims and the UCTL
+ registers 0x10_0030-0x10_00F8.
+
+ It does not reset UCTL registers 0x10_0000-0x10_0028.
+
+ The UCTL registers starting from 0x10_0030 can be accessed only after the host-controller
+ clock is active and [SATA_UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and GIB protocols. */
+ uint64_t sata_uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the SATA UCTL interface clock (coprocessor clock). This enables access to UAHC
+ registers via the NCB, as well as UCTL registers starting from 0x10_0030. */
+ uint64_t reserved_5_23 : 19;
+ uint64_t a_clkdiv_sel : 3; /**< [ 26: 24](R/W) The host-controller clock frequency is the coprocessor-clock frequency divided by
+ [A_CLKDIV_SEL]. The host-controller clock frequency must be at or below 333MHz.
+ This field can be changed only when [A_CLKDIV_RST] = 1. The divider values are the
+ following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 3.
+ 0x3 = divide by 4.
+ 0x4 = divide by 6.
+ 0x5 = divide by 8.
+ 0x6 = divide by 16.
+ 0x7 = divide by 24. */
+ uint64_t reserved_27 : 1;
+ uint64_t a_clkdiv_rst : 1; /**< [ 28: 28](R/W) Host-controller-clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t a_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the host-controller clock divider.
+ 0 = Use the divided coprocessor clock from the [A_CLKDIV_SEL] divider.
+ 1 = use the bypass clock from the GPIO pins (generally bypass is only used for scan
+ purposes).
+
+ This signal is a multiplexer-select signal; it does not enable the host-controller clock.
+ You must set [A_CLK_EN] separately. [A_CLK_BYP_SEL] select should not be changed unless
+ [A_CLK_EN] is disabled. The bypass clock can be selected and running even if the host-
+ controller clock dividers are not running. */
+ uint64_t a_clk_en : 1; /**< [ 30: 30](R/W) Host-controller clock enable. When set to one, the host-controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t reserved_31_61 : 31;
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Start BIST. The rising edge starts BIST on the memories in SATA. To run BIST, the host-
+ controller clock must be both configured and enabled, and should be configured to the
+ maximum available frequency given the available coprocessor clock and dividers.
+
+ Refer to Cold Reset for clock initialization procedures. BIST defect status can
+ be checked after FULL BIST completion, both of which are indicated in
+ SATA()_UCTL_BIST_STATUS. The FULL BIST run takes almost 80,000 host-controller
+ clock cycles for the largest RAM. */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. There are two major modes of BIST: FULL and CLEAR.
+ 0 = FULL BIST is run by the BIST state machine.
+ 1 = CLEAR BIST is run by the BIST state machine. A clear-BIST run clears all entries in
+ SATA RAMs to 0x0.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts
+ [CLEAR_BIST] into the correct state and then perform another CSR write operation to set
+ [START_BIST] (keeping [CLEAR_BIST] constant). CLEAR BIST completion is indicated by
+ SATA()_UCTL_BIST_STATUS[NDONE*] clear.
+
+ A BIST clear operation takes almost 2,000 host-controller clock cycles for the largest
+ RAM. */
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_satax_uctl_ctl_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t cmd_flr_en : 1; /**< [ 31: 31](R/W) Select an option for doing SATA FLR based on finishing existing commands or DMA transactions.
+ 0 = DMA-base FLR.
+ 1 = Command-base FLR.
+
+ Command-base option will require AHCI software to read SATA()_UAHC_P0_CI to make sure there is
+ no more command to process, then proceed FLR by negating PCC master enable signal.
+
+ This option has to be set before PCC master enable negates. Futher commands write to
+ SATA()_UAHC_P0_CI after this bit is set will not be executed.
+
+ To check if commands have finished, read SATA()_UCTL_CTL[CMD_FLR_DONE]. */
+ uint64_t a_clk_en : 1; /**< [ 30: 30](R/W) Host-controller clock enable. When set to one, the host-controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t a_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the host-controller clock divider.
+ 0 = Use the divided coprocessor clock from the [A_CLKDIV_SEL] divider.
+ 1 = use the bypass clock from the GPIO pins (generally bypass is only used for scan
+ purposes).
+
+ This signal is a multiplexer-select signal; it does not enable the host-controller clock.
+ You must set [A_CLK_EN] separately. [A_CLK_BYP_SEL] select should not be changed unless
+ [A_CLK_EN] is disabled. The bypass clock can be selected and running even if the host-
+ controller clock dividers are not running. */
+ uint64_t a_clkdiv_rst : 1; /**< [ 28: 28](R/W) Host-controller-clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t cmd_flr_done : 1; /**< [ 27: 27](RO/H) This bit tells you if commands set before SATA()_UCTL_CTL[CMD_FLR_EN] are finished or not.
+ This bit is only valid after SATA()_UCTL_CTL[CMD_FLR_EN] is set. */
+ uint64_t a_clkdiv_sel : 3; /**< [ 26: 24](R/W) The host-controller clock frequency is the coprocessor-clock frequency divided by
+ [A_CLKDIV_SEL]. The host-controller clock frequency must be at or below 333MHz.
+ This field can be changed only when [A_CLKDIV_RST] = 1. The divider values are the
+ following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 3.
+ 0x3 = divide by 4.
+ 0x4 = divide by 6.
+ 0x5 = divide by 8.
+ 0x6 = divide by 16.
+ 0x7 = divide by 24. */
+ uint64_t reserved_6_23 : 18;
+ uint64_t dma_psn_ign : 1; /**< [ 5: 5](R/W) Handling of poison indication on DMA read responses.
+ 0 = Treat poison data the same way as fault, sending an AXI error to the SATA
+ controller.
+ 1 = Ignore poison and proceed with the transaction as if no problems. */
+ uint64_t csclk_force : 1; /**< [ 4: 4](R/W) Force conditional clock to be running. For diagnostic use only.
+ 0 = No override.
+ 1 = Override the enable of conditional clock to force it running. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t sata_uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t sata_uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high. Resets UAHC DMA and register shims and the UCTL
+ registers 0x10_0030-0x10_00F8.
+
+ It does not reset UCTL registers 0x10_0000-0x10_0028. These can be accessed when
+ [SATA_UCTL_RST] is asserted.
+
+ The UCTL registers starting from 0x10_0030 can be accessed only after the host-controller
+ clock is active and [SATA_UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and GIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t sata_uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high. Resets UAHC DMA and register shims and the UCTL
+ registers 0x10_0030-0x10_00F8.
+
+ It does not reset UCTL registers 0x10_0000-0x10_0028. These can be accessed when
+ [SATA_UCTL_RST] is asserted.
+
+ The UCTL registers starting from 0x10_0030 can be accessed only after the host-controller
+ clock is active and [SATA_UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and GIB protocols. */
+ uint64_t sata_uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t csclk_force : 1; /**< [ 4: 4](R/W) Force conditional clock to be running. For diagnostic use only.
+ 0 = No override.
+ 1 = Override the enable of conditional clock to force it running. */
+ uint64_t dma_psn_ign : 1; /**< [ 5: 5](R/W) Handling of poison indication on DMA read responses.
+ 0 = Treat poison data the same way as fault, sending an AXI error to the SATA
+ controller.
+ 1 = Ignore poison and proceed with the transaction as if no problems. */
+ uint64_t reserved_6_23 : 18;
+ uint64_t a_clkdiv_sel : 3; /**< [ 26: 24](R/W) The host-controller clock frequency is the coprocessor-clock frequency divided by
+ [A_CLKDIV_SEL]. The host-controller clock frequency must be at or below 333MHz.
+ This field can be changed only when [A_CLKDIV_RST] = 1. The divider values are the
+ following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 3.
+ 0x3 = divide by 4.
+ 0x4 = divide by 6.
+ 0x5 = divide by 8.
+ 0x6 = divide by 16.
+ 0x7 = divide by 24. */
+ uint64_t cmd_flr_done : 1; /**< [ 27: 27](RO/H) This bit tells you if commands set before SATA()_UCTL_CTL[CMD_FLR_EN] are finished or not.
+ This bit is only valid after SATA()_UCTL_CTL[CMD_FLR_EN] is set. */
+ uint64_t a_clkdiv_rst : 1; /**< [ 28: 28](R/W) Host-controller-clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t a_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the host-controller clock divider.
+ 0 = Use the divided coprocessor clock from the [A_CLKDIV_SEL] divider.
+ 1 = use the bypass clock from the GPIO pins (generally bypass is only used for scan
+ purposes).
+
+ This signal is a multiplexer-select signal; it does not enable the host-controller clock.
+ You must set [A_CLK_EN] separately. [A_CLK_BYP_SEL] select should not be changed unless
+ [A_CLK_EN] is disabled. The bypass clock can be selected and running even if the host-
+ controller clock dividers are not running. */
+ uint64_t a_clk_en : 1; /**< [ 30: 30](R/W) Host-controller clock enable. When set to one, the host-controller clock is generated. This
+ also enables access to UCTL registers 0x30-0xF8. */
+ uint64_t cmd_flr_en : 1; /**< [ 31: 31](R/W) Select an option for doing SATA FLR based on finishing existing commands or DMA transactions.
+ 0 = DMA-base FLR.
+ 1 = Command-base FLR.
+
+ Command-base option will require AHCI software to read SATA()_UAHC_P0_CI to make sure there is
+ no more command to process, then proceed FLR by negating PCC master enable signal.
+
+ This option has to be set before PCC master enable negates. Further commands written to
+ SATA()_UAHC_P0_CI after this bit is set will not be executed.
+
+ To check if commands have finished, read SATA()_UCTL_CTL[CMD_FLR_DONE]. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_satax_uctl_ctl bdk_satax_uctl_ctl_t;
+
+/* Return the physical address of SATA(a)_UCTL_CTL for the chip we are
+ * running on.  Every supported model shares one base address and one
+ * per-controller stride; models differ only in how many SATA controllers
+ * exist (and therefore in the generated index mask).  An out-of-range or
+ * unknown-model index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UCTL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_CTL(unsigned long a)
+{
+    const uint64_t base = 0x810000100000ll; /* Common to all models. */
+    const uint64_t step = 0x1000000000ll;   /* Per-controller stride. */
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 1)
+        return base + step * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 5)
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 15)
+        return base + step * (a & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && a <= 3)
+        return base + step * (a & 0x3);
+    __bdk_csr_fatal("SATAX_UCTL_CTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_CTL(a) bdk_satax_uctl_ctl_t
+#define bustype_BDK_SATAX_UCTL_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_CTL(a) "SATAX_UCTL_CTL"
+#define device_bar_BDK_SATAX_UCTL_CTL(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_CTL(a) (a)
+#define arguments_BDK_SATAX_UCTL_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_ecc
+ *
+ * SATA UCTL ECC Control/Debug Register
+ * This register can be used to disable ECC correction, insert ECC errors, and debug ECC
+ * failures.
+ *
+ * Fields ECC_ERR* are captured when there are no outstanding ECC errors indicated in INTSTAT
+ * and a new ECC error arrives. Prioritization for multiple events occurring on the same cycle is
+ * indicated by the ECC_ERR_SOURCE enumeration: highest encoded value has highest priority.
+ *
+ * Fields *ECC_DIS: Disables ECC correction, SBE and DBE errors are still reported.
+ * If ECC_DIS is 0x1, then no data-correction occurs.
+ *
+ * Fields *ECC_FLIP_SYND: Flip the syndrome[1:0] bits to generate 1-bit/2-bit errors for testing.
+ *
+ * Accessible only when SATA()_UCTL_CTL[A_CLK_EN].
+ *
+ * Reset by NCB reset or SATA()_UCTL_CTL[SATA_UCTL_RST].
+ */
+union bdk_satax_uctl_ecc
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_satax_uctl_ecc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_62_63        : 2;
+        uint64_t ecc_err_source        : 4;  /**< [ 61: 58](RO/H) Source of ECC error, see SATA_UCTL_ECC_ERR_SOURCE_E. */
+        uint64_t ecc_err_syndrome      : 18; /**< [ 57: 40](RO/H) Syndrome bits of the ECC error. */
+        uint64_t ecc_err_address       : 8;  /**< [ 39: 32](RO/H) RAM address of the ECC error. */
+        uint64_t reserved_21_31        : 11;
+        uint64_t uctl_xm_r_ecc_flip_synd : 2;/**< [ 20: 19](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t uctl_xm_r_ecc_cor_dis : 1;  /**< [ 18: 18](R/W) ECC correction disable for the UCTL AxiMaster read-data FIFO (per the *ECC_DIS note above: 1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t uctl_xm_w_ecc_flip_synd : 2;/**< [ 17: 16](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t uctl_xm_w_ecc_cor_dis : 1;  /**< [ 15: 15](R/W) ECC correction disable for the UCTL AxiMaster write-data FIFO (1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t reserved_9_14         : 6;
+        uint64_t uahc_rx_ecc_flip_synd : 2;  /**< [  8:  7](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t uahc_rx_ecc_cor_dis   : 1;  /**< [  6:  6](R/W) ECC correction disable for the UAHC RxFIFO RAMs (1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t uahc_tx_ecc_flip_synd : 2;  /**< [  5:  4](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t uahc_tx_ecc_cor_dis   : 1;  /**< [  3:  3](R/W) ECC correction disable for the UAHC TxFIFO RAMs (1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t uahc_fb_ecc_flip_synd : 2;  /**< [  2:  1](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t uahc_fb_ecc_cor_dis   : 1;  /**< [  0:  0](R/W) ECC correction disable for the UAHC FBS RAM (1 = no data correction; SBE/DBE errors still reported). */
+#else /* Word 0 - Little Endian */
+        uint64_t uahc_fb_ecc_cor_dis   : 1;  /**< [  0:  0](R/W) ECC correction disable for the UAHC FBS RAM (1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t uahc_fb_ecc_flip_synd : 2;  /**< [  2:  1](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t uahc_tx_ecc_cor_dis   : 1;  /**< [  3:  3](R/W) ECC correction disable for the UAHC TxFIFO RAMs (1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t uahc_tx_ecc_flip_synd : 2;  /**< [  5:  4](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t uahc_rx_ecc_cor_dis   : 1;  /**< [  6:  6](R/W) ECC correction disable for the UAHC RxFIFO RAMs (1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t uahc_rx_ecc_flip_synd : 2;  /**< [  8:  7](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t reserved_9_14         : 6;
+        uint64_t uctl_xm_w_ecc_cor_dis : 1;  /**< [ 15: 15](R/W) ECC correction disable for the UCTL AxiMaster write-data FIFO (1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t uctl_xm_w_ecc_flip_synd : 2;/**< [ 17: 16](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t uctl_xm_r_ecc_cor_dis : 1;  /**< [ 18: 18](R/W) ECC correction disable for the UCTL AxiMaster read-data FIFO (1 = no data correction; SBE/DBE errors still reported). */
+        uint64_t uctl_xm_r_ecc_flip_synd : 2;/**< [ 20: 19](R/W) Flip syndrome bits to inject a 1-bit/2-bit ECC error for testing. */
+        uint64_t reserved_21_31        : 11;
+        uint64_t ecc_err_address       : 8;  /**< [ 39: 32](RO/H) RAM address of the ECC error. */
+        uint64_t ecc_err_syndrome      : 18; /**< [ 57: 40](RO/H) Syndrome bits of the ECC error. */
+        uint64_t ecc_err_source        : 4;  /**< [ 61: 58](RO/H) Source of ECC error, see SATA_UCTL_ECC_ERR_SOURCE_E. */
+        uint64_t reserved_62_63        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_satax_uctl_ecc_s cn; */
+};
+typedef union bdk_satax_uctl_ecc bdk_satax_uctl_ecc_t;
+
+/* Return the physical address of SATA(a)_UCTL_ECC for the running chip.
+ * Base address and per-controller stride are identical on all CN8XXX
+ * models; only the legal index range (and generated index mask) varies.
+ * There is deliberately no CN9XXX case for this register.  Invalid
+ * requests are reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UCTL_ECC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_ECC(unsigned long a)
+{
+    const uint64_t base = 0x8100001000f0ll; /* Common to all models. */
+    const uint64_t step = 0x1000000000ll;   /* Per-controller stride. */
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 1)
+        return base + step * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 5)
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 15)
+        return base + step * (a & 0xf);
+    __bdk_csr_fatal("SATAX_UCTL_ECC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_ECC(a) bdk_satax_uctl_ecc_t
+#define bustype_BDK_SATAX_UCTL_ECC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_ECC(a) "SATAX_UCTL_ECC"
+#define device_bar_BDK_SATAX_UCTL_ECC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SATAX_UCTL_ECC(a) (a)
+#define arguments_BDK_SATAX_UCTL_ECC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_intena_w1c
+ *
+ * SATA UCTL Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_satax_uctl_intena_w1c
+{
+    uint64_t u; /* Entire register as a single 64-bit value. */
+    struct bdk_satax_uctl_intena_w1c_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_14_63        : 50;
+        uint64_t uahc_rx_dbe           : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+        uint64_t uahc_rx_sbe           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+        uint64_t uahc_tx_dbe           : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+        uint64_t uahc_tx_sbe           : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+        uint64_t uahc_fb_dbe           : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+        uint64_t uahc_fb_sbe           : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t reserved_5            : 1;
+        uint64_t xm_r_sbe              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+        uint64_t xm_w_dbe              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+        uint64_t xm_w_sbe              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xm_w_sbe              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+        uint64_t xm_w_dbe              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+        uint64_t xm_r_sbe              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+        uint64_t reserved_5            : 1;
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t uahc_fb_sbe           : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+        uint64_t uahc_fb_dbe           : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+        uint64_t uahc_tx_sbe           : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+        uint64_t uahc_tx_dbe           : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+        uint64_t uahc_rx_sbe           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+        uint64_t uahc_rx_dbe           : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+        uint64_t reserved_14_63        : 50;
+#endif /* Word 0 - End */
+    } s;
+    /* CN9XXX layout: the per-RAM ECC SBE/DBE enables are gone; [DMA_PSN]
+       (DMA poison) appears at bit 5 instead. */
+    struct bdk_satax_uctl_intena_w1c_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63         : 56;
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t dma_psn               : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[DMA_PSN]. */
+        uint64_t reserved_2_4          : 3;
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[XS_NCB_OOB]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t reserved_2_4          : 3;
+        uint64_t dma_psn               : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[DMA_PSN]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t reserved_8_63         : 56;
+#endif /* Word 0 - End */
+    } cn9;
+    /* The CN81XX/CN88XX/CN83XX layouts below match the common layout
+       except that bit 5 is [XM_R_DBE] rather than reserved; only the SATA
+       instance range quoted in the field comments differs per model. */
+    struct bdk_satax_uctl_intena_w1c_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_14_63        : 50;
+        uint64_t uahc_rx_dbe           : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+        uint64_t uahc_rx_sbe           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+        uint64_t uahc_tx_dbe           : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+        uint64_t uahc_tx_sbe           : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+        uint64_t uahc_fb_dbe           : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+        uint64_t uahc_fb_sbe           : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t xm_r_dbe              : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+        uint64_t xm_r_sbe              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+        uint64_t xm_w_dbe              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+        uint64_t xm_w_sbe              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xm_w_sbe              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+        uint64_t xm_w_dbe              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+        uint64_t xm_r_sbe              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+        uint64_t xm_r_dbe              : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t uahc_fb_sbe           : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+        uint64_t uahc_fb_dbe           : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+        uint64_t uahc_tx_sbe           : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+        uint64_t uahc_tx_dbe           : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+        uint64_t uahc_rx_sbe           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+        uint64_t uahc_rx_dbe           : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for SATA(0..1)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+        uint64_t reserved_14_63        : 50;
+#endif /* Word 0 - End */
+    } cn81xx;
+    struct bdk_satax_uctl_intena_w1c_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_14_63        : 50;
+        uint64_t uahc_rx_dbe           : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+        uint64_t uahc_rx_sbe           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+        uint64_t uahc_tx_dbe           : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+        uint64_t uahc_tx_sbe           : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+        uint64_t uahc_fb_dbe           : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+        uint64_t uahc_fb_sbe           : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t xm_r_dbe              : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_R_DBE]. */
+        uint64_t xm_r_sbe              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+        uint64_t xm_w_dbe              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+        uint64_t xm_w_sbe              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xm_w_sbe              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+        uint64_t xm_w_dbe              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+        uint64_t xm_r_sbe              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+        uint64_t xm_r_dbe              : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[XM_R_DBE]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t uahc_fb_sbe           : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+        uint64_t uahc_fb_dbe           : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+        uint64_t uahc_tx_sbe           : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+        uint64_t uahc_tx_dbe           : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+        uint64_t uahc_rx_sbe           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+        uint64_t uahc_rx_dbe           : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+        uint64_t reserved_14_63        : 50;
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_satax_uctl_intena_w1c_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_14_63        : 50;
+        uint64_t uahc_rx_dbe           : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+        uint64_t uahc_rx_sbe           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+        uint64_t uahc_tx_dbe           : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+        uint64_t uahc_tx_sbe           : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+        uint64_t uahc_fb_dbe           : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+        uint64_t uahc_fb_sbe           : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t xm_r_dbe              : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_R_DBE]. */
+        uint64_t xm_r_sbe              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_R_SBE]. */
+        uint64_t xm_w_dbe              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_W_DBE]. */
+        uint64_t xm_w_sbe              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_W_SBE]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+        uint64_t xs_ncb_oob            : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XS_NCB_OOB]. */
+        uint64_t xm_bad_dma            : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_BAD_DMA]. */
+        uint64_t xm_w_sbe              : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_W_SBE]. */
+        uint64_t xm_w_dbe              : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_W_DBE]. */
+        uint64_t xm_r_sbe              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_R_SBE]. */
+        uint64_t xm_r_dbe              : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[XM_R_DBE]. */
+        uint64_t dma_wr_err            : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[DMA_WR_ERR]. */
+        uint64_t dma_rd_err            : 1;  /**< [  7:  7](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[DMA_RD_ERR]. */
+        uint64_t uahc_fb_sbe           : 1;  /**< [  8:  8](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+        uint64_t uahc_fb_dbe           : 1;  /**< [  9:  9](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+        uint64_t uahc_tx_sbe           : 1;  /**< [ 10: 10](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+        uint64_t uahc_tx_dbe           : 1;  /**< [ 11: 11](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+        uint64_t uahc_rx_sbe           : 1;  /**< [ 12: 12](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+        uint64_t uahc_rx_dbe           : 1;  /**< [ 13: 13](R/W1C/H) Reads or clears enable for SATA(0..5)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+        uint64_t reserved_14_63        : 50;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_satax_uctl_intena_w1c bdk_satax_uctl_intena_w1c_t;
+
+/* Return the physical address of SATA(a)_UCTL_INTENA_W1C for the running
+ * chip.  Base address and per-controller stride are model-independent;
+ * only the legal index range (and generated index mask) differs.  An
+ * out-of-range or unknown-model index is reported through
+ * __bdk_csr_fatal(). */
+static inline uint64_t BDK_SATAX_UCTL_INTENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_INTENA_W1C(unsigned long a)
+{
+    const uint64_t base = 0x810000100040ll; /* Common to all models. */
+    const uint64_t step = 0x1000000000ll;   /* Per-controller stride. */
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && a <= 1)
+        return base + step * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && a <= 5)
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && a <= 15)
+        return base + step * (a & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && a <= 3)
+        return base + step * (a & 0x3);
+    __bdk_csr_fatal("SATAX_UCTL_INTENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_INTENA_W1C(a) bdk_satax_uctl_intena_w1c_t
+#define bustype_BDK_SATAX_UCTL_INTENA_W1C(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_INTENA_W1C(a) "SATAX_UCTL_INTENA_W1C"
+#define device_bar_BDK_SATAX_UCTL_INTENA_W1C(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_INTENA_W1C(a) (a)
+#define arguments_BDK_SATAX_UCTL_INTENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_intena_w1s
+ *
+ * SATA UCTL Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_satax_uctl_intena_w1s
+{
+ uint64_t u;
+ struct bdk_satax_uctl_intena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t reserved_5 : 1;
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t reserved_5 : 1;
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_satax_uctl_intena_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t reserved_2_4 : 3;
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_2_4 : 3;
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_satax_uctl_intena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for SATA(0..1)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_satax_uctl_intena_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_satax_uctl_intena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets enable for SATA(0..5)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_satax_uctl_intena_w1s bdk_satax_uctl_intena_w1s_t;
+
+static inline uint64_t BDK_SATAX_UCTL_INTENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_INTENA_W1S(unsigned long a)
+{
+    /* CSR address of SATA(a)_UCTL_INTENA_W1S. Controllers are spaced
+       0x1000000000 (2^36) apart from a common base; the index mask is the
+       model's controller count rounded up to a power of two. An index out
+       of range for the running model is fatal. */
+    const uint64_t base = 0x810000100048ll;
+    const uint64_t step = 0x1000000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 1))
+        return base + step * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 5))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 15))
+        return base + step * (a & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return base + step * (a & 0x3);
+    __bdk_csr_fatal("SATAX_UCTL_INTENA_W1S", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata for SATA(a)_UCTL_INTENA_W1S: value type, bus type,
+   printable name, device BAR and argument list for the CSR helper macros. */
+#define typedef_BDK_SATAX_UCTL_INTENA_W1S(a) bdk_satax_uctl_intena_w1s_t
+#define bustype_BDK_SATAX_UCTL_INTENA_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_INTENA_W1S(a) "SATAX_UCTL_INTENA_W1S"
+#define device_bar_BDK_SATAX_UCTL_INTENA_W1S(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_INTENA_W1S(a) (a)
+#define arguments_BDK_SATAX_UCTL_INTENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_intstat
+ *
+ * SATA UCTL Interrupt Status Register
+ * Summary of different bits of interrupts.
+ *
+ * Accessible always.
+ *
+ * Reset NCB reset.
+ */
+union bdk_satax_uctl_intstat
+{
+    uint64_t u; /* Raw 64-bit register value. */
+    /* Common layout. Bit 5 is reserved here; the chip-specific views below
+       define it as XM_R_DBE (cn8) or DMA_PSN (cn9). */
+    struct bdk_satax_uctl_intstat_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_14_63 : 50;
+        uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1C/H) Detected double-bit error on the UAHC Rx FIFO. */
+        uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1C/H) Detected single-bit error on the UAHC Rx FIFO. */
+        uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1C/H) Detected double-bit error on the UAHC Tx FIFO. */
+        uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1C/H) Detected single-bit error on the UAHC Tx FIFO. */
+        uint64_t uahc_fb_dbe : 1; /**< [  9:  9](R/W1C/H) Detected double-bit error on the UAHC FBS memory. */
+        uint64_t uahc_fb_sbe : 1; /**< [  8:  8](R/W1C/H) Detected single-bit error on the UAHC FBS memory. */
+        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
+        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
+        uint64_t reserved_5 : 1;
+        uint64_t xm_r_sbe : 1; /**< [  4:  4](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
+        uint64_t xm_w_dbe : 1; /**< [  3:  3](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
+        uint64_t xm_w_sbe : 1; /**< [  2:  2](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
+        uint64_t xm_bad_dma : 1; /**< [  1:  1](R/W1C/H) Detected bad DMA access from UAHC to NCB. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates
+                                 the assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
+                                 combinations and address out-of-bounds.
+
+                                 For more information on exact failures, see description in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE].
+
+                                 The hardware does not translate the request correctly and results may violate NCB
+                                 protocols. */
+        uint64_t xs_ncb_oob : 1; /**< [  0:  0](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1MB of register
+                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
+                                 to be set to 1. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
+#else /* Word 0 - Little Endian */
+        uint64_t xs_ncb_oob : 1; /**< [  0:  0](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1MB of register
+                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
+                                 to be set to 1. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
+        uint64_t xm_bad_dma : 1; /**< [  1:  1](R/W1C/H) Detected bad DMA access from UAHC to NCB. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates
+                                 the assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
+                                 combinations and address out-of-bounds.
+
+                                 For more information on exact failures, see description in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE].
+
+                                 The hardware does not translate the request correctly and results may violate NCB
+                                 protocols. */
+        uint64_t xm_w_sbe : 1; /**< [  2:  2](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
+        uint64_t xm_w_dbe : 1; /**< [  3:  3](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
+        uint64_t xm_r_sbe : 1; /**< [  4:  4](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
+        uint64_t reserved_5 : 1;
+        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
+        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
+        uint64_t uahc_fb_sbe : 1; /**< [  8:  8](R/W1C/H) Detected single-bit error on the UAHC FBS memory. */
+        uint64_t uahc_fb_dbe : 1; /**< [  9:  9](R/W1C/H) Detected double-bit error on the UAHC FBS memory. */
+        uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1C/H) Detected single-bit error on the UAHC Tx FIFO. */
+        uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1C/H) Detected double-bit error on the UAHC Tx FIFO. */
+        uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1C/H) Detected single-bit error on the UAHC Rx FIFO. */
+        uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1C/H) Detected double-bit error on the UAHC Rx FIFO. */
+        uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+    } s;
+    /* CN8XXX layout: identical to the common view but bit 5 is XM_R_DBE
+       (double-bit error on the AxiMaster read-data FIFO). */
+    struct bdk_satax_uctl_intstat_cn8
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_14_63 : 50;
+        uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1C/H) Detected double-bit error on the UAHC Rx FIFO. */
+        uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1C/H) Detected single-bit error on the UAHC Rx FIFO. */
+        uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1C/H) Detected double-bit error on the UAHC Tx FIFO. */
+        uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1C/H) Detected single-bit error on the UAHC Tx FIFO. */
+        uint64_t uahc_fb_dbe : 1; /**< [  9:  9](R/W1C/H) Detected double-bit error on the UAHC FBS memory. */
+        uint64_t uahc_fb_sbe : 1; /**< [  8:  8](R/W1C/H) Detected single-bit error on the UAHC FBS memory. */
+        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
+        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
+        uint64_t xm_r_dbe : 1; /**< [  5:  5](R/W1C/H) Detected double-bit error on the UCTL AxiMaster read-data FIFO. */
+        uint64_t xm_r_sbe : 1; /**< [  4:  4](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
+        uint64_t xm_w_dbe : 1; /**< [  3:  3](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
+        uint64_t xm_w_sbe : 1; /**< [  2:  2](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
+        uint64_t xm_bad_dma : 1; /**< [  1:  1](R/W1C/H) Detected bad DMA access from UAHC to NCB. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates
+                                 the assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
+                                 combinations and address out-of-bounds.
+
+                                 For more information on exact failures, see description in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE].
+
+                                 The hardware does not translate the request correctly and results may violate NCB
+                                 protocols. */
+        uint64_t xs_ncb_oob : 1; /**< [  0:  0](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1MB of register
+                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
+                                 to be set to 1. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
+#else /* Word 0 - Little Endian */
+        uint64_t xs_ncb_oob : 1; /**< [  0:  0](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1MB of register
+                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
+                                 to be set to 1. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
+        uint64_t xm_bad_dma : 1; /**< [  1:  1](R/W1C/H) Detected bad DMA access from UAHC to NCB. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates
+                                 the assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
+                                 combinations and address out-of-bounds.
+
+                                 For more information on exact failures, see description in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE].
+
+                                 The hardware does not translate the request correctly and results may violate NCB
+                                 protocols. */
+        uint64_t xm_w_sbe : 1; /**< [  2:  2](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
+        uint64_t xm_w_dbe : 1; /**< [  3:  3](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
+        uint64_t xm_r_sbe : 1; /**< [  4:  4](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
+        uint64_t xm_r_dbe : 1; /**< [  5:  5](R/W1C/H) Detected double-bit error on the UCTL AxiMaster read-data FIFO. */
+        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
+        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
+        uint64_t uahc_fb_sbe : 1; /**< [  8:  8](R/W1C/H) Detected single-bit error on the UAHC FBS memory. */
+        uint64_t uahc_fb_dbe : 1; /**< [  9:  9](R/W1C/H) Detected double-bit error on the UAHC FBS memory. */
+        uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1C/H) Detected single-bit error on the UAHC Tx FIFO. */
+        uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1C/H) Detected double-bit error on the UAHC Tx FIFO. */
+        uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1C/H) Detected single-bit error on the UAHC Rx FIFO. */
+        uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1C/H) Detected double-bit error on the UAHC Rx FIFO. */
+        uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+    } cn8;
+    /* CN9XXX layout: bit 5 is DMA_PSN (poisoned DMA read data); the
+       UCTL/UAHC ECC bits present on cn8 (bits 2-4, 8-13) are reserved. */
+    struct bdk_satax_uctl_intstat_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_8_63 : 56;
+        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
+        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
+        uint64_t dma_psn : 1; /**< [  5:  5](R/W1C/H) Received DMA read response with poisoned data from NCBO.
+                                 Hardware also sets SATA()_UCTL_RAS[DMA_PSN]. */
+        uint64_t reserved_2_4 : 3;
+        uint64_t xm_bad_dma : 1; /**< [  1:  1](R/W1C/H) Detected bad DMA access from UAHC to NCB. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates
+                                 the assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
+                                 combinations and address out-of-bounds.
+
+                                 For more information on exact failures, see description in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE].
+
+                                 The hardware does not translate the request correctly and results may violate NCB
+                                 protocols. */
+        uint64_t xs_ncb_oob : 1; /**< [  0:  0](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1MB of register
+                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
+                                 to be set to 1. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
+#else /* Word 0 - Little Endian */
+        uint64_t xs_ncb_oob : 1; /**< [  0:  0](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1MB of register
+                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
+                                 to be set to 1. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
+        uint64_t xm_bad_dma : 1; /**< [  1:  1](R/W1C/H) Detected bad DMA access from UAHC to NCB. The error information is logged in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates
+                                 the assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
+                                 combinations and address out-of-bounds.
+
+                                 For more information on exact failures, see description in
+                                 SATA()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE].
+
+                                 The hardware does not translate the request correctly and results may violate NCB
+                                 protocols. */
+        uint64_t reserved_2_4 : 3;
+        uint64_t dma_psn : 1; /**< [  5:  5](R/W1C/H) Received DMA read response with poisoned data from NCBO.
+                                 Hardware also sets SATA()_UCTL_RAS[DMA_PSN]. */
+        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
+        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
+        uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+    } cn9;
+};
+typedef union bdk_satax_uctl_intstat bdk_satax_uctl_intstat_t;
+
+static inline uint64_t BDK_SATAX_UCTL_INTSTAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_INTSTAT(unsigned long a)
+{
+    /* CSR address of SATA(a)_UCTL_INTSTAT. Controllers are spaced
+       0x1000000000 (2^36) apart from a common base; the index mask is the
+       model's controller count rounded up to a power of two. An index out
+       of range for the running model is fatal. */
+    const uint64_t base = 0x810000100030ll;
+    const uint64_t step = 0x1000000000ll;
+
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a <= 1))
+        return base + step * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a <= 5))
+        return base + step * (a & 0x7);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 15))
+        return base + step * (a & 0xf);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a <= 3))
+        return base + step * (a & 0x3);
+    __bdk_csr_fatal("SATAX_UCTL_INTSTAT", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata for SATA(a)_UCTL_INTSTAT: value type, bus type,
+   printable name, device BAR and argument list for the CSR helper macros. */
+#define typedef_BDK_SATAX_UCTL_INTSTAT(a) bdk_satax_uctl_intstat_t
+#define bustype_BDK_SATAX_UCTL_INTSTAT(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_INTSTAT(a) "SATAX_UCTL_INTSTAT"
+#define device_bar_BDK_SATAX_UCTL_INTSTAT(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_INTSTAT(a) (a)
+#define arguments_BDK_SATAX_UCTL_INTSTAT(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_intstat_w1s
+ *
+ * SATA UCTL Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_satax_uctl_intstat_w1s
+{
+ uint64_t u;
+ struct bdk_satax_uctl_intstat_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t reserved_5 : 1;
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t reserved_5 : 1;
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_satax_uctl_intstat_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t reserved_2_4 : 3;
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_2_4 : 3;
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..3)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn9;
+ struct bdk_satax_uctl_intstat_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets SATA(0..1)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_satax_uctl_intstat_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets SATA(0..15)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_satax_uctl_intstat_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_14_63 : 50;
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XS_NCB_OOB]. */
+#else /* Word 0 - Little Endian */
+ uint64_t xs_ncb_oob : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xm_w_sbe : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 4: 4](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 5: 5](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t uahc_fb_sbe : 1; /**< [ 8: 8](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_FB_SBE]. */
+ uint64_t uahc_fb_dbe : 1; /**< [ 9: 9](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_FB_DBE]. */
+ uint64_t uahc_tx_sbe : 1; /**< [ 10: 10](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_TX_SBE]. */
+ uint64_t uahc_tx_dbe : 1; /**< [ 11: 11](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_TX_DBE]. */
+ uint64_t uahc_rx_sbe : 1; /**< [ 12: 12](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_RX_SBE]. */
+ uint64_t uahc_rx_dbe : 1; /**< [ 13: 13](R/W1S/H) Reads or sets SATA(0..5)_UCTL_INTSTAT[UAHC_RX_DBE]. */
+ uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_satax_uctl_intstat_w1s bdk_satax_uctl_intstat_w1s_t;
+
+static inline uint64_t BDK_SATAX_UCTL_INTSTAT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_INTSTAT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000100038ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000100038ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000100038ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000100038ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UCTL_INTSTAT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_INTSTAT_W1S(a) bdk_satax_uctl_intstat_w1s_t
+#define bustype_BDK_SATAX_UCTL_INTSTAT_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_INTSTAT_W1S(a) "SATAX_UCTL_INTSTAT_W1S"
+#define device_bar_BDK_SATAX_UCTL_INTSTAT_W1S(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_INTSTAT_W1S(a) (a)
+#define arguments_BDK_SATAX_UCTL_INTSTAT_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_ras
+ *
+ * SATA UCTL RAS Register
+ * This register is intended for delivery of RAS events to the SCP, so should be
+ * ignored by OS drivers.
+ */
+union bdk_satax_uctl_ras
+{
+ uint64_t u;
+ struct bdk_satax_uctl_ras_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1C/H) Received DMA read response with poisoned data from NCBO.
+ Hardware also sets SATA()_UCTL_INTSTAT[DMA_PSN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1C/H) Received DMA read response with poisoned data from NCBO.
+ Hardware also sets SATA()_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uctl_ras_s cn; */
+};
+typedef union bdk_satax_uctl_ras bdk_satax_uctl_ras_t;
+
+static inline uint64_t BDK_SATAX_UCTL_RAS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_RAS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000100050ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UCTL_RAS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_RAS(a) bdk_satax_uctl_ras_t
+#define bustype_BDK_SATAX_UCTL_RAS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_RAS(a) "SATAX_UCTL_RAS"
+#define device_bar_BDK_SATAX_UCTL_RAS(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_RAS(a) (a)
+#define arguments_BDK_SATAX_UCTL_RAS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_ras_ena_w1c
+ *
+ * SATA UCTL RAS Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_satax_uctl_ras_ena_w1c
+{
+ uint64_t u;
+ struct bdk_satax_uctl_ras_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_RAS[DMA_PSN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for SATA(0..3)_UCTL_RAS[DMA_PSN]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uctl_ras_ena_w1c_s cn; */
+};
+typedef union bdk_satax_uctl_ras_ena_w1c bdk_satax_uctl_ras_ena_w1c_t;
+
+static inline uint64_t BDK_SATAX_UCTL_RAS_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_RAS_ENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000100060ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UCTL_RAS_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_RAS_ENA_W1C(a) bdk_satax_uctl_ras_ena_w1c_t
+#define bustype_BDK_SATAX_UCTL_RAS_ENA_W1C(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_RAS_ENA_W1C(a) "SATAX_UCTL_RAS_ENA_W1C"
+#define device_bar_BDK_SATAX_UCTL_RAS_ENA_W1C(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_RAS_ENA_W1C(a) (a)
+#define arguments_BDK_SATAX_UCTL_RAS_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_ras_ena_w1s
+ *
+ * SATA UCTL RAS Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_satax_uctl_ras_ena_w1s
+{
+ uint64_t u;
+ struct bdk_satax_uctl_ras_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_RAS[DMA_PSN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SATA(0..3)_UCTL_RAS[DMA_PSN]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uctl_ras_ena_w1s_s cn; */
+};
+typedef union bdk_satax_uctl_ras_ena_w1s bdk_satax_uctl_ras_ena_w1s_t;
+
+static inline uint64_t BDK_SATAX_UCTL_RAS_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_RAS_ENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000100068ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UCTL_RAS_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_RAS_ENA_W1S(a) bdk_satax_uctl_ras_ena_w1s_t
+#define bustype_BDK_SATAX_UCTL_RAS_ENA_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_RAS_ENA_W1S(a) "SATAX_UCTL_RAS_ENA_W1S"
+#define device_bar_BDK_SATAX_UCTL_RAS_ENA_W1S(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_RAS_ENA_W1S(a) (a)
+#define arguments_BDK_SATAX_UCTL_RAS_ENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_ras_w1s
+ *
+ * SATA UCTL RAS Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_satax_uctl_ras_w1s
+{
+ uint64_t u;
+ struct bdk_satax_uctl_ras_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..3)_UCTL_RAS[DMA_PSN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SATA(0..3)_UCTL_RAS[DMA_PSN]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uctl_ras_w1s_s cn; */
+};
+typedef union bdk_satax_uctl_ras_w1s bdk_satax_uctl_ras_w1s_t;
+
+static inline uint64_t BDK_SATAX_UCTL_RAS_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_RAS_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000100058ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UCTL_RAS_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_RAS_W1S(a) bdk_satax_uctl_ras_w1s_t
+#define bustype_BDK_SATAX_UCTL_RAS_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_RAS_W1S(a) "SATAX_UCTL_RAS_W1S"
+#define device_bar_BDK_SATAX_UCTL_RAS_W1S(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_RAS_W1S(a) (a)
+#define arguments_BDK_SATAX_UCTL_RAS_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_shim_cfg
+ *
+ * SATA UCTL Shim Configuration Register
+ * This register allows configuration of various shim (UCTL) features.
+ *
+ * Fields XS_NCB_OOB_* are captured when there are no outstanding OOB errors indicated in INTSTAT
+ * and a new OOB error arrives.
+ *
+ * Fields XS_BAD_DMA_* are captured when there are no outstanding DMA errors indicated in INTSTAT
+ * and a new DMA error arrives.
+ *
+ * Accessible only when SATA()_UCTL_CTL[A_CLK_EN].
+ *
+ * Reset by NCB reset or SATA()_UCTL_CTL[SATA_UCTL_RST].
+ */
+union bdk_satax_uctl_shim_cfg
+{
+ uint64_t u;
+ struct bdk_satax_uctl_shim_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t xs_ncb_oob_wrn : 1; /**< [ 63: 63](RO/H) Read/write error log for out-of-bound UAHC register access.
+ 0 = read, 1 = write. */
+ uint64_t reserved_60_62 : 3;
+ uint64_t xs_ncb_oob_osrc : 12; /**< [ 59: 48](RO/H) SRCID error log for out-of-bound UAHC register access. The NCB outbound SRCID for the OOB
+ error.
+ \<59:58\> = chipID.
+ \<57\> = Request source: 0 = core, 1 = NCB-device.
+ \<56:51\> = core/NCB-device number. Note that for NCB devices, \<56\> is always 0.
+ \<50:48\> = SubID. */
+ uint64_t xm_bad_dma_wrn : 1; /**< [ 47: 47](RO/H) Read/write error log for bad DMA access from UAHC.
+ 0 = read error log, 1 = write error log. */
+ uint64_t reserved_44_46 : 3;
+ uint64_t xm_bad_dma_type : 4; /**< [ 43: 40](RO/H) ErrType error log for bad DMA access from UAHC. Encodes the type of error encountered
+ (error largest encoded value has priority). See SATA_UCTL_XM_BAD_DMA_TYPE_E. */
+ uint64_t reserved_14_39 : 26;
+ uint64_t dma_read_cmd : 2; /**< [ 13: 12](R/W) Selects the NCB read command used by DMA accesses. See SATA_UCTL_DMA_READ_CMD_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t dma_write_cmd : 1; /**< [ 10: 10](R/W) Selects the NCB write command used by DMA accesses. See enum SATA_UCTL_DMA_WRITE_CMD_E. */
+ uint64_t reserved_0_9 : 10;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_9 : 10;
+ uint64_t dma_write_cmd : 1; /**< [ 10: 10](R/W) Selects the NCB write command used by DMA accesses. See enum SATA_UCTL_DMA_WRITE_CMD_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t dma_read_cmd : 2; /**< [ 13: 12](R/W) Selects the NCB read command used by DMA accesses. See SATA_UCTL_DMA_READ_CMD_E. */
+ uint64_t reserved_14_39 : 26;
+ uint64_t xm_bad_dma_type : 4; /**< [ 43: 40](RO/H) ErrType error log for bad DMA access from UAHC. Encodes the type of error encountered
+ (error largest encoded value has priority). See SATA_UCTL_XM_BAD_DMA_TYPE_E. */
+ uint64_t reserved_44_46 : 3;
+ uint64_t xm_bad_dma_wrn : 1; /**< [ 47: 47](RO/H) Read/write error log for bad DMA access from UAHC.
+ 0 = read error log, 1 = write error log. */
+ uint64_t xs_ncb_oob_osrc : 12; /**< [ 59: 48](RO/H) SRCID error log for out-of-bound UAHC register access. The NCB outbound SRCID for the OOB
+ error.
+ \<59:58\> = chipID.
+ \<57\> = Request source: 0 = core, 1 = NCB-device.
+ \<56:51\> = core/NCB-device number. Note that for NCB devices, \<56\> is always 0.
+ \<50:48\> = SubID. */
+ uint64_t reserved_60_62 : 3;
+ uint64_t xs_ncb_oob_wrn : 1; /**< [ 63: 63](RO/H) Read/write error log for out-of-bound UAHC register access.
+ 0 = read, 1 = write. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uctl_shim_cfg_s cn; */
+};
+typedef union bdk_satax_uctl_shim_cfg bdk_satax_uctl_shim_cfg_t;
+
+static inline uint64_t BDK_SATAX_UCTL_SHIM_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_SHIM_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100001000e8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100001000e8ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100001000e8ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100001000e8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UCTL_SHIM_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_SHIM_CFG(a) bdk_satax_uctl_shim_cfg_t
+#define bustype_BDK_SATAX_UCTL_SHIM_CFG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_SHIM_CFG(a) "SATAX_UCTL_SHIM_CFG"
+#define device_bar_BDK_SATAX_UCTL_SHIM_CFG(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_SHIM_CFG(a) (a)
+#define arguments_BDK_SATAX_UCTL_SHIM_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_spare0
+ *
+ * INTERNAL: SATA UCTL Spare Register 0
+ *
+ * This register is spare.
+ *
+ * Accessible always.
+ *
+ * Reset NCB reset.
+ */
+union bdk_satax_uctl_spare0
+{
+ uint64_t u;
+ struct bdk_satax_uctl_spare0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#else /* Word 0 - Little Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uctl_spare0_s cn; */
+};
+typedef union bdk_satax_uctl_spare0 bdk_satax_uctl_spare0_t;
+
+static inline uint64_t BDK_SATAX_UCTL_SPARE0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_SPARE0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x810000100010ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x810000100010ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x810000100010ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x810000100010ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UCTL_SPARE0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_SPARE0(a) bdk_satax_uctl_spare0_t
+#define bustype_BDK_SATAX_UCTL_SPARE0(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_SPARE0(a) "SATAX_UCTL_SPARE0"
+#define device_bar_BDK_SATAX_UCTL_SPARE0(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_SPARE0(a) (a)
+#define arguments_BDK_SATAX_UCTL_SPARE0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sata#_uctl_spare1
+ *
+ * INTERNAL: SATA UCTL Spare Register 1
+ *
+ * This register is spare.
+ *
+ * Accessible only when SATA()_UCTL_CTL[A_CLK_EN].
+ *
+ * Reset by NCB reset or SATA()_UCTL_CTL[SATA_UCTL_RST].
+ */
+union bdk_satax_uctl_spare1
+{
+ uint64_t u;
+ struct bdk_satax_uctl_spare1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#else /* Word 0 - Little Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_satax_uctl_spare1_s cn; */
+};
+typedef union bdk_satax_uctl_spare1 bdk_satax_uctl_spare1_t;
+
+static inline uint64_t BDK_SATAX_UCTL_SPARE1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SATAX_UCTL_SPARE1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8100001000f8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=5))
+ return 0x8100001000f8ll + 0x1000000000ll * ((a) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=15))
+ return 0x8100001000f8ll + 0x1000000000ll * ((a) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=3))
+ return 0x8100001000f8ll + 0x1000000000ll * ((a) & 0x3);
+ __bdk_csr_fatal("SATAX_UCTL_SPARE1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SATAX_UCTL_SPARE1(a) bdk_satax_uctl_spare1_t
+#define bustype_BDK_SATAX_UCTL_SPARE1(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SATAX_UCTL_SPARE1(a) "SATAX_UCTL_SPARE1"
+#define device_bar_BDK_SATAX_UCTL_SPARE1(a) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SATAX_UCTL_SPARE1(a) (a)
+#define arguments_BDK_SATAX_UCTL_SPARE1(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_SATA_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sli.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sli.h
new file mode 100644
index 0000000000..e2e780351f
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-sli.h
@@ -0,0 +1,7573 @@
+#ifndef __BDK_CSRS_SLI_H__
+#define __BDK_CSRS_SLI_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium SLI.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration sdp_in_rams_e
+ *
+ * SDP Input RAMs Field Enumeration
+ * Enumerates the relative bit positions within SDP(0)_ECC(1)_CTL[CDIS].
+ */
+#define BDK_SDP_IN_RAMS_E_CNTS (3)
+#define BDK_SDP_IN_RAMS_E_DB (4)
+#define BDK_SDP_IN_RAMS_E_DBELL (0xd)
+#define BDK_SDP_IN_RAMS_E_DIR (5)
+#define BDK_SDP_IN_RAMS_E_DMARSP0 (0)
+#define BDK_SDP_IN_RAMS_E_DMARSP1 (1)
+#define BDK_SDP_IN_RAMS_E_GTHR0 (8)
+#define BDK_SDP_IN_RAMS_E_GTHR1 (9)
+#define BDK_SDP_IN_RAMS_E_IHFD0 (6)
+#define BDK_SDP_IN_RAMS_E_IHFD1 (7)
+#define BDK_SDP_IN_RAMS_E_IND (0xb)
+#define BDK_SDP_IN_RAMS_E_INFO (0xa)
+#define BDK_SDP_IN_RAMS_E_LEVELS (0xc)
+#define BDK_SDP_IN_RAMS_E_MBOX (0x10)
+#define BDK_SDP_IN_RAMS_E_PERF (2)
+#define BDK_SDP_IN_RAMS_E_PKTRSP (0xf)
+#define BDK_SDP_IN_RAMS_E_X2P (0xe)
+
+/**
+ * Enumeration sdp_out_rams_e
+ *
+ * SDP Output RAMs Field Enumeration
+ * Enumerates the relative bit positions within SDP(0)_ECC(0)_CTL[CDIS].
+ */
+#define BDK_SDP_OUT_RAMS_E_BISIZE (0)
+#define BDK_SDP_OUT_RAMS_E_BPF0 (0xd)
+#define BDK_SDP_OUT_RAMS_E_BPF1 (0xe)
+#define BDK_SDP_OUT_RAMS_E_CNTS (2)
+#define BDK_SDP_OUT_RAMS_E_DB (4)
+#define BDK_SDP_OUT_RAMS_E_DBELL (3)
+#define BDK_SDP_OUT_RAMS_E_DPLF_DIR (6)
+#define BDK_SDP_OUT_RAMS_E_DPLF_IND (9)
+#define BDK_SDP_OUT_RAMS_E_IB (7)
+#define BDK_SDP_OUT_RAMS_E_INFO (0xa)
+#define BDK_SDP_OUT_RAMS_E_IPLF_DIR (5)
+#define BDK_SDP_OUT_RAMS_E_IPLF_IND (8)
+#define BDK_SDP_OUT_RAMS_E_LEVELS (0xb)
+#define BDK_SDP_OUT_RAMS_E_MSIX_ADDR (0x11)
+#define BDK_SDP_OUT_RAMS_E_MSIX_DATA (0x12)
+#define BDK_SDP_OUT_RAMS_E_P2X (0xc)
+#define BDK_SDP_OUT_RAMS_E_PERF (1)
+#define BDK_SDP_OUT_RAMS_E_TRACK0 (0xf)
+#define BDK_SDP_OUT_RAMS_E_TRACK1 (0x10)
+
+/**
+ * Enumeration sli_bar_e
+ *
+ * SLI Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_SLI_BAR_E_SLIX_PF_BAR0_CN81XX(a) (0x874000000000ll + 0x1000000000ll * (a))
+#define BDK_SLI_BAR_E_SLIX_PF_BAR0_CN81XX_SIZE 0x2000000ull
+#define BDK_SLI_BAR_E_SLIX_PF_BAR0_CN88XX(a) (0x874000000000ll + 0x1000000000ll * (a))
+#define BDK_SLI_BAR_E_SLIX_PF_BAR0_CN88XX_SIZE 0x2000000ull
+#define BDK_SLI_BAR_E_SLIX_PF_BAR0_CN83XX(a) (0x874000000000ll + 0x1000000000ll * (a))
+#define BDK_SLI_BAR_E_SLIX_PF_BAR0_CN83XX_SIZE 0x800000000ull
+#define BDK_SLI_BAR_E_SLIX_PF_BAR4_CN81XX(a) (0x874010000000ll + 0x1000000000ll * (a))
+#define BDK_SLI_BAR_E_SLIX_PF_BAR4_CN81XX_SIZE 0x100000ull
+#define BDK_SLI_BAR_E_SLIX_PF_BAR4_CN88XX(a) (0x874010000000ll + 0x1000000000ll * (a))
+#define BDK_SLI_BAR_E_SLIX_PF_BAR4_CN88XX_SIZE 0x100000ull
+#define BDK_SLI_BAR_E_SLIX_PF_BAR4_CN83XX(a) (0x874c00000000ll + 0x1000000000ll * (a))
+#define BDK_SLI_BAR_E_SLIX_PF_BAR4_CN83XX_SIZE 0x100000ull
+
+/**
+ * Enumeration sli_endianswap_e
+ *
+ * SLI/SDP Endian Swap Mode Enumeration
+ * Enumerates the endian swap modes that SLI and SDP support.
+ */
+#define BDK_SLI_ENDIANSWAP_E_BYTE_SWAP_32B (2)
+#define BDK_SLI_ENDIANSWAP_E_BYTE_SWAP_64B (1)
+#define BDK_SLI_ENDIANSWAP_E_LW_SWAP_64B (3)
+#define BDK_SLI_ENDIANSWAP_E_PASS_THRU (0)
+
+/**
+ * Enumeration sli_int_vec_e
+ *
+ * SLI MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_SLI_INT_VEC_E_MACX(a) (1 + (a))
+#define BDK_SLI_INT_VEC_E_MBE (0)
+#define BDK_SLI_INT_VEC_E_SDP_ECCX_LINT(a) (0xe + (a))
+#define BDK_SLI_INT_VEC_E_SDP_EPFX_FLR_VF_LINT(a) (0 + (a))
+#define BDK_SLI_INT_VEC_E_SDP_EPFX_IRERR_LINT(a) (0xa + (a))
+#define BDK_SLI_INT_VEC_E_SDP_EPFX_ORERR_LINT(a) (0xc + (a))
+#define BDK_SLI_INT_VEC_E_SLI_EPFX_DMA_VF_LINT(a) (8 + (a))
+#define BDK_SLI_INT_VEC_E_SLI_EPFX_MISC_LINT(a) (2 + (a))
+#define BDK_SLI_INT_VEC_E_SLI_EPFX_PP_VF_LINT(a) (6 + (a))
+#define BDK_SLI_INT_VEC_E_SLI_MBE (0x10)
+
+/**
+ * Enumeration sli_rams_e
+ *
+ * SLI RAM Field Enumeration
+ * Enumerates the relative bit positions within SLI()_MEM_CTL[CDIS].
+ */
+#define BDK_SLI_RAMS_E_CPL0_FIF (3)
+#define BDK_SLI_RAMS_E_CPL1_FIF (2)
+#define BDK_SLI_RAMS_E_CPL2_FIF (1)
+#define BDK_SLI_RAMS_E_CPL3_FIF (0)
+#define BDK_SLI_RAMS_E_DSI_FIF (0x1e)
+#define BDK_SLI_RAMS_E_NOD_FIF (0x1d)
+#define BDK_SLI_RAMS_E_P2NP0C_FIF (0xf)
+#define BDK_SLI_RAMS_E_P2NP0N_FIF (0xe)
+#define BDK_SLI_RAMS_E_P2NP0P_FIF (0xd)
+#define BDK_SLI_RAMS_E_P2NP1C_FIF (0xc)
+#define BDK_SLI_RAMS_E_P2NP1N_FIF (0xb)
+#define BDK_SLI_RAMS_E_P2NP1P_FIF (0xa)
+#define BDK_SLI_RAMS_E_P2NP2C_FIF (9)
+#define BDK_SLI_RAMS_E_P2NP2N_FIF (8)
+#define BDK_SLI_RAMS_E_P2NP2P_FIF (7)
+#define BDK_SLI_RAMS_E_P2NP3C_FIF (6)
+#define BDK_SLI_RAMS_E_P2NP3N_FIF (5)
+#define BDK_SLI_RAMS_E_P2NP3P_FIF (4)
+#define BDK_SLI_RAMS_E_REG_FIF (0x1c)
+#define BDK_SLI_RAMS_E_SNCF0_FIF (0x1b)
+#define BDK_SLI_RAMS_E_SNCF1_FIF (0x18)
+#define BDK_SLI_RAMS_E_SNCF2_FIF (0x15)
+#define BDK_SLI_RAMS_E_SNCF3_FIF (0x12)
+#define BDK_SLI_RAMS_E_SNDFH0_FIF (0x1a)
+#define BDK_SLI_RAMS_E_SNDFH1_FIF (0x17)
+#define BDK_SLI_RAMS_E_SNDFH2_FIF (0x14)
+#define BDK_SLI_RAMS_E_SNDFH3_FIF (0x11)
+#define BDK_SLI_RAMS_E_SNDFL0_FIF (0x19)
+#define BDK_SLI_RAMS_E_SNDFL1_FIF (0x16)
+#define BDK_SLI_RAMS_E_SNDFL2_FIF (0x13)
+#define BDK_SLI_RAMS_E_SNDFL3_FIF (0x10)
+
+/**
+ * Structure sli_s2m_op_s
+ *
+ * SLI to MAC Operation Structure
+ * Core initiated load and store operations that are initiating MAC transactions form an address
+ * with this structure. 8-bit, 16-bit, 32-bit and 64-bit reads and writes, in addition to atomics
+ * are supported to this region.
+ */
union bdk_sli_s2m_op_s
{
    uint64_t u; /* Whole address word as a raw 64-bit value. */
    /* Common layout; bitfields are listed MSB-first or LSB-first depending on
     * host endianness so the in-memory layout is identical either way. */
    struct bdk_sli_s2m_op_s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_48_63 : 16;
        uint64_t io : 1; /**< [ 47: 47] Indicates IO space. */
        uint64_t reserved_46 : 1;
        uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
        uint64_t did_hi : 4; /**< [ 43: 40] SLI device ID high bits. Specifies which SLI:
                                 0x8 = SLI0.
                                 0x9 = SLI1.

                                 else = Reserved. */
        uint64_t region : 8; /**< [ 39: 32] SLI region. Indexes into SLI()_S2M_REG()_ACC. */
        uint64_t addr : 32; /**< [ 31: 0] Register address within the device. */
#else /* Word 0 - Little Endian */
        uint64_t addr : 32; /**< [ 31: 0] Register address within the device. */
        uint64_t region : 8; /**< [ 39: 32] SLI region. Indexes into SLI()_S2M_REG()_ACC. */
        uint64_t did_hi : 4; /**< [ 43: 40] SLI device ID high bits. Specifies which SLI:
                                 0x8 = SLI0.
                                 0x9 = SLI1.

                                 else = Reserved. */
        uint64_t node : 2; /**< [ 45: 44] CCPI node number. */
        uint64_t reserved_46 : 1;
        uint64_t io : 1; /**< [ 47: 47] Indicates IO space. */
        uint64_t reserved_48_63 : 16;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sli_s2m_op_s_s cn81xx; */
    /* struct bdk_sli_s2m_op_s_s cn88xx; */
    /* CN83XX variant: same bit positions, tighter constraints on [node]/[did_hi]. */
    struct bdk_sli_s2m_op_s_cn83xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_48_63 : 16;
        uint64_t io : 1; /**< [ 47: 47] Indicates IO space. */
        uint64_t reserved_46 : 1;
        uint64_t node : 2; /**< [ 45: 44] CCPI node number. Must be zero for CN83XX. */
        uint64_t did_hi : 4; /**< [ 43: 40] SLI device ID high bits. Must be 0x8 for CN83XX. */
        uint64_t region : 8; /**< [ 39: 32] SLI region. Indexes into SLI()_S2M_REG()_ACC. */
        uint64_t addr : 32; /**< [ 31: 0] Register address within the device. */
#else /* Word 0 - Little Endian */
        uint64_t addr : 32; /**< [ 31: 0] Register address within the device. */
        uint64_t region : 8; /**< [ 39: 32] SLI region. Indexes into SLI()_S2M_REG()_ACC. */
        uint64_t did_hi : 4; /**< [ 43: 40] SLI device ID high bits. Must be 0x8 for CN83XX. */
        uint64_t node : 2; /**< [ 45: 44] CCPI node number. Must be zero for CN83XX. */
        uint64_t reserved_46 : 1;
        uint64_t io : 1; /**< [ 47: 47] Indicates IO space. */
        uint64_t reserved_48_63 : 16;
#endif /* Word 0 - End */
    } cn83xx;
};
+
+/**
+ * Structure sli_sdp_addr_s
+ *
+ * INTERNAL: SLI/SDP Address Structure
+ *
+ * Address decoding for SLI/SDP CSR address space
+ */
union bdk_sli_sdp_addr_s
{
    uint64_t u; /* Whole address word as a raw 64-bit value. */
    struct bdk_sli_sdp_addr_s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_48_63 : 16;
        uint64_t bit47_46 : 2; /**< [ 47: 46] NA */
        uint64_t nn : 2; /**< [ 45: 44] NA */
        uint64_t did : 8; /**< [ 43: 36] SLI DID */
        uint64_t region : 2; /**< [ 35: 34] NA */
        uint64_t r33_32 : 2; /**< [ 33: 32] NA */
        uint64_t ncbonly : 1; /**< [ 31: 31] Set to 1 for registers that can only be accessed by AP cores */
        uint64_t r30_26 : 5; /**< [ 30: 26] */
        uint64_t epf : 3; /**< [ 25: 23] EPF targeted by AP cores */
        uint64_t ring : 6; /**< [ 22: 17] SDP Packet Ring */
        uint64_t space : 2; /**< [ 16: 15] SDP and SLI decode space:
                                 0x2 = SDP ring space.
                                 0x0 = SDP common space.
                                 0x1 = SLI common space. */
        uint64_t offset : 11; /**< [ 14: 4] Register offset */
        uint64_t bit3_0 : 4; /**< [ 3: 0] NA */
#else /* Word 0 - Little Endian */
        uint64_t bit3_0 : 4; /**< [ 3: 0] NA */
        uint64_t offset : 11; /**< [ 14: 4] Register offset */
        uint64_t space : 2; /**< [ 16: 15] SDP and SLI decode space:
                                 0x2 = SDP ring space.
                                 0x0 = SDP common space.
                                 0x1 = SLI common space. */
        uint64_t ring : 6; /**< [ 22: 17] SDP Packet Ring */
        uint64_t epf : 3; /**< [ 25: 23] EPF targeted by AP cores */
        uint64_t r30_26 : 5; /**< [ 30: 26] */
        uint64_t ncbonly : 1; /**< [ 31: 31] Set to 1 for registers that can only be accessed by AP cores */
        uint64_t r33_32 : 2; /**< [ 33: 32] NA */
        uint64_t region : 2; /**< [ 35: 34] NA */
        uint64_t did : 8; /**< [ 43: 36] SLI DID */
        uint64_t nn : 2; /**< [ 45: 44] NA */
        uint64_t bit47_46 : 2; /**< [ 47: 46] NA */
        uint64_t reserved_48_63 : 16;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sli_sdp_addr_s_s cn; */
};
+
+/**
+ * Register (NCB) sdp#_bist#_status
+ *
+ * SDP BIST Status Register
+ * This register contains results from BIST runs of MAC's memories: 0 = pass (or BIST in
+ * progress/never run), 1 = fail.
+ */
union bdk_sdpx_bistx_status
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_bistx_status_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63 : 32;
        uint64_t bstatus : 32; /**< [ 31: 0](RO/H) BIST status. One bit per memory.
                                   SDP()_BIST(0)_STATUS enumerated by SDP_OUT_RAMS_E and SDP()_BIST(1)_STATUS
                                   enumerated by SDP_IN_RAMS_E. */
#else /* Word 0 - Little Endian */
        uint64_t bstatus : 32; /**< [ 31: 0](RO/H) BIST status. One bit per memory.
                                   SDP()_BIST(0)_STATUS enumerated by SDP_OUT_RAMS_E and SDP()_BIST(1)_STATUS
                                   enumerated by SDP_IN_RAMS_E. */
        uint64_t reserved_32_63 : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_bistx_status_s cn; */
};
typedef union bdk_sdpx_bistx_status bdk_sdpx_bistx_status_t;
+
+static inline uint64_t BDK_SDPX_BISTX_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_BISTX_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880120ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_BISTX_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_BISTX_STATUS(a,b) bdk_sdpx_bistx_status_t
+#define bustype_BDK_SDPX_BISTX_STATUS(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_BISTX_STATUS(a,b) "SDPX_BISTX_STATUS"
+#define device_bar_BDK_SDPX_BISTX_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_BISTX_STATUS(a,b) (a)
+#define arguments_BDK_SDPX_BISTX_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_ecc#_ctl
+ *
+ * SDP ECC Control Register
+ * This register controls the ECC of the SDP memories.
+ */
union bdk_sdpx_eccx_ctl
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_eccx_ctl_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63 : 32;
        uint64_t cdis : 32; /**< [ 31: 0](R/W) Disables ECC correction on each RAM.
                                SDP()_ECC(0)_CTL enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_CTL
                                enumerated by SDP_IN_RAMS_E. */
#else /* Word 0 - Little Endian */
        uint64_t cdis : 32; /**< [ 31: 0](R/W) Disables ECC correction on each RAM.
                                SDP()_ECC(0)_CTL enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_CTL
                                enumerated by SDP_IN_RAMS_E. */
        uint64_t reserved_32_63 : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_eccx_ctl_s cn; */
};
typedef union bdk_sdpx_eccx_ctl bdk_sdpx_eccx_ctl_t;
+
+static inline uint64_t BDK_SDPX_ECCX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_ECCX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x8740008800a0ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_ECCX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_ECCX_CTL(a,b) bdk_sdpx_eccx_ctl_t
+#define bustype_BDK_SDPX_ECCX_CTL(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_ECCX_CTL(a,b) "SDPX_ECCX_CTL"
+#define device_bar_BDK_SDPX_ECCX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_ECCX_CTL(a,b) (a)
+#define arguments_BDK_SDPX_ECCX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_ecc#_flip
+ *
+ * SDP ECC Control Register
+ * This register controls the ECC of the SDP memories.
+ */
union bdk_sdpx_eccx_flip
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_eccx_flip_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t flip1 : 32; /**< [ 63: 32](R/W) Flips syndrome bit 1 on writes.
                                 SDP()_ECC(0)_FLIP enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_FLIP
                                 enumerated by SDP_IN_RAMS_E. */
        uint64_t flip0 : 32; /**< [ 31: 0](R/W) Flips syndrome bit 0 on writes.
                                 SDP()_ECC(0)_FLIP enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_FLIP
                                 enumerated by SDP_IN_RAMS_E. */
#else /* Word 0 - Little Endian */
        uint64_t flip0 : 32; /**< [ 31: 0](R/W) Flips syndrome bit 0 on writes.
                                 SDP()_ECC(0)_FLIP enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_FLIP
                                 enumerated by SDP_IN_RAMS_E. */
        uint64_t flip1 : 32; /**< [ 63: 32](R/W) Flips syndrome bit 1 on writes.
                                 SDP()_ECC(0)_FLIP enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_FLIP
                                 enumerated by SDP_IN_RAMS_E. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_eccx_flip_s cn; */
};
typedef union bdk_sdpx_eccx_flip bdk_sdpx_eccx_flip_t;
+
+static inline uint64_t BDK_SDPX_ECCX_FLIP(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_ECCX_FLIP(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880100ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_ECCX_FLIP", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_ECCX_FLIP(a,b) bdk_sdpx_eccx_flip_t
+#define bustype_BDK_SDPX_ECCX_FLIP(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_ECCX_FLIP(a,b) "SDPX_ECCX_FLIP"
+#define device_bar_BDK_SDPX_ECCX_FLIP(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_ECCX_FLIP(a,b) (a)
+#define arguments_BDK_SDPX_ECCX_FLIP(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_ecc#_lint
+ *
+ * SDP ECC Interrupt Status Register
+ * This register contains the ECC interrupt-summary bits of the SDP.
+ */
union bdk_sdpx_eccx_lint
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_eccx_lint_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t dbe : 32; /**< [ 63: 32](R/W1C/H) Double-bit error detected in internal RAM. One bit per memory.
                               SDP()_ECC(0)_LINT enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_LINT
                               enumerated by SDP_IN_RAMS_E. */
        uint64_t sbe : 32; /**< [ 31: 0](R/W1C/H) Single-bit error detected in internal RAM. One bit per memory.
                               SDP()_ECC(0)_LINT enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_LINT
                               enumerated by SDP_IN_RAMS_E. */
#else /* Word 0 - Little Endian */
        uint64_t sbe : 32; /**< [ 31: 0](R/W1C/H) Single-bit error detected in internal RAM. One bit per memory.
                               SDP()_ECC(0)_LINT enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_LINT
                               enumerated by SDP_IN_RAMS_E. */
        uint64_t dbe : 32; /**< [ 63: 32](R/W1C/H) Double-bit error detected in internal RAM. One bit per memory.
                               SDP()_ECC(0)_LINT enumerated by SDP_OUT_RAMS_E and SDP()_ECC(1)_LINT
                               enumerated by SDP_IN_RAMS_E. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_eccx_lint_s cn; */
};
typedef union bdk_sdpx_eccx_lint bdk_sdpx_eccx_lint_t;
+
+static inline uint64_t BDK_SDPX_ECCX_LINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_ECCX_LINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880020ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_ECCX_LINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_ECCX_LINT(a,b) bdk_sdpx_eccx_lint_t
+#define bustype_BDK_SDPX_ECCX_LINT(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_ECCX_LINT(a,b) "SDPX_ECCX_LINT"
+#define device_bar_BDK_SDPX_ECCX_LINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_ECCX_LINT(a,b) (a)
+#define arguments_BDK_SDPX_ECCX_LINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_ecc#_lint_ena_w1c
+ *
+ * SDP ECC Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
union bdk_sdpx_eccx_lint_ena_w1c
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_eccx_lint_ena_w1c_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t dbe : 32; /**< [ 63: 32](R/W1C/H) Reads or clears enable for SDP(0)_ECC(0..1)_LINT[DBE]. */
        uint64_t sbe : 32; /**< [ 31: 0](R/W1C/H) Reads or clears enable for SDP(0)_ECC(0..1)_LINT[SBE]. */
#else /* Word 0 - Little Endian */
        uint64_t sbe : 32; /**< [ 31: 0](R/W1C/H) Reads or clears enable for SDP(0)_ECC(0..1)_LINT[SBE]. */
        uint64_t dbe : 32; /**< [ 63: 32](R/W1C/H) Reads or clears enable for SDP(0)_ECC(0..1)_LINT[DBE]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_eccx_lint_ena_w1c_s cn; */
};
typedef union bdk_sdpx_eccx_lint_ena_w1c bdk_sdpx_eccx_lint_ena_w1c_t;
+
+static inline uint64_t BDK_SDPX_ECCX_LINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_ECCX_LINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880060ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_ECCX_LINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_ECCX_LINT_ENA_W1C(a,b) bdk_sdpx_eccx_lint_ena_w1c_t
+#define bustype_BDK_SDPX_ECCX_LINT_ENA_W1C(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_ECCX_LINT_ENA_W1C(a,b) "SDPX_ECCX_LINT_ENA_W1C"
+#define device_bar_BDK_SDPX_ECCX_LINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_ECCX_LINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SDPX_ECCX_LINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_ecc#_lint_ena_w1s
+ *
+ * SDP ECC Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
union bdk_sdpx_eccx_lint_ena_w1s
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_eccx_lint_ena_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t dbe : 32; /**< [ 63: 32](R/W1S/H) Reads or sets enable for SDP(0)_ECC(0..1)_LINT[DBE]. */
        uint64_t sbe : 32; /**< [ 31: 0](R/W1S/H) Reads or sets enable for SDP(0)_ECC(0..1)_LINT[SBE]. */
#else /* Word 0 - Little Endian */
        uint64_t sbe : 32; /**< [ 31: 0](R/W1S/H) Reads or sets enable for SDP(0)_ECC(0..1)_LINT[SBE]. */
        uint64_t dbe : 32; /**< [ 63: 32](R/W1S/H) Reads or sets enable for SDP(0)_ECC(0..1)_LINT[DBE]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_eccx_lint_ena_w1s_s cn; */
};
typedef union bdk_sdpx_eccx_lint_ena_w1s bdk_sdpx_eccx_lint_ena_w1s_t;
+
+static inline uint64_t BDK_SDPX_ECCX_LINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_ECCX_LINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880080ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_ECCX_LINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_ECCX_LINT_ENA_W1S(a,b) bdk_sdpx_eccx_lint_ena_w1s_t
+#define bustype_BDK_SDPX_ECCX_LINT_ENA_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_ECCX_LINT_ENA_W1S(a,b) "SDPX_ECCX_LINT_ENA_W1S"
+#define device_bar_BDK_SDPX_ECCX_LINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_ECCX_LINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SDPX_ECCX_LINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_ecc#_lint_w1s
+ *
+ * SDP ECC Interrupt Set Register
+ * This register sets interrupt bits.
+ */
union bdk_sdpx_eccx_lint_w1s
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_eccx_lint_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t dbe : 32; /**< [ 63: 32](R/W1S/H) Reads or sets SDP(0)_ECC(0..1)_LINT[DBE]. */
        uint64_t sbe : 32; /**< [ 31: 0](R/W1S/H) Reads or sets SDP(0)_ECC(0..1)_LINT[SBE]. */
#else /* Word 0 - Little Endian */
        uint64_t sbe : 32; /**< [ 31: 0](R/W1S/H) Reads or sets SDP(0)_ECC(0..1)_LINT[SBE]. */
        uint64_t dbe : 32; /**< [ 63: 32](R/W1S/H) Reads or sets SDP(0)_ECC(0..1)_LINT[DBE]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_eccx_lint_w1s_s cn; */
};
typedef union bdk_sdpx_eccx_lint_w1s bdk_sdpx_eccx_lint_w1s_t;
+
+static inline uint64_t BDK_SDPX_ECCX_LINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_ECCX_LINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880040ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_ECCX_LINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_ECCX_LINT_W1S(a,b) bdk_sdpx_eccx_lint_w1s_t
+#define bustype_BDK_SDPX_ECCX_LINT_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_ECCX_LINT_W1S(a,b) "SDPX_ECCX_LINT_W1S"
+#define device_bar_BDK_SDPX_ECCX_LINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_ECCX_LINT_W1S(a,b) (a)
+#define arguments_BDK_SDPX_ECCX_LINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_flr_vf_lint
+ *
+ * SDP Function Level Reset VF Bit Array Registers
+ * These registers are only valid for PEM0 PF0 and PEM2 PF0.
+ */
union bdk_sdpx_epfx_flr_vf_lint
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_flr_vf_lint_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) When a VF causes an FLR the appropriate VF indexed bit is set. */
#else /* Word 0 - Little Endian */
        uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) When a VF causes an FLR the appropriate VF indexed bit is set. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_flr_vf_lint_s cn; */
};
typedef union bdk_sdpx_epfx_flr_vf_lint bdk_sdpx_epfx_flr_vf_lint_t;
+
+static inline uint64_t BDK_SDPX_EPFX_FLR_VF_LINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_FLR_VF_LINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880c00ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_FLR_VF_LINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_FLR_VF_LINT(a,b) bdk_sdpx_epfx_flr_vf_lint_t
+#define bustype_BDK_SDPX_EPFX_FLR_VF_LINT(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_FLR_VF_LINT(a,b) "SDPX_EPFX_FLR_VF_LINT"
+#define device_bar_BDK_SDPX_EPFX_FLR_VF_LINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_FLR_VF_LINT(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_FLR_VF_LINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_flr_vf_lint_ena_w1c
+ *
+ * SDP Function Level Reset VF Bit Array Local Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
union bdk_sdpx_epfx_flr_vf_lint_ena_w1c
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_flr_vf_lint_ena_w1c_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_FLR_VF_LINT[VF_INT]. */
#else /* Word 0 - Little Endian */
        uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_FLR_VF_LINT[VF_INT]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_flr_vf_lint_ena_w1c_s cn; */
};
typedef union bdk_sdpx_epfx_flr_vf_lint_ena_w1c bdk_sdpx_epfx_flr_vf_lint_ena_w1c_t;
+
+static inline uint64_t BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880e00ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_FLR_VF_LINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1C(a,b) bdk_sdpx_epfx_flr_vf_lint_ena_w1c_t
+#define bustype_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1C(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1C(a,b) "SDPX_EPFX_FLR_VF_LINT_ENA_W1C"
+#define device_bar_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_flr_vf_lint_ena_w1s
+ *
+ * SDP Function Level Reset VF Bit Array Local Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
union bdk_sdpx_epfx_flr_vf_lint_ena_w1s
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_flr_vf_lint_ena_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_FLR_VF_LINT[VF_INT]. */
#else /* Word 0 - Little Endian */
        uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_FLR_VF_LINT[VF_INT]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_flr_vf_lint_ena_w1s_s cn; */
};
typedef union bdk_sdpx_epfx_flr_vf_lint_ena_w1s bdk_sdpx_epfx_flr_vf_lint_ena_w1s_t;
+
+static inline uint64_t BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880f00ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_FLR_VF_LINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1S(a,b) bdk_sdpx_epfx_flr_vf_lint_ena_w1s_t
+#define bustype_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1S(a,b) "SDPX_EPFX_FLR_VF_LINT_ENA_W1S"
+#define device_bar_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_FLR_VF_LINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_flr_vf_lint_w1s
+ *
+ * SDP Function Level Reset VF Bit Array Set Registers
+ * This register sets interrupt bits.
+ */
union bdk_sdpx_epfx_flr_vf_lint_w1s
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_flr_vf_lint_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_FLR_VF_LINT[VF_INT]. */
#else /* Word 0 - Little Endian */
        uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_FLR_VF_LINT[VF_INT]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_flr_vf_lint_w1s_s cn; */
};
typedef union bdk_sdpx_epfx_flr_vf_lint_w1s bdk_sdpx_epfx_flr_vf_lint_w1s_t;
+
+static inline uint64_t BDK_SDPX_EPFX_FLR_VF_LINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_FLR_VF_LINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880d00ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_FLR_VF_LINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_FLR_VF_LINT_W1S(a,b) bdk_sdpx_epfx_flr_vf_lint_w1s_t
+#define bustype_BDK_SDPX_EPFX_FLR_VF_LINT_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_FLR_VF_LINT_W1S(a,b) "SDPX_EPFX_FLR_VF_LINT_W1S"
+#define device_bar_BDK_SDPX_EPFX_FLR_VF_LINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_FLR_VF_LINT_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_FLR_VF_LINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_irerr_lint
+ *
+ * SDP Input Error Status Register
+ * This register indicates if an error has been detected on an input ring.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC Reset.
+ * These registers are not affected by VF FLR.
+ */
union bdk_sdpx_epfx_irerr_lint
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_irerr_lint_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Error has been detected on input ring i. */
#else /* Word 0 - Little Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Error has been detected on input ring i. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_irerr_lint_s cn; */
};
typedef union bdk_sdpx_epfx_irerr_lint bdk_sdpx_epfx_irerr_lint_t;
+
+static inline uint64_t BDK_SDPX_EPFX_IRERR_LINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_IRERR_LINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880400ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_IRERR_LINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_IRERR_LINT(a,b) bdk_sdpx_epfx_irerr_lint_t
+#define bustype_BDK_SDPX_EPFX_IRERR_LINT(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_IRERR_LINT(a,b) "SDPX_EPFX_IRERR_LINT"
+#define device_bar_BDK_SDPX_EPFX_IRERR_LINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_IRERR_LINT(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_IRERR_LINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_irerr_lint_ena_w1c
+ *
+ * SDP Input Error Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
union bdk_sdpx_epfx_irerr_lint_ena_w1c
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_irerr_lint_ena_w1c_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_IRERR_LINT[RING_ERR]. */
#else /* Word 0 - Little Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_IRERR_LINT[RING_ERR]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_irerr_lint_ena_w1c_s cn; */
};
typedef union bdk_sdpx_epfx_irerr_lint_ena_w1c bdk_sdpx_epfx_irerr_lint_ena_w1c_t;
+
+static inline uint64_t BDK_SDPX_EPFX_IRERR_LINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_IRERR_LINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880600ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_IRERR_LINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1C(a,b) bdk_sdpx_epfx_irerr_lint_ena_w1c_t
+#define bustype_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1C(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1C(a,b) "SDPX_EPFX_IRERR_LINT_ENA_W1C"
+#define device_bar_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_irerr_lint_ena_w1s
+ *
+ * SDP Input Error Enable Set Register
+ * This register sets interrupt enable bits.
+ */
union bdk_sdpx_epfx_irerr_lint_ena_w1s
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_irerr_lint_ena_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_IRERR_LINT[RING_ERR]. */
#else /* Word 0 - Little Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_IRERR_LINT[RING_ERR]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_irerr_lint_ena_w1s_s cn; */
};
typedef union bdk_sdpx_epfx_irerr_lint_ena_w1s bdk_sdpx_epfx_irerr_lint_ena_w1s_t;
+
+static inline uint64_t BDK_SDPX_EPFX_IRERR_LINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_IRERR_LINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880700ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_IRERR_LINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1S(a,b) bdk_sdpx_epfx_irerr_lint_ena_w1s_t
+#define bustype_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1S(a,b) "SDPX_EPFX_IRERR_LINT_ENA_W1S"
+#define device_bar_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_IRERR_LINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_irerr_lint_w1s
+ *
+ * SDP Input Error Status Set Register
+ * This register sets interrupt bits.
+ */
union bdk_sdpx_epfx_irerr_lint_w1s
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_irerr_lint_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_IRERR_LINT[RING_ERR]. */
#else /* Word 0 - Little Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_IRERR_LINT[RING_ERR]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_irerr_lint_w1s_s cn; */
};
typedef union bdk_sdpx_epfx_irerr_lint_w1s bdk_sdpx_epfx_irerr_lint_w1s_t;
+
+static inline uint64_t BDK_SDPX_EPFX_IRERR_LINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_IRERR_LINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880500ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_IRERR_LINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_IRERR_LINT_W1S(a,b) bdk_sdpx_epfx_irerr_lint_w1s_t
+#define bustype_BDK_SDPX_EPFX_IRERR_LINT_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_IRERR_LINT_W1S(a,b) "SDPX_EPFX_IRERR_LINT_W1S"
+#define device_bar_BDK_SDPX_EPFX_IRERR_LINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_IRERR_LINT_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_IRERR_LINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_irerr_rint
+ *
+ * SDP Input Error Status Register
+ * This register indicates if an error has been detected on an input ring.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC Reset.
+ * These registers are not affected by VF FLR.
+ */
union bdk_sdpx_epfx_irerr_rint
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_irerr_rint_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Error has been detected on input ring i. */
#else /* Word 0 - Little Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Error has been detected on input ring i. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_irerr_rint_s cn; */
};
typedef union bdk_sdpx_epfx_irerr_rint bdk_sdpx_epfx_irerr_rint_t;
+
+static inline uint64_t BDK_SDPX_EPFX_IRERR_RINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_IRERR_RINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020080ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_IRERR_RINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_IRERR_RINT(a,b) bdk_sdpx_epfx_irerr_rint_t
+#define bustype_BDK_SDPX_EPFX_IRERR_RINT(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_IRERR_RINT(a,b) "SDPX_EPFX_IRERR_RINT"
+#define device_bar_BDK_SDPX_EPFX_IRERR_RINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_IRERR_RINT(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_IRERR_RINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_irerr_rint_ena_w1c
+ *
+ * SDP Input Error Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
union bdk_sdpx_epfx_irerr_rint_ena_w1c
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_irerr_rint_ena_w1c_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_IRERR_RINT[RING_ERR]. */
#else /* Word 0 - Little Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_IRERR_RINT[RING_ERR]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_irerr_rint_ena_w1c_s cn; */
};
typedef union bdk_sdpx_epfx_irerr_rint_ena_w1c bdk_sdpx_epfx_irerr_rint_ena_w1c_t;
+
+static inline uint64_t BDK_SDPX_EPFX_IRERR_RINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_IRERR_RINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x8740800200a0ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_IRERR_RINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1C(a,b) bdk_sdpx_epfx_irerr_rint_ena_w1c_t
+#define bustype_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1C(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1C(a,b) "SDPX_EPFX_IRERR_RINT_ENA_W1C"
+#define device_bar_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_irerr_rint_ena_w1s
+ *
+ * SDP Input Error Enable Set Register
+ * This register sets interrupt enable bits.
+ */
union bdk_sdpx_epfx_irerr_rint_ena_w1s
{
    uint64_t u; /* Whole register as a raw 64-bit value. */
    struct bdk_sdpx_epfx_irerr_rint_ena_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_IRERR_RINT[RING_ERR]. */
#else /* Word 0 - Little Endian */
        uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_IRERR_RINT[RING_ERR]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_irerr_rint_ena_w1s_s cn; */
};
typedef union bdk_sdpx_epfx_irerr_rint_ena_w1s bdk_sdpx_epfx_irerr_rint_ena_w1s_t;
+
+static inline uint64_t BDK_SDPX_EPFX_IRERR_RINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_IRERR_RINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x8740800200b0ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_IRERR_RINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1S(a,b) bdk_sdpx_epfx_irerr_rint_ena_w1s_t
+#define bustype_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1S(a,b) "SDPX_EPFX_IRERR_RINT_ENA_W1S"
+#define device_bar_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_IRERR_RINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_irerr_rint_w1s
+ *
+ * SDP Input Error Status Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_sdpx_epfx_irerr_rint_w1s
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_irerr_rint_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_IRERR_RINT[RING_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_IRERR_RINT[RING_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_irerr_rint_w1s_s cn; */
+};
+typedef union bdk_sdpx_epfx_irerr_rint_w1s bdk_sdpx_epfx_irerr_rint_w1s_t;
+
+static inline uint64_t BDK_SDPX_EPFX_IRERR_RINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_IRERR_RINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020090ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_IRERR_RINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_IRERR_RINT_W1S(a,b) bdk_sdpx_epfx_irerr_rint_w1s_t
+#define bustype_BDK_SDPX_EPFX_IRERR_RINT_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_IRERR_RINT_W1S(a,b) "SDPX_EPFX_IRERR_RINT_W1S"
+#define device_bar_BDK_SDPX_EPFX_IRERR_RINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_IRERR_RINT_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_IRERR_RINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_mbox_rint
+ *
+ * SDP Mailbox Interrupt Status Register
+ * This register indicates which VF/ring has signaled an interrupt.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC Reset.
+ * These registers are not affected by VF FLR.
+ */
+union bdk_sdpx_epfx_mbox_rint
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_mbox_rint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_num : 64; /**< [ 63: 0](R/W1C/H) Each bit indicates a ring from 0-63. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_num : 64; /**< [ 63: 0](R/W1C/H) Each bit indicates a ring from 0-63. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_mbox_rint_s cn; */
+};
+typedef union bdk_sdpx_epfx_mbox_rint bdk_sdpx_epfx_mbox_rint_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_MBOX_RINT. Only a==0 and b<=1
+   exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_MBOX_RINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_MBOX_RINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020000ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_MBOX_RINT", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SDPX_EPFX_MBOX_RINT(a,b) bdk_sdpx_epfx_mbox_rint_t
+#define bustype_BDK_SDPX_EPFX_MBOX_RINT(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_MBOX_RINT(a,b) "SDPX_EPFX_MBOX_RINT"
+#define device_bar_BDK_SDPX_EPFX_MBOX_RINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_MBOX_RINT(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_MBOX_RINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_mbox_rint_ena_w1c
+ *
+ * SDP Mailbox Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_sdpx_epfx_mbox_rint_ena_w1c
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_mbox_rint_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_num : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_MBOX_RINT[RING_NUM]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_num : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_MBOX_RINT[RING_NUM]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_mbox_rint_ena_w1c_s cn; */
+};
+typedef union bdk_sdpx_epfx_mbox_rint_ena_w1c bdk_sdpx_epfx_mbox_rint_ena_w1c_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_MBOX_RINT_ENA_W1C. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_MBOX_RINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_MBOX_RINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020020ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_MBOX_RINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1C(a,b) bdk_sdpx_epfx_mbox_rint_ena_w1c_t
+#define bustype_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1C(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1C(a,b) "SDPX_EPFX_MBOX_RINT_ENA_W1C"
+#define device_bar_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_mbox_rint_ena_w1s
+ *
+ * SDP Mailbox Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_sdpx_epfx_mbox_rint_ena_w1s
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_mbox_rint_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_num : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_MBOX_RINT[RING_NUM]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_num : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_MBOX_RINT[RING_NUM]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_mbox_rint_ena_w1s_s cn; */
+};
+typedef union bdk_sdpx_epfx_mbox_rint_ena_w1s bdk_sdpx_epfx_mbox_rint_ena_w1s_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_MBOX_RINT_ENA_W1S. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_MBOX_RINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_MBOX_RINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020030ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_MBOX_RINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1S(a,b) bdk_sdpx_epfx_mbox_rint_ena_w1s_t
+#define bustype_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1S(a,b) "SDPX_EPFX_MBOX_RINT_ENA_W1S"
+#define device_bar_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_MBOX_RINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_mbox_rint_w1s
+ *
+ * SDP Mailbox Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_sdpx_epfx_mbox_rint_w1s
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_mbox_rint_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_num : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_MBOX_RINT[RING_NUM]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_num : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_MBOX_RINT[RING_NUM]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_mbox_rint_w1s_s cn; */
+};
+typedef union bdk_sdpx_epfx_mbox_rint_w1s bdk_sdpx_epfx_mbox_rint_w1s_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_MBOX_RINT_W1S. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_MBOX_RINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_MBOX_RINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020010ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_MBOX_RINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_MBOX_RINT_W1S(a,b) bdk_sdpx_epfx_mbox_rint_w1s_t
+#define bustype_BDK_SDPX_EPFX_MBOX_RINT_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_MBOX_RINT_W1S(a,b) "SDPX_EPFX_MBOX_RINT_W1S"
+#define device_bar_BDK_SDPX_EPFX_MBOX_RINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_MBOX_RINT_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_MBOX_RINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_orerr_lint
+ *
+ * SDP Output Error Status Register
+ * This register indicates if an error has been detected on an output ring.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC Reset.
+ * These registers are not affected by VF FLR.
+ */
+union bdk_sdpx_epfx_orerr_lint
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_orerr_lint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Error has been detected on output ring i. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Error has been detected on output ring i. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_orerr_lint_s cn; */
+};
+typedef union bdk_sdpx_epfx_orerr_lint bdk_sdpx_epfx_orerr_lint_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_ORERR_LINT (NCB view, 0x10 stride).
+   Only a==0 and b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_ORERR_LINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_ORERR_LINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880800ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_ORERR_LINT", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SDPX_EPFX_ORERR_LINT(a,b) bdk_sdpx_epfx_orerr_lint_t
+#define bustype_BDK_SDPX_EPFX_ORERR_LINT(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_ORERR_LINT(a,b) "SDPX_EPFX_ORERR_LINT"
+#define device_bar_BDK_SDPX_EPFX_ORERR_LINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_ORERR_LINT(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_ORERR_LINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_orerr_lint_ena_w1c
+ *
+ * SDP Output Error Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_sdpx_epfx_orerr_lint_ena_w1c
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_orerr_lint_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_ORERR_LINT[RING_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_ORERR_LINT[RING_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_orerr_lint_ena_w1c_s cn; */
+};
+typedef union bdk_sdpx_epfx_orerr_lint_ena_w1c bdk_sdpx_epfx_orerr_lint_ena_w1c_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_ORERR_LINT_ENA_W1C. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_ORERR_LINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_ORERR_LINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880a00ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_ORERR_LINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1C(a,b) bdk_sdpx_epfx_orerr_lint_ena_w1c_t
+#define bustype_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1C(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1C(a,b) "SDPX_EPFX_ORERR_LINT_ENA_W1C"
+#define device_bar_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_orerr_lint_ena_w1s
+ *
+ * SDP Output Error Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_sdpx_epfx_orerr_lint_ena_w1s
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_orerr_lint_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_ORERR_LINT[RING_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_ORERR_LINT[RING_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_orerr_lint_ena_w1s_s cn; */
+};
+typedef union bdk_sdpx_epfx_orerr_lint_ena_w1s bdk_sdpx_epfx_orerr_lint_ena_w1s_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_ORERR_LINT_ENA_W1S. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_ORERR_LINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_ORERR_LINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880b00ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_ORERR_LINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1S(a,b) bdk_sdpx_epfx_orerr_lint_ena_w1s_t
+#define bustype_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1S(a,b) "SDPX_EPFX_ORERR_LINT_ENA_W1S"
+#define device_bar_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_ORERR_LINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sdp#_epf#_orerr_lint_w1s
+ *
+ * SDP Output Error Status Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_sdpx_epfx_orerr_lint_w1s
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_orerr_lint_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_ORERR_LINT[RING_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_ORERR_LINT[RING_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_orerr_lint_w1s_s cn; */
+};
+typedef union bdk_sdpx_epfx_orerr_lint_w1s bdk_sdpx_epfx_orerr_lint_w1s_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_ORERR_LINT_W1S. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_ORERR_LINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_ORERR_LINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000880900ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_ORERR_LINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_ORERR_LINT_W1S(a,b) bdk_sdpx_epfx_orerr_lint_w1s_t
+#define bustype_BDK_SDPX_EPFX_ORERR_LINT_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SDPX_EPFX_ORERR_LINT_W1S(a,b) "SDPX_EPFX_ORERR_LINT_W1S"
+#define device_bar_BDK_SDPX_EPFX_ORERR_LINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_ORERR_LINT_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_ORERR_LINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_orerr_rint
+ *
+ * SDP Output Error Status Register
+ * This register indicates if an error has been detected on an output ring.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC Reset.
+ * These registers are not affected by VF FLR.
+ */
+union bdk_sdpx_epfx_orerr_rint
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_orerr_rint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Error has been detected on output ring i. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Error has been detected on output ring i. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_orerr_rint_s cn; */
+};
+typedef union bdk_sdpx_epfx_orerr_rint bdk_sdpx_epfx_orerr_rint_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_ORERR_RINT (PEXP_NCB view, 0x800000
+   EPF stride). Only a==0 and b<=1 exist on CN83XX; any other index reports a
+   fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_ORERR_RINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_ORERR_RINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020100ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_ORERR_RINT", 2, a, b, 0, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SDPX_EPFX_ORERR_RINT(a,b) bdk_sdpx_epfx_orerr_rint_t
+#define bustype_BDK_SDPX_EPFX_ORERR_RINT(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_ORERR_RINT(a,b) "SDPX_EPFX_ORERR_RINT"
+#define device_bar_BDK_SDPX_EPFX_ORERR_RINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_ORERR_RINT(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_ORERR_RINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_orerr_rint_ena_w1c
+ *
+ * SDP Output Error Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_sdpx_epfx_orerr_rint_ena_w1c
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_orerr_rint_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_ORERR_RINT[RING_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SDP(0)_EPF(0..1)_ORERR_RINT[RING_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_orerr_rint_ena_w1c_s cn; */
+};
+typedef union bdk_sdpx_epfx_orerr_rint_ena_w1c bdk_sdpx_epfx_orerr_rint_ena_w1c_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_ORERR_RINT_ENA_W1C. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_ORERR_RINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_ORERR_RINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020120ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_ORERR_RINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1C(a,b) bdk_sdpx_epfx_orerr_rint_ena_w1c_t
+#define bustype_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1C(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1C(a,b) "SDPX_EPFX_ORERR_RINT_ENA_W1C"
+#define device_bar_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_orerr_rint_ena_w1s
+ *
+ * SDP Output Error Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_sdpx_epfx_orerr_rint_ena_w1s
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_orerr_rint_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_ORERR_RINT[RING_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SDP(0)_EPF(0..1)_ORERR_RINT[RING_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_orerr_rint_ena_w1s_s cn; */
+};
+typedef union bdk_sdpx_epfx_orerr_rint_ena_w1s bdk_sdpx_epfx_orerr_rint_ena_w1s_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_ORERR_RINT_ENA_W1S. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_ORERR_RINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_ORERR_RINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020130ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_ORERR_RINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1S(a,b) bdk_sdpx_epfx_orerr_rint_ena_w1s_t
+#define bustype_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1S(a,b) "SDPX_EPFX_ORERR_RINT_ENA_W1S"
+#define device_bar_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_ORERR_RINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_orerr_rint_w1s
+ *
+ * SDP Output Error Status Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_sdpx_epfx_orerr_rint_w1s
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_orerr_rint_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_ORERR_RINT[RING_ERR]. */
+#else /* Word 0 - Little Endian */
+ uint64_t ring_err : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SDP(0)_EPF(0..1)_ORERR_RINT[RING_ERR]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_orerr_rint_w1s_s cn; */
+};
+typedef union bdk_sdpx_epfx_orerr_rint_w1s bdk_sdpx_epfx_orerr_rint_w1s_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_ORERR_RINT_W1S. Only a==0 and
+   b<=1 exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_ORERR_RINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_ORERR_RINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080020110ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SDPX_EPFX_ORERR_RINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_ORERR_RINT_W1S(a,b) bdk_sdpx_epfx_orerr_rint_w1s_t
+#define bustype_BDK_SDPX_EPFX_ORERR_RINT_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_ORERR_RINT_W1S(a,b) "SDPX_EPFX_ORERR_RINT_W1S"
+#define device_bar_BDK_SDPX_EPFX_ORERR_RINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_ORERR_RINT_W1S(a,b) (a)
+#define arguments_BDK_SDPX_EPFX_ORERR_RINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_all_int_status
+ *
+ * SDP Combined Interrupt Summary Status Register
+ * This register contains interrupt status on a per-VF basis. All rings for a given VF
+ * are located in a single register. Note that access to any ring offset within a given
+ * VF will return the same value. When the PF reads any ring in this register it will
+ * return the same value (64 bits each representing one ring.)
+ *
+ * Internal:
+ * These interrupt bits may be set for some rings even after a PF/VF FLR.
+ * They are not cleared becase the CNTS and LEVELS registers are not reset
+ * and we wish to make the interrupt state consistent with CNTS/LEVELS even after FLR.
+ * The CNTS register must be cleared by software as part of initialization after a reset
+ * (including FLR) which will cause the interrupt state in this register to clear.
+ */
+union bdk_sdpx_epfx_rx_all_int_status
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_rx_all_int_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t intr : 64; /**< [ 63: 0](RO) These bits are interpreted differently for PF access and VF access.
+
+ For a PF read:
+
+ Each of the 64 bits corresponds to a ring number that is signalling an
+ interrupt. [INTR]\<ring\> reads as one whenever any of the following are true for
+ the respective ring R(ring):
+
+ * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT],
+ * SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET],
+ * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT],
+ * Or, SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set.
+
+ Reading this register will isolate the ring(s) that is signalling the interrupt.
+ To determine the specific interrupt, other registers must be read.
+
+ For a VF read:
+
+ In this mode, this register identifies the ring number "i" and specific
+ interrupt being signaled.
+
+ Bits \<7:0\> indicate an input interrupt being signaled, where bit i is set if
+ for the respective ring R(i):
+ * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT].
+
+ Bits \<15:8\> indicate an output interrupt being signaled, where bit i is set if
+ for the respective ring R(i):
+ * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+ * Or, SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+
+ Bits \<23:16\> indicate a mailbox interrupt being signaled, where bit i is set if
+ for the respective ring R(i):
+ * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set.
+
+ Bits \<63:24\> are reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t intr : 64; /**< [ 63: 0](RO) These bits are interpreted differently for PF access and VF access.
+
+ For a PF read:
+
+ Each of the 64 bits corresponds to a ring number that is signalling an
+ interrupt. [INTR]\<ring\> reads as one whenever any of the following are true for
+ the respective ring R(ring):
+
+ * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT],
+ * SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET],
+ * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT],
+ * Or, SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set.
+
+ Reading this register will isolate the ring(s) that is signalling the interrupt.
+ To determine the specific interrupt, other registers must be read.
+
+ For a VF read:
+
+ In this mode, this register identifies the ring number "i" and specific
+ interrupt being signaled.
+
+ Bits \<7:0\> indicate an input interrupt being signaled, where bit i is set if
+ for the respective ring R(i):
+ * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT].
+
+ Bits \<15:8\> indicate an output interrupt being signaled, where bit i is set if
+ for the respective ring R(i):
+ * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+ * Or, SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+
+ Bits \<23:16\> indicate a mailbox interrupt being signaled, where bit i is set if
+ for the respective ring R(i):
+ * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set.
+
+ Bits \<63:24\> are reserved. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_rx_all_int_status_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_all_int_status bdk_sdpx_epfx_rx_all_int_status_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_R(c)_ALL_INT_STATUS. Only a==0,
+   b<=1 and c<=63 (ring) exist on CN83XX; any other index reports a fatal
+   error. */
+static inline uint64_t BDK_SDPX_EPFX_RX_ALL_INT_STATUS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_ALL_INT_STATUS(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+ return 0x874080010300ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+ __bdk_csr_fatal("SDPX_EPFX_RX_ALL_INT_STATUS", 3, a, b, c, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SDPX_EPFX_RX_ALL_INT_STATUS(a,b,c) bdk_sdpx_epfx_rx_all_int_status_t
+#define bustype_BDK_SDPX_EPFX_RX_ALL_INT_STATUS(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_ALL_INT_STATUS(a,b,c) "SDPX_EPFX_RX_ALL_INT_STATUS"
+#define device_bar_BDK_SDPX_EPFX_RX_ALL_INT_STATUS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_ALL_INT_STATUS(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_ALL_INT_STATUS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_err_type
+ *
+ * SDP Ring Error Type Register
+ * These registers indicate which type of error(s) have been detected when
+ * SDP()_EPF()_IRERR_LINT\<i\> / SDP()_EPF()_IRERR_RINT\<i\> / SDP()_EPF()_ORERR_LINT\<i\> /
+ * SDP()_EPF()_ORERR_RINT\<i\> is set. Multiple bits can be set at the same time
+ * if multiple errors have occurred for that ring.
+ * (NOTE(review): the original text listed ORERR_RINT twice; the second entry is
+ * assumed to be IRERR_RINT, matching the register set defined in this file.)
+ *
+ * All 64 registers associated with an EPF will be reset due to a PF FLR or MAC Reset.
+ * These registers are not affected by VF FLR.
+ */
+union bdk_sdpx_epfx_rx_err_type
+{
+ uint64_t u;
+ struct bdk_sdpx_epfx_rx_err_type_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_35_63 : 29;
+ uint64_t port_dis : 1; /**< [ 34: 34](R/W1C/H) Output packet arrives targeting a port which is not enabled. */
+ uint64_t dbell_empty : 1; /**< [ 33: 33](R/W1C/H) The watermark value is set too small, allowing doorbell count to drop below 8. */
+ uint64_t oring_dma_err : 1; /**< [ 32: 32](R/W1C/H) DMA read error response on output pointer pair fetch. */
+ uint64_t reserved_8_31 : 24;
+ uint64_t illegal_fsz : 1; /**< [ 7: 7](R/W1C/H) Illegal FSZ specified in instruction.
+ For direct gather, FSZ must be \<= 32 for 64B instructions and 0 for 32B instructions.
+ For direct data/indirect gather, FSZ must be \<= 55 for 64B instructions and \<= 23 for 32B
+ instructions. This check is done before any length checks. */
+ uint64_t pkt_dma_err : 1; /**< [ 6: 6](R/W1C/H) DMA read error response on packet fetch. */
+ uint64_t inst_dma_err : 1; /**< [ 5: 5](R/W1C/H) DMA read error response on instruction fetch. */
+ uint64_t pkt_toosmall : 1; /**< [ 4: 4](R/W1C/H) Attempted packet read with LEN=0 or LEN \< FSZ. */
+ uint64_t dir_len_toosmall : 1; /**< [ 3: 3](R/W1C/H) Direct gather combined LEN fields are less than the packet length specified. */
+ uint64_t ind_dma_err : 1; /**< [ 2: 2](R/W1C/H) DMA read error response on indirect gather list fetch. This could also be caused by
+ an unaligned gather list, in which case SDP()_DIAG[IN_IND_UNALIGNED] will also be set. */
+ uint64_t ind_zero_det : 1; /**< [ 1: 1](R/W1C/H) Indirect gather list contains length of 0. */
+ uint64_t ind_toosmall : 1; /**< [ 0: 0](R/W1C/H) Indirect gather list length specified less than (packet length - FSZ) in instruction. */
+#else /* Word 0 - Little Endian */
+ uint64_t ind_toosmall : 1; /**< [ 0: 0](R/W1C/H) Indirect gather list length specified less than (packet length - FSZ) in instruction. */
+ uint64_t ind_zero_det : 1; /**< [ 1: 1](R/W1C/H) Indirect gather list contains length of 0. */
+ uint64_t ind_dma_err : 1; /**< [ 2: 2](R/W1C/H) DMA read error response on indirect gather list fetch. This could also be caused by
+ an unaligned gather list, in which case SDP()_DIAG[IN_IND_UNALIGNED] will also be set. */
+ uint64_t dir_len_toosmall : 1; /**< [ 3: 3](R/W1C/H) Direct gather combined LEN fields are less than the packet length specified. */
+ uint64_t pkt_toosmall : 1; /**< [ 4: 4](R/W1C/H) Attempted packet read with LEN=0 or LEN \< FSZ. */
+ uint64_t inst_dma_err : 1; /**< [ 5: 5](R/W1C/H) DMA read error response on instruction fetch. */
+ uint64_t pkt_dma_err : 1; /**< [ 6: 6](R/W1C/H) DMA read error response on packet fetch. */
+ uint64_t illegal_fsz : 1; /**< [ 7: 7](R/W1C/H) Illegal FSZ specified in instruction.
+ For direct gather, FSZ must be \<= 32 for 64B instructions and 0 for 32B instructions.
+ For direct data/indirect gather, FSZ must be \<= 55 for 64B instructions and \<= 23 for 32B
+ instructions. This check is done before any length checks. */
+ uint64_t reserved_8_31 : 24;
+ uint64_t oring_dma_err : 1; /**< [ 32: 32](R/W1C/H) DMA read error response on output pointer pair fetch. */
+ uint64_t dbell_empty : 1; /**< [ 33: 33](R/W1C/H) The watermark value is set too small, allowing doorbell count to drop below 8. */
+ uint64_t port_dis : 1; /**< [ 34: 34](R/W1C/H) Output packet arrives targeting a port which is not enabled. */
+ uint64_t reserved_35_63 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_sdpx_epfx_rx_err_type_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_err_type bdk_sdpx_epfx_rx_err_type_t;
+
+/* Physical CSR address of SDP(a)_EPF(b)_R(c)_ERR_TYPE. Only a==0, b<=1 and
+   c<=63 (ring) exist on CN83XX; any other index reports a fatal error. */
+static inline uint64_t BDK_SDPX_EPFX_RX_ERR_TYPE(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_ERR_TYPE(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+ return 0x874080010400ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+ __bdk_csr_fatal("SDPX_EPFX_RX_ERR_TYPE", 3, a, b, c, 0);
+}
+
+/* Metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_SDPX_EPFX_RX_ERR_TYPE(a,b,c) bdk_sdpx_epfx_rx_err_type_t
+#define bustype_BDK_SDPX_EPFX_RX_ERR_TYPE(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_ERR_TYPE(a,b,c) "SDPX_EPFX_RX_ERR_TYPE"
+#define device_bar_BDK_SDPX_EPFX_RX_ERR_TYPE(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_ERR_TYPE(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_ERR_TYPE(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_byte_cnt
+ *
+ * SDP Packet Input Byte Count Register
+ * This register contains byte counts per ring that have been read into SDP.
+ * The counter will wrap when it reaches its maximum value. It should be cleared
+ * before the ring is enabled for an accurate count.
+ */
+union bdk_sdpx_epfx_rx_in_byte_cnt
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_byte_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t cnt                   : 48; /**< [ 47:  0](R/W/H) Byte count, can be reset by software by writing SDP()_EPF()_R()_IN_PKT_CNT[CNT]
+                                                                 with 0xFFFFFFFFF. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 48; /**< [ 47:  0](R/W/H) Byte count, can be reset by software by writing SDP()_EPF()_R()_IN_PKT_CNT[CNT]
+                                                                 with 0xFFFFFFFFF. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_byte_cnt_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_byte_cnt bdk_sdpx_epfx_rx_in_byte_cnt_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_BYTE_CNT.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_BYTE_CNT(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_BYTE_CNT(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010090ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_BYTE_CNT", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_BYTE_CNT(a,b,c) bdk_sdpx_epfx_rx_in_byte_cnt_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_BYTE_CNT(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_BYTE_CNT(a,b,c) "SDPX_EPFX_RX_IN_BYTE_CNT"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_BYTE_CNT(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_BYTE_CNT(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_BYTE_CNT(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_cnts
+ *
+ * SDP Input Instruction Ring Counts Register
+ * This register contains the counters for the input instruction rings.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring.
+ */
+union bdk_sdpx_epfx_rx_in_cnts
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_cnts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_63           : 1;
+        uint64_t out_int               : 1;  /**< [ 62: 62](RO/H) Returns a 1 when:
+                                                                 * SDP()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+                                                                 * Or, SDP()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+
+                                                                 To clear the bit, the CNTS register must be written to clear the underlying condition. */
+        uint64_t in_int                : 1;  /**< [ 61: 61](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT]
+
+                                                                 To clear the bit, the SDP()_EPF()_R()_IN_CNTS register must be written to clear the
+                                                                 underlying condition. */
+        uint64_t mbox_int              : 1;  /**< [ 60: 60](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set
+
+                                                                 To clear the bit, write SDP()_EPF()_R()_MBOX_PF_VF_INT[INTR] with 1.
+                                                                 This bit is also cleared due to an FLR. */
+        uint64_t resend                : 1;  /**< [ 59: 59](WO/H) A write of 1 will resend an MSI-X interrupt message if any of the following
+                                                                 conditions are true for the respective ring:
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT].
+                                                                 * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set. */
+        uint64_t reserved_32_58        : 27;
+        uint64_t cnt                   : 32; /**< [ 31:  0](R/W/H) Packet counter. Hardware adds to [CNT] as it reads packets. On a write
+                                                                 to this CSR, hardware subtracts the amount written to the [CNT] field from
+                                                                 [CNT], which will clear PKT_IN()_INT_STATUS[INTR] if [CNT] becomes \<=
+                                                                 SDP()_EPF()_R()_IN_INT_LEVELS[CNT]. This register should be cleared before
+                                                                 enabling a ring by reading the current value and writing it back. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 32; /**< [ 31:  0](R/W/H) Packet counter. Hardware adds to [CNT] as it reads packets. On a write
+                                                                 to this CSR, hardware subtracts the amount written to the [CNT] field from
+                                                                 [CNT], which will clear PKT_IN()_INT_STATUS[INTR] if [CNT] becomes \<=
+                                                                 SDP()_EPF()_R()_IN_INT_LEVELS[CNT]. This register should be cleared before
+                                                                 enabling a ring by reading the current value and writing it back. */
+        uint64_t reserved_32_58        : 27;
+        uint64_t resend                : 1;  /**< [ 59: 59](WO/H) A write of 1 will resend an MSI-X interrupt message if any of the following
+                                                                 conditions are true for the respective ring:
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT].
+                                                                 * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set. */
+        uint64_t mbox_int              : 1;  /**< [ 60: 60](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set
+
+                                                                 To clear the bit, write SDP()_EPF()_R()_MBOX_PF_VF_INT[INTR] with 1.
+                                                                 This bit is also cleared due to an FLR. */
+        uint64_t in_int                : 1;  /**< [ 61: 61](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT]
+
+                                                                 To clear the bit, the SDP()_EPF()_R()_IN_CNTS register must be written to clear the
+                                                                 underlying condition. */
+        uint64_t out_int               : 1;  /**< [ 62: 62](RO/H) Returns a 1 when:
+                                                                 * SDP()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+                                                                 * Or, SDP()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+
+                                                                 To clear the bit, the CNTS register must be written to clear the underlying condition. */
+        uint64_t reserved_63           : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_cnts_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_cnts bdk_sdpx_epfx_rx_in_cnts_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_CNTS.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_CNTS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_CNTS(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010050ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_CNTS", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_CNTS(a,b,c) bdk_sdpx_epfx_rx_in_cnts_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_CNTS(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_CNTS(a,b,c) "SDPX_EPFX_RX_IN_CNTS"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_CNTS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_CNTS(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_CNTS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_control
+ *
+ * SDP Input Instruction Ring Control Register
+ * This register is the control for read operations on the input instruction rings.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring. Also, this register cannot be written
+ * while either of the following conditions is true:
+ * * [IDLE] is clear.
+ * * Or, SDP()_EPF()_R()_IN_ENABLE[ENB] is set.
+ */
+union bdk_sdpx_epfx_rx_in_control
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_control_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_52_63        : 12;
+        uint64_t rpvf                  : 4;  /**< [ 51: 48](RO/H) The number of rings assigned to this VF.
+                                                                 Read only copy of SDP()_EPF()_RINFO[RPVF] */
+        uint64_t reserved_29_47        : 19;
+        uint64_t idle                  : 1;  /**< [ 28: 28](RO/H) Asserted when this ring has no packets in-flight. */
+        uint64_t reserved_27           : 1;
+        uint64_t rdsize                : 2;  /**< [ 26: 25](R/W) Number of instructions to be read in one read request. Two-bit values are:
+                                                                 0x0 = 1 instruction.
+                                                                 0x1 = 2 instructions.
+                                                                 0x2 = 4 instructions.
+                                                                 0x3 = 8 instructions. */
+        uint64_t is64b                 : 1;  /**< [ 24: 24](R/W) If 1, the ring uses 64-byte instructions.
+                                                                 If 0, the ring uses 32-byte instructions. */
+        uint64_t reserved_9_23         : 15;
+        uint64_t d_nsr                 : 1;  /**< [  8:  8](R/W/H) [D_NSR] is ADDRTYPE\<1\> for first direct and gather DPTR reads. ADDRTYPE\<1\> is the
+                                                                 no-snoop attribute for PCIe. */
+        uint64_t d_esr                 : 2;  /**< [  7:  6](R/W/H) [D_ESR] is ES\<1:0\> for first direct and gather DPTR reads.
+                                                                 ES\<1:0\> is the endian-swap attribute for these MAC memory space reads.
+                                                                 Enumerated by SLI_ENDIANSWAP_E. */
+        uint64_t d_ror                 : 1;  /**< [  5:  5](R/W/H) [D_ROR] is ADDRTYPE\<0\> for first direct and gather DPTR reads. ADDRTYPE\<0\> is the
+                                                                 relaxed-order attribute for PCIe. */
+        uint64_t reserved_4            : 1;
+        uint64_t nsr                   : 1;  /**< [  3:  3](R/W/H) [NSR] is ADDRTYPE\<1\> for input instruction reads (from
+                                                                 SDP()_EPF()_R()_IN_INSTR_BADDR) and first indirect DPTR reads. ADDRTYPE\<1\>
+                                                                 is the no-snoop attribute for PCIe. */
+        uint64_t esr                   : 2;  /**< [  2:  1](R/W/H) [ESR] is ES\<1:0\> for input instruction reads (from
+                                                                 SDP()_EPF()_R()_IN_INSTR_BADDR) and first indirect DPTR reads. ES\<1:0\> is
+                                                                 the endian-swap attribute for these MAC memory space reads.
+                                                                 Enumerated by SLI_ENDIANSWAP_E. */
+        uint64_t ror                   : 1;  /**< [  0:  0](R/W/H) [ROR] is ADDRTYPE\<0\> for input instruction reads (from
+                                                                 SDP()_EPF()_R()_IN_INSTR_BADDR) and first indirect DPTR reads.
+                                                                 ADDRTYPE\<0\> is the relaxed-order attribute for PCIe. */
+#else /* Word 0 - Little Endian */
+        uint64_t ror                   : 1;  /**< [  0:  0](R/W/H) [ROR] is ADDRTYPE\<0\> for input instruction reads (from
+                                                                 SDP()_EPF()_R()_IN_INSTR_BADDR) and first indirect DPTR reads.
+                                                                 ADDRTYPE\<0\> is the relaxed-order attribute for PCIe. */
+        uint64_t esr                   : 2;  /**< [  2:  1](R/W/H) [ESR] is ES\<1:0\> for input instruction reads (from
+                                                                 SDP()_EPF()_R()_IN_INSTR_BADDR) and first indirect DPTR reads. ES\<1:0\> is
+                                                                 the endian-swap attribute for these MAC memory space reads.
+                                                                 Enumerated by SLI_ENDIANSWAP_E. */
+        uint64_t nsr                   : 1;  /**< [  3:  3](R/W/H) [NSR] is ADDRTYPE\<1\> for input instruction reads (from
+                                                                 SDP()_EPF()_R()_IN_INSTR_BADDR) and first indirect DPTR reads. ADDRTYPE\<1\>
+                                                                 is the no-snoop attribute for PCIe. */
+        uint64_t reserved_4            : 1;
+        uint64_t d_ror                 : 1;  /**< [  5:  5](R/W/H) [D_ROR] is ADDRTYPE\<0\> for first direct and gather DPTR reads. ADDRTYPE\<0\> is the
+                                                                 relaxed-order attribute for PCIe. */
+        uint64_t d_esr                 : 2;  /**< [  7:  6](R/W/H) [D_ESR] is ES\<1:0\> for first direct and gather DPTR reads.
+                                                                 ES\<1:0\> is the endian-swap attribute for these MAC memory space reads.
+                                                                 Enumerated by SLI_ENDIANSWAP_E. */
+        uint64_t d_nsr                 : 1;  /**< [  8:  8](R/W/H) [D_NSR] is ADDRTYPE\<1\> for first direct and gather DPTR reads. ADDRTYPE\<1\> is the
+                                                                 no-snoop attribute for PCIe. */
+        uint64_t reserved_9_23         : 15;
+        uint64_t is64b                 : 1;  /**< [ 24: 24](R/W) If 1, the ring uses 64-byte instructions.
+                                                                 If 0, the ring uses 32-byte instructions. */
+        uint64_t rdsize                : 2;  /**< [ 26: 25](R/W) Number of instructions to be read in one read request. Two-bit values are:
+                                                                 0x0 = 1 instruction.
+                                                                 0x1 = 2 instructions.
+                                                                 0x2 = 4 instructions.
+                                                                 0x3 = 8 instructions. */
+        uint64_t reserved_27           : 1;
+        uint64_t idle                  : 1;  /**< [ 28: 28](RO/H) Asserted when this ring has no packets in-flight. */
+        uint64_t reserved_29_47        : 19;
+        uint64_t rpvf                  : 4;  /**< [ 51: 48](RO/H) The number of rings assigned to this VF.
+                                                                 Read only copy of SDP()_EPF()_RINFO[RPVF] */
+        uint64_t reserved_52_63        : 12;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_control_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_control bdk_sdpx_epfx_rx_in_control_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_CONTROL.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_CONTROL(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_CONTROL(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010000ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_CONTROL", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_CONTROL(a,b,c) bdk_sdpx_epfx_rx_in_control_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_CONTROL(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_CONTROL(a,b,c) "SDPX_EPFX_RX_IN_CONTROL"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_CONTROL(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_CONTROL(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_CONTROL(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_enable
+ *
+ * SDP Input Instruction Ring Enable Register
+ * This register is the enable for read operations on the input instruction rings.
+ *
+ * NOTE(review): the [ENB] description below says the bit may not be set unless
+ * SDP()_EPF()_R()_IN_CONTROL[IDLE] == 0, yet [IDLE] is documented as asserted (1)
+ * when the ring is idle — the polarity looks inverted; confirm against the
+ * CN83XX hardware reference manual before relying on it.
+ */
+union bdk_sdpx_epfx_rx_in_enable
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_enable_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t enb                   : 1;  /**< [  0:  0](R/W/H) Enable for the input ring. Various errors and FLR events can clear this bit.
+                                                                 Software can also clear this bit at anytime. The bit may not be set unless
+                                                                 SDP()_EPF()_R()_IN_CONTROL[IDLE] == 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t enb                   : 1;  /**< [  0:  0](R/W/H) Enable for the input ring. Various errors and FLR events can clear this bit.
+                                                                 Software can also clear this bit at anytime. The bit may not be set unless
+                                                                 SDP()_EPF()_R()_IN_CONTROL[IDLE] == 0. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_enable_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_enable bdk_sdpx_epfx_rx_in_enable_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_ENABLE.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_ENABLE(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_ENABLE(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010010ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_ENABLE", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_ENABLE(a,b,c) bdk_sdpx_epfx_rx_in_enable_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_ENABLE(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_ENABLE(a,b,c) "SDPX_EPFX_RX_IN_ENABLE"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_ENABLE(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_ENABLE(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_ENABLE(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_instr_baddr
+ *
+ * SDP Input Instruction Ring Base Address Register
+ * This register contains the base address for the input instruction ring.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring. Also, this register cannot be written
+ * while either of the following conditions is true:
+ * * SDP()_EPF()_R()_IN_CONTROL[IDLE] is clear.
+ * * Or, SDP()_EPF()_R()_IN_ENABLE[ENB] is set.
+ */
+union bdk_sdpx_epfx_rx_in_instr_baddr
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_instr_baddr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t addr                  : 60; /**< [ 63:  4](R/W) Base address for input instruction ring. Must be 16-byte aligned. */
+        uint64_t reserved_0_3          : 4;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_3          : 4;
+        uint64_t addr                  : 60; /**< [ 63:  4](R/W) Base address for input instruction ring. Must be 16-byte aligned. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_instr_baddr_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_instr_baddr bdk_sdpx_epfx_rx_in_instr_baddr_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_INSTR_BADDR.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INSTR_BADDR(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INSTR_BADDR(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010020ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_INSTR_BADDR", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_INSTR_BADDR(a,b,c) bdk_sdpx_epfx_rx_in_instr_baddr_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_INSTR_BADDR(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_INSTR_BADDR(a,b,c) "SDPX_EPFX_RX_IN_INSTR_BADDR"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_INSTR_BADDR(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_INSTR_BADDR(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_INSTR_BADDR(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_instr_dbell
+ *
+ * SDP Input Instruction Ring Input Doorbell Registers
+ * This register contains the doorbell and base-address offset for the next read operation.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring.
+ */
+union bdk_sdpx_epfx_rx_in_instr_dbell
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_instr_dbell_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t aoff                  : 32; /**< [ 63: 32](RO/H) Address offset. The offset from the SDP()_EPF()_R()_IN_INSTR_BADDR where the
+                                                                 next pointer is read. A write of 0xFFFFFFFF to [DBELL] clears [DBELL] and [AOFF]. */
+        uint64_t dbell                 : 32; /**< [ 31:  0](R/W/H) Pointer list doorbell count. Write operations to this field increments the present
+                                                                 value here. Read operations return the present value. The value of this field is
+                                                                 decremented as read operations are issued for instructions. A write of 0xFFFFFFFF
+                                                                 to this field clears [DBELL] and [AOFF]. This register should be cleared before
+                                                                 enabling a ring. */
+#else /* Word 0 - Little Endian */
+        uint64_t dbell                 : 32; /**< [ 31:  0](R/W/H) Pointer list doorbell count. Write operations to this field increments the present
+                                                                 value here. Read operations return the present value. The value of this field is
+                                                                 decremented as read operations are issued for instructions. A write of 0xFFFFFFFF
+                                                                 to this field clears [DBELL] and [AOFF]. This register should be cleared before
+                                                                 enabling a ring. */
+        uint64_t aoff                  : 32; /**< [ 63: 32](RO/H) Address offset. The offset from the SDP()_EPF()_R()_IN_INSTR_BADDR where the
+                                                                 next pointer is read. A write of 0xFFFFFFFF to [DBELL] clears [DBELL] and [AOFF]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_instr_dbell_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_instr_dbell bdk_sdpx_epfx_rx_in_instr_dbell_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_INSTR_DBELL.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INSTR_DBELL(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INSTR_DBELL(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010040ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_INSTR_DBELL", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_INSTR_DBELL(a,b,c) bdk_sdpx_epfx_rx_in_instr_dbell_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_INSTR_DBELL(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_INSTR_DBELL(a,b,c) "SDPX_EPFX_RX_IN_INSTR_DBELL"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_INSTR_DBELL(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_INSTR_DBELL(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_INSTR_DBELL(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_instr_rsize
+ *
+ * SDP Input Instruction Ring Size Register
+ * This register contains the input instruction ring size.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring. Also, this register cannot be written
+ * while either of the following conditions is true:
+ * * SDP()_EPF()_R()_IN_CONTROL[IDLE] is clear.
+ * * or, SDP()_EPF()_R()_IN_ENABLE[ENB] is set.
+ */
+union bdk_sdpx_epfx_rx_in_instr_rsize
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_instr_rsize_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63        : 32;
+        uint64_t rsize                 : 32; /**< [ 31:  0](R/W) Ring size (number of instructions). */
+#else /* Word 0 - Little Endian */
+        uint64_t rsize                 : 32; /**< [ 31:  0](R/W) Ring size (number of instructions). */
+        uint64_t reserved_32_63        : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_instr_rsize_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_instr_rsize bdk_sdpx_epfx_rx_in_instr_rsize_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_INSTR_RSIZE.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INSTR_RSIZE(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INSTR_RSIZE(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010030ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_INSTR_RSIZE", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_INSTR_RSIZE(a,b,c) bdk_sdpx_epfx_rx_in_instr_rsize_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_INSTR_RSIZE(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_INSTR_RSIZE(a,b,c) "SDPX_EPFX_RX_IN_INSTR_RSIZE"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_INSTR_RSIZE(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_INSTR_RSIZE(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_INSTR_RSIZE(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_int_levels
+ *
+ * SDP Input Instruction Interrupt Levels Register
+ * This register contains input instruction interrupt levels.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring.
+ */
+union bdk_sdpx_epfx_rx_in_int_levels
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_int_levels_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63        : 32;
+        uint64_t cnt                   : 32; /**< [ 31:  0](R/W) Input packet counter interrupt threshold. An MSI-X interrupt will be generated
+                                                                 whenever SDP()_EPF()_R()_IN_CNTS[CNT] \> [CNT]. Whenever software changes the value of
+                                                                 [CNT], it should also subsequently write the corresponding SDP()_R()_IN_CNTS[CNT] CSR
+                                                                 (with a value of zero if desired) to ensure that the hardware correspondingly updates
+                                                                 SDP()_EPF()_R()_IN_CNTS[IN_INT] */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 32; /**< [ 31:  0](R/W) Input packet counter interrupt threshold. An MSI-X interrupt will be generated
+                                                                 whenever SDP()_EPF()_R()_IN_CNTS[CNT] \> [CNT]. Whenever software changes the value of
+                                                                 [CNT], it should also subsequently write the corresponding SDP()_R()_IN_CNTS[CNT] CSR
+                                                                 (with a value of zero if desired) to ensure that the hardware correspondingly updates
+                                                                 SDP()_EPF()_R()_IN_CNTS[IN_INT] */
+        uint64_t reserved_32_63        : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_int_levels_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_int_levels bdk_sdpx_epfx_rx_in_int_levels_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_INT_LEVELS.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INT_LEVELS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INT_LEVELS(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010060ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_INT_LEVELS", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_INT_LEVELS(a,b,c) bdk_sdpx_epfx_rx_in_int_levels_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_INT_LEVELS(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_INT_LEVELS(a,b,c) "SDPX_EPFX_RX_IN_INT_LEVELS"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_INT_LEVELS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_INT_LEVELS(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_INT_LEVELS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_int_status
+ *
+ * SDP Ring Input Packet Interrupt Status Register
+ * This register contains interrupt status on a per-VF basis. All rings for a given VF
+ * are located in a single register. Note that access to any ring offset within a given
+ * VF will return the same value. When the PF reads any ring in this register it will
+ * return the same value (64 bits each representing one ring.)
+ */
+union bdk_sdpx_epfx_rx_in_int_status
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_int_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t intr                  : 64; /**< [ 63:  0](RO) Interrupt bits for VF rings (0..i). [INTR[i]] reads as one whenever:
+
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT]
+
+                                                                 [INTR] can cause an MSI-X interrupt.
+
+                                                                 Note that "i" depends on the SDP()_EPF()_RINFO configuration.
+
+                                                                 Internal:
+                                                                 These interrupt bits are not cleared due to FLR because the CNTS and
+                                                                 LEVELS registers are not reset and we wish to make the interrupt state
+                                                                 consistent with CNTS/LEVELS even after FLR. The CNTS register must be
+                                                                 cleared by software as part of initialization after a reset (including FLR)
+                                                                 which will cause the interrupt state to clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t intr                  : 64; /**< [ 63:  0](RO) Interrupt bits for VF rings (0..i). [INTR[i]] reads as one whenever:
+
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT]
+
+                                                                 [INTR] can cause an MSI-X interrupt.
+
+                                                                 Note that "i" depends on the SDP()_EPF()_RINFO configuration.
+
+                                                                 Internal:
+                                                                 These interrupt bits are not cleared due to FLR because the CNTS and
+                                                                 LEVELS registers are not reset and we wish to make the interrupt state
+                                                                 consistent with CNTS/LEVELS even after FLR. The CNTS register must be
+                                                                 cleared by software as part of initialization after a reset (including FLR)
+                                                                 which will cause the interrupt state to clear. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_int_status_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_int_status bdk_sdpx_epfx_rx_in_int_status_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_INT_STATUS.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INT_STATUS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_INT_STATUS(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010070ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_INT_STATUS", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_INT_STATUS(a,b,c) bdk_sdpx_epfx_rx_in_int_status_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_INT_STATUS(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_INT_STATUS(a,b,c) "SDPX_EPFX_RX_IN_INT_STATUS"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_INT_STATUS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_INT_STATUS(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_INT_STATUS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_in_pkt_cnt
+ *
+ * SDP Packet Input Packet Count Register
+ * This register contains packet counts per ring that have been read into SDP.
+ * The counter will wrap when it reaches its maximum value. It should be cleared
+ * before the ring is enabled for an accurate count.
+ */
+union bdk_sdpx_epfx_rx_in_pkt_cnt
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_in_pkt_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t cnt                   : 36; /**< [ 35:  0](R/W/H) Packet count, can be written by software to any value. If a value of 0xFFFFFFFFF is
+                                                                 written to this field, it will cause this field as well as SDP()_EPF()_R()_IN_BYTE_CNT to
+                                                                 clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 36; /**< [ 35:  0](R/W/H) Packet count, can be written by software to any value. If a value of 0xFFFFFFFFF is
+                                                                 written to this field, it will cause this field as well as SDP()_EPF()_R()_IN_BYTE_CNT to
+                                                                 clear. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_in_pkt_cnt_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_in_pkt_cnt bdk_sdpx_epfx_rx_in_pkt_cnt_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_IN_PKT_CNT.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_PKT_CNT(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_IN_PKT_CNT(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010080ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_IN_PKT_CNT", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_IN_PKT_CNT(a,b,c) bdk_sdpx_epfx_rx_in_pkt_cnt_t
+#define bustype_BDK_SDPX_EPFX_RX_IN_PKT_CNT(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_IN_PKT_CNT(a,b,c) "SDPX_EPFX_RX_IN_PKT_CNT"
+#define device_bar_BDK_SDPX_EPFX_RX_IN_PKT_CNT(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_IN_PKT_CNT(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_IN_PKT_CNT(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_mbox_pf_vf_data
+ *
+ * SDP PF to VF Mailbox Data Registers
+ * These registers are used for communication of data from the PF to VF.
+ * A write to this register from the PF will cause the corresponding bit in
+ * SDP()_EPF()_R()_MBOX_PF_VF_INT[INTR] to be set, along with other bits in
+ * SDP()_EPF()_R()_MBOX_RINT_STATUS, SDP()_EPF()_R()_OUT_CNTS[MBOX_INT], and
+ * SDP()_EPF()_R()_IN_CNTS[MBOX_INT].
+ */
+union bdk_sdpx_epfx_rx_mbox_pf_vf_data
+{
+    uint64_t u;
+    struct bdk_sdpx_epfx_rx_mbox_pf_vf_data_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t data                  : 64; /**< [ 63:  0](R/W) Communication data from PF to VF. */
+#else /* Word 0 - Little Endian */
+        uint64_t data                  : 64; /**< [ 63:  0](R/W) Communication data from PF to VF. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_mbox_pf_vf_data_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_mbox_pf_vf_data bdk_sdpx_epfx_rx_mbox_pf_vf_data_t;
+
+/* Return the physical CSR address of SDP(a)_EPF(b)_R(c)_MBOX_PF_VF_DATA.
+   Only CN83XX with a==0, b<=1, c<=63 is valid; any other combination
+   calls __bdk_csr_fatal() instead of returning a usable address. */
+static inline uint64_t BDK_SDPX_EPFX_RX_MBOX_PF_VF_DATA(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_MBOX_PF_VF_DATA(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010210ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_MBOX_PF_VF_DATA", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SDPX_EPFX_RX_MBOX_PF_VF_DATA(a,b,c) bdk_sdpx_epfx_rx_mbox_pf_vf_data_t
+#define bustype_BDK_SDPX_EPFX_RX_MBOX_PF_VF_DATA(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_MBOX_PF_VF_DATA(a,b,c) "SDPX_EPFX_RX_MBOX_PF_VF_DATA"
+#define device_bar_BDK_SDPX_EPFX_RX_MBOX_PF_VF_DATA(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_MBOX_PF_VF_DATA(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_MBOX_PF_VF_DATA(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_mbox_pf_vf_int
+ *
+ * SDP Packet PF to VF Mailbox Interrupt Register
+ * These registers contain interrupt status and enable for the PF to VF mailbox communication
+ * registers. A write to SDP()_EPF()_R()_MBOX_VF_PF_DATA from the PF will cause the [INTR] bit
+ * in this register to set, along with corresponding bits in SDP()_EPF()_R()_MBOX_RINT_STATUS,
+ * SDP()_EPF()_R()_OUT_CNTS[MBOX_INT], and SDP()_EPF()_R()_IN_CNTS[MBOX_INT].
+ * All of these bits are cleared by writing 1 to the [INTR] bit in this register.
+ * If the [ENAB] bit is set, then an MSI-X interrupt will also be generated when the [INTR] bit
+ * is set. This register is cleared also due to an FLR.
+ */
+union bdk_sdpx_epfx_rx_mbox_pf_vf_int
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_mbox_pf_vf_int_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_2_63         : 62;
+        uint64_t enab                  : 1;  /**< [  1:  1](R/W) PF to VF mailbox interrupt enable. */
+        uint64_t intr                  : 1;  /**< [  0:  0](R/W1C/H) PF to VF mailbox interrupt signal. */
+#else /* Word 0 - Little Endian */
+        uint64_t intr                  : 1;  /**< [  0:  0](R/W1C/H) PF to VF mailbox interrupt signal. */
+        uint64_t enab                  : 1;  /**< [  1:  1](R/W) PF to VF mailbox interrupt enable. */
+        uint64_t reserved_2_63         : 62;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_mbox_pf_vf_int_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_mbox_pf_vf_int bdk_sdpx_epfx_rx_mbox_pf_vf_int_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_MBOX_PF_VF_INT.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_MBOX_PF_VF_INT(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_MBOX_PF_VF_INT(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010220ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_MBOX_PF_VF_INT", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_MBOX_PF_VF_INT(a,b,c) bdk_sdpx_epfx_rx_mbox_pf_vf_int_t
+#define bustype_BDK_SDPX_EPFX_RX_MBOX_PF_VF_INT(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_MBOX_PF_VF_INT(a,b,c) "SDPX_EPFX_RX_MBOX_PF_VF_INT"
+#define device_bar_BDK_SDPX_EPFX_RX_MBOX_PF_VF_INT(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_MBOX_PF_VF_INT(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_MBOX_PF_VF_INT(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_mbox_rint_status
+ *
+ * SDP Mailbox Interrupt Status Register
+ * This register contains PF-\>VF mailbox interrupt status on a per-VF basis.
+ * All rings for a given VF are located in a single register. Note that access to any ring offset
+ * within a given VF will return the same value. When the PF reads any ring in this register it
+ * will return the same value (64 bits each representing one ring.)
+ */
+union bdk_sdpx_epfx_rx_mbox_rint_status
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_mbox_rint_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t intr                  : 64; /**< [ 63:  0](RO) Interrupt bits for VF rings (0..i). [INTR[i]] reads as one whenever a mailbox
+                                                                 interrupt has been signaled by the PF and not cleared by the VF.
+                                                                 These bits are cleared by writing SDP()_EPF()_R()_MBOX_PF_VF_INT[INTR]
+                                                                 with a 1, or due to an FLR.
+
+                                                                 [INTR] can cause an MSI-X interrupt.
+
+                                                                 Note that "i" depends on the SDP()_EPF()_RINFO configuration. */
+#else /* Word 0 - Little Endian */
+        uint64_t intr                  : 64; /**< [ 63:  0](RO) Interrupt bits for VF rings (0..i). [INTR[i]] reads as one whenever a mailbox
+                                                                 interrupt has been signaled by the PF and not cleared by the VF.
+                                                                 These bits are cleared by writing SDP()_EPF()_R()_MBOX_PF_VF_INT[INTR]
+                                                                 with a 1, or due to an FLR.
+
+                                                                 [INTR] can cause an MSI-X interrupt.
+
+                                                                 Note that "i" depends on the SDP()_EPF()_RINFO configuration. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_mbox_rint_status_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_mbox_rint_status bdk_sdpx_epfx_rx_mbox_rint_status_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_MBOX_RINT_STATUS.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_MBOX_RINT_STATUS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_MBOX_RINT_STATUS(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010200ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_MBOX_RINT_STATUS", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_MBOX_RINT_STATUS(a,b,c) bdk_sdpx_epfx_rx_mbox_rint_status_t
+#define bustype_BDK_SDPX_EPFX_RX_MBOX_RINT_STATUS(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_MBOX_RINT_STATUS(a,b,c) "SDPX_EPFX_RX_MBOX_RINT_STATUS"
+#define device_bar_BDK_SDPX_EPFX_RX_MBOX_RINT_STATUS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_MBOX_RINT_STATUS(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_MBOX_RINT_STATUS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_mbox_vf_pf_data
+ *
+ * SDP VF to PF Mailbox Data Registers
+ * These registers are used for communication of data from the VF to PF.
+ * A write by the VF to this register will cause the corresponding bit in
+ * SDP()_MBOX_EPF()_INT to be set, and an MSI-X message to be generated.
+ * To clear the interrupt condition, the PF should write a 1 to SDP()_MBOX_EPF()_INT.
+ */
+union bdk_sdpx_epfx_rx_mbox_vf_pf_data
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_mbox_vf_pf_data_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t data                  : 64; /**< [ 63:  0](R/W) Communication data from VF to PF. */
+#else /* Word 0 - Little Endian */
+        uint64_t data                  : 64; /**< [ 63:  0](R/W) Communication data from VF to PF. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_mbox_vf_pf_data_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_mbox_vf_pf_data bdk_sdpx_epfx_rx_mbox_vf_pf_data_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_MBOX_VF_PF_DATA.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_MBOX_VF_PF_DATA(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_MBOX_VF_PF_DATA(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010230ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_MBOX_VF_PF_DATA", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_MBOX_VF_PF_DATA(a,b,c) bdk_sdpx_epfx_rx_mbox_vf_pf_data_t
+#define bustype_BDK_SDPX_EPFX_RX_MBOX_VF_PF_DATA(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_MBOX_VF_PF_DATA(a,b,c) "SDPX_EPFX_RX_MBOX_VF_PF_DATA"
+#define device_bar_BDK_SDPX_EPFX_RX_MBOX_VF_PF_DATA(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_MBOX_VF_PF_DATA(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_MBOX_VF_PF_DATA(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_out_byte_cnt
+ *
+ * SDP Packet Output Byte Count Register
+ * This register contains byte counts per ring that have been written to memory by SDP.
+ * The counter will wrap when it reaches its maximum value. It should be cleared
+ * before the ring is enabled for an accurate count.
+ */
+union bdk_sdpx_epfx_rx_out_byte_cnt
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_out_byte_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_48_63        : 16;
+        uint64_t cnt                   : 48; /**< [ 47:  0](R/W/H) Byte count, can be reset by software by writing SDP()_EPF()_R()_OUT_PKT_CNT[CNT]
+                                                                 with 0xFFFFFFFFF. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 48; /**< [ 47:  0](R/W/H) Byte count, can be reset by software by writing SDP()_EPF()_R()_OUT_PKT_CNT[CNT]
+                                                                 with 0xFFFFFFFFF. */
+        uint64_t reserved_48_63        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_out_byte_cnt_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_out_byte_cnt bdk_sdpx_epfx_rx_out_byte_cnt_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_OUT_BYTE_CNT.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_BYTE_CNT(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_BYTE_CNT(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010190ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_BYTE_CNT", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_OUT_BYTE_CNT(a,b,c) bdk_sdpx_epfx_rx_out_byte_cnt_t
+#define bustype_BDK_SDPX_EPFX_RX_OUT_BYTE_CNT(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_OUT_BYTE_CNT(a,b,c) "SDPX_EPFX_RX_OUT_BYTE_CNT"
+#define device_bar_BDK_SDPX_EPFX_RX_OUT_BYTE_CNT(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_OUT_BYTE_CNT(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_OUT_BYTE_CNT(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_out_cnts
+ *
+ * SDP Packet Output Counts Register
+ * This register contains the counters for SDP output ports.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring.
+ */
+union bdk_sdpx_epfx_rx_out_cnts
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_out_cnts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_63           : 1;
+        uint64_t out_int               : 1;  /**< [ 62: 62](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+                                                                 * Or, SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+
+                                                                 To clear the bit, the CNTS register must be written to clear the underlying condition. */
+        uint64_t in_int                : 1;  /**< [ 61: 61](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT].
+
+                                                                 To clear the bit, the SDP()_EPF()_R()_IN_CNTS register must be written to clear the
+                                                                 underlying condition. */
+        uint64_t mbox_int              : 1;  /**< [ 60: 60](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set.
+
+                                                                 To clear the bit, write SDP()_EPF()_R()_MBOX_PF_VF_INT[INTR] with 1.
+                                                                 This bit is also cleared due to an FLR. */
+        uint64_t resend                : 1;  /**< [ 59: 59](WO/H) A write of 1 will resend an MSI-X interrupt message if any of the following
+                                                                 conditions are true for the respective ring R():
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT],
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET],
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT],
+                                                                 * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set. */
+        uint64_t reserved_54_58        : 5;
+        uint64_t timer                 : 22; /**< [ 53: 32](RO/H) Timer, incremented every 1024 coprocessor-clock cycles when [CNT] is
+                                                                 not zero. The hardware clears [TIMER] when [CNT]
+                                                                 goes to 0. The first increment of this count can occur between 0 to
+                                                                 1023 coprocessor-clock cycles after [CNT] becomes nonzero. */
+        uint64_t cnt                   : 32; /**< [ 31:  0](R/W/H) Packet counter. Hardware adds to [CNT] as it sends packets out. On a write
+                                                                 to this CSR, hardware subtracts the amount written to the [CNT] field from
+                                                                 [CNT], which will clear SDP()_EPF()_R()_OUT_INT_STATUS[INTR] if [CNT] becomes \<=
+                                                                 SDP()_EPF()_R()_OUT_INT_LEVELS[CNT]. When SDP()_EPF()_R()_OUT_INT_LEVELS[BMODE] is clear,
+                                                                 the hardware adds 1 to [CNT] per packet. When SDP()_EPF()_R()_OUT_INT_LEVELS[BMODE] is
+                                                                 set,
+                                                                 the hardware adds the packet length to [CNT] per packet. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 32; /**< [ 31:  0](R/W/H) Packet counter. Hardware adds to [CNT] as it sends packets out. On a write
+                                                                 to this CSR, hardware subtracts the amount written to the [CNT] field from
+                                                                 [CNT], which will clear SDP()_EPF()_R()_OUT_INT_STATUS[INTR] if [CNT] becomes \<=
+                                                                 SDP()_EPF()_R()_OUT_INT_LEVELS[CNT]. When SDP()_EPF()_R()_OUT_INT_LEVELS[BMODE] is clear,
+                                                                 the hardware adds 1 to [CNT] per packet. When SDP()_EPF()_R()_OUT_INT_LEVELS[BMODE] is
+                                                                 set,
+                                                                 the hardware adds the packet length to [CNT] per packet. */
+        uint64_t timer                 : 22; /**< [ 53: 32](RO/H) Timer, incremented every 1024 coprocessor-clock cycles when [CNT] is
+                                                                 not zero. The hardware clears [TIMER] when [CNT]
+                                                                 goes to 0. The first increment of this count can occur between 0 to
+                                                                 1023 coprocessor-clock cycles after [CNT] becomes nonzero. */
+        uint64_t reserved_54_58        : 5;
+        uint64_t resend                : 1;  /**< [ 59: 59](WO/H) A write of 1 will resend an MSI-X interrupt message if any of the following
+                                                                 conditions are true for the respective ring R():
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT],
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET],
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT],
+                                                                 * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set. */
+        uint64_t mbox_int              : 1;  /**< [ 60: 60](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_MBOX_RINT_STATUS[INTR] is set.
+
+                                                                 To clear the bit, write SDP()_EPF()_R()_MBOX_PF_VF_INT[INTR] with 1.
+                                                                 This bit is also cleared due to an FLR. */
+        uint64_t in_int                : 1;  /**< [ 61: 61](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_IN_CNTS[CNT] \> SDP()_EPF()_R()_IN_INT_LEVELS[CNT].
+
+                                                                 To clear the bit, the SDP()_EPF()_R()_IN_CNTS register must be written to clear the
+                                                                 underlying condition. */
+        uint64_t out_int               : 1;  /**< [ 62: 62](RO/H) Returns a 1 when:
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+                                                                 * Or, SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+
+                                                                 To clear the bit, the CNTS register must be written to clear the underlying condition. */
+        uint64_t reserved_63           : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_out_cnts_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_out_cnts bdk_sdpx_epfx_rx_out_cnts_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_OUT_CNTS.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_CNTS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_CNTS(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010100ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_CNTS", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_OUT_CNTS(a,b,c) bdk_sdpx_epfx_rx_out_cnts_t
+#define bustype_BDK_SDPX_EPFX_RX_OUT_CNTS(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_OUT_CNTS(a,b,c) "SDPX_EPFX_RX_OUT_CNTS"
+#define device_bar_BDK_SDPX_EPFX_RX_OUT_CNTS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_OUT_CNTS(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_OUT_CNTS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_out_control
+ *
+ * SDP Packet Output Control Register
+ * This register contains control bits for output packet rings.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring. Also, this register cannot be written
+ * while either of the following conditions is true:
+ * * SDP()_EPF()_R()_OUT_CONTROL[IDLE] is clear.
+ * * Or, SDP()_EPF()_R()_OUT_ENABLE[ENB] is set.
+ */
+union bdk_sdpx_epfx_rx_out_control
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_out_control_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_37_63        : 27;
+        uint64_t idle                  : 1;  /**< [ 36: 36](RO/H) Asserted when this ring has no packets in-flight. */
+        uint64_t es_i                  : 2;  /**< [ 35: 34](R/W) [ES_I] is ES\<1:0\> for info buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ES\<1:0\> is the
+                                                                 endian-swap attribute for these MAC memory space writes. */
+        uint64_t nsr_i                 : 1;  /**< [ 33: 33](R/W) [NSR] is ADDRTYPE\<1\> for info buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ADDRTYPE\<1\> is
+                                                                 the no-snoop attribute for PCIe. */
+        uint64_t ror_i                 : 1;  /**< [ 32: 32](R/W) [ROR] is ADDRTYPE\<0\> for info buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ADDRTYPE\<0\> is
+                                                                 the relaxed-order attribute for PCIe. */
+        uint64_t es_d                  : 2;  /**< [ 31: 30](R/W) [ES] is ES\<1:0\> for data buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ES\<1:0\> is the
+                                                                 endian-swap attribute for these MAC memory space writes. */
+        uint64_t nsr_d                 : 1;  /**< [ 29: 29](R/W) [NSR] is ADDRTYPE\<1\> for data buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ADDRTYPE\<1\> is
+                                                                 the no-snoop attribute for PCIe. */
+        uint64_t ror_d                 : 1;  /**< [ 28: 28](R/W) [ROR] is ADDRTYPE\<0\> for data buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ADDRTYPE\<0\> is
+                                                                 the relaxed-order attribute for PCIe. */
+        uint64_t es_p                  : 2;  /**< [ 27: 26](R/W) [ES_P] is ES\<1:0\> for the packet output ring reads that fetch buffer/info pointer pairs
+                                                                 (from SLI_PKT()_SLIST_BADDR[ADDR]+). ES\<1:0\> is the endian-swap attribute for these
+                                                                 MAC memory space reads. */
+        uint64_t nsr_p                 : 1;  /**< [ 25: 25](R/W) [NSR_P] is ADDRTYPE\<1\> for the packet output ring reads that fetch buffer/info pointer
+                                                                 pairs (from SLI_PKT()_SLIST_BADDR[ADDR]+). ADDRTYPE\<1\> is the no-snoop attribute for PCIe. */
+        uint64_t ror_p                 : 1;  /**< [ 24: 24](R/W) [ROR_P] is ADDRTYPE\<0\> for the packet output ring reads that fetch buffer/info pointer
+                                                                 pairs (from SLI_PKT()_SLIST_BADDR[ADDR]+). ADDRTYPE\<0\> is the relaxed-order attribute
+                                                                 for PCIe. */
+        uint64_t imode                 : 1;  /**< [ 23: 23](R/W) When IMODE=1, packet output ring is in info-pointer mode; otherwise the packet output ring
+                                                                 is in buffer-pointer-only mode. */
+        uint64_t isize                 : 7;  /**< [ 22: 16](R/W/H) Info bytes size (bytes) for the output port. Legal sizes are 0 to 120. Not used
+                                                                 in buffer-pointer-only mode. If a value is written that is between 120-127 then
+                                                                 a value of 120 will be forced by hardware. */
+        uint64_t bsize                 : 16; /**< [ 15:  0](R/W/H) Buffer size (bytes) for the output ring. The minimum size is 128 bytes; if a value
+                                                                 smaller than 128 is written, hardware will force a value of 128. */
+#else /* Word 0 - Little Endian */
+        uint64_t bsize                 : 16; /**< [ 15:  0](R/W/H) Buffer size (bytes) for the output ring. The minimum size is 128 bytes; if a value
+                                                                 smaller than 128 is written, hardware will force a value of 128. */
+        uint64_t isize                 : 7;  /**< [ 22: 16](R/W/H) Info bytes size (bytes) for the output port. Legal sizes are 0 to 120. Not used
+                                                                 in buffer-pointer-only mode. If a value is written that is between 120-127 then
+                                                                 a value of 120 will be forced by hardware. */
+        uint64_t imode                 : 1;  /**< [ 23: 23](R/W) When IMODE=1, packet output ring is in info-pointer mode; otherwise the packet output ring
+                                                                 is in buffer-pointer-only mode. */
+        uint64_t ror_p                 : 1;  /**< [ 24: 24](R/W) [ROR_P] is ADDRTYPE\<0\> for the packet output ring reads that fetch buffer/info pointer
+                                                                 pairs (from SLI_PKT()_SLIST_BADDR[ADDR]+). ADDRTYPE\<0\> is the relaxed-order attribute
+                                                                 for PCIe. */
+        uint64_t nsr_p                 : 1;  /**< [ 25: 25](R/W) [NSR_P] is ADDRTYPE\<1\> for the packet output ring reads that fetch buffer/info pointer
+                                                                 pairs (from SLI_PKT()_SLIST_BADDR[ADDR]+). ADDRTYPE\<1\> is the no-snoop attribute for PCIe. */
+        uint64_t es_p                  : 2;  /**< [ 27: 26](R/W) [ES_P] is ES\<1:0\> for the packet output ring reads that fetch buffer/info pointer pairs
+                                                                 (from SLI_PKT()_SLIST_BADDR[ADDR]+). ES\<1:0\> is the endian-swap attribute for these
+                                                                 MAC memory space reads. */
+        uint64_t ror_d                 : 1;  /**< [ 28: 28](R/W) [ROR] is ADDRTYPE\<0\> for data buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ADDRTYPE\<0\> is
+                                                                 the relaxed-order attribute for PCIe. */
+        uint64_t nsr_d                 : 1;  /**< [ 29: 29](R/W) [NSR] is ADDRTYPE\<1\> for data buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ADDRTYPE\<1\> is
+                                                                 the no-snoop attribute for PCIe. */
+        uint64_t es_d                  : 2;  /**< [ 31: 30](R/W) [ES] is ES\<1:0\> for data buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ES\<1:0\> is the
+                                                                 endian-swap attribute for these MAC memory space writes. */
+        uint64_t ror_i                 : 1;  /**< [ 32: 32](R/W) [ROR] is ADDRTYPE\<0\> for info buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ADDRTYPE\<0\> is
+                                                                 the relaxed-order attribute for PCIe. */
+        uint64_t nsr_i                 : 1;  /**< [ 33: 33](R/W) [NSR] is ADDRTYPE\<1\> for info buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ADDRTYPE\<1\> is
+                                                                 the no-snoop attribute for PCIe. */
+        uint64_t es_i                  : 2;  /**< [ 35: 34](R/W) [ES_I] is ES\<1:0\> for info buffer write operations to buffer/info
+                                                                 pair MAC memory space addresses fetched from packet output ring. ES\<1:0\> is the
+                                                                 endian-swap attribute for these MAC memory space writes. */
+        uint64_t idle                  : 1;  /**< [ 36: 36](RO/H) Asserted when this ring has no packets in-flight. */
+        uint64_t reserved_37_63        : 27;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_out_control_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_out_control bdk_sdpx_epfx_rx_out_control_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_OUT_CONTROL.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_CONTROL(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_CONTROL(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010150ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_CONTROL", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_OUT_CONTROL(a,b,c) bdk_sdpx_epfx_rx_out_control_t
+#define bustype_BDK_SDPX_EPFX_RX_OUT_CONTROL(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_OUT_CONTROL(a,b,c) "SDPX_EPFX_RX_OUT_CONTROL"
+#define device_bar_BDK_SDPX_EPFX_RX_OUT_CONTROL(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_OUT_CONTROL(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_OUT_CONTROL(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_out_enable
+ *
+ * SDP Packet Output Enable Register
+ * This register is the enable for the output pointer rings.
+ */
+union bdk_sdpx_epfx_rx_out_enable
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_out_enable_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_1_63         : 63;
+        uint64_t enb                   : 1;  /**< [  0:  0](R/W/H) Enable for the output ring i. This bit can be cleared by hardware if certain
+                                                                 errors occur or an FLR is indicated by the remote host. It can be cleared by
+                                                                 software at any time. It cannot be set unless SDP()_EPF()_R()_OUT_CONTROL[IDLE] == 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t enb                   : 1;  /**< [  0:  0](R/W/H) Enable for the output ring i. This bit can be cleared by hardware if certain
+                                                                 errors occur or an FLR is indicated by the remote host. It can be cleared by
+                                                                 software at any time. It cannot be set unless SDP()_EPF()_R()_OUT_CONTROL[IDLE] == 0. */
+        uint64_t reserved_1_63         : 63;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_out_enable_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_out_enable bdk_sdpx_epfx_rx_out_enable_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_OUT_ENABLE.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_ENABLE(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_ENABLE(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010160ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_ENABLE", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_OUT_ENABLE(a,b,c) bdk_sdpx_epfx_rx_out_enable_t
+#define bustype_BDK_SDPX_EPFX_RX_OUT_ENABLE(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_OUT_ENABLE(a,b,c) "SDPX_EPFX_RX_OUT_ENABLE"
+#define device_bar_BDK_SDPX_EPFX_RX_OUT_ENABLE(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_OUT_ENABLE(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_OUT_ENABLE(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_out_int_levels
+ *
+ * SDP Packet Output Interrupt Levels Register
+ * This register contains SDP output packet interrupt levels.
+ * This register is not affected by reset (including FLR) and must be initialized
+ * by the VF prior to enabling the ring.
+ */
+union bdk_sdpx_epfx_rx_out_int_levels
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_out_int_levels_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t bmode                 : 1;  /**< [ 63: 63](R/W) Determines whether SDP()_EPF()_R()_OUT_CNTS[CNT] is a byte or packet counter. When
+                                                                 [BMODE]=1,
+                                                                 SDP()_EPF()_R()_OUT_CNTS[CNT] is a byte counter, else SDP()_EPF()_R()_OUT_CNTS[CNT] is a
+                                                                 packet
+                                                                 counter. */
+        uint64_t reserved_54_62        : 9;
+        uint64_t timet                 : 22; /**< [ 53: 32](R/W) Output port counter time interrupt threshold. An MSI-X interrupt will be generated
+                                                                 whenever SDP()_EPF()_R()_OUT_CNTS[TIMER] \> [TIMET]. Whenever software changes the value of
+                                                                 [TIMET], it should also subsequently write the corresponding SDP()_EPF()_R()_OUT_CNTS CSR
+                                                                 (with
+                                                                 a value of zero if desired) to ensure that the hardware correspondingly updates
+                                                                 SDP()_EPF()_R()_OUT_CNTS[OUT_INT]. */
+        uint64_t cnt                   : 32; /**< [ 31:  0](R/W) Output port counter interrupt threshold. An MSI-X interrupt will be generated
+                                                                 whenever SDP()_EPF()_R()_OUT_CNTS[CNT] \> [CNT]. Whenever software changes the value of
+                                                                 [CNT], it should also subsequently write the corresponding SDP()_EPF()_R()_OUT_CNTS CSR
+                                                                 (with a
+                                                                 value of zero if desired) to ensure that the hardware correspondingly updates
+                                                                 SDP()_EPF()_R()_OUT_CNTS[OUT_INT]. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 32; /**< [ 31:  0](R/W) Output port counter interrupt threshold. An MSI-X interrupt will be generated
+                                                                 whenever SDP()_EPF()_R()_OUT_CNTS[CNT] \> [CNT]. Whenever software changes the value of
+                                                                 [CNT], it should also subsequently write the corresponding SDP()_EPF()_R()_OUT_CNTS CSR
+                                                                 (with a
+                                                                 value of zero if desired) to ensure that the hardware correspondingly updates
+                                                                 SDP()_EPF()_R()_OUT_CNTS[OUT_INT]. */
+        uint64_t timet                 : 22; /**< [ 53: 32](R/W) Output port counter time interrupt threshold. An MSI-X interrupt will be generated
+                                                                 whenever SDP()_EPF()_R()_OUT_CNTS[TIMER] \> [TIMET]. Whenever software changes the value of
+                                                                 [TIMET], it should also subsequently write the corresponding SDP()_EPF()_R()_OUT_CNTS CSR
+                                                                 (with
+                                                                 a value of zero if desired) to ensure that the hardware correspondingly updates
+                                                                 SDP()_EPF()_R()_OUT_CNTS[OUT_INT]. */
+        uint64_t reserved_54_62        : 9;
+        uint64_t bmode                 : 1;  /**< [ 63: 63](R/W) Determines whether SDP()_EPF()_R()_OUT_CNTS[CNT] is a byte or packet counter. When
+                                                                 [BMODE]=1,
+                                                                 SDP()_EPF()_R()_OUT_CNTS[CNT] is a byte counter, else SDP()_EPF()_R()_OUT_CNTS[CNT] is a
+                                                                 packet
+                                                                 counter. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_out_int_levels_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_out_int_levels bdk_sdpx_epfx_rx_out_int_levels_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_OUT_INT_LEVELS.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_INT_LEVELS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_INT_LEVELS(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010110ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_INT_LEVELS", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_OUT_INT_LEVELS(a,b,c) bdk_sdpx_epfx_rx_out_int_levels_t
+#define bustype_BDK_SDPX_EPFX_RX_OUT_INT_LEVELS(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_OUT_INT_LEVELS(a,b,c) "SDPX_EPFX_RX_OUT_INT_LEVELS"
+#define device_bar_BDK_SDPX_EPFX_RX_OUT_INT_LEVELS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_OUT_INT_LEVELS(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_OUT_INT_LEVELS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_out_int_status
+ *
+ * SDP Output Packet Interrupt Status Register
+ * This register contains interrupt status on a per-VF basis. All rings for a given VF
+ * are located in a single register. Note that access to any ring offset within a given
+ * VF will return the same value. When the PF reads any ring in this register it will
+ * return the same value (64 bits each representing one ring.)
+ */
+union bdk_sdpx_epfx_rx_out_int_status
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_out_int_status_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t intr                  : 64; /**< [ 63:  0](RO) Packet output interrupt bit for a given VFR's ports (0..i). [INTR]\<ring\> reads
+                                                                 as one whenever for the respective ring R(ring):
+
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+
+                                                                 * Or, SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+
+                                                                 [INTR] can cause an MSI-X interrupt.
+
+                                                                 Internal:
+                                                                 These interrupt bits are not cleared due to FLR because the CNTS and
+                                                                 LEVELS registers are not reset and we wish to make the interrupt state
+                                                                 consistent with CNTS/LEVELS even after FLR. The CNTS register must be
+                                                                 cleared by software as part of initialization after a reset (including FLR)
+                                                                 which will cause the interrupt state to clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t intr                  : 64; /**< [ 63:  0](RO) Packet output interrupt bit for a given VFR's ports (0..i). [INTR]\<ring\> reads
+                                                                 as one whenever for the respective ring R(ring):
+
+                                                                 * SDP()_EPF()_R()_OUT_CNTS[CNT] \> SDP()_EPF()_R()_OUT_INT_LEVELS[CNT].
+
+                                                                 * Or, SDP()_EPF()_R()_OUT_CNTS[TIMER] \> SDP()_EPF()_R()_OUT_INT_LEVELS[TIMET].
+
+                                                                 [INTR] can cause an MSI-X interrupt.
+
+                                                                 Internal:
+                                                                 These interrupt bits are not cleared due to FLR because the CNTS and
+                                                                 LEVELS registers are not reset and we wish to make the interrupt state
+                                                                 consistent with CNTS/LEVELS even after FLR. The CNTS register must be
+                                                                 cleared by software as part of initialization after a reset (including FLR)
+                                                                 which will cause the interrupt state to clear. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_out_int_status_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_out_int_status bdk_sdpx_epfx_rx_out_int_status_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_OUT_INT_STATUS.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_INT_STATUS(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_INT_STATUS(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010170ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_INT_STATUS", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_OUT_INT_STATUS(a,b,c) bdk_sdpx_epfx_rx_out_int_status_t
+#define bustype_BDK_SDPX_EPFX_RX_OUT_INT_STATUS(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_OUT_INT_STATUS(a,b,c) "SDPX_EPFX_RX_OUT_INT_STATUS"
+#define device_bar_BDK_SDPX_EPFX_RX_OUT_INT_STATUS(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_OUT_INT_STATUS(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_OUT_INT_STATUS(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sdp#_epf#_r#_out_pkt_cnt
+ *
+ * SDP Packet Output Packet Count Register
+ * This register contains packet counts per ring that have been written to memory by SDP.
+ * The counter will wrap when it reaches its maximum value. It should be cleared
+ * before the ring is enabled for an accurate count.
+ */
+union bdk_sdpx_epfx_rx_out_pkt_cnt
+{
+    uint64_t u;    /* Whole-register (64-bit) view. */
+    struct bdk_sdpx_epfx_rx_out_pkt_cnt_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_36_63        : 28;
+        uint64_t cnt                   : 36; /**< [ 35:  0](R/W/H) Packet count, can be written by software to any value. If a value of 0xFFFFFFFFF is
+                                                                 written to this field, it will cause this field as well as SDP()_EPF()_R()_OUT_BYTE_CNT to
+                                                                 clear. */
+#else /* Word 0 - Little Endian */
+        uint64_t cnt                   : 36; /**< [ 35:  0](R/W/H) Packet count, can be written by software to any value. If a value of 0xFFFFFFFFF is
+                                                                 written to this field, it will cause this field as well as SDP()_EPF()_R()_OUT_BYTE_CNT to
+                                                                 clear. */
+        uint64_t reserved_36_63        : 28;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_sdpx_epfx_rx_out_pkt_cnt_s cn; */
+};
+typedef union bdk_sdpx_epfx_rx_out_pkt_cnt bdk_sdpx_epfx_rx_out_pkt_cnt_t;
+
+/* Return the CN83XX physical address of SDP(a)_EPF(b)_R(c)_OUT_PKT_CNT.
+ * NOTE(review): (a) is masked with 0x0, so the node index never alters the
+ * address; only a==0 passes validation (single-node CSR on this model). */
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_PKT_CNT(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_EPFX_RX_OUT_PKT_CNT(unsigned long a, unsigned long b, unsigned long c)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
+        return 0x874080010180ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
+    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_PKT_CNT", 3, a, b, c, 0);
+}
+
+/* Accessor metadata — presumably consumed by the generic BDK_CSR_* macros. */
+#define typedef_BDK_SDPX_EPFX_RX_OUT_PKT_CNT(a,b,c) bdk_sdpx_epfx_rx_out_pkt_cnt_t
+#define bustype_BDK_SDPX_EPFX_RX_OUT_PKT_CNT(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_EPFX_RX_OUT_PKT_CNT(a,b,c) "SDPX_EPFX_RX_OUT_PKT_CNT"
+#define device_bar_BDK_SDPX_EPFX_RX_OUT_PKT_CNT(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_EPFX_RX_OUT_PKT_CNT(a,b,c) (a)
+#define arguments_BDK_SDPX_EPFX_RX_OUT_PKT_CNT(a,b,c) (a),(b),(c),-1
+
/**
 * Register (PEXP_NCB) sdp#_epf#_r#_out_slist_baddr
 *
 * SDP Packet Ring Base Address Register
 * This register contains the base address for the output ring.
 * This register is not affected by reset (including FLR) and must be initialized
 * by the VF prior to enabling the ring. Also, this register cannot be written
 * while either of the following conditions is true:
 * * SDP()_EPF()_R()_OUT_CONTROL[IDLE] is clear.
 * * Or, SDP()_EPF()_R()_OUT_ENABLE[ENB] is set.
 */
union bdk_sdpx_epfx_rx_out_slist_baddr
{
    uint64_t u;
    /* Bitfield view; the low 4 bits are reserved because the base address
       must be 16-byte aligned. */
    struct bdk_sdpx_epfx_rx_out_slist_baddr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t addr                  : 60; /**< [ 63: 4](R/W) Base address for the output ring, which is an array with
                                                                 SDP()_EPF()_R()_OUT_SLIST_FIFO_RSIZE[RSIZE] entries, each entry being a
                                                                 SDP_BUF_INFO_PAIR_S.

                                                                 SDP()_EPF()_R()_OUT_SLIST_BADDR contains a byte address that must be 16-byte
                                                                 aligned, so SDP()_EPF()_R()_OUT_SLIST_BADDR\<3:0\> must be zero. */
        uint64_t reserved_0_3          : 4;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0_3          : 4;
        uint64_t addr                  : 60; /**< [ 63: 4](R/W) Base address for the output ring, which is an array with
                                                                 SDP()_EPF()_R()_OUT_SLIST_FIFO_RSIZE[RSIZE] entries, each entry being a
                                                                 SDP_BUF_INFO_PAIR_S.

                                                                 SDP()_EPF()_R()_OUT_SLIST_BADDR contains a byte address that must be 16-byte
                                                                 aligned, so SDP()_EPF()_R()_OUT_SLIST_BADDR\<3:0\> must be zero. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_rx_out_slist_baddr_s cn; */
};
typedef union bdk_sdpx_epfx_rx_out_slist_baddr bdk_sdpx_epfx_rx_out_slist_baddr_t;

/* CSR address builder: a = SDP instance, b = EPF, c = ring.  Valid only on
   CN83XX with a==0, b<=1, c<=63; otherwise a fatal CSR lookup error. */
static inline uint64_t BDK_SDPX_EPFX_RX_OUT_SLIST_BADDR(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SDPX_EPFX_RX_OUT_SLIST_BADDR(unsigned long a, unsigned long b, unsigned long c)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
        return 0x874080010120ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_SLIST_BADDR", 3, a, b, c, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SDPX_EPFX_RX_OUT_SLIST_BADDR(a,b,c) bdk_sdpx_epfx_rx_out_slist_baddr_t
#define bustype_BDK_SDPX_EPFX_RX_OUT_SLIST_BADDR(a,b,c) BDK_CSR_TYPE_PEXP_NCB
#define basename_BDK_SDPX_EPFX_RX_OUT_SLIST_BADDR(a,b,c) "SDPX_EPFX_RX_OUT_SLIST_BADDR"
#define device_bar_BDK_SDPX_EPFX_RX_OUT_SLIST_BADDR(a,b,c) 0x0 /* PF_BAR0 */
#define busnum_BDK_SDPX_EPFX_RX_OUT_SLIST_BADDR(a,b,c) (a)
#define arguments_BDK_SDPX_EPFX_RX_OUT_SLIST_BADDR(a,b,c) (a),(b),(c),-1
+
/**
 * Register (PEXP_NCB) sdp#_epf#_r#_out_slist_dbell
 *
 * SDP Packet Base-Address Offset and Doorbell Registers
 * This register contains the doorbell and base-address offset for the next read operation.
 * This register is not affected by reset (including FLR) and must be initialized
 * by the VF prior to enabling the ring.
 */
union bdk_sdpx_epfx_rx_out_slist_dbell
{
    uint64_t u;
    /* Bitfield view: [AOFF] in bits <63:32>, [DBELL] in bits <31:0>. */
    struct bdk_sdpx_epfx_rx_out_slist_dbell_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t aoff                  : 32; /**< [ 63: 32](RO/H) Address offset. The offset from the SDP()_EPF()_R()_OUT_SLIST_BADDR where the next pointer
                                                                 is read.
                                                                 A write of 0xFFFFFFFF to [DBELL] clears [DBELL] and [AOFF]. */
        uint64_t dbell                 : 32; /**< [ 31: 0](R/W/H) Pointer pair list doorbell count. Write operations to this field increments the present
                                                                 value here. Read operations return the present value. The value of this field is
                                                                 decremented as read operations are issued for scatter pointers. A write of 0xFFFFFFFF
                                                                 to this field clears [DBELL] and [AOFF]. The value of this field is in number of
                                                                 SDP_BUF_INFO_PAIR_S's. This register should be cleared before enabling a ring. */
#else /* Word 0 - Little Endian */
        uint64_t dbell                 : 32; /**< [ 31: 0](R/W/H) Pointer pair list doorbell count. Write operations to this field increments the present
                                                                 value here. Read operations return the present value. The value of this field is
                                                                 decremented as read operations are issued for scatter pointers. A write of 0xFFFFFFFF
                                                                 to this field clears [DBELL] and [AOFF]. The value of this field is in number of
                                                                 SDP_BUF_INFO_PAIR_S's. This register should be cleared before enabling a ring. */
        uint64_t aoff                  : 32; /**< [ 63: 32](RO/H) Address offset. The offset from the SDP()_EPF()_R()_OUT_SLIST_BADDR where the next pointer
                                                                 is read.
                                                                 A write of 0xFFFFFFFF to [DBELL] clears [DBELL] and [AOFF]. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_rx_out_slist_dbell_s cn; */
};
typedef union bdk_sdpx_epfx_rx_out_slist_dbell bdk_sdpx_epfx_rx_out_slist_dbell_t;

/* CSR address builder: a = SDP instance, b = EPF, c = ring.  Valid only on
   CN83XX with a==0, b<=1, c<=63; otherwise a fatal CSR lookup error. */
static inline uint64_t BDK_SDPX_EPFX_RX_OUT_SLIST_DBELL(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SDPX_EPFX_RX_OUT_SLIST_DBELL(unsigned long a, unsigned long b, unsigned long c)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
        return 0x874080010140ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_SLIST_DBELL", 3, a, b, c, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SDPX_EPFX_RX_OUT_SLIST_DBELL(a,b,c) bdk_sdpx_epfx_rx_out_slist_dbell_t
#define bustype_BDK_SDPX_EPFX_RX_OUT_SLIST_DBELL(a,b,c) BDK_CSR_TYPE_PEXP_NCB
#define basename_BDK_SDPX_EPFX_RX_OUT_SLIST_DBELL(a,b,c) "SDPX_EPFX_RX_OUT_SLIST_DBELL"
#define device_bar_BDK_SDPX_EPFX_RX_OUT_SLIST_DBELL(a,b,c) 0x0 /* PF_BAR0 */
#define busnum_BDK_SDPX_EPFX_RX_OUT_SLIST_DBELL(a,b,c) (a)
#define arguments_BDK_SDPX_EPFX_RX_OUT_SLIST_DBELL(a,b,c) (a),(b),(c),-1
+
/**
 * Register (PEXP_NCB) sdp#_epf#_r#_out_slist_rsize
 *
 * SDP Packet Ring Size Register
 * This register contains the output packet ring size.
 * This register is not affected by reset (including FLR) and must be initialized
 * by the VF prior to enabling the ring. Also, this register cannot be written
 * while either of the following conditions is true:
 * * SDP()_EPF()_R()_OUT_CONTROL[IDLE] is clear.
 * * Or, SDP()_EPF()_R()_OUT_ENABLE[ENB] is set.
 */
union bdk_sdpx_epfx_rx_out_slist_rsize
{
    uint64_t u;
    /* Bitfield view: [RSIZE] in bits <31:0>, upper half reserved. */
    struct bdk_sdpx_epfx_rx_out_slist_rsize_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63        : 32;
        uint64_t rsize                 : 32; /**< [ 31: 0](R/W/H) Ring size (number of SDP_BUF_INFO_PAIR_S's). This value must be 16 or
                                                                 greater. If a value is written that is less than 16, then hardware
                                                                 will force a value of 16 to be written. */
#else /* Word 0 - Little Endian */
        uint64_t rsize                 : 32; /**< [ 31: 0](R/W/H) Ring size (number of SDP_BUF_INFO_PAIR_S's). This value must be 16 or
                                                                 greater. If a value is written that is less than 16, then hardware
                                                                 will force a value of 16 to be written. */
        uint64_t reserved_32_63        : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_rx_out_slist_rsize_s cn; */
};
typedef union bdk_sdpx_epfx_rx_out_slist_rsize bdk_sdpx_epfx_rx_out_slist_rsize_t;

/* CSR address builder: a = SDP instance, b = EPF, c = ring.  Valid only on
   CN83XX with a==0, b<=1, c<=63; otherwise a fatal CSR lookup error. */
static inline uint64_t BDK_SDPX_EPFX_RX_OUT_SLIST_RSIZE(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SDPX_EPFX_RX_OUT_SLIST_RSIZE(unsigned long a, unsigned long b, unsigned long c)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
        return 0x874080010130ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
    __bdk_csr_fatal("SDPX_EPFX_RX_OUT_SLIST_RSIZE", 3, a, b, c, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SDPX_EPFX_RX_OUT_SLIST_RSIZE(a,b,c) bdk_sdpx_epfx_rx_out_slist_rsize_t
#define bustype_BDK_SDPX_EPFX_RX_OUT_SLIST_RSIZE(a,b,c) BDK_CSR_TYPE_PEXP_NCB
#define basename_BDK_SDPX_EPFX_RX_OUT_SLIST_RSIZE(a,b,c) "SDPX_EPFX_RX_OUT_SLIST_RSIZE"
#define device_bar_BDK_SDPX_EPFX_RX_OUT_SLIST_RSIZE(a,b,c) 0x0 /* PF_BAR0 */
#define busnum_BDK_SDPX_EPFX_RX_OUT_SLIST_RSIZE(a,b,c) (a)
#define arguments_BDK_SDPX_EPFX_RX_OUT_SLIST_RSIZE(a,b,c) (a),(b),(c),-1
+
/**
 * Register (PEXP_NCB) sdp#_epf#_r#_vf_num
 *
 * SDP Ring VF Number Register
 * (NOTE(review): the generated title read "SDP Ring Error Type Register",
 * which matches neither the register name nor the field below; it appears to
 * be a generator copy/paste artifact.)
 * These registers provide the virtual function number for each ring (both input and
 * output). They must be programmed by the PF along with SDP()_EPF()_RINFO before
 * the given ring is enabled. They are not accessible by the VF.
 *
 * All 64 registers associated with an EPF will be reset due to a PF FLR or MAC Reset.
 * These registers are not affected by VF FLR.
 */
union bdk_sdpx_epfx_rx_vf_num
{
    uint64_t u;
    /* Bitfield view: [VF_NUM] in bits <6:0>, remainder reserved. */
    struct bdk_sdpx_epfx_rx_vf_num_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_7_63         : 57;
        uint64_t vf_num                : 7;  /**< [  6: 0](R/W) The function that the ring belongs to. If equal to 0, the ring belongs
                                                                 to the physical function. If nonzero, this field is the virtual function
                                                                 that the ring belongs to.

                                                                 [VF_NUM] configuration must match SDP()_EPF()_RINFO configuration.

                                                                 [VF_NUM] applies to the ring pair, which includes both this input
                                                                 ring and to the output ring of the same index. */
#else /* Word 0 - Little Endian */
        uint64_t vf_num                : 7;  /**< [  6: 0](R/W) The function that the ring belongs to. If equal to 0, the ring belongs
                                                                 to the physical function. If nonzero, this field is the virtual function
                                                                 that the ring belongs to.

                                                                 [VF_NUM] configuration must match SDP()_EPF()_RINFO configuration.

                                                                 [VF_NUM] applies to the ring pair, which includes both this input
                                                                 ring and to the output ring of the same index. */
        uint64_t reserved_7_63         : 57;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_epfx_rx_vf_num_s cn; */
};
typedef union bdk_sdpx_epfx_rx_vf_num bdk_sdpx_epfx_rx_vf_num_t;

/* CSR address builder: a = SDP instance, b = EPF, c = ring.  Valid only on
   CN83XX with a==0, b<=1, c<=63; otherwise a fatal CSR lookup error. */
static inline uint64_t BDK_SDPX_EPFX_RX_VF_NUM(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SDPX_EPFX_RX_VF_NUM(unsigned long a, unsigned long b, unsigned long c)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1) && (c<=63)))
        return 0x874080010500ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1) + 0x20000ll * ((c) & 0x3f);
    __bdk_csr_fatal("SDPX_EPFX_RX_VF_NUM", 3, a, b, c, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SDPX_EPFX_RX_VF_NUM(a,b,c) bdk_sdpx_epfx_rx_vf_num_t
#define bustype_BDK_SDPX_EPFX_RX_VF_NUM(a,b,c) BDK_CSR_TYPE_PEXP_NCB
#define basename_BDK_SDPX_EPFX_RX_VF_NUM(a,b,c) "SDPX_EPFX_RX_VF_NUM"
#define device_bar_BDK_SDPX_EPFX_RX_VF_NUM(a,b,c) 0x0 /* PF_BAR0 */
#define busnum_BDK_SDPX_EPFX_RX_VF_NUM(a,b,c) (a)
#define arguments_BDK_SDPX_EPFX_RX_VF_NUM(a,b,c) (a),(b),(c),-1
+
/**
 * Register (PEXP_NCB) sdp#_scratch#
 *
 * SDP Scratch Register
 * These registers are general purpose 64-bit scratch registers for software use.
 */
union bdk_sdpx_scratchx
{
    uint64_t u;
    /* Single full-width field, so the layout is endian-independent. */
    struct bdk_sdpx_scratchx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t data                  : 64; /**< [ 63: 0](R/W) The value in this register is totally software defined. */
#else /* Word 0 - Little Endian */
        uint64_t data                  : 64; /**< [ 63: 0](R/W) The value in this register is totally software defined. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_sdpx_scratchx_s cn; */
};
typedef union bdk_sdpx_scratchx bdk_sdpx_scratchx_t;
+
+static inline uint64_t BDK_SDPX_SCRATCHX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SDPX_SCRATCHX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x874080020180ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SDPX_SCRATCHX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SDPX_SCRATCHX(a,b) bdk_sdpx_scratchx_t
+#define bustype_BDK_SDPX_SCRATCHX(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SDPX_SCRATCHX(a,b) "SDPX_SCRATCHX"
+#define device_bar_BDK_SDPX_SCRATCHX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SDPX_SCRATCHX(a,b) (a)
+#define arguments_BDK_SDPX_SCRATCHX(a,b) (a),(b),-1,-1
+
/**
 * Register (NCB) sli#_bar3_addr
 *
 * SLI BAR3 Address Register
 * This register configures PEM BAR3 accesses.
 */
union bdk_slix_bar3_addr
{
    uint64_t u;
    /* Bitfield view: [WVIRT] bit 63, [RD_ADDR] bits <48:19>. */
    struct bdk_slix_bar3_addr_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t wvirt                 : 1;  /**< [ 63: 63](R/W) Virtual:
                                                                 0 = [RD_ADDR] is a physical addresses.
                                                                 1 = [RD_ADDR] is a virtual address. */
        uint64_t reserved_49_62        : 14;
        uint64_t rd_addr               : 30; /**< [ 48: 19](R/W) Base address for PEM BAR3 transactions that is appended to the 512KB offset.
                                                                 The reset value is the PEM base address of the EPROM,
                                                                 PEM()_EROM(). */
        uint64_t reserved_0_18         : 19;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0_18         : 19;
        uint64_t rd_addr               : 30; /**< [ 48: 19](R/W) Base address for PEM BAR3 transactions that is appended to the 512KB offset.
                                                                 The reset value is the PEM base address of the EPROM,
                                                                 PEM()_EROM(). */
        uint64_t reserved_49_62        : 14;
        uint64_t wvirt                 : 1;  /**< [ 63: 63](R/W) Virtual:
                                                                 0 = [RD_ADDR] is a physical addresses.
                                                                 1 = [RD_ADDR] is a virtual address. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_slix_bar3_addr_s cn; */
};
typedef union bdk_slix_bar3_addr bdk_slix_bar3_addr_t;

/* CSR address builder: a = SLI instance.  Valid only on CN83XX with a==0;
   otherwise a fatal CSR lookup error. */
static inline uint64_t BDK_SLIX_BAR3_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SLIX_BAR3_ADDR(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
        return 0x874001002400ll + 0x1000000000ll * ((a) & 0x0);
    __bdk_csr_fatal("SLIX_BAR3_ADDR", 1, a, 0, 0, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SLIX_BAR3_ADDR(a) bdk_slix_bar3_addr_t
#define bustype_BDK_SLIX_BAR3_ADDR(a) BDK_CSR_TYPE_NCB
#define basename_BDK_SLIX_BAR3_ADDR(a) "SLIX_BAR3_ADDR"
#define device_bar_BDK_SLIX_BAR3_ADDR(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_SLIX_BAR3_ADDR(a) (a)
#define arguments_BDK_SLIX_BAR3_ADDR(a) (a),-1,-1,-1
+
/**
 * Register (NCB) sli#_bist_status
 *
 * SLI BIST Status Register
 * This register contains results from BIST runs of MAC's memories: 0 = pass (or BIST in
 * progress/never run), 1 = fail.
 */
union bdk_slix_bist_status
{
    uint64_t u;
    /* Common view: 31 status bits (the widest of the per-model layouts). */
    struct bdk_slix_bist_status_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_31_63        : 33;
        uint64_t status                : 31; /**< [ 30: 0](RO) BIST status.
                                                                 Internal:
                                                                 22 = sli_nod_nfif_bstatus.
                                                                 21 = csr_region_mem_bstatus.
                                                                 20 = sncf0_ffifo_bstatus.
                                                                 19 = sndfh0_ffifo_bstatus.
                                                                 18 = sndfl0_ffifo_bstatus.
                                                                 17 = sncf1_ffifo_bstatus.
                                                                 16 = sndfh1_ffifo_bstatus.
                                                                 15 = sndfl1_ffifo_bstatus.
                                                                 14 = sncf2_ffifo_bstatus.
                                                                 13 = sndfh2_ffifo_bstatus.
                                                                 12 = sndfl2_ffifo_bstatus.
                                                                 11 = p2n_port0_tlp_cpl_fifo_bstatus.
                                                                 10 = p2n_port0_tlp_n_fifo_bstatus.
                                                                 9 = p2n_port0_tlp_p_fifo_bstatus.
                                                                 8 = p2n_port1_tlp_cpl_fifo_bstatus.
                                                                 7 = p2n_port1_tlp_n_fifo_bstatus.
                                                                 6 = p2n_port1_tlp_p_fifo_bstatus.
                                                                 5 = p2n_port2_tlp_cpl_fifo_bstatus.
                                                                 4 = p2n_port2_tlp_n_fifo_bstatus.
                                                                 3 = p2n_port2_tlp_p_fifo_bstatus.
                                                                 2 = cpl0_fifo_bstatus.
                                                                 1 = cpl1_fifo_bstatus.
                                                                 0 = cpl2_fifo_bstatus. */
#else /* Word 0 - Little Endian */
        uint64_t status                : 31; /**< [ 30: 0](RO) BIST status.
                                                                 Internal:
                                                                 22 = sli_nod_nfif_bstatus.
                                                                 21 = csr_region_mem_bstatus.
                                                                 20 = sncf0_ffifo_bstatus.
                                                                 19 = sndfh0_ffifo_bstatus.
                                                                 18 = sndfl0_ffifo_bstatus.
                                                                 17 = sncf1_ffifo_bstatus.
                                                                 16 = sndfh1_ffifo_bstatus.
                                                                 15 = sndfl1_ffifo_bstatus.
                                                                 14 = sncf2_ffifo_bstatus.
                                                                 13 = sndfh2_ffifo_bstatus.
                                                                 12 = sndfl2_ffifo_bstatus.
                                                                 11 = p2n_port0_tlp_cpl_fifo_bstatus.
                                                                 10 = p2n_port0_tlp_n_fifo_bstatus.
                                                                 9 = p2n_port0_tlp_p_fifo_bstatus.
                                                                 8 = p2n_port1_tlp_cpl_fifo_bstatus.
                                                                 7 = p2n_port1_tlp_n_fifo_bstatus.
                                                                 6 = p2n_port1_tlp_p_fifo_bstatus.
                                                                 5 = p2n_port2_tlp_cpl_fifo_bstatus.
                                                                 4 = p2n_port2_tlp_n_fifo_bstatus.
                                                                 3 = p2n_port2_tlp_p_fifo_bstatus.
                                                                 2 = cpl0_fifo_bstatus.
                                                                 1 = cpl1_fifo_bstatus.
                                                                 0 = cpl2_fifo_bstatus. */
        uint64_t reserved_31_63        : 33;
#endif /* Word 0 - End */
    } s;
    /* CN81XX (and CN88XX, below) implement only 23 status bits. */
    struct bdk_slix_bist_status_cn81xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_23_63        : 41;
        uint64_t status                : 23; /**< [ 22: 0](RO) BIST status.
                                                                 Internal:
                                                                 22 = sli_nod_nfif_bstatus.
                                                                 21 = csr_region_mem_bstatus.
                                                                 20 = sncf0_ffifo_bstatus.
                                                                 19 = sndfh0_ffifo_bstatus.
                                                                 18 = sndfl0_ffifo_bstatus.
                                                                 17 = sncf1_ffifo_bstatus.
                                                                 16 = sndfh1_ffifo_bstatus.
                                                                 15 = sndfl1_ffifo_bstatus.
                                                                 14 = sncf2_ffifo_bstatus.
                                                                 13 = sndfh2_ffifo_bstatus.
                                                                 12 = sndfl2_ffifo_bstatus.
                                                                 11 = p2n_port0_tlp_cpl_fifo_bstatus.
                                                                 10 = p2n_port0_tlp_n_fifo_bstatus.
                                                                 9 = p2n_port0_tlp_p_fifo_bstatus.
                                                                 8 = p2n_port1_tlp_cpl_fifo_bstatus.
                                                                 7 = p2n_port1_tlp_n_fifo_bstatus.
                                                                 6 = p2n_port1_tlp_p_fifo_bstatus.
                                                                 5 = p2n_port2_tlp_cpl_fifo_bstatus.
                                                                 4 = p2n_port2_tlp_n_fifo_bstatus.
                                                                 3 = p2n_port2_tlp_p_fifo_bstatus.
                                                                 2 = cpl0_fifo_bstatus.
                                                                 1 = cpl1_fifo_bstatus.
                                                                 0 = cpl2_fifo_bstatus. */
#else /* Word 0 - Little Endian */
        uint64_t status                : 23; /**< [ 22: 0](RO) BIST status.
                                                                 Internal:
                                                                 22 = sli_nod_nfif_bstatus.
                                                                 21 = csr_region_mem_bstatus.
                                                                 20 = sncf0_ffifo_bstatus.
                                                                 19 = sndfh0_ffifo_bstatus.
                                                                 18 = sndfl0_ffifo_bstatus.
                                                                 17 = sncf1_ffifo_bstatus.
                                                                 16 = sndfh1_ffifo_bstatus.
                                                                 15 = sndfl1_ffifo_bstatus.
                                                                 14 = sncf2_ffifo_bstatus.
                                                                 13 = sndfh2_ffifo_bstatus.
                                                                 12 = sndfl2_ffifo_bstatus.
                                                                 11 = p2n_port0_tlp_cpl_fifo_bstatus.
                                                                 10 = p2n_port0_tlp_n_fifo_bstatus.
                                                                 9 = p2n_port0_tlp_p_fifo_bstatus.
                                                                 8 = p2n_port1_tlp_cpl_fifo_bstatus.
                                                                 7 = p2n_port1_tlp_n_fifo_bstatus.
                                                                 6 = p2n_port1_tlp_p_fifo_bstatus.
                                                                 5 = p2n_port2_tlp_cpl_fifo_bstatus.
                                                                 4 = p2n_port2_tlp_n_fifo_bstatus.
                                                                 3 = p2n_port2_tlp_p_fifo_bstatus.
                                                                 2 = cpl0_fifo_bstatus.
                                                                 1 = cpl1_fifo_bstatus.
                                                                 0 = cpl2_fifo_bstatus. */
        uint64_t reserved_23_63        : 41;
#endif /* Word 0 - End */
    } cn81xx;
    /* struct bdk_slix_bist_status_cn81xx cn88xx; */
    /* CN83XX: 31 status bits, enumerated by SLI_RAMS_E instead of the
       fixed list above. */
    struct bdk_slix_bist_status_cn83xx
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_31_63        : 33;
        uint64_t status                : 31; /**< [ 30: 0](RO/H) BIST status. One bit per memory, enumerated by SLI_RAMS_E. */
#else /* Word 0 - Little Endian */
        uint64_t status                : 31; /**< [ 30: 0](RO/H) BIST status. One bit per memory, enumerated by SLI_RAMS_E. */
        uint64_t reserved_31_63        : 33;
#endif /* Word 0 - End */
    } cn83xx;
};
typedef union bdk_slix_bist_status bdk_slix_bist_status_t;

/* CSR address builder: a = SLI instance.  CN81XX/CN83XX have one SLI (a==0);
   CN88XX has two (a<=1).  Anything else is a fatal CSR lookup error. */
static inline uint64_t BDK_SLIX_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SLIX_BIST_STATUS(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
        return 0x874001002180ll + 0x1000000000ll * ((a) & 0x0);
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
        return 0x874001002180ll + 0x1000000000ll * ((a) & 0x0);
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
        return 0x874001002180ll + 0x1000000000ll * ((a) & 0x1);
    __bdk_csr_fatal("SLIX_BIST_STATUS", 1, a, 0, 0, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SLIX_BIST_STATUS(a) bdk_slix_bist_status_t
#define bustype_BDK_SLIX_BIST_STATUS(a) BDK_CSR_TYPE_NCB
#define basename_BDK_SLIX_BIST_STATUS(a) "SLIX_BIST_STATUS"
#define device_bar_BDK_SLIX_BIST_STATUS(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_SLIX_BIST_STATUS(a) (a)
#define arguments_BDK_SLIX_BIST_STATUS(a) (a),-1,-1,-1
+
/**
 * Register (NCB) sli#_const
 *
 * SLI Constants Register
 * This register contains constants for software discovery.
 */
union bdk_slix_const
{
    uint64_t u;
    /* Bitfield view: [PEMS] in bits <31:0>, upper half reserved. */
    struct bdk_slix_const_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63        : 32;
        uint64_t pems                  : 32; /**< [ 31: 0](RO) Bit mask of which PEMs are connected to this SLI.
                                                                 If PEMs are fuse disabled they will still appear in this register.

                                                                 E.g. for a single SLI connected to PEM0, PEM1 and PEM2 is 0x7. If PEM1 is fuse
                                                                 disabled, still is 0x7, because software needs to continue to know that PEM2
                                                                 remains MAC number 2 as far as the SLI registers, e.g. SLI()_S2M_MAC()_CTL, are
                                                                 concerned. */
#else /* Word 0 - Little Endian */
        uint64_t pems                  : 32; /**< [ 31: 0](RO) Bit mask of which PEMs are connected to this SLI.
                                                                 If PEMs are fuse disabled they will still appear in this register.

                                                                 E.g. for a single SLI connected to PEM0, PEM1 and PEM2 is 0x7. If PEM1 is fuse
                                                                 disabled, still is 0x7, because software needs to continue to know that PEM2
                                                                 remains MAC number 2 as far as the SLI registers, e.g. SLI()_S2M_MAC()_CTL, are
                                                                 concerned. */
        uint64_t reserved_32_63        : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_slix_const_s cn; */
};
typedef union bdk_slix_const bdk_slix_const_t;

/* CSR address builder: a = SLI instance.
   NOTE(review): only CN81XX is accepted here; CN83XX/CN88XX fall through to
   the fatal handler.  Confirm against the generator/HRM whether this register
   exists on the other models. */
static inline uint64_t BDK_SLIX_CONST(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SLIX_CONST(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
        return 0x874001002020ll + 0x1000000000ll * ((a) & 0x0);
    __bdk_csr_fatal("SLIX_CONST", 1, a, 0, 0, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SLIX_CONST(a) bdk_slix_const_t
#define bustype_BDK_SLIX_CONST(a) BDK_CSR_TYPE_NCB
#define basename_BDK_SLIX_CONST(a) "SLIX_CONST"
#define device_bar_BDK_SLIX_CONST(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_SLIX_CONST(a) (a)
#define arguments_BDK_SLIX_CONST(a) (a),-1,-1,-1
+
/**
 * Register (NCB) sli#_const1
 *
 * SLI Constants Register 1
 * This register contains constants for software discovery.
 */
union bdk_slix_const1
{
    uint64_t u;
    /* All 64 bits currently reserved. */
    struct bdk_slix_const1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_0_63         : 64;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0_63         : 64;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_slix_const1_s cn; */
};
typedef union bdk_slix_const1 bdk_slix_const1_t;

/* CSR address builder: a = SLI instance.
   NOTE(review): as with SLIX_CONST, only CN81XX is accepted here — confirm
   against the generator/HRM for other models. */
static inline uint64_t BDK_SLIX_CONST1(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SLIX_CONST1(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
        return 0x874001002030ll + 0x1000000000ll * ((a) & 0x0);
    __bdk_csr_fatal("SLIX_CONST1", 1, a, 0, 0, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SLIX_CONST1(a) bdk_slix_const1_t
#define bustype_BDK_SLIX_CONST1(a) BDK_CSR_TYPE_NCB
#define basename_BDK_SLIX_CONST1(a) "SLIX_CONST1"
#define device_bar_BDK_SLIX_CONST1(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_SLIX_CONST1(a) (a)
#define arguments_BDK_SLIX_CONST1(a) (a),-1,-1,-1
+
/**
 * Register (PEXP_NCB) sli#_data_out_cnt#
 *
 * SLI Data Out Count Register
 * This register contains the EXEC data out FIFO count and the data unload counter.
 */
union bdk_slix_data_out_cntx
{
    uint64_t u;
    /* Bitfield view: [UCNT] bits <23:8>, [FCNT] bits <5:0>. */
    struct bdk_slix_data_out_cntx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63        : 40;
        uint64_t ucnt                  : 16; /**< [ 23: 8](RO/H) FIFO unload count. This counter is incremented by 1 every time a word is removed from
                                                                 data out FIFO, whose count is shown in [FCNT]. */
        uint64_t reserved_6_7          : 2;
        uint64_t fcnt                  : 6;  /**< [  5: 0](RO/H) FIFO data out count. Number of address data words presently buffered in the FIFO. */
#else /* Word 0 - Little Endian */
        uint64_t fcnt                  : 6;  /**< [  5: 0](RO/H) FIFO data out count. Number of address data words presently buffered in the FIFO. */
        uint64_t reserved_6_7          : 2;
        uint64_t ucnt                  : 16; /**< [ 23: 8](RO/H) FIFO unload count. This counter is incremented by 1 every time a word is removed from
                                                                 data out FIFO, whose count is shown in [FCNT]. */
        uint64_t reserved_24_63        : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_slix_data_out_cntx_s cn; */
};
typedef union bdk_slix_data_out_cntx bdk_slix_data_out_cntx_t;

/* CSR address builder: a = SLI instance, b = counter index (0-2).
   Implemented on CN81XX (one SLI) and CN88XX (two SLIs); not on CN83XX,
   which uses the per-EPF SLI_EPF()_DATA_OUT_CNT instead. */
static inline uint64_t BDK_SLIX_DATA_OUT_CNTX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SLIX_DATA_OUT_CNTX(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=2)))
        return 0x874000001080ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x3);
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=2)))
        return 0x874000001080ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
    __bdk_csr_fatal("SLIX_DATA_OUT_CNTX", 2, a, b, 0, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SLIX_DATA_OUT_CNTX(a,b) bdk_slix_data_out_cntx_t
#define bustype_BDK_SLIX_DATA_OUT_CNTX(a,b) BDK_CSR_TYPE_PEXP_NCB
#define basename_BDK_SLIX_DATA_OUT_CNTX(a,b) "SLIX_DATA_OUT_CNTX"
#define device_bar_BDK_SLIX_DATA_OUT_CNTX(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_SLIX_DATA_OUT_CNTX(a,b) (a)
#define arguments_BDK_SLIX_DATA_OUT_CNTX(a,b) (a),(b),-1,-1
+
/**
 * Register (NCB) sli#_eco
 *
 * INTERNAL: SLI ECO Register
 */
union bdk_slix_eco
{
    uint64_t u;
    /* Bitfield view: [ECO_RW] in bits <31:0>, upper half reserved. */
    struct bdk_slix_eco_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_32_63        : 32;
        uint64_t eco_rw                : 32; /**< [ 31: 0](R/W) Internal:
                                                                 Reserved for ECO usage. */
#else /* Word 0 - Little Endian */
        uint64_t eco_rw                : 32; /**< [ 31: 0](R/W) Internal:
                                                                 Reserved for ECO usage. */
        uint64_t reserved_32_63        : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_slix_eco_s cn; */
};
typedef union bdk_slix_eco bdk_slix_eco_t;
+
+static inline uint64_t BDK_SLIX_ECO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_ECO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+ return 0x874001002800ll + 0x1000000000ll * ((a) & 0x0);
+ __bdk_csr_fatal("SLIX_ECO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_ECO(a) bdk_slix_eco_t
+#define bustype_BDK_SLIX_ECO(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_ECO(a) "SLIX_ECO"
+#define device_bar_BDK_SLIX_ECO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_ECO(a) (a)
+#define arguments_BDK_SLIX_ECO(a) (a),-1,-1,-1
+
/**
 * Register (NCB) sli#_end_merge
 *
 * SLI End Merge Register
 * Writing this register will cause a merge to end.
 */
union bdk_slix_end_merge
{
    uint64_t u;
    /* No architected fields; the write itself is the trigger. */
    struct bdk_slix_end_merge_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_0_63         : 64;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0_63         : 64;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_slix_end_merge_s cn; */
};
typedef union bdk_slix_end_merge bdk_slix_end_merge_t;

/* CSR address builder: a = SLI instance.  CN81XX/CN83XX have one SLI (a==0);
   CN88XX has two (a<=1).  Anything else is a fatal CSR lookup error. */
static inline uint64_t BDK_SLIX_END_MERGE(unsigned long a) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SLIX_END_MERGE(unsigned long a)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
        return 0x874001002300ll + 0x1000000000ll * ((a) & 0x0);
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
        return 0x874001002300ll + 0x1000000000ll * ((a) & 0x0);
    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
        return 0x874001002300ll + 0x1000000000ll * ((a) & 0x1);
    __bdk_csr_fatal("SLIX_END_MERGE", 1, a, 0, 0, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SLIX_END_MERGE(a) bdk_slix_end_merge_t
#define bustype_BDK_SLIX_END_MERGE(a) BDK_CSR_TYPE_NCB
#define basename_BDK_SLIX_END_MERGE(a) "SLIX_END_MERGE"
#define device_bar_BDK_SLIX_END_MERGE(a) 0x0 /* PF_BAR0 */
#define busnum_BDK_SLIX_END_MERGE(a) (a)
#define arguments_BDK_SLIX_END_MERGE(a) (a),-1,-1,-1
+
/**
 * Register (PEXP_NCB) sli#_epf#_data_out_cnt
 *
 * SLI Data Out Count Register
 * This register contains the EXEC data out FIFO count and the data unload counter.
 */
union bdk_slix_epfx_data_out_cnt
{
    uint64_t u;
    /* Bitfield view: [UCNT] bits <23:8>, [FCNT] bits <5:0>. */
    struct bdk_slix_epfx_data_out_cnt_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_24_63        : 40;
        uint64_t ucnt                  : 16; /**< [ 23: 8](RO/H) FIFO unload count. This counter is incremented by 1 every time a word is removed from
                                                                 data out FIFO, whose count is shown in [FCNT]. */
        uint64_t reserved_6_7          : 2;
        uint64_t fcnt                  : 6;  /**< [  5: 0](RO/H) FIFO data out count. Number of address data words presently buffered in the FIFO. */
#else /* Word 0 - Little Endian */
        uint64_t fcnt                  : 6;  /**< [  5: 0](RO/H) FIFO data out count. Number of address data words presently buffered in the FIFO. */
        uint64_t reserved_6_7          : 2;
        uint64_t ucnt                  : 16; /**< [ 23: 8](RO/H) FIFO unload count. This counter is incremented by 1 every time a word is removed from
                                                                 data out FIFO, whose count is shown in [FCNT]. */
        uint64_t reserved_24_63        : 40;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_slix_epfx_data_out_cnt_s cn; */
};
typedef union bdk_slix_epfx_data_out_cnt bdk_slix_epfx_data_out_cnt_t;

/* CSR address builder: a = SLI instance, b = EPF (0-3).  Valid only on
   CN83XX with a==0; otherwise a fatal CSR lookup error. */
static inline uint64_t BDK_SLIX_EPFX_DATA_OUT_CNT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
static inline uint64_t BDK_SLIX_EPFX_DATA_OUT_CNT(unsigned long a, unsigned long b)
{
    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
        return 0x874080028120ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
    __bdk_csr_fatal("SLIX_EPFX_DATA_OUT_CNT", 2, a, b, 0, 0);
}

/* Lookup metadata consumed by the generic BDK_CSR_* accessor macros. */
#define typedef_BDK_SLIX_EPFX_DATA_OUT_CNT(a,b) bdk_slix_epfx_data_out_cnt_t
#define bustype_BDK_SLIX_EPFX_DATA_OUT_CNT(a,b) BDK_CSR_TYPE_PEXP_NCB
#define basename_BDK_SLIX_EPFX_DATA_OUT_CNT(a,b) "SLIX_EPFX_DATA_OUT_CNT"
#define device_bar_BDK_SLIX_EPFX_DATA_OUT_CNT(a,b) 0x0 /* PF_BAR0 */
#define busnum_BDK_SLIX_EPFX_DATA_OUT_CNT(a,b) (a)
#define arguments_BDK_SLIX_EPFX_DATA_OUT_CNT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_cnt#
+ *
+ * SLI DMA Count Registers
+ * These registers contain the DMA count values.
+ */
+union bdk_slix_epfx_dma_cntx
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_cntx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t cnt : 32; /**< [ 31: 0](R/W/H) The DMA counter. SLI/DPI hardware subtracts the written value from
+ the counter whenever software writes this CSR. SLI/DPI hardware increments this
+ counter after completing an OUTBOUND or EXTERNAL-ONLY DMA instruction
+ with DPI_DMA_INSTR_HDR_S[CA] set DPI_DMA_INSTR_HDR_S[CSEL] equal to this
+ CSR index. These increments may cause interrupts.
+ See SLI_EPF()_DMA_INT_LEVEL() and SLI_EPF()_DMA_RINT[DCNT,DTIME]. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 32; /**< [ 31: 0](R/W/H) The DMA counter. SLI/DPI hardware subtracts the written value from
+ the counter whenever software writes this CSR. SLI/DPI hardware increments this
+ counter after completing an OUTBOUND or EXTERNAL-ONLY DMA instruction
+ with DPI_DMA_INSTR_HDR_S[CA] set and DPI_DMA_INSTR_HDR_S[CSEL] equal to this
+ CSR index. These increments may cause interrupts.
+ See SLI_EPF()_DMA_INT_LEVEL() and SLI_EPF()_DMA_RINT[DCNT,DTIME]. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_cntx_s cn; */
+};
+typedef union bdk_slix_epfx_dma_cntx bdk_slix_epfx_dma_cntx_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_CNTX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_CNTX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3) && (c<=1)))
+ return 0x874080028680ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3) + 0x10ll * ((c) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_CNTX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_CNTX(a,b,c) bdk_slix_epfx_dma_cntx_t
+#define bustype_BDK_SLIX_EPFX_DMA_CNTX(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_CNTX(a,b,c) "SLIX_EPFX_DMA_CNTX"
+#define device_bar_BDK_SLIX_EPFX_DMA_CNTX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_CNTX(a,b,c) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_CNTX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_int_level#
+ *
+ * SLI DMA Interrupt Level Registers
+ * These registers contain the thresholds for DMA count and timer interrupts.
+ */
+union bdk_slix_epfx_dma_int_levelx
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_int_levelx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tim : 32; /**< [ 63: 32](R/W) Whenever the SLI_EPF()_DMA_TIM()[TIM] timer exceeds this value,
+ SLI_EPF()_DMA_RINT[DTIME\<x\>] is set. The SLI_EPF()_DMA_TIM()[TIM] timer
+ increments every SLI clock whenever SLI_EPF()_DMA_CNT()[CNT] != 0, and is cleared
+ when SLI_EPF()_DMA_CNT()[CNT] is written to a non zero value. */
+ uint64_t cnt : 32; /**< [ 31: 0](R/W) Whenever SLI_EPF()_DMA_CNT()[CNT] exceeds this value, SLI_EPF()_DMA_RINT[DCNT\<x\>]
+ is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t cnt : 32; /**< [ 31: 0](R/W) Whenever SLI_EPF()_DMA_CNT()[CNT] exceeds this value, SLI_EPF()_DMA_RINT[DCNT\<x\>]
+ is set. */
+ uint64_t tim : 32; /**< [ 63: 32](R/W) Whenever the SLI_EPF()_DMA_TIM()[TIM] timer exceeds this value,
+ SLI_EPF()_DMA_RINT[DTIME\<x\>] is set. The SLI_EPF()_DMA_TIM()[TIM] timer
+ increments every SLI clock whenever SLI_EPF()_DMA_CNT()[CNT] != 0, and is cleared
+ when SLI_EPF()_DMA_CNT()[CNT] is written to a non zero value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_int_levelx_s cn; */
+};
+typedef union bdk_slix_epfx_dma_int_levelx bdk_slix_epfx_dma_int_levelx_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_INT_LEVELX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_INT_LEVELX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3) && (c<=1)))
+ return 0x874080028600ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3) + 0x10ll * ((c) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_INT_LEVELX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_INT_LEVELX(a,b,c) bdk_slix_epfx_dma_int_levelx_t
+#define bustype_BDK_SLIX_EPFX_DMA_INT_LEVELX(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_INT_LEVELX(a,b,c) "SLIX_EPFX_DMA_INT_LEVELX"
+#define device_bar_BDK_SLIX_EPFX_DMA_INT_LEVELX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_INT_LEVELX(a,b,c) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_INT_LEVELX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_rint
+ *
+ * SLI/DPI DTIME/DCNT/DMAFI Interrupt Registers
+ * These registers contain interrupts related to the DPI DMA engines.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC reset.
+ * These registers are not affected by VF FLR.
+ */
+union bdk_slix_epfx_dma_rint
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_rint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t dtime : 2; /**< [ 5: 4](R/W1C/H) Whenever SLI_EPF()_DMA_CNT()[CNT] is not 0, the SLI_EPF()_DMA_TIM()[TIM]
+ timer increments every SLI clock. [DTIME]\<x\> is set whenever
+ SLI_EPF()_DMA_TIM()[TIM] \> SLI_EPF()_DMA_INT_LEVEL()[TIM].
+ [DTIME]\<x\> is cleared when writing a non zero value to SLI_EPF()_DMA_CNT()[CNT]
+ causing SLI_EPF()_DMA_TIM()[TIM] to clear to 0 and
+ SLI_EPF()_DMA_TIM()[TIM] to fall below SLI_EPF()_DMA_INT_LEVEL()[TIM]. */
+ uint64_t dcnt : 2; /**< [ 3: 2](R/W1C/H) [DCNT]\<x\> is set whenever SLI_EPF()_DMA_CNT()[CNT] \> SLI_EPF()_DMA_INT_LEVEL()[CNT].
+ [DCNT]\<x\> is normally cleared by decreasing SLI_EPF()_DMA_CNT()[CNT]. */
+ uint64_t dmafi : 2; /**< [ 1: 0](R/W1C/H) DMA set forced interrupts. Set by SLI/DPI after completing a DPI DMA
+ Instruction with DPI_DMA_INSTR_HDR_S[FI] set. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmafi : 2; /**< [ 1: 0](R/W1C/H) DMA set forced interrupts. Set by SLI/DPI after completing a DPI DMA
+ Instruction with DPI_DMA_INSTR_HDR_S[FI] set. */
+ uint64_t dcnt : 2; /**< [ 3: 2](R/W1C/H) [DCNT]\<x\> is set whenever SLI_EPF()_DMA_CNT()[CNT] \> SLI_EPF()_DMA_INT_LEVEL()[CNT].
+ [DCNT]\<x\> is normally cleared by decreasing SLI_EPF()_DMA_CNT()[CNT]. */
+ uint64_t dtime : 2; /**< [ 5: 4](R/W1C/H) Whenever SLI_EPF()_DMA_CNT()[CNT] is not 0, the SLI_EPF()_DMA_TIM()[TIM]
+ timer increments every SLI clock. [DTIME]\<x\> is set whenever
+ SLI_EPF()_DMA_TIM()[TIM] \> SLI_EPF()_DMA_INT_LEVEL()[TIM].
+ [DTIME]\<x\> is cleared when writing a non zero value to SLI_EPF()_DMA_CNT()[CNT]
+ causing SLI_EPF()_DMA_TIM()[TIM] to clear to 0 and
+ SLI_EPF()_DMA_TIM()[TIM] to fall below SLI_EPF()_DMA_INT_LEVEL()[TIM]. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_rint_s cn; */
+};
+typedef union bdk_slix_epfx_dma_rint bdk_slix_epfx_dma_rint_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_RINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_RINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x874080028500ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_RINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_RINT(a,b) bdk_slix_epfx_dma_rint_t
+#define bustype_BDK_SLIX_EPFX_DMA_RINT(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_RINT(a,b) "SLIX_EPFX_DMA_RINT"
+#define device_bar_BDK_SLIX_EPFX_DMA_RINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_RINT(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_RINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_rint_ena_w1c
+ *
+ * SLI/DPI DTIME/DCNT/DMAFI Interrupt Remote Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_slix_epfx_dma_rint_ena_w1c
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_rint_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t dtime : 2; /**< [ 5: 4](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_DMA_RINT[DTIME]. */
+ uint64_t dcnt : 2; /**< [ 3: 2](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_DMA_RINT[DCNT]. */
+ uint64_t dmafi : 2; /**< [ 1: 0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_DMA_RINT[DMAFI]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmafi : 2; /**< [ 1: 0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_DMA_RINT[DMAFI]. */
+ uint64_t dcnt : 2; /**< [ 3: 2](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_DMA_RINT[DCNT]. */
+ uint64_t dtime : 2; /**< [ 5: 4](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_DMA_RINT[DTIME]. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_rint_ena_w1c_s cn; */
+};
+typedef union bdk_slix_epfx_dma_rint_ena_w1c bdk_slix_epfx_dma_rint_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_RINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_RINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x874080028540ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_RINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_RINT_ENA_W1C(a,b) bdk_slix_epfx_dma_rint_ena_w1c_t
+#define bustype_BDK_SLIX_EPFX_DMA_RINT_ENA_W1C(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_RINT_ENA_W1C(a,b) "SLIX_EPFX_DMA_RINT_ENA_W1C"
+#define device_bar_BDK_SLIX_EPFX_DMA_RINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_RINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_RINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_rint_ena_w1s
+ *
+ * SLI/DPI DTIME/DCNT/DMAFI Interrupt Remote Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_slix_epfx_dma_rint_ena_w1s
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_rint_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t dtime : 2; /**< [ 5: 4](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_DMA_RINT[DTIME]. */
+ uint64_t dcnt : 2; /**< [ 3: 2](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_DMA_RINT[DCNT]. */
+ uint64_t dmafi : 2; /**< [ 1: 0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_DMA_RINT[DMAFI]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmafi : 2; /**< [ 1: 0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_DMA_RINT[DMAFI]. */
+ uint64_t dcnt : 2; /**< [ 3: 2](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_DMA_RINT[DCNT]. */
+ uint64_t dtime : 2; /**< [ 5: 4](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_DMA_RINT[DTIME]. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_rint_ena_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_dma_rint_ena_w1s bdk_slix_epfx_dma_rint_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_RINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_RINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x874080028550ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_RINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_RINT_ENA_W1S(a,b) bdk_slix_epfx_dma_rint_ena_w1s_t
+#define bustype_BDK_SLIX_EPFX_DMA_RINT_ENA_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_RINT_ENA_W1S(a,b) "SLIX_EPFX_DMA_RINT_ENA_W1S"
+#define device_bar_BDK_SLIX_EPFX_DMA_RINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_RINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_RINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_rint_w1s
+ *
+ * SLI/DPI DTIME/DCNT/DMAFI Interrupt Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_slix_epfx_dma_rint_w1s
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_rint_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_6_63 : 58;
+ uint64_t dtime : 2; /**< [ 5: 4](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_DMA_RINT[DTIME]. */
+ uint64_t dcnt : 2; /**< [ 3: 2](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_DMA_RINT[DCNT]. */
+ uint64_t dmafi : 2; /**< [ 1: 0](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_DMA_RINT[DMAFI]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dmafi : 2; /**< [ 1: 0](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_DMA_RINT[DMAFI]. */
+ uint64_t dcnt : 2; /**< [ 3: 2](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_DMA_RINT[DCNT]. */
+ uint64_t dtime : 2; /**< [ 5: 4](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_DMA_RINT[DTIME]. */
+ uint64_t reserved_6_63 : 58;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_rint_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_dma_rint_w1s bdk_slix_epfx_dma_rint_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_RINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_RINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x874080028510ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_RINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_RINT_W1S(a,b) bdk_slix_epfx_dma_rint_w1s_t
+#define bustype_BDK_SLIX_EPFX_DMA_RINT_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_RINT_W1S(a,b) "SLIX_EPFX_DMA_RINT_W1S"
+#define device_bar_BDK_SLIX_EPFX_DMA_RINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_RINT_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_RINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_tim#
+ *
+ * SLI DMA Timer Registers
+ * These registers contain the DMA timer values.
+ */
+union bdk_slix_epfx_dma_timx
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_timx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t tim : 32; /**< [ 31: 0](RO/H) The DMA timer value. The timer increments when
+ SLI_EPF()_DMA_CNT()[CNT]!=0 and clears when SLI_EPF()_DMA_RINT[DTIME\<x\>] is written with
+ one. */
+#else /* Word 0 - Little Endian */
+ uint64_t tim : 32; /**< [ 31: 0](RO/H) The DMA timer value. The timer increments when
+ SLI_EPF()_DMA_CNT()[CNT]!=0 and clears when SLI_EPF()_DMA_RINT[DTIME\<x\>] is written with
+ one. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_timx_s cn; */
+};
+typedef union bdk_slix_epfx_dma_timx bdk_slix_epfx_dma_timx_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_TIMX(unsigned long a, unsigned long b, unsigned long c) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_TIMX(unsigned long a, unsigned long b, unsigned long c)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3) && (c<=1)))
+ return 0x874080028700ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3) + 0x10ll * ((c) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_TIMX", 3, a, b, c, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_TIMX(a,b,c) bdk_slix_epfx_dma_timx_t
+#define bustype_BDK_SLIX_EPFX_DMA_TIMX(a,b,c) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_TIMX(a,b,c) "SLIX_EPFX_DMA_TIMX"
+#define device_bar_BDK_SLIX_EPFX_DMA_TIMX(a,b,c) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_TIMX(a,b,c) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_TIMX(a,b,c) (a),(b),(c),-1
+
+/**
+ * Register (NCB) sli#_epf#_dma_vf_lint
+ *
+ * SLI DMA Error Response VF Bit Array Registers
+ * When an error response is received for a VF DMA transaction read, the appropriate VF indexed
+ * bit is set. The appropriate PF should read the appropriate register.
+ * These registers are only valid for PEM0 PF0 and PEM2 PF0.
+ */
+union bdk_slix_epfx_dma_vf_lint
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_vf_lint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) When an error response is received for a VF DMA transaction read, the appropriate VF
+ indexed bit is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) When an error response is received for a VF DMA transaction read, the appropriate VF
+ indexed bit is set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_vf_lint_s cn; */
+};
+typedef union bdk_slix_epfx_dma_vf_lint bdk_slix_epfx_dma_vf_lint_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_LINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_LINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000002000ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_VF_LINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_VF_LINT(a,b) bdk_slix_epfx_dma_vf_lint_t
+#define bustype_BDK_SLIX_EPFX_DMA_VF_LINT(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_DMA_VF_LINT(a,b) "SLIX_EPFX_DMA_VF_LINT"
+#define device_bar_BDK_SLIX_EPFX_DMA_VF_LINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_VF_LINT(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_VF_LINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_dma_vf_lint_ena_w1c
+ *
+ * SLI DMA Error Response VF Bit Array Local Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_slix_epfx_dma_vf_lint_ena_w1c
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_vf_lint_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..1)_DMA_VF_LINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..1)_DMA_VF_LINT[VF_INT]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_vf_lint_ena_w1c_s cn; */
+};
+typedef union bdk_slix_epfx_dma_vf_lint_ena_w1c bdk_slix_epfx_dma_vf_lint_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000002200ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_VF_LINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1C(a,b) bdk_slix_epfx_dma_vf_lint_ena_w1c_t
+#define bustype_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1C(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1C(a,b) "SLIX_EPFX_DMA_VF_LINT_ENA_W1C"
+#define device_bar_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_dma_vf_lint_ena_w1s
+ *
+ * SLI DMA Error Response VF Bit Array Local Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_slix_epfx_dma_vf_lint_ena_w1s
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_vf_lint_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..1)_DMA_VF_LINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..1)_DMA_VF_LINT[VF_INT]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_vf_lint_ena_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_dma_vf_lint_ena_w1s bdk_slix_epfx_dma_vf_lint_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000002300ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_VF_LINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1S(a,b) bdk_slix_epfx_dma_vf_lint_ena_w1s_t
+#define bustype_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1S(a,b) "SLIX_EPFX_DMA_VF_LINT_ENA_W1S"
+#define device_bar_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_VF_LINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_dma_vf_lint_w1s
+ *
+ * SLI DMA Error Response VF Bit Array Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_slix_epfx_dma_vf_lint_w1s
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_vf_lint_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SLI(0)_EPF(0..1)_DMA_VF_LINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SLI(0)_EPF(0..1)_DMA_VF_LINT[VF_INT]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_vf_lint_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_dma_vf_lint_w1s bdk_slix_epfx_dma_vf_lint_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_LINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_LINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874000002100ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_VF_LINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_VF_LINT_W1S(a,b) bdk_slix_epfx_dma_vf_lint_w1s_t
+#define bustype_BDK_SLIX_EPFX_DMA_VF_LINT_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_DMA_VF_LINT_W1S(a,b) "SLIX_EPFX_DMA_VF_LINT_W1S"
+#define device_bar_BDK_SLIX_EPFX_DMA_VF_LINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_VF_LINT_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_VF_LINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_vf_rint
+ *
+ * SLI DMA Error Response VF Bit Array Registers
+ * When an error response is received for a VF DMA transaction read, the appropriate VF indexed
+ * bit is set. The appropriate PF should read the appropriate register.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC reset.
+ * These registers are not affected by VF FLR.
+ * These registers are only valid for PEM0 PF0 and PEM2 PF0.
+ */
+union bdk_slix_epfx_dma_vf_rint
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_vf_rint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) When an error response is received for a VF DMA transaction read, the appropriate VF
+ indexed bit is set. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) When an error response is received for a VF DMA transaction read, the appropriate VF
+ indexed bit is set. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_vf_rint_s cn; */
+};
+typedef union bdk_slix_epfx_dma_vf_rint bdk_slix_epfx_dma_vf_rint_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_RINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_RINT(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080028400ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_VF_RINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_VF_RINT(a,b) bdk_slix_epfx_dma_vf_rint_t
+#define bustype_BDK_SLIX_EPFX_DMA_VF_RINT(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_VF_RINT(a,b) "SLIX_EPFX_DMA_VF_RINT"
+#define device_bar_BDK_SLIX_EPFX_DMA_VF_RINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_VF_RINT(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_VF_RINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_vf_rint_ena_w1c
+ *
+ * SLI DMA Error Response VF Bit Array Remote Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_slix_epfx_dma_vf_rint_ena_w1c
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_vf_rint_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..1)_DMA_VF_RINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..1)_DMA_VF_RINT[VF_INT]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_vf_rint_ena_w1c_s cn; */
+};
+typedef union bdk_slix_epfx_dma_vf_rint_ena_w1c bdk_slix_epfx_dma_vf_rint_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080028420ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_VF_RINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1C(a,b) bdk_slix_epfx_dma_vf_rint_ena_w1c_t
+#define bustype_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1C(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1C(a,b) "SLIX_EPFX_DMA_VF_RINT_ENA_W1C"
+#define device_bar_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_vf_rint_ena_w1s
+ *
+ * SLI DMA Error Response VF Bit Array Remote Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_slix_epfx_dma_vf_rint_ena_w1s
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_vf_rint_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..1)_DMA_VF_RINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..1)_DMA_VF_RINT[VF_INT]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_vf_rint_ena_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_dma_vf_rint_ena_w1s bdk_slix_epfx_dma_vf_rint_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080028430ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_VF_RINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1S(a,b) bdk_slix_epfx_dma_vf_rint_ena_w1s_t
+#define bustype_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1S(a,b) "SLIX_EPFX_DMA_VF_RINT_ENA_W1S"
+#define device_bar_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_VF_RINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_dma_vf_rint_w1s
+ *
+ * SLI DMA Error Response VF Bit Array Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_slix_epfx_dma_vf_rint_w1s
+{
+ uint64_t u;
+ struct bdk_slix_epfx_dma_vf_rint_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SLI(0)_EPF(0..1)_DMA_VF_RINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SLI(0)_EPF(0..1)_DMA_VF_RINT[VF_INT]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_dma_vf_rint_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_dma_vf_rint_w1s bdk_slix_epfx_dma_vf_rint_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_RINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_DMA_VF_RINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x874080028410ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_DMA_VF_RINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_DMA_VF_RINT_W1S(a,b) bdk_slix_epfx_dma_vf_rint_w1s_t
+#define bustype_BDK_SLIX_EPFX_DMA_VF_RINT_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_DMA_VF_RINT_W1S(a,b) "SLIX_EPFX_DMA_VF_RINT_W1S"
+#define device_bar_BDK_SLIX_EPFX_DMA_VF_RINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_DMA_VF_RINT_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_DMA_VF_RINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_misc_lint
+ *
+ * SLI MAC Interrupt Summary Register
+ * This register contains the different interrupt-summary bits for one MAC in the SLI.
+ * This set of interrupt registers are aliased to SLI(0)_MAC(0..3)_INT_SUM.
+ * SLI(0)_EPF(0..3)_MISC_LINT_W1S aliases to SLI(0)_MAC(0..3)_INT_SUM_W1S.
+ * SLI(0)_EPF(0..3)_MISC_LINT_ENA_W1C aliases to SLI(0)_MAC(0..3)_INT_ENA_W1C.
+ * SLI(0)_EPF(0..3)_MISC_LINT_ENA_W1S aliases to SLI(0)_MAC(0..3)_INT_ENA_W1S.
+ */
+union bdk_slix_epfx_misc_lint
+{
+ uint64_t u;
+ struct bdk_slix_epfx_misc_lint_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_7_63 : 57;
+ uint64_t flr : 1; /**< [ 6: 6](R/W1C/H) A FLR occurred for the PF on the corresponding MAC. */
+ uint64_t dmapf_err : 1; /**< [ 5: 5](R/W1C/H) An error response was received for a PF DMA transaction read. */
+ uint64_t pppf_err : 1; /**< [ 4: 4](R/W1C/H) Set when an error response is received for a PF PP transaction read. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Received unsupported N-TLP for window register from the corresponding MAC. This
+ occurs when the window registers are disabled and a window register access occurs. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Received unsupported N-TLP for Bar 0 from the corresponding MAC. This occurs
+ when the BAR 0 address space is disabled. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Received unsupported P-TLP for window register from the corresponding MAC. This
+ occurs when the window registers are disabled and a window register access
+ occurs. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Received unsupported P-TLP for Bar 0 from the corresponding MAC. This occurs
+ when the BAR 0 address space is disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Received unsupported P-TLP for Bar 0 from the corresponding MAC. This occurs
+ when the BAR 0 address space is disabled. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Received unsupported P-TLP for window register from the corresponding MAC. This
+ occurs when the window registers are disabled and a window register access
+ occurs. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Received unsupported N-TLP for Bar 0 from the corresponding MAC. This occurs
+ when the BAR 0 address space is disabled. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Received unsupported N-TLP for window register from the corresponding MAC. This
+ occurs when the window registers are disabled and a window register access occurs. */
+ uint64_t pppf_err : 1; /**< [ 4: 4](R/W1C/H) Set when an error response is received for a PF PP transaction read. */
+ uint64_t dmapf_err : 1; /**< [ 5: 5](R/W1C/H) An error response was received for a PF DMA transaction read. */
+ uint64_t flr : 1; /**< [ 6: 6](R/W1C/H) A FLR occurred for the PF on the corresponding MAC. */
+ uint64_t reserved_7_63 : 57;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_misc_lint_s cn; */
+};
+typedef union bdk_slix_epfx_misc_lint bdk_slix_epfx_misc_lint_t;
+
+static inline uint64_t BDK_SLIX_EPFX_MISC_LINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_MISC_LINT; any (a,b) pair that is
+   invalid for the running model is routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_MISC_LINT(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874000002400ll + 0x10ll * ((b) & 0x3) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_MISC_LINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_MISC_LINT(a,b) bdk_slix_epfx_misc_lint_t
+#define bustype_BDK_SLIX_EPFX_MISC_LINT(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_MISC_LINT(a,b) "SLIX_EPFX_MISC_LINT"
+#define device_bar_BDK_SLIX_EPFX_MISC_LINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_MISC_LINT(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_MISC_LINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_misc_lint_ena_w1c
+ *
+ * SLI MAC Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_slix_epfx_misc_lint_ena_w1c
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_misc_lint_ena_w1c_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t flr                   : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[FLR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[DMAPF_ERR]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[PPPF_ERR]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[UN_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[UN_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[UP_WI]. */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[UP_B0]. */
+#else /* Word 0 - Little Endian */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[UP_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[UP_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[UN_B0]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[UN_WI]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[PPPF_ERR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[DMAPF_ERR]. */
+        uint64_t flr                   : 1;  /**< [  6:  6](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_LINT[FLR]. */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_misc_lint_ena_w1c_s cn; */
+};
+typedef union bdk_slix_epfx_misc_lint_ena_w1c bdk_slix_epfx_misc_lint_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_EPFX_MISC_LINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_MISC_LINT_ENA_W1C; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_MISC_LINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874000002600ll + 0x10ll * ((b) & 0x3) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_MISC_LINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_MISC_LINT_ENA_W1C(a,b) bdk_slix_epfx_misc_lint_ena_w1c_t
+#define bustype_BDK_SLIX_EPFX_MISC_LINT_ENA_W1C(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_MISC_LINT_ENA_W1C(a,b) "SLIX_EPFX_MISC_LINT_ENA_W1C"
+#define device_bar_BDK_SLIX_EPFX_MISC_LINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_MISC_LINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_MISC_LINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_misc_lint_ena_w1s
+ *
+ * SLI MAC Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_slix_epfx_misc_lint_ena_w1s
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_misc_lint_ena_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t flr                   : 1;  /**< [  6:  6](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[FLR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[DMAPF_ERR]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[PPPF_ERR]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[UN_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[UN_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[UP_WI]. */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[UP_B0]. */
+#else /* Word 0 - Little Endian */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[UP_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[UP_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[UN_B0]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[UN_WI]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[PPPF_ERR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[DMAPF_ERR]. */
+        uint64_t flr                   : 1;  /**< [  6:  6](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_LINT[FLR]. */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_misc_lint_ena_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_misc_lint_ena_w1s bdk_slix_epfx_misc_lint_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_MISC_LINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_MISC_LINT_ENA_W1S; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_MISC_LINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874000002700ll + 0x10ll * ((b) & 0x3) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_MISC_LINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_MISC_LINT_ENA_W1S(a,b) bdk_slix_epfx_misc_lint_ena_w1s_t
+#define bustype_BDK_SLIX_EPFX_MISC_LINT_ENA_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_MISC_LINT_ENA_W1S(a,b) "SLIX_EPFX_MISC_LINT_ENA_W1S"
+#define device_bar_BDK_SLIX_EPFX_MISC_LINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_MISC_LINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_MISC_LINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_misc_lint_w1s
+ *
+ * SLI MAC Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_slix_epfx_misc_lint_w1s
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_misc_lint_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_7_63         : 57;
+        uint64_t flr                   : 1;  /**< [  6:  6](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[FLR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[DMAPF_ERR]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[PPPF_ERR]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[UN_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[UN_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[UP_WI]. */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[UP_B0]. */
+#else /* Word 0 - Little Endian */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[UP_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[UP_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[UN_B0]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[UN_WI]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[PPPF_ERR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[DMAPF_ERR]. */
+        uint64_t flr                   : 1;  /**< [  6:  6](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_LINT[FLR]. */
+        uint64_t reserved_7_63         : 57;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_misc_lint_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_misc_lint_w1s bdk_slix_epfx_misc_lint_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_MISC_LINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_MISC_LINT_W1S; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_MISC_LINT_W1S(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874000002500ll + 0x10ll * ((b) & 0x3) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_MISC_LINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_MISC_LINT_W1S(a,b) bdk_slix_epfx_misc_lint_w1s_t
+#define bustype_BDK_SLIX_EPFX_MISC_LINT_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_MISC_LINT_W1S(a,b) "SLIX_EPFX_MISC_LINT_W1S"
+#define device_bar_BDK_SLIX_EPFX_MISC_LINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_MISC_LINT_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_MISC_LINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_misc_rint
+ *
+ * SLI MAC Interrupt Summary Register
+ * This register contains the different interrupt-summary bits for one MAC in the SLI.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC reset.
+ * These registers are not affected by VF FLR.
+ */
+union bdk_slix_epfx_misc_rint
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_misc_rint_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_6_63         : 58;
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1C/H) Set when an error response is received for a PF DMA transaction read. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1C/H) Set when an error response is received for a PF PP transaction read. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1C/H) Received unsupported N-TLP for window register from the corresponding MAC. This
+                                                                 occurs when the window registers are disabled and a window register access
+                                                                 occurs. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1C/H) Received unsupported N-TLP for Bar 0 from the corresponding MAC. This occurs
+                                                                 when the BAR 0 address space is disabled. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1C/H) Received unsupported P-TLP for window register from the corresponding MAC. This
+                                                                 occurs when the window registers are disabled and a window register access
+                                                                 occurs. */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1C/H) Received unsupported P-TLP for Bar 0 from the corresponding MAC. This occurs
+                                                                 when the BAR 0 address space is disabled. */
+#else /* Word 0 - Little Endian */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1C/H) Received unsupported P-TLP for Bar 0 from the corresponding MAC. This occurs
+                                                                 when the BAR 0 address space is disabled. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1C/H) Received unsupported P-TLP for window register from the corresponding MAC. This
+                                                                 occurs when the window registers are disabled and a window register access
+                                                                 occurs. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1C/H) Received unsupported N-TLP for Bar 0 from the corresponding MAC. This occurs
+                                                                 when the BAR 0 address space is disabled. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1C/H) Received unsupported N-TLP for window register from the corresponding MAC. This
+                                                                 occurs when the window registers are disabled and a window register access
+                                                                 occurs. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1C/H) Set when an error response is received for a PF PP transaction read. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1C/H) Set when an error response is received for a PF DMA transaction read. */
+        uint64_t reserved_6_63         : 58;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_misc_rint_s cn; */
+};
+typedef union bdk_slix_epfx_misc_rint bdk_slix_epfx_misc_rint_t;
+
+static inline uint64_t BDK_SLIX_EPFX_MISC_RINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_MISC_RINT; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_MISC_RINT(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874080028240ll + 0x800000ll * ((b) & 0x3) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_MISC_RINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_MISC_RINT(a,b) bdk_slix_epfx_misc_rint_t
+#define bustype_BDK_SLIX_EPFX_MISC_RINT(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_MISC_RINT(a,b) "SLIX_EPFX_MISC_RINT"
+#define device_bar_BDK_SLIX_EPFX_MISC_RINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_MISC_RINT(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_MISC_RINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_misc_rint_ena_w1c
+ *
+ * SLI MAC Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_slix_epfx_misc_rint_ena_w1c
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_misc_rint_ena_w1c_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_6_63         : 58;
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[DMAPF_ERR]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[PPPF_ERR]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[UN_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[UN_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[UP_WI]. */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[UP_B0]. */
+#else /* Word 0 - Little Endian */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[UP_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[UP_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[UN_B0]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[UN_WI]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[PPPF_ERR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..3)_MISC_RINT[DMAPF_ERR]. */
+        uint64_t reserved_6_63         : 58;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_misc_rint_ena_w1c_s cn; */
+};
+typedef union bdk_slix_epfx_misc_rint_ena_w1c bdk_slix_epfx_misc_rint_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_EPFX_MISC_RINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_MISC_RINT_ENA_W1C; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_MISC_RINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874080028260ll + 0x800000ll * ((b) & 0x3) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_MISC_RINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_MISC_RINT_ENA_W1C(a,b) bdk_slix_epfx_misc_rint_ena_w1c_t
+#define bustype_BDK_SLIX_EPFX_MISC_RINT_ENA_W1C(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_MISC_RINT_ENA_W1C(a,b) "SLIX_EPFX_MISC_RINT_ENA_W1C"
+#define device_bar_BDK_SLIX_EPFX_MISC_RINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_MISC_RINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_MISC_RINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_misc_rint_ena_w1s
+ *
+ * SLI MAC Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_slix_epfx_misc_rint_ena_w1s
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_misc_rint_ena_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_6_63         : 58;
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[DMAPF_ERR]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[PPPF_ERR]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[UN_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[UN_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[UP_WI]. */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[UP_B0]. */
+#else /* Word 0 - Little Endian */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[UP_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[UP_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[UN_B0]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[UN_WI]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[PPPF_ERR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..3)_MISC_RINT[DMAPF_ERR]. */
+        uint64_t reserved_6_63         : 58;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_misc_rint_ena_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_misc_rint_ena_w1s bdk_slix_epfx_misc_rint_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_MISC_RINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_MISC_RINT_ENA_W1S; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_MISC_RINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874080028270ll + 0x800000ll * ((b) & 0x3) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_MISC_RINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_MISC_RINT_ENA_W1S(a,b) bdk_slix_epfx_misc_rint_ena_w1s_t
+#define bustype_BDK_SLIX_EPFX_MISC_RINT_ENA_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_MISC_RINT_ENA_W1S(a,b) "SLIX_EPFX_MISC_RINT_ENA_W1S"
+#define device_bar_BDK_SLIX_EPFX_MISC_RINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_MISC_RINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_MISC_RINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_misc_rint_w1s
+ *
+ * SLI MAC Interrupt Set Register
+ * This register sets interrupt bits.
+ */
+union bdk_slix_epfx_misc_rint_w1s
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_misc_rint_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_6_63         : 58;
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[DMAPF_ERR]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[PPPF_ERR]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[UN_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[UN_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[UP_WI]. */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[UP_B0]. */
+#else /* Word 0 - Little Endian */
+        uint64_t up_b0                 : 1;  /**< [  0:  0](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[UP_B0]. */
+        uint64_t up_wi                 : 1;  /**< [  1:  1](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[UP_WI]. */
+        uint64_t un_b0                 : 1;  /**< [  2:  2](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[UN_B0]. */
+        uint64_t un_wi                 : 1;  /**< [  3:  3](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[UN_WI]. */
+        uint64_t pppf_err              : 1;  /**< [  4:  4](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[PPPF_ERR]. */
+        uint64_t dmapf_err             : 1;  /**< [  5:  5](R/W1S/H) Reads or sets SLI(0)_EPF(0..3)_MISC_RINT[DMAPF_ERR]. */
+        uint64_t reserved_6_63         : 58;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_misc_rint_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_misc_rint_w1s bdk_slix_epfx_misc_rint_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_MISC_RINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_MISC_RINT_W1S; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_MISC_RINT_W1S(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874080028250ll + 0x800000ll * ((b) & 0x3) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_MISC_RINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_MISC_RINT_W1S(a,b) bdk_slix_epfx_misc_rint_w1s_t
+#define bustype_BDK_SLIX_EPFX_MISC_RINT_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_MISC_RINT_W1S(a,b) "SLIX_EPFX_MISC_RINT_W1S"
+#define device_bar_BDK_SLIX_EPFX_MISC_RINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_MISC_RINT_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_MISC_RINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_pp_vf_lint
+ *
+ * SLI PP Error Response VF Bit Array Registers
+ * When an error response is received for a VF PP transaction read, the appropriate VF indexed
+ * bit is set.  The appropriate PF should read the appropriate register.
+ * These registers are only valid for PEM0 PF0 and PEM2 PF0.
+ */
+union bdk_slix_epfx_pp_vf_lint
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_pp_vf_lint_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1C/H) When an error response is received for a VF PP transaction read, the appropriate VF
+                                                                 indexed bit is set. */
+#else /* Word 0 - Little Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1C/H) When an error response is received for a VF PP transaction read, the appropriate VF
+                                                                 indexed bit is set. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_pp_vf_lint_s cn; */
+};
+typedef union bdk_slix_epfx_pp_vf_lint bdk_slix_epfx_pp_vf_lint_t;
+
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_LINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_PP_VF_LINT; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_LINT(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874000002800ll + 0x10ll * ((b) & 0x1) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_PP_VF_LINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_PP_VF_LINT(a,b) bdk_slix_epfx_pp_vf_lint_t
+#define bustype_BDK_SLIX_EPFX_PP_VF_LINT(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_PP_VF_LINT(a,b) "SLIX_EPFX_PP_VF_LINT"
+#define device_bar_BDK_SLIX_EPFX_PP_VF_LINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_PP_VF_LINT(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_PP_VF_LINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_pp_vf_lint_ena_w1c
+ *
+ * SLI PP Error Response VF Bit Array Local Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_slix_epfx_pp_vf_lint_ena_w1c
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_pp_vf_lint_ena_w1c_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..1)_PP_VF_LINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..1)_PP_VF_LINT[VF_INT]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_pp_vf_lint_ena_w1c_s cn; */
+};
+typedef union bdk_slix_epfx_pp_vf_lint_ena_w1c bdk_slix_epfx_pp_vf_lint_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_PP_VF_LINT_ENA_W1C; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874000002a00ll + 0x10ll * ((b) & 0x1) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_PP_VF_LINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1C(a,b) bdk_slix_epfx_pp_vf_lint_ena_w1c_t
+#define bustype_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1C(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1C(a,b) "SLIX_EPFX_PP_VF_LINT_ENA_W1C"
+#define device_bar_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_pp_vf_lint_ena_w1s
+ *
+ * SLI PP Error Response VF Bit Array Local Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_slix_epfx_pp_vf_lint_ena_w1s
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_pp_vf_lint_ena_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..1)_PP_VF_LINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..1)_PP_VF_LINT[VF_INT]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_pp_vf_lint_ena_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_pp_vf_lint_ena_w1s bdk_slix_epfx_pp_vf_lint_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_PP_VF_LINT_ENA_W1S; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874000002b00ll + 0x10ll * ((b) & 0x1) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_PP_VF_LINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1S(a,b) bdk_slix_epfx_pp_vf_lint_ena_w1s_t
+#define bustype_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1S(a,b) "SLIX_EPFX_PP_VF_LINT_ENA_W1S"
+#define device_bar_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_PP_VF_LINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_epf#_pp_vf_lint_w1s
+ *
+ * SLI PP Error Response VF Bit Array Set Registers
+ * This register sets interrupt bits.
+ */
+union bdk_slix_epfx_pp_vf_lint_w1s
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_pp_vf_lint_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1S/H) Reads or sets SLI(0)_EPF(0..1)_PP_VF_LINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1S/H) Reads or sets SLI(0)_EPF(0..1)_PP_VF_LINT[VF_INT]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_pp_vf_lint_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_pp_vf_lint_w1s bdk_slix_epfx_pp_vf_lint_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_LINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_PP_VF_LINT_W1S; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_LINT_W1S(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x874000002900ll + 0x10ll * ((b) & 0x1) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_PP_VF_LINT_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_PP_VF_LINT_W1S(a,b) bdk_slix_epfx_pp_vf_lint_w1s_t
+#define bustype_BDK_SLIX_EPFX_PP_VF_LINT_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_EPFX_PP_VF_LINT_W1S(a,b) "SLIX_EPFX_PP_VF_LINT_W1S"
+#define device_bar_BDK_SLIX_EPFX_PP_VF_LINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_PP_VF_LINT_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_PP_VF_LINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_pp_vf_rint
+ *
+ * SLI PP Error Response VF Bit Array Registers
+ * When an error response is received for a VF PP transaction read, the appropriate VF indexed
+ * bit is set.  The appropriate PF should read the appropriate register.
+ * The given register associated with an EPF will be reset due to a PF FLR or MAC reset.
+ * These registers are not affected by VF FLR.
+ * These registers are only valid for PEM0 PF0 and PEM2 PF0.
+ */
+union bdk_slix_epfx_pp_vf_rint
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_pp_vf_rint_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1C/H) When an error response is received for a VF PP transaction read, the appropriate VF
+                                                                 indexed bit is set. */
+#else /* Word 0 - Little Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1C/H) When an error response is received for a VF PP transaction read, the appropriate VF
+                                                                 indexed bit is set. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_pp_vf_rint_s cn; */
+};
+typedef union bdk_slix_epfx_pp_vf_rint bdk_slix_epfx_pp_vf_rint_t;
+
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_RINT(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_PP_VF_RINT; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_RINT(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x8740800282c0ll + 0x800000ll * ((b) & 0x1) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_PP_VF_RINT", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_PP_VF_RINT(a,b) bdk_slix_epfx_pp_vf_rint_t
+#define bustype_BDK_SLIX_EPFX_PP_VF_RINT(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_PP_VF_RINT(a,b) "SLIX_EPFX_PP_VF_RINT"
+#define device_bar_BDK_SLIX_EPFX_PP_VF_RINT(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_PP_VF_RINT(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_PP_VF_RINT(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_pp_vf_rint_ena_w1c
+ *
+ * SLI PP Error Response VF Bit Array Remote Enable Clear Registers
+ * This register clears interrupt enable bits.
+ */
+union bdk_slix_epfx_pp_vf_rint_ena_w1c
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_pp_vf_rint_ena_w1c_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..1)_PP_VF_RINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1C/H) Reads or clears enable for SLI(0)_EPF(0..1)_PP_VF_RINT[VF_INT]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_pp_vf_rint_ena_w1c_s cn; */
+};
+typedef union bdk_slix_epfx_pp_vf_rint_ena_w1c bdk_slix_epfx_pp_vf_rint_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+/* Return the CSR address of SLI(a)_EPF(b)_PP_VF_RINT_ENA_W1C; invalid (a,b)
+   pairs for the running model are routed to __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1C(unsigned long a, unsigned long b)
+{
+    const int in_range = (a==0) && (b<=1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && in_range)
+        return 0x8740800282e0ll + 0x800000ll * ((b) & 0x1) + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_EPFX_PP_VF_RINT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1C(a,b) bdk_slix_epfx_pp_vf_rint_ena_w1c_t
+#define bustype_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1C(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1C(a,b) "SLIX_EPFX_PP_VF_RINT_ENA_W1C"
+#define device_bar_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_pp_vf_rint_ena_w1s
+ *
+ * SLI PP Error Response VF Bit Array Remote Enable Set Registers
+ * This register sets interrupt enable bits.
+ */
+union bdk_slix_epfx_pp_vf_rint_ena_w1s
+{
+    uint64_t u; /* Whole register as one 64-bit value. */
+    struct bdk_slix_epfx_pp_vf_rint_ena_w1s_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..1)_PP_VF_RINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+        uint64_t vf_int                : 64; /**< [ 63:  0](R/W1S/H) Reads or sets enable for SLI(0)_EPF(0..1)_PP_VF_RINT[VF_INT]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_epfx_pp_vf_rint_ena_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_pp_vf_rint_ena_w1s bdk_slix_epfx_pp_vf_rint_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x8740800282f0ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_PP_VF_RINT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1S(a,b) bdk_slix_epfx_pp_vf_rint_ena_w1s_t
+#define bustype_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1S(a,b) "SLIX_EPFX_PP_VF_RINT_ENA_W1S"
+#define device_bar_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_PP_VF_RINT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_pp_vf_rint_w1s
+ *
+ * SLI PP Error Response VF Bit Array Set Registers
+ * This register sets interrupt bits.
+ */
+/* 64-bit write-one-to-set (W1S) interrupt bit array, one bit per VF.
+   Endianness branches are intentionally identical for a full 64-bit field. */
+union bdk_slix_epfx_pp_vf_rint_w1s
+{
+ uint64_t u;
+ struct bdk_slix_epfx_pp_vf_rint_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SLI(0)_EPF(0..1)_PP_VF_RINT[VF_INT]. */
+#else /* Word 0 - Little Endian */
+ uint64_t vf_int : 64; /**< [ 63: 0](R/W1S/H) Reads or sets SLI(0)_EPF(0..1)_PP_VF_RINT[VF_INT]. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_pp_vf_rint_w1s_s cn; */
+};
+typedef union bdk_slix_epfx_pp_vf_rint_w1s bdk_slix_epfx_pp_vf_rint_w1s_t;
+
+/* CSR address; valid only on CN83XX with a==0 and b<=1, otherwise traps
+   via __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_RINT_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_PP_VF_RINT_W1S(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=1)))
+ return 0x8740800282d0ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x1);
+ __bdk_csr_fatal("SLIX_EPFX_PP_VF_RINT_W1S", 2, a, b, 0, 0);
+}
+
+/* Attribute macros for the generic BDK CSR accessors. */
+#define typedef_BDK_SLIX_EPFX_PP_VF_RINT_W1S(a,b) bdk_slix_epfx_pp_vf_rint_w1s_t
+#define bustype_BDK_SLIX_EPFX_PP_VF_RINT_W1S(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_PP_VF_RINT_W1S(a,b) "SLIX_EPFX_PP_VF_RINT_W1S"
+#define device_bar_BDK_SLIX_EPFX_PP_VF_RINT_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_PP_VF_RINT_W1S(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_PP_VF_RINT_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_epf#_scratch
+ *
+ * SLI Scratch Register
+ * These registers are general purpose 64-bit scratch registers for software use.
+ */
+/* General-purpose 64-bit software scratch register; hardware assigns no
+   meaning to the value. */
+union bdk_slix_epfx_scratch
+{
+ uint64_t u;
+ struct bdk_slix_epfx_scratch_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) The value in this register is totally software defined. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) The value in this register is totally software defined. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_epfx_scratch_s cn; */
+};
+typedef union bdk_slix_epfx_scratch bdk_slix_epfx_scratch_t;
+
+/* CSR address; valid only on CN83XX with a==0 and b<=3, otherwise traps
+   via __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_EPFX_SCRATCH(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_EPFX_SCRATCH(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x874080028100ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_EPFX_SCRATCH", 2, a, b, 0, 0);
+}
+
+/* Attribute macros for the generic BDK CSR accessors. */
+#define typedef_BDK_SLIX_EPFX_SCRATCH(a,b) bdk_slix_epfx_scratch_t
+#define bustype_BDK_SLIX_EPFX_SCRATCH(a,b) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_EPFX_SCRATCH(a,b) "SLIX_EPFX_SCRATCH"
+#define device_bar_BDK_SLIX_EPFX_SCRATCH(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_EPFX_SCRATCH(a,b) (a)
+#define arguments_BDK_SLIX_EPFX_SCRATCH(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_lmac_const0#
+ *
+ * SLI Logical MAC Capabilities Register 0
+ * These registers along with SLI()_LMAC_CONST1() create a table of logical MAC
+ * capabilities. Each entry is 128 bits, with half the information in SLI()_LMAC_CONST0()
+ * and half in SLI()_LMAC_CONST1().
+ * The list ends with an entry where [V] is clear.
+ *
+ * Internal:
+ * For CN81XX the table is as follows:
+ * * SLI(0)_LMAC_CONST0/1(0) [ V=1 EP=0 IFTY=0 IFN=0 MAC=0 PF=0 EPF=0 VFS=0 RINGS=0 ].
+ * * SLI(0)_LMAC_CONST0/1(1) [ V=1 EP=0 IFTY=0 IFN=1 MAC=1 PF=0 EPF=1 VFS=0 RINGS=0 ].
+ * * SLI(0)_LMAC_CONST0/1(2) [ V=1 EP=0 IFTY=0 IFN=2 MAC=2 PF=0 EPF=2 VFS=0 RINGS=0 ].
+ * * SLI(0)_LMAC_CONST0/1(3) [ V=0 ].
+ *
+ * For CN83XX the table is as follows:
+ * * SLI(0)_LMAC_CONST0/1(0) [ V=1 EP=1 IFTY=0 IFN=0 MAC=0 PF=0 EPF=0 VFS=64 RINGS=64 ].
+ * * SLI(0)_LMAC_CONST0/1(1) [ V=1 EP=1 IFTY=0 IFN=1 MAC=1 PF=0 EPF=2 VFS=0 RINGS=0 ].
+ * * SLI(0)_LMAC_CONST0/1(2) [ V=1 EP=1 IFTY=0 IFN=2 MAC=2 PF=0 EPF=1 VFS=64 RINGS=64 ].
+ * * SLI(0)_LMAC_CONST0/1(3) [ V=1 EP=1 IFTY=0 IFN=3 MAC=3 PF=0 EPF=3 VFS=0 RINGS=0 ].
+ * * SLI(0)_LMAC_CONST0/1(4) [ V=0 ].
+ */
+/* One entry of the logical-MAC capability table (low 64 bits; the high half
+   is SLI()_LMAC_CONST1()). The list terminates at the first entry with V=0.
+   Bitfield order is hardware-defined and mirrored per endianness -- do not
+   reorder fields. */
+union bdk_slix_lmac_const0x
+{
+ uint64_t u;
+ struct bdk_slix_lmac_const0x_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_40_63 : 24;
+ uint64_t epf : 8; /**< [ 39: 32](RO) EPF number. Indicates the index number to EPF registers, e.g. the second index
+ of SDP()_EPF()_MBOX_RINT. */
+ uint64_t pf : 8; /**< [ 31: 24](RO) Physical function number. Indicates the PF number as viewed from the external
+ PCI bus. */
+ uint64_t mac : 8; /**< [ 23: 16](RO) Relative MAC number. Indicates the index number to MAC registers, e.g. the
+ second index of SLI()_S2M_MAC()_CTL. */
+ uint64_t ifn : 8; /**< [ 15: 8](RO) Interface number. Indicates the physical PEM number. */
+ uint64_t ifty : 4; /**< [ 7: 4](RO) Interface type.
+ 0x0 = PEM. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t ep : 1; /**< [ 1: 1](RO) Endpoint.
+ 0 = This MAC/PF does not support endpoint mode; many registers are not
+ implemented including input and output ring-based registers. MSI-X message
+ generation is also not implemented.
+ 1 = This MAC/PF combination supports endpoint mode. */
+ uint64_t v : 1; /**< [ 0: 0](RO) Valid entry.
+ 0 = Fields in this register will all be zero. This ends the list of capabilities.
+ 1 = Fields are valid. There will be at least one subsequent list entry. */
+#else /* Word 0 - Little Endian */
+ uint64_t v : 1; /**< [ 0: 0](RO) Valid entry.
+ 0 = Fields in this register will all be zero. This ends the list of capabilities.
+ 1 = Fields are valid. There will be at least one subsequent list entry. */
+ uint64_t ep : 1; /**< [ 1: 1](RO) Endpoint.
+ 0 = This MAC/PF does not support endpoint mode; many registers are not
+ implemented including input and output ring-based registers. MSI-X message
+ generation is also not implemented.
+ 1 = This MAC/PF combination supports endpoint mode. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t ifty : 4; /**< [ 7: 4](RO) Interface type.
+ 0x0 = PEM. */
+ uint64_t ifn : 8; /**< [ 15: 8](RO) Interface number. Indicates the physical PEM number. */
+ uint64_t mac : 8; /**< [ 23: 16](RO) Relative MAC number. Indicates the index number to MAC registers, e.g. the
+ second index of SLI()_S2M_MAC()_CTL. */
+ uint64_t pf : 8; /**< [ 31: 24](RO) Physical function number. Indicates the PF number as viewed from the external
+ PCI bus. */
+ uint64_t epf : 8; /**< [ 39: 32](RO) EPF number. Indicates the index number to EPF registers, e.g. the second index
+ of SDP()_EPF()_MBOX_RINT. */
+ uint64_t reserved_40_63 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_lmac_const0x_s cn; */
+};
+typedef union bdk_slix_lmac_const0x bdk_slix_lmac_const0x_t;
+
+/* CSR address for CN81XX/CN83XX (a==0, b<=4); other inputs trap via
+   __bdk_csr_fatal(). NOTE(review): both model branches compute the same
+   address -- redundancy comes from the register-description generator. */
+static inline uint64_t BDK_SLIX_LMAC_CONST0X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_LMAC_CONST0X(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=4)))
+ return 0x874001004000ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=4)))
+ return 0x874001004000ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x7);
+ __bdk_csr_fatal("SLIX_LMAC_CONST0X", 2, a, b, 0, 0);
+}
+
+/* Attribute macros for the generic BDK CSR accessors. */
+#define typedef_BDK_SLIX_LMAC_CONST0X(a,b) bdk_slix_lmac_const0x_t
+#define bustype_BDK_SLIX_LMAC_CONST0X(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_LMAC_CONST0X(a,b) "SLIX_LMAC_CONST0X"
+#define device_bar_BDK_SLIX_LMAC_CONST0X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_LMAC_CONST0X(a,b) (a)
+#define arguments_BDK_SLIX_LMAC_CONST0X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_lmac_const1#
+ *
+ * SLI Logical MAC Capabilities Register 1
+ * See SLI()_LMAC_CONST0().
+ */
+/* High half of a logical-MAC capability table entry (see SLI()_LMAC_CONST0()).
+   Bitfield order is hardware-defined and mirrored per endianness. */
+union bdk_slix_lmac_const1x
+{
+ uint64_t u;
+ struct bdk_slix_lmac_const1x_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t rings : 16; /**< [ 31: 16](RO) Number of rings.
+ If [EP] is set then this field indicates the number of rings assigned
+ to the physical function (which can also be shared with its associated
+ virtual functions by means of the SLI()_EPF()_RINFO register.)
+ If [EP] is clear then this field will be zero. */
+ uint64_t vfs : 16; /**< [ 15: 0](RO) Number of virtual functions.
+ The maximum number that may be programmed into SLI()_S2M_REG()_ACC2[VF]. */
+#else /* Word 0 - Little Endian */
+ uint64_t vfs : 16; /**< [ 15: 0](RO) Number of virtual functions.
+ The maximum number that may be programmed into SLI()_S2M_REG()_ACC2[VF]. */
+ uint64_t rings : 16; /**< [ 31: 16](RO) Number of rings.
+ If [EP] is set then this field indicates the number of rings assigned
+ to the physical function (which can also be shared with its associated
+ virtual functions by means of the SLI()_EPF()_RINFO register.)
+ If [EP] is clear then this field will be zero. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_lmac_const1x_s cn; */
+};
+typedef union bdk_slix_lmac_const1x bdk_slix_lmac_const1x_t;
+
+/* CSR address for CN81XX/CN83XX (a==0, b<=4); other inputs trap via
+   __bdk_csr_fatal(). NOTE(review): both model branches compute the same
+   address -- redundancy comes from the register-description generator. */
+static inline uint64_t BDK_SLIX_LMAC_CONST1X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_LMAC_CONST1X(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=4)))
+ return 0x874001004008ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x7);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=4)))
+ return 0x874001004008ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x7);
+ __bdk_csr_fatal("SLIX_LMAC_CONST1X", 2, a, b, 0, 0);
+}
+
+/* Attribute macros for the generic BDK CSR accessors. */
+#define typedef_BDK_SLIX_LMAC_CONST1X(a,b) bdk_slix_lmac_const1x_t
+#define bustype_BDK_SLIX_LMAC_CONST1X(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_LMAC_CONST1X(a,b) "SLIX_LMAC_CONST1X"
+#define device_bar_BDK_SLIX_LMAC_CONST1X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_LMAC_CONST1X(a,b) (a)
+#define arguments_BDK_SLIX_LMAC_CONST1X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_m2s_mac#_ctl
+ *
+ * SLI Control Port Registers
+ * This register controls the functionality of the SLI's M2S in regards to a MAC.
+ * Internal:
+ * In 78xx was SLI()_CTL_PORT() and SLI()_S2M_PORT()_CTL.
+ */
+/* Per-MAC M2S control register. Model differences visible below: the default
+   struct (s) covers CN81XX/CN83XX; CN88XX pass 1 lacks [WAIT_PXFR] and [BIGE]
+   (bits 19-20 reserved); CN88XX pass 2 lacks only [BIGE] (bit 20 reserved).
+   Bitfield order is hardware-defined and mirrored per endianness -- do not
+   reorder fields. */
+union bdk_slix_m2s_macx_ctl
+{
+ uint64_t u;
+ struct bdk_slix_m2s_macx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_21_63 : 43;
+ uint64_t bige : 1; /**< [ 20: 20](R/W) Atomics sent on NCBI will be marked as big endian. If the link partner is
+ big-endian and the processors are big-endian, this allows exchange of big-endian
+ atomics without byte swapping. */
+ uint64_t wait_pxfr : 1; /**< [ 19: 19](R/W) When set, will cause a posted TLP write from a MAC to follow the following sequence:
+ (having this bit set will cut the posted-TLP performance about 50%).
+ _ 1. Request the NCBI.
+ _ 2. Wait for the grant and send the transfer on the NCBI.
+ _ 3. Start the next posted TLP.
+
+ For diagnostic use only. */
+ uint64_t wvirt : 1; /**< [ 18: 18](R/W) Write virtual:
+ 1 = Addresses in SLI()_WIN_WR_ADDR and SLI()_WIN_RD_ADDR are virtual addresses.
+ 0 = Addresses are physical addresses. */
+ uint64_t dis_port : 1; /**< [ 17: 17](R/W1C/H) When set, the output to the MAC is disabled. This occurs when the MAC reset line
+ transitions from de-asserted to asserted. Writing a 1 to this location clears this
+ condition when the MAC is no longer in reset and the output to the MAC is at the beginning
+ of a transfer. */
+ uint64_t waitl_com : 1; /**< [ 16: 16](R/W) When set, causes the SLI to wait for a store done from the L2C for any
+ previously sent stores, before sending additional completions to the L2C from
+ the MAC.
+ 0 = More aggressive, higher-performance behavior. Suitable when device drivers are
+ appropriately written for performance and do not assume that IO reads force all DMAs
+ to be complete.
+ 1 = Compliant, lower-performing behavior. Enforce PCI-compliant completion
+ versus posted/non-posted ordering. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t ctlp_ro : 1; /**< [ 6: 6](R/W) Relaxed ordering enable for completion TLPS. This permits the SLI to use the RO bit sent
+ from
+ the MACs. See WAITL_COM. */
+ uint64_t ptlp_ro : 1; /**< [ 5: 5](R/W) Relaxed ordering enable for posted TLPS. This permits the SLI to use the RO bit sent from
+ the MACs. See WAIT_COM. */
+ uint64_t wind_d : 1; /**< [ 4: 4](R/W) Window disable. When set, disables access to the window registers from the MAC. */
+ uint64_t bar0_d : 1; /**< [ 3: 3](R/W) BAR0 disable. When set, disables access from the MAC to SLI BAR0 registers. */
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When SLI issues a load command to the L2C that is to be cached, this field selects the
+ type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. When set, causes the SLI to wait for a store done from the L2C before
+ sending additional stores to the L2C from the MAC. The SLI requests a commit on the last
+ store if more than one STORE operation is required on the NCB. Most applications will not
+ notice a difference, so this bit should not be set. Setting the bit is more conservative
+ on ordering, lower performance. */
+#else /* Word 0 - Little Endian */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. When set, causes the SLI to wait for a store done from the L2C before
+ sending additional stores to the L2C from the MAC. The SLI requests a commit on the last
+ store if more than one STORE operation is required on the NCB. Most applications will not
+ notice a difference, so this bit should not be set. Setting the bit is more conservative
+ on ordering, lower performance. */
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When SLI issues a load command to the L2C that is to be cached, this field selects the
+ type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t bar0_d : 1; /**< [ 3: 3](R/W) BAR0 disable. When set, disables access from the MAC to SLI BAR0 registers. */
+ uint64_t wind_d : 1; /**< [ 4: 4](R/W) Window disable. When set, disables access to the window registers from the MAC. */
+ uint64_t ptlp_ro : 1; /**< [ 5: 5](R/W) Relaxed ordering enable for posted TLPS. This permits the SLI to use the RO bit sent from
+ the MACs. See WAIT_COM. */
+ uint64_t ctlp_ro : 1; /**< [ 6: 6](R/W) Relaxed ordering enable for completion TLPS. This permits the SLI to use the RO bit sent
+ from
+ the MACs. See WAITL_COM. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t waitl_com : 1; /**< [ 16: 16](R/W) When set, causes the SLI to wait for a store done from the L2C for any
+ previously sent stores, before sending additional completions to the L2C from
+ the MAC.
+ 0 = More aggressive, higher-performance behavior. Suitable when device drivers are
+ appropriately written for performance and do not assume that IO reads force all DMAs
+ to be complete.
+ 1 = Compliant, lower-performing behavior. Enforce PCI-compliant completion
+ versus posted/non-posted ordering. */
+ uint64_t dis_port : 1; /**< [ 17: 17](R/W1C/H) When set, the output to the MAC is disabled. This occurs when the MAC reset line
+ transitions from de-asserted to asserted. Writing a 1 to this location clears this
+ condition when the MAC is no longer in reset and the output to the MAC is at the beginning
+ of a transfer. */
+ uint64_t wvirt : 1; /**< [ 18: 18](R/W) Write virtual:
+ 1 = Addresses in SLI()_WIN_WR_ADDR and SLI()_WIN_RD_ADDR are virtual addresses.
+ 0 = Addresses are physical addresses. */
+ uint64_t wait_pxfr : 1; /**< [ 19: 19](R/W) When set, will cause a posted TLP write from a MAC to follow the following sequence:
+ (having this bit set will cut the posted-TLP performance about 50%).
+ _ 1. Request the NCBI.
+ _ 2. Wait for the grant and send the transfer on the NCBI.
+ _ 3. Start the next posted TLP.
+
+ For diagnostic use only. */
+ uint64_t bige : 1; /**< [ 20: 20](R/W) Atomics sent on NCBI will be marked as big endian. If the link partner is
+ big-endian and the processors are big-endian, this allows exchange of big-endian
+ atomics without byte swapping. */
+ uint64_t reserved_21_63 : 43;
+#endif /* Word 0 - End */
+ } s;
+ /* CN88XX pass 1 layout: bits 19-20 ([WAIT_PXFR], [BIGE]) not implemented. */
+ struct bdk_slix_m2s_macx_ctl_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_19_63 : 45;
+ uint64_t wvirt : 1; /**< [ 18: 18](R/W) Write virtual:
+ 1 = Addresses in SLI()_WIN_WR_ADDR and SLI()_WIN_RD_ADDR are virtual addresses.
+ 0 = Addresses are physical addresses. */
+ uint64_t dis_port : 1; /**< [ 17: 17](R/W1C/H) When set, the output to the MAC is disabled. This occurs when the MAC reset line
+ transitions from de-asserted to asserted. Writing a 1 to this location clears this
+ condition when the MAC is no longer in reset and the output to the MAC is at the beginning
+ of a transfer. */
+ uint64_t waitl_com : 1; /**< [ 16: 16](R/W) When set, causes the SLI to wait for a store done from the L2C for any
+ previously sent stores, before sending additional completions to the L2C from
+ the MAC.
+ 0 = More aggressive, higher-performance behavior. Suitable when device drivers are
+ appropriately written for performance and do not assume that IO reads force all DMAs
+ to be complete.
+ 1 = Compliant, lower-performing behavior. Enforce PCI-compliant completion
+ versus posted/non-posted ordering. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t ctlp_ro : 1; /**< [ 6: 6](R/W) Relaxed ordering enable for completion TLPS. This permits the SLI to use the RO bit sent
+ from
+ the MACs. See WAITL_COM. */
+ uint64_t ptlp_ro : 1; /**< [ 5: 5](R/W) Relaxed ordering enable for posted TLPS. This permits the SLI to use the RO bit sent from
+ the MACs. See WAIT_COM. */
+ uint64_t wind_d : 1; /**< [ 4: 4](R/W) Window disable. When set, disables access to the window registers from the MAC. */
+ uint64_t bar0_d : 1; /**< [ 3: 3](R/W) BAR0 disable. When set, disables access from the MAC to SLI BAR0 registers. */
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When SLI issues a load command to the L2C that is to be cached, this field selects the
+ type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. When set, causes the SLI to wait for a store done from the L2C before
+ sending additional stores to the L2C from the MAC. The SLI requests a commit on the last
+ store if more than one STORE operation is required on the NCB. Most applications will not
+ notice a difference, so this bit should not be set. Setting the bit is more conservative
+ on ordering, lower performance. */
+#else /* Word 0 - Little Endian */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. When set, causes the SLI to wait for a store done from the L2C before
+ sending additional stores to the L2C from the MAC. The SLI requests a commit on the last
+ store if more than one STORE operation is required on the NCB. Most applications will not
+ notice a difference, so this bit should not be set. Setting the bit is more conservative
+ on ordering, lower performance. */
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When SLI issues a load command to the L2C that is to be cached, this field selects the
+ type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t bar0_d : 1; /**< [ 3: 3](R/W) BAR0 disable. When set, disables access from the MAC to SLI BAR0 registers. */
+ uint64_t wind_d : 1; /**< [ 4: 4](R/W) Window disable. When set, disables access to the window registers from the MAC. */
+ uint64_t ptlp_ro : 1; /**< [ 5: 5](R/W) Relaxed ordering enable for posted TLPS. This permits the SLI to use the RO bit sent from
+ the MACs. See WAIT_COM. */
+ uint64_t ctlp_ro : 1; /**< [ 6: 6](R/W) Relaxed ordering enable for completion TLPS. This permits the SLI to use the RO bit sent
+ from
+ the MACs. See WAITL_COM. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t waitl_com : 1; /**< [ 16: 16](R/W) When set, causes the SLI to wait for a store done from the L2C for any
+ previously sent stores, before sending additional completions to the L2C from
+ the MAC.
+ 0 = More aggressive, higher-performance behavior. Suitable when device drivers are
+ appropriately written for performance and do not assume that IO reads force all DMAs
+ to be complete.
+ 1 = Compliant, lower-performing behavior. Enforce PCI-compliant completion
+ versus posted/non-posted ordering. */
+ uint64_t dis_port : 1; /**< [ 17: 17](R/W1C/H) When set, the output to the MAC is disabled. This occurs when the MAC reset line
+ transitions from de-asserted to asserted. Writing a 1 to this location clears this
+ condition when the MAC is no longer in reset and the output to the MAC is at the beginning
+ of a transfer. */
+ uint64_t wvirt : 1; /**< [ 18: 18](R/W) Write virtual:
+ 1 = Addresses in SLI()_WIN_WR_ADDR and SLI()_WIN_RD_ADDR are virtual addresses.
+ 0 = Addresses are physical addresses. */
+ uint64_t reserved_19_63 : 45;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_slix_m2s_macx_ctl_s cn81xx; */
+ /* struct bdk_slix_m2s_macx_ctl_s cn83xx; */
+ /* CN88XX pass 2 layout: bit 20 ([BIGE]) not implemented. */
+ struct bdk_slix_m2s_macx_ctl_cn88xxp2
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_20_63 : 44;
+ uint64_t wait_pxfr : 1; /**< [ 19: 19](R/W) When set, will cause a posted TLP write from a MAC to follow the following sequence:
+ (having this bit set will cut the posted-TLP performance about 50%).
+ _ 1. Request the NCBI.
+ _ 2. Wait for the grant and send the transfer on the NCBI.
+ _ 3. Start the next posted TLP.
+
+ For diagnostic use only. */
+ uint64_t wvirt : 1; /**< [ 18: 18](R/W) Write virtual:
+ 1 = Addresses in SLI()_WIN_WR_ADDR and SLI()_WIN_RD_ADDR are virtual addresses.
+ 0 = Addresses are physical addresses. */
+ uint64_t dis_port : 1; /**< [ 17: 17](R/W1C/H) When set, the output to the MAC is disabled. This occurs when the MAC reset line
+ transitions from de-asserted to asserted. Writing a 1 to this location clears this
+ condition when the MAC is no longer in reset and the output to the MAC is at the beginning
+ of a transfer. */
+ uint64_t waitl_com : 1; /**< [ 16: 16](R/W) When set, causes the SLI to wait for a store done from the L2C for any
+ previously sent stores, before sending additional completions to the L2C from
+ the MAC.
+ 0 = More aggressive, higher-performance behavior. Suitable when device drivers are
+ appropriately written for performance and do not assume that IO reads force all DMAs
+ to be complete.
+ 1 = Compliant, lower-performing behavior. Enforce PCI-compliant completion
+ versus posted/non-posted ordering. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t ctlp_ro : 1; /**< [ 6: 6](R/W) Relaxed ordering enable for completion TLPS. This permits the SLI to use the RO bit sent
+ from
+ the MACs. See WAITL_COM. */
+ uint64_t ptlp_ro : 1; /**< [ 5: 5](R/W) Relaxed ordering enable for posted TLPS. This permits the SLI to use the RO bit sent from
+ the MACs. See WAIT_COM. */
+ uint64_t wind_d : 1; /**< [ 4: 4](R/W) Window disable. When set, disables access to the window registers from the MAC. */
+ uint64_t bar0_d : 1; /**< [ 3: 3](R/W) BAR0 disable. When set, disables access from the MAC to SLI BAR0 registers. */
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When SLI issues a load command to the L2C that is to be cached, this field selects the
+ type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. When set, causes the SLI to wait for a store done from the L2C before
+ sending additional stores to the L2C from the MAC. The SLI requests a commit on the last
+ store if more than one STORE operation is required on the NCB. Most applications will not
+ notice a difference, so this bit should not be set. Setting the bit is more conservative
+ on ordering, lower performance. */
+#else /* Word 0 - Little Endian */
+ uint64_t wait_com : 1; /**< [ 0: 0](R/W) Wait for commit. When set, causes the SLI to wait for a store done from the L2C before
+ sending additional stores to the L2C from the MAC. The SLI requests a commit on the last
+ store if more than one STORE operation is required on the NCB. Most applications will not
+ notice a difference, so this bit should not be set. Setting the bit is more conservative
+ on ordering, lower performance. */
+ uint64_t ld_cmd : 2; /**< [ 2: 1](R/W) When SLI issues a load command to the L2C that is to be cached, this field selects the
+ type of load command to use. Un-cached loads will use LDT:
+ 0x0 = LDD.
+ 0x1 = LDI.
+ 0x2 = LDE.
+ 0x3 = LDY. */
+ uint64_t bar0_d : 1; /**< [ 3: 3](R/W) BAR0 disable. When set, disables access from the MAC to SLI BAR0 registers. */
+ uint64_t wind_d : 1; /**< [ 4: 4](R/W) Window disable. When set, disables access to the window registers from the MAC. */
+ uint64_t ptlp_ro : 1; /**< [ 5: 5](R/W) Relaxed ordering enable for posted TLPS. This permits the SLI to use the RO bit sent from
+ the MACs. See WAIT_COM. */
+ uint64_t ctlp_ro : 1; /**< [ 6: 6](R/W) Relaxed ordering enable for completion TLPS. This permits the SLI to use the RO bit sent
+ from
+ the MACs. See WAITL_COM. */
+ uint64_t reserved_7_15 : 9;
+ uint64_t waitl_com : 1; /**< [ 16: 16](R/W) When set, causes the SLI to wait for a store done from the L2C for any
+ previously sent stores, before sending additional completions to the L2C from
+ the MAC.
+ 0 = More aggressive, higher-performance behavior. Suitable when device drivers are
+ appropriately written for performance and do not assume that IO reads force all DMAs
+ to be complete.
+ 1 = Compliant, lower-performing behavior. Enforce PCI-compliant completion
+ versus posted/non-posted ordering. */
+ uint64_t dis_port : 1; /**< [ 17: 17](R/W1C/H) When set, the output to the MAC is disabled. This occurs when the MAC reset line
+ transitions from de-asserted to asserted. Writing a 1 to this location clears this
+ condition when the MAC is no longer in reset and the output to the MAC is at the beginning
+ of a transfer. */
+ uint64_t wvirt : 1; /**< [ 18: 18](R/W) Write virtual:
+ 1 = Addresses in SLI()_WIN_WR_ADDR and SLI()_WIN_RD_ADDR are virtual addresses.
+ 0 = Addresses are physical addresses. */
+ uint64_t wait_pxfr : 1; /**< [ 19: 19](R/W) When set, will cause a posted TLP write from a MAC to follow the following sequence:
+ (having this bit set will cut the posted-TLP performance about 50%).
+ _ 1. Request the NCBI.
+ _ 2. Wait for the grant and send the transfer on the NCBI.
+ _ 3. Start the next posted TLP.
+
+ For diagnostic use only. */
+ uint64_t reserved_20_63 : 44;
+#endif /* Word 0 - End */
+ } cn88xxp2;
+};
+typedef union bdk_slix_m2s_macx_ctl bdk_slix_m2s_macx_ctl_t;
+
+/* CSR address; the valid (a, b) range depends on the model, all models share
+   the same base and stride. Out-of-range inputs trap via __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_M2S_MACX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_M2S_MACX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=2)))
+ return 0x874001002100ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x874001002100ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=2)))
+ return 0x874001002100ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_M2S_MACX_CTL", 2, a, b, 0, 0);
+}
+
+/* Attribute macros for the generic BDK CSR accessors. */
+#define typedef_BDK_SLIX_M2S_MACX_CTL(a,b) bdk_slix_m2s_macx_ctl_t
+#define bustype_BDK_SLIX_M2S_MACX_CTL(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_M2S_MACX_CTL(a,b) "SLIX_M2S_MACX_CTL"
+#define device_bar_BDK_SLIX_M2S_MACX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_M2S_MACX_CTL(a,b) (a)
+#define arguments_BDK_SLIX_M2S_MACX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_mac#_int_ena_w1c
+ *
+ * SLI MAC Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_slix_macx_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_slix_macx_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for SLI(0..1)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for SLI(0..1)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for SLI(0..1)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for SLI(0..1)_MAC(0..2)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for SLI(0..1)_MAC(0..2)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for SLI(0..1)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for SLI(0..1)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for SLI(0..1)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_slix_macx_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..2)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..2)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_slix_macx_int_ena_w1c_s cn88xx; */
+ struct bdk_slix_macx_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..3)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..3)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..3)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..3)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..3)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..3)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..3)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Reads or clears enable for SLI(0)_MAC(0..3)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_macx_int_ena_w1c bdk_slix_macx_int_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_MACX_INT_ENA_W1C(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MACX_INT_ENA_W1C(unsigned long a, unsigned long b)
+{
+    /* Same base address on every model; the SLI node index (a) only strides on CN88XX. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1) && (b <= 2))
+        return 0x874000001200ll + 0x1000000000ll * (a & 0x1) + 0x10ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a == 0) && (b <= 2))
+        return 0x874000001200ll + 0x10ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a == 0) && (b <= 3))
+        return 0x874000001200ll + 0x10ll * (b & 0x3);
+    __bdk_csr_fatal("SLIX_MACX_INT_ENA_W1C", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MACX_INT_ENA_W1C(a,b) bdk_slix_macx_int_ena_w1c_t
+#define bustype_BDK_SLIX_MACX_INT_ENA_W1C(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MACX_INT_ENA_W1C(a,b) "SLIX_MACX_INT_ENA_W1C"
+#define device_bar_BDK_SLIX_MACX_INT_ENA_W1C(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MACX_INT_ENA_W1C(a,b) (a)
+#define arguments_BDK_SLIX_MACX_INT_ENA_W1C(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_mac#_int_ena_w1s
+ *
+ * SLI MAC Interrupt Enable Set Register
+ * This register sets interrupt enable bits (write-one-to-set; reads return the current enables).
+ */
+union bdk_slix_macx_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_slix_macx_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SLI(0..1)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SLI(0..1)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SLI(0..1)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SLI(0..1)_MAC(0..2)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SLI(0..1)_MAC(0..2)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SLI(0..1)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SLI(0..1)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SLI(0..1)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_slix_macx_int_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..2)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..2)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_slix_macx_int_ena_w1s_s cn88xx; */
+ struct bdk_slix_macx_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..3)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..3)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..3)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..3)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..3)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..3)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..3)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets enable for SLI(0)_MAC(0..3)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_macx_int_ena_w1s bdk_slix_macx_int_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_MACX_INT_ENA_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MACX_INT_ENA_W1S(unsigned long a, unsigned long b)
+{
+    /* Same base address on every model; the SLI node index (a) only strides on CN88XX. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1) && (b <= 2))
+        return 0x874000001280ll + 0x1000000000ll * (a & 0x1) + 0x10ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a == 0) && (b <= 2))
+        return 0x874000001280ll + 0x10ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a == 0) && (b <= 3))
+        return 0x874000001280ll + 0x10ll * (b & 0x3);
+    __bdk_csr_fatal("SLIX_MACX_INT_ENA_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MACX_INT_ENA_W1S(a,b) bdk_slix_macx_int_ena_w1s_t
+#define bustype_BDK_SLIX_MACX_INT_ENA_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MACX_INT_ENA_W1S(a,b) "SLIX_MACX_INT_ENA_W1S"
+#define device_bar_BDK_SLIX_MACX_INT_ENA_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MACX_INT_ENA_W1S(a,b) (a)
+#define arguments_BDK_SLIX_MACX_INT_ENA_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_mac#_int_sum
+ *
+ * SLI MAC Interrupt Summary Register
+ * This register contains the different interrupt-summary bits for one MAC in the SLI. All bits are R/W1C (write one to clear).
+ */
+union bdk_slix_macx_int_sum
+{
+ uint64_t u;
+ struct bdk_slix_macx_int_sum_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Received unsupported N-TLP for window register from MAC(0..2). This occurs when the window
+ registers are disabled and a window register access occurs. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Received unsupported N-TLP for Bar 0 from MAC(0..2). This occurs when the BAR 0 address
+ space is disabled. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Received unsupported P-TLP for window register from MAC(0..2). This occurs when the window
+ registers are disabled and a window register access occurs. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Received unsupported P-TLP for Bar 0 from MAC(0..2). This occurs when the BAR 0 address
+ space is disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Received unsupported P-TLP for Bar 0 from MAC(0..2). This occurs when the BAR 0 address
+ space is disabled. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Received unsupported P-TLP for window register from MAC(0..2). This occurs when the window
+ registers are disabled and a window register access occurs. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Received unsupported N-TLP for Bar 0 from MAC(0..2). This occurs when the BAR 0 address
+ space is disabled. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Received unsupported N-TLP for window register from MAC(0..2). This occurs when the window
+ registers are disabled and a window register access occurs. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_macx_int_sum_s cn81xx; */
+ /* struct bdk_slix_macx_int_sum_s cn88xx; */
+ struct bdk_slix_macx_int_sum_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Received unsupported N-TLP for window register from the corresponding MAC. This
+ occurs when the window registers are disabled and a window register access occurs. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Received unsupported N-TLP for Bar 0 from the corresponding MAC. This occurs
+ when the BAR 0 address space is disabled. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Received unsupported P-TLP for window register from the corresponding MAC. This
+ occurs when the window registers are disabled and a window register access occurs. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Received unsupported P-TLP for Bar 0 from the corresponding MAC. This occurs
+ when the BAR 0 address space is disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1C/H) Received unsupported P-TLP for Bar 0 from the corresponding MAC. This occurs
+ when the BAR 0 address space is disabled. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1C/H) Received unsupported P-TLP for window register from the corresponding MAC. This
+ occurs when the window registers are disabled and a window register access occurs. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1C/H) Received unsupported N-TLP for Bar 0 from the corresponding MAC. This occurs
+ when the BAR 0 address space is disabled. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1C/H) Received unsupported N-TLP for window register from the corresponding MAC. This
+ occurs when the window registers are disabled and a window register access occurs. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_macx_int_sum bdk_slix_macx_int_sum_t;
+
+static inline uint64_t BDK_SLIX_MACX_INT_SUM(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MACX_INT_SUM(unsigned long a, unsigned long b)
+{
+    /* Same base address on every model; the SLI node index (a) only strides on CN88XX. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1) && (b <= 2))
+        return 0x874000001100ll + 0x1000000000ll * (a & 0x1) + 0x10ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a == 0) && (b <= 2))
+        return 0x874000001100ll + 0x10ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a == 0) && (b <= 3))
+        return 0x874000001100ll + 0x10ll * (b & 0x3);
+    __bdk_csr_fatal("SLIX_MACX_INT_SUM", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MACX_INT_SUM(a,b) bdk_slix_macx_int_sum_t
+#define bustype_BDK_SLIX_MACX_INT_SUM(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MACX_INT_SUM(a,b) "SLIX_MACX_INT_SUM"
+#define device_bar_BDK_SLIX_MACX_INT_SUM(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MACX_INT_SUM(a,b) (a)
+#define arguments_BDK_SLIX_MACX_INT_SUM(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_mac#_int_sum_w1s
+ *
+ * SLI MAC Interrupt Set Register
+ * This register sets interrupt bits (write-one-to-set view of SLI()_MAC()_INT_SUM).
+ */
+union bdk_slix_macx_int_sum_w1s
+{
+ uint64_t u;
+ struct bdk_slix_macx_int_sum_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SLI(0..1)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SLI(0..1)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SLI(0..1)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SLI(0..1)_MAC(0..2)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SLI(0..1)_MAC(0..2)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SLI(0..1)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SLI(0..1)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SLI(0..1)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_slix_macx_int_sum_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SLI(0)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SLI(0)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SLI(0)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SLI(0)_MAC(0..2)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SLI(0)_MAC(0..2)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SLI(0)_MAC(0..2)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SLI(0)_MAC(0..2)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SLI(0)_MAC(0..2)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_slix_macx_int_sum_w1s_s cn88xx; */
+ struct bdk_slix_macx_int_sum_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_4_63 : 60;
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SLI(0)_MAC(0..3)_INT_SUM[UN_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SLI(0)_MAC(0..3)_INT_SUM[UN_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SLI(0)_MAC(0..3)_INT_SUM[UP_WI]. */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SLI(0)_MAC(0..3)_INT_SUM[UP_B0]. */
+#else /* Word 0 - Little Endian */
+ uint64_t up_b0 : 1; /**< [ 0: 0](R/W1S/H) Reads or sets SLI(0)_MAC(0..3)_INT_SUM[UP_B0]. */
+ uint64_t up_wi : 1; /**< [ 1: 1](R/W1S/H) Reads or sets SLI(0)_MAC(0..3)_INT_SUM[UP_WI]. */
+ uint64_t un_b0 : 1; /**< [ 2: 2](R/W1S/H) Reads or sets SLI(0)_MAC(0..3)_INT_SUM[UN_B0]. */
+ uint64_t un_wi : 1; /**< [ 3: 3](R/W1S/H) Reads or sets SLI(0)_MAC(0..3)_INT_SUM[UN_WI]. */
+ uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_macx_int_sum_w1s bdk_slix_macx_int_sum_w1s_t;
+
+static inline uint64_t BDK_SLIX_MACX_INT_SUM_W1S(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MACX_INT_SUM_W1S(unsigned long a, unsigned long b)
+{
+    /* Same base address on every model; the SLI node index (a) only strides on CN88XX. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1) && (b <= 2))
+        return 0x874000001180ll + 0x1000000000ll * (a & 0x1) + 0x10ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a == 0) && (b <= 2))
+        return 0x874000001180ll + 0x10ll * (b & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a == 0) && (b <= 3))
+        return 0x874000001180ll + 0x10ll * (b & 0x3);
+    __bdk_csr_fatal("SLIX_MACX_INT_SUM_W1S", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MACX_INT_SUM_W1S(a,b) bdk_slix_macx_int_sum_w1s_t
+#define bustype_BDK_SLIX_MACX_INT_SUM_W1S(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MACX_INT_SUM_W1S(a,b) "SLIX_MACX_INT_SUM_W1S"
+#define device_bar_BDK_SLIX_MACX_INT_SUM_W1S(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MACX_INT_SUM_W1S(a,b) (a)
+#define arguments_BDK_SLIX_MACX_INT_SUM_W1S(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP) sli#_mac_number
+ *
+ * SLI MAC Number Register
+ * When read from a MAC, this register returns the MAC's port number; otherwise returns zero.
+ */
+union bdk_slix_mac_number
+{
+ uint64_t u;
+ struct bdk_slix_mac_number_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t chip_rev : 8; /**< [ 31: 24](RO/H) Chip revision. See MIO_FUS_DAT2[CHIP_ID]. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t oci_id : 4; /**< [ 19: 16](RO) The CCPI node ID. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t a_mode : 1; /**< [ 8: 8](RO/H) Trusted mode. See RST_BOOT[TRUSTED_MODE]. */
+ uint64_t num : 8; /**< [ 7: 0](RO/H) MAC number. */
+#else /* Word 0 - Little Endian */
+ uint64_t num : 8; /**< [ 7: 0](RO/H) MAC number. */
+ uint64_t a_mode : 1; /**< [ 8: 8](RO/H) Trusted mode. See RST_BOOT[TRUSTED_MODE]. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t oci_id : 4; /**< [ 19: 16](RO) The CCPI node ID. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t chip_rev : 8; /**< [ 31: 24](RO/H) Chip revision. See MIO_FUS_DAT2[CHIP_ID]. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_mac_number_s cn; */
+};
+typedef union bdk_slix_mac_number bdk_slix_mac_number_t;
+
+static inline uint64_t BDK_SLIX_MAC_NUMBER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MAC_NUMBER(unsigned long a)
+{
+    /* PEXP register: note the node stride here is 0x10000000000, not the NCB 0x1000000000. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+        return 0x80ll + 0x10000000000ll * (a & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a == 0))
+        return 0x80ll;
+    __bdk_csr_fatal("SLIX_MAC_NUMBER", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MAC_NUMBER(a) bdk_slix_mac_number_t
+#define bustype_BDK_SLIX_MAC_NUMBER(a) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_MAC_NUMBER(a) "SLIX_MAC_NUMBER"
+#define busnum_BDK_SLIX_MAC_NUMBER(a) (a)
+#define arguments_BDK_SLIX_MAC_NUMBER(a) (a),-1,-1,-1
+
+/**
+ * Register (PEXP) sli#_mac_number#
+ *
+ * SLI MAC Number Register
+ * When read from a MAC, this register returns the MAC's port number; otherwise it returns zero.
+ */
+union bdk_slix_mac_numberx
+{
+ uint64_t u;
+ struct bdk_slix_mac_numberx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_32_63 : 32;
+ uint64_t chip_rev : 8; /**< [ 31: 24](RO/H) Chip revision. See MIO_FUS_DAT2[CHIP_ID]. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t oci_id : 4; /**< [ 19: 16](RO/H) The CCPI node ID. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t a_mode : 1; /**< [ 8: 8](RO/H) Trusted mode. See RST_BOOT[TRUSTED_MODE]. */
+ uint64_t num : 8; /**< [ 7: 0](RO/H) MAC number. */
+#else /* Word 0 - Little Endian */
+ uint64_t num : 8; /**< [ 7: 0](RO/H) MAC number. */
+ uint64_t a_mode : 1; /**< [ 8: 8](RO/H) Trusted mode. See RST_BOOT[TRUSTED_MODE]. */
+ uint64_t reserved_9_15 : 7;
+ uint64_t oci_id : 4; /**< [ 19: 16](RO/H) The CCPI node ID. */
+ uint64_t reserved_20_23 : 4;
+ uint64_t chip_rev : 8; /**< [ 31: 24](RO/H) Chip revision. See MIO_FUS_DAT2[CHIP_ID]. */
+ uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_mac_numberx_s cn; */
+};
+typedef union bdk_slix_mac_numberx bdk_slix_mac_numberx_t;
+
+static inline uint64_t BDK_SLIX_MAC_NUMBERX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MAC_NUMBERX(unsigned long a, unsigned long b)
+{
+    /* Only CN83XX implements the per-MAC (b) copy of this PEXP register. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a == 0) && (b <= 3))
+        return 0x2c050ll + 0x800000ll * (b & 0x3);
+    __bdk_csr_fatal("SLIX_MAC_NUMBERX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MAC_NUMBERX(a,b) bdk_slix_mac_numberx_t
+#define bustype_BDK_SLIX_MAC_NUMBERX(a,b) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_MAC_NUMBERX(a,b) "SLIX_MAC_NUMBERX"
+#define busnum_BDK_SLIX_MAC_NUMBERX(a,b) (a)
+#define arguments_BDK_SLIX_MAC_NUMBERX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_mbe_int_ena_w1c
+ *
+ * SLI Interrupt Enable Clear Register
+ * This register clears interrupt enable bits (write-one-to-clear; reads return the current enables).
+ */
+union bdk_slix_mbe_int_ena_w1c
+{
+ uint64_t u;
+ struct bdk_slix_mbe_int_ena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_slix_mbe_int_ena_w1c_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1C/H) Reads or clears enable for SLI(0)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1C/H) Reads or clears enable for SLI(0)_MBE_INT_SUM[SED0_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1C/H) Reads or clears enable for SLI(0)_MBE_INT_SUM[SED0_SBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1C/H) Reads or clears enable for SLI(0)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_slix_mbe_int_ena_w1c_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1C/H) Reads or clears enable for SLI(0..1)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1C/H) Reads or clears enable for SLI(0..1)_MBE_INT_SUM[SED0_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1C/H) Reads or clears enable for SLI(0..1)_MBE_INT_SUM[SED0_SBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1C/H) Reads or clears enable for SLI(0..1)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_slix_mbe_int_ena_w1c_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dbe : 32; /**< [ 63: 32](R/W1C/H) Reads or clears enable for SLI(0)_MBE_INT_SUM[DBE]. */
+ uint64_t sbe : 32; /**< [ 31: 0](R/W1C/H) Reads or clears enable for SLI(0)_MBE_INT_SUM[SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 32; /**< [ 31: 0](R/W1C/H) Reads or clears enable for SLI(0)_MBE_INT_SUM[SBE]. */
+ uint64_t dbe : 32; /**< [ 63: 32](R/W1C/H) Reads or clears enable for SLI(0)_MBE_INT_SUM[DBE]. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_mbe_int_ena_w1c bdk_slix_mbe_int_ena_w1c_t;
+
+static inline uint64_t BDK_SLIX_MBE_INT_ENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MBE_INT_ENA_W1C(unsigned long a)
+{
+    /* CN81XX/CN83XX are single-node (a must be 0); only CN88XX strides on the node index. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+        return 0x874001002260ll + 0x1000000000ll * (a & 0x1);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX)) && (a == 0))
+        return 0x874001002260ll;
+    __bdk_csr_fatal("SLIX_MBE_INT_ENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MBE_INT_ENA_W1C(a) bdk_slix_mbe_int_ena_w1c_t
+#define bustype_BDK_SLIX_MBE_INT_ENA_W1C(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MBE_INT_ENA_W1C(a) "SLIX_MBE_INT_ENA_W1C"
+#define device_bar_BDK_SLIX_MBE_INT_ENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MBE_INT_ENA_W1C(a) (a)
+#define arguments_BDK_SLIX_MBE_INT_ENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sli#_mbe_int_ena_w1s
+ *
+ * SLI Interrupt Enable Set Register
+ * This register sets interrupt enable bits (write-one-to-set; reads return the current enables).
+ */
+union bdk_slix_mbe_int_ena_w1s
+{
+ uint64_t u;
+ struct bdk_slix_mbe_int_ena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_slix_mbe_int_ena_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1S/H) Reads or sets enable for SLI(0)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1S/H) Reads or sets enable for SLI(0)_MBE_INT_SUM[SED0_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1S/H) Reads or sets enable for SLI(0)_MBE_INT_SUM[SED0_SBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1S/H) Reads or sets enable for SLI(0)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_slix_mbe_int_ena_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1S/H) Reads or sets enable for SLI(0..1)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1S/H) Reads or sets enable for SLI(0..1)_MBE_INT_SUM[SED0_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1S/H) Reads or sets enable for SLI(0..1)_MBE_INT_SUM[SED0_SBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1S/H) Reads or sets enable for SLI(0..1)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_slix_mbe_int_ena_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dbe : 32; /**< [ 63: 32](R/W1S/H) Reads or sets enable for SLI(0)_MBE_INT_SUM[DBE]. */
+ uint64_t sbe : 32; /**< [ 31: 0](R/W1S/H) Reads or sets enable for SLI(0)_MBE_INT_SUM[SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 32; /**< [ 31: 0](R/W1S/H) Reads or sets enable for SLI(0)_MBE_INT_SUM[SBE]. */
+ uint64_t dbe : 32; /**< [ 63: 32](R/W1S/H) Reads or sets enable for SLI(0)_MBE_INT_SUM[DBE]. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_mbe_int_ena_w1s bdk_slix_mbe_int_ena_w1s_t;
+
+static inline uint64_t BDK_SLIX_MBE_INT_ENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MBE_INT_ENA_W1S(unsigned long a)
+{
+    /* CN81XX/CN83XX are single-node (a must be 0); only CN88XX strides on the node index. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+        return 0x874001002280ll + 0x1000000000ll * (a & 0x1);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX)) && (a == 0))
+        return 0x874001002280ll;
+    __bdk_csr_fatal("SLIX_MBE_INT_ENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MBE_INT_ENA_W1S(a) bdk_slix_mbe_int_ena_w1s_t
+#define bustype_BDK_SLIX_MBE_INT_ENA_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MBE_INT_ENA_W1S(a) "SLIX_MBE_INT_ENA_W1S"
+#define device_bar_BDK_SLIX_MBE_INT_ENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MBE_INT_ENA_W1S(a) (a)
+#define arguments_BDK_SLIX_MBE_INT_ENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sli#_mbe_int_sum
+ *
+ * SLI MBE Interrupt Summary Register
+ * This register contains the MBE interrupt-summary bits of the SLI. All bits are R/W1C (write one to clear).
+ */
+union bdk_slix_mbe_int_sum
+{
+ uint64_t u;
+ struct bdk_slix_mbe_int_sum_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_slix_mbe_int_sum_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1C/H) SED0 double-bit error. When set, a SED0 double-bit error has occurred. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1C/H) SED0 single-bit error. When set, a SED0 single-bit error has occurred. */
+#else /* Word 0 - Little Endian */
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1C/H) SED0 single-bit error. When set, a SED0 single-bit error has occurred. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1C/H) SED0 double-bit error. When set, a SED0 double-bit error has occurred. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_slix_mbe_int_sum_cn81xx cn88xx; */
+ struct bdk_slix_mbe_int_sum_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dbe : 32; /**< [ 63: 32](R/W1C/H) Double-bit error detected in internal RAM. One bit per memory, enumerated by
+ SLI_RAMS_E. */
+ uint64_t sbe : 32; /**< [ 31: 0](R/W1C/H) Single-bit error detected in internal RAM. One bit per memory, enumerated by
+ SLI_RAMS_E. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 32; /**< [ 31: 0](R/W1C/H) Single-bit error detected in internal RAM. One bit per memory, enumerated by
+ SLI_RAMS_E. */
+ uint64_t dbe : 32; /**< [ 63: 32](R/W1C/H) Double-bit error detected in internal RAM. One bit per memory, enumerated by
+ SLI_RAMS_E. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_mbe_int_sum bdk_slix_mbe_int_sum_t;
+
+static inline uint64_t BDK_SLIX_MBE_INT_SUM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MBE_INT_SUM(unsigned long a)
+{
+    /* CN81XX/CN83XX are single-node (a must be 0); only CN88XX strides on the node index. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+        return 0x874001002220ll + 0x1000000000ll * (a & 0x1);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX)) && (a == 0))
+        return 0x874001002220ll;
+    __bdk_csr_fatal("SLIX_MBE_INT_SUM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MBE_INT_SUM(a) bdk_slix_mbe_int_sum_t
+#define bustype_BDK_SLIX_MBE_INT_SUM(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MBE_INT_SUM(a) "SLIX_MBE_INT_SUM"
+#define device_bar_BDK_SLIX_MBE_INT_SUM(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MBE_INT_SUM(a) (a)
+#define arguments_BDK_SLIX_MBE_INT_SUM(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sli#_mbe_int_sum_w1s
+ *
+ * SLI Interrupt Set Register
+ * This register sets interrupt bits (write-one-to-set view of SLI()_MBE_INT_SUM).
+ */
+union bdk_slix_mbe_int_sum_w1s
+{
+ uint64_t u;
+ struct bdk_slix_mbe_int_sum_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_slix_mbe_int_sum_w1s_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1S/H) Reads or sets SLI(0)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1S/H) Reads or sets SLI(0)_MBE_INT_SUM[SED0_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1S/H) Reads or sets SLI(0)_MBE_INT_SUM[SED0_SBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1S/H) Reads or sets SLI(0)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_slix_mbe_int_sum_w1s_cn88xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_54_63 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1S/H) Reads or sets SLI(0..1)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1S/H) Reads or sets SLI(0..1)_MBE_INT_SUM[SED0_SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sed0_sbe : 22; /**< [ 21: 0](R/W1S/H) Reads or sets SLI(0..1)_MBE_INT_SUM[SED0_SBE]. */
+ uint64_t reserved_22_31 : 10;
+ uint64_t sed0_dbe : 22; /**< [ 53: 32](R/W1S/H) Reads or sets SLI(0..1)_MBE_INT_SUM[SED0_DBE]. */
+ uint64_t reserved_54_63 : 10;
+#endif /* Word 0 - End */
+ } cn88xx;
+ struct bdk_slix_mbe_int_sum_w1s_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dbe : 32; /**< [ 63: 32](R/W1S/H) Reads or sets SLI(0)_MBE_INT_SUM[DBE]. */
+ uint64_t sbe : 32; /**< [ 31: 0](R/W1S/H) Reads or sets SLI(0)_MBE_INT_SUM[SBE]. */
+#else /* Word 0 - Little Endian */
+ uint64_t sbe : 32; /**< [ 31: 0](R/W1S/H) Reads or sets SLI(0)_MBE_INT_SUM[SBE]. */
+ uint64_t dbe : 32; /**< [ 63: 32](R/W1S/H) Reads or sets SLI(0)_MBE_INT_SUM[DBE]. */
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_mbe_int_sum_w1s bdk_slix_mbe_int_sum_w1s_t;
+
+static inline uint64_t BDK_SLIX_MBE_INT_SUM_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MBE_INT_SUM_W1S(unsigned long a)
+{
+    /* CN81XX/CN83XX are single-node (a must be 0); only CN88XX strides on the node index. */
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a <= 1))
+        return 0x874001002240ll + 0x1000000000ll * (a & 0x1);
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX)) && (a == 0))
+        return 0x874001002240ll;
+    __bdk_csr_fatal("SLIX_MBE_INT_SUM_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_MBE_INT_SUM_W1S(a) bdk_slix_mbe_int_sum_w1s_t
+#define bustype_BDK_SLIX_MBE_INT_SUM_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MBE_INT_SUM_W1S(a) "SLIX_MBE_INT_SUM_W1S"
+#define device_bar_BDK_SLIX_MBE_INT_SUM_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MBE_INT_SUM_W1S(a) (a)
+#define arguments_BDK_SLIX_MBE_INT_SUM_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sli#_mem_ctl
+ *
+ * SLI Memory Control Register
+ * This register controls the ECC of the SLI memories.
+ *
+ * NOTE(review): the original generated text described the flip-syndrome field for
+ * csr_region_mem as \<28:29\>, an ascending (impossible) range that also left bit
+ * \<27\> undescribed.  It is corrected below to \<28:27\>, after which every bit
+ * 29..0 of [CTL] is described exactly once.  Confirm against the HRM.
+ */
+union bdk_slix_mem_ctl
+{
+    uint64_t u;
+    struct bdk_slix_mem_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_slix_mem_ctl_cn81xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_30_63 : 34;
+        uint64_t ctl : 30; /**< [ 29: 0](R/W) Control memory ECC functionality.
+                                 \<29\> = Correction disable for csr_region_mem_csr_cor_dis.
+                                 \<28:27\> = Flip syndrome for csr_region_mem_csr_flip_synd.
+
+                                 \<26\> = Correction disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<25:24\> = Flip syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+                                 \<23\> = Correction disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<22:21\> = Flip syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+                                 \<20\> = Correction disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<19:18\> = Flip syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+
+                                 \<17\> = Correction disable for cpl0_fifo_csr_cor_dis.
+                                 \<16:15\> = Flip syndrome for cpl0_fifo_csr_flip_synd.
+                                 \<14\> = Correction disable for cpl1_fifo_csr_cor_dis.
+                                 \<13:12\> = Flip syndrome for cpl1_fifo_csr_flip_synd.
+                                 \<11\> = Correction disable for cpl2_fifo_csr_cor_dis.
+                                 \<10:9\> = Flip syndrome for cpl2_fifo_csr_flip_synd.
+
+                                 \<8\> = Correction disable for p2n0_tlp_\<p, n, cpl\>_fifo.
+                                 \<7:6\> = Flip syndrome for p2n0_tlp_\<p,n,cpl\>_fifo.
+                                 \<5\> = Correction disable for p2n1_tlp_\<p, n, cpl\>_fifo.
+                                 \<4:3\> = Flip syndrome for p2n1_tlp_\<p,n,cpl\>_fifo.
+                                 \<2\> = Correction disable for p2n2_tlp_\<p, n, cpl\>_fifo.
+                                 \<1:0\> = Flip syndrome for p2n2_tlp_\<p,n,cpl\>_fifo. */
+#else /* Word 0 - Little Endian */
+        uint64_t ctl : 30; /**< [ 29: 0](R/W) Control memory ECC functionality.
+                                 \<29\> = Correction disable for csr_region_mem_csr_cor_dis.
+                                 \<28:27\> = Flip syndrome for csr_region_mem_csr_flip_synd.
+
+                                 \<26\> = Correction disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<25:24\> = Flip syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+                                 \<23\> = Correction disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<22:21\> = Flip syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+                                 \<20\> = Correction disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<19:18\> = Flip syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+
+                                 \<17\> = Correction disable for cpl0_fifo_csr_cor_dis.
+                                 \<16:15\> = Flip syndrome for cpl0_fifo_csr_flip_synd.
+                                 \<14\> = Correction disable for cpl1_fifo_csr_cor_dis.
+                                 \<13:12\> = Flip syndrome for cpl1_fifo_csr_flip_synd.
+                                 \<11\> = Correction disable for cpl2_fifo_csr_cor_dis.
+                                 \<10:9\> = Flip syndrome for cpl2_fifo_csr_flip_synd.
+
+                                 \<8\> = Correction disable for p2n0_tlp_\<p, n, cpl\>_fifo.
+                                 \<7:6\> = Flip syndrome for p2n0_tlp_\<p,n,cpl\>_fifo.
+                                 \<5\> = Correction disable for p2n1_tlp_\<p, n, cpl\>_fifo.
+                                 \<4:3\> = Flip syndrome for p2n1_tlp_\<p,n,cpl\>_fifo.
+                                 \<2\> = Correction disable for p2n2_tlp_\<p, n, cpl\>_fifo.
+                                 \<1:0\> = Flip syndrome for p2n2_tlp_\<p,n,cpl\>_fifo. */
+        uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+    } cn81xx;
+    struct bdk_slix_mem_ctl_cn88xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_30_63 : 34;
+        uint64_t ctl : 30; /**< [ 29: 0](R/W) Control memory ECC functionality.
+                                 \<29\> = Correction Disable for csr_region_mem_csr_cor_dis.
+                                 \<28:27\> = Flip Syndrome for csr_region_mem_csr_flip_synd.
+
+                                 \<26\> = Correction Disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<25:24\> = Flip Syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+                                 \<23\> = Correction Disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<22:21\> = Flip Syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+                                 \<20\> = Correction Disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<19:18\> = Flip Syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+
+                                 \<17\> = Correction Disable for cpl0_fifo_csr_cor_dis.
+                                 \<16:15\> = Flip Syndrome for cpl0_fifo_csr_flip_synd.
+                                 \<14\> = Correction Disable for cpl1_fifo_csr_cor_dis.
+                                 \<13:12\> = Flip Syndrome for cpl1_fifo_csr_flip_synd.
+                                 \<11\> = Correction Disable for cpl2_fifo_csr_cor_dis.
+                                 \<10:9\> = Flip Syndrome for cpl2_fifo_csr_flip_synd.
+
+                                 \<8\> = Correction Disable for p2n0_tlp_\<p, n, cpl\>_fifo.
+                                 \<7:6\> = Flip Syndrome for p2n0_tlp_\<p,n,cpl\>_fifo.
+                                 \<5\> = Correction Disable for p2n1_tlp_\<p, n, cpl\>_fifo.
+                                 \<4:3\> = Flip Syndrome for p2n1_tlp_\<p,n,cpl\>_fifo.
+                                 \<2\> = Correction Disable for p2n2_tlp_\<p, n, cpl\>_fifo.
+                                 \<1:0\> = Flip Syndrome for p2n2_tlp_\<p,n,cpl\>_fifo. */
+#else /* Word 0 - Little Endian */
+        uint64_t ctl : 30; /**< [ 29: 0](R/W) Control memory ECC functionality.
+                                 \<29\> = Correction Disable for csr_region_mem_csr_cor_dis.
+                                 \<28:27\> = Flip Syndrome for csr_region_mem_csr_flip_synd.
+
+                                 \<26\> = Correction Disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<25:24\> = Flip Syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+                                 \<23\> = Correction Disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<22:21\> = Flip Syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+                                 \<20\> = Correction Disable for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_cor_dis.
+                                 \<19:18\> = Flip Syndrome for sndf\<h,l\>2_ffifo, sncf2_ffifo_csr_flip_synd.
+
+                                 \<17\> = Correction Disable for cpl0_fifo_csr_cor_dis.
+                                 \<16:15\> = Flip Syndrome for cpl0_fifo_csr_flip_synd.
+                                 \<14\> = Correction Disable for cpl1_fifo_csr_cor_dis.
+                                 \<13:12\> = Flip Syndrome for cpl1_fifo_csr_flip_synd.
+                                 \<11\> = Correction Disable for cpl2_fifo_csr_cor_dis.
+                                 \<10:9\> = Flip Syndrome for cpl2_fifo_csr_flip_synd.
+
+                                 \<8\> = Correction Disable for p2n0_tlp_\<p, n, cpl\>_fifo.
+                                 \<7:6\> = Flip Syndrome for p2n0_tlp_\<p,n,cpl\>_fifo.
+                                 \<5\> = Correction Disable for p2n1_tlp_\<p, n, cpl\>_fifo.
+                                 \<4:3\> = Flip Syndrome for p2n1_tlp_\<p,n,cpl\>_fifo.
+                                 \<2\> = Correction Disable for p2n2_tlp_\<p, n, cpl\>_fifo.
+                                 \<1:0\> = Flip Syndrome for p2n2_tlp_\<p,n,cpl\>_fifo. */
+        uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+    } cn88xx;
+    struct bdk_slix_mem_ctl_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63 : 32;
+        uint64_t cdis : 32; /**< [ 31: 0](R/W) Disables ECC correction on each RAM. Bit positions enumerated with SLI_RAMS_E. */
+#else /* Word 0 - Little Endian */
+        uint64_t cdis : 32; /**< [ 31: 0](R/W) Disables ECC correction on each RAM. Bit positions enumerated with SLI_RAMS_E. */
+        uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_slix_mem_ctl bdk_slix_mem_ctl_t;
+
+/* Return the NCB physical address of SLI(a)_MEM_CTL.  CN88XX has two SLI
+   instances (a <= 1); CN81XX/CN83XX have one (a == 0).  An out-of-range
+   index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_MEM_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MEM_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return 0x874001002200ll + 0x1000000000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+        return 0x874001002200ll + 0x1000000000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x874001002200ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("SLIX_MEM_CTL", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_SLIX_MEM_CTL(a) bdk_slix_mem_ctl_t
+#define bustype_BDK_SLIX_MEM_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MEM_CTL(a) "SLIX_MEM_CTL"
+#define device_bar_BDK_SLIX_MEM_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MEM_CTL(a) (a)
+#define arguments_BDK_SLIX_MEM_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sli#_mem_flip
+ *
+ * SLI ECC Control Register
+ * This register controls the ECC of the SLI memories.
+ *
+ * NOTE: the address decoder below only matches CN83XX, so this register is
+ * only reachable on CN83XX parts.
+ */
+union bdk_slix_mem_flip
+{
+    uint64_t u;
+    struct bdk_slix_mem_flip_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t flip1 : 32; /**< [ 63: 32](R/W) Flips syndrome bit 1 on writes. Bit positions enumerated with SLI_RAMS_E. */
+        uint64_t flip0 : 32; /**< [ 31: 0](R/W) Flips syndrome bit 0 on writes. Bit positions enumerated with SLI_RAMS_E. */
+#else /* Word 0 - Little Endian */
+        uint64_t flip0 : 32; /**< [ 31: 0](R/W) Flips syndrome bit 0 on writes. Bit positions enumerated with SLI_RAMS_E. */
+        uint64_t flip1 : 32; /**< [ 63: 32](R/W) Flips syndrome bit 1 on writes. Bit positions enumerated with SLI_RAMS_E. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_mem_flip_s cn; */
+};
+typedef union bdk_slix_mem_flip bdk_slix_mem_flip_t;
+
+/* Return the NCB physical address of SLI(a)_MEM_FLIP; CN83XX only (a == 0). */
+static inline uint64_t BDK_SLIX_MEM_FLIP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MEM_FLIP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+        return 0x874001002210ll + 0x1000000000ll * ((a) & 0x0);
+    __bdk_csr_fatal("SLIX_MEM_FLIP", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_SLIX_MEM_FLIP(a) bdk_slix_mem_flip_t
+#define bustype_BDK_SLIX_MEM_FLIP(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MEM_FLIP(a) "SLIX_MEM_FLIP"
+#define device_bar_BDK_SLIX_MEM_FLIP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_MEM_FLIP(a) (a)
+#define arguments_BDK_SLIX_MEM_FLIP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sli#_msix_pba#
+ *
+ * SLI MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table; the bit number is indexed by the SLI_INT_VEC_E enumeration.
+ */
+union bdk_slix_msix_pbax
+{
+    uint64_t u;
+    struct bdk_slix_msix_pbax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated SLI_MSIX_VEC()_CTL, enumerated by SLI_INT_VEC_E. Bits
+                                 that have no associated SLI_INT_VEC_E are 0. */
+#else /* Word 0 - Little Endian */
+        uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated SLI_MSIX_VEC()_CTL, enumerated by SLI_INT_VEC_E. Bits
+                                 that have no associated SLI_INT_VEC_E are 0. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_msix_pbax_s cn; */
+};
+typedef union bdk_slix_msix_pbax bdk_slix_msix_pbax_t;
+
+/* Return the NCB physical address of SLI(a)_MSIX_PBA(b).  [a] is the SLI
+   instance (CN88XX: a <= 1, otherwise a == 0); [b] is the PBA word index
+   (only word 0 exists on all three models).  Bad indices are reported
+   through __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b==0)))
+        return 0x8740100f0000ll + 0x1000000000ll * ((a) & 0x0) + 8ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b==0)))
+        return 0x874c000f0000ll + 0x1000000000ll * ((a) & 0x0) + 8ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+        return 0x8740100f0000ll + 0x1000000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+    __bdk_csr_fatal("SLIX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_SLIX_MSIX_PBAX(a,b) bdk_slix_msix_pbax_t
+#define bustype_BDK_SLIX_MSIX_PBAX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MSIX_PBAX(a,b) "SLIX_MSIX_PBAX"
+#define device_bar_BDK_SLIX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SLIX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_SLIX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_msix_vec#_addr
+ *
+ * SLI MSI-X Vector-Table Address Register
+ * This register is the MSI-X vector table, indexed by the SLI_INT_VEC_E enumeration.
+ */
+union bdk_slix_msix_vecx_addr
+{
+    uint64_t u;
+    struct bdk_slix_msix_vecx_addr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_49_63 : 15;
+        uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_1 : 1;
+        uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                 1 = This vector's SLI_MSIX_VEC()_ADDR, SLI_MSIX_VEC()_CTL, and corresponding
+                                 bit of SLI_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                 by the nonsecure world.
+
+                                 If PCCPF_SLI_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                 set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+        uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+                                 0 = This vector may be read or written by either secure or nonsecure states.
+                                 1 = This vector's SLI_MSIX_VEC()_ADDR, SLI_MSIX_VEC()_CTL, and corresponding
+                                 bit of SLI_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+                                 by the nonsecure world.
+
+                                 If PCCPF_SLI_VSEC_SCTL[MSIX_SEC] (for documentation, see PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+                                 set, all vectors are secure and function as if [SECVEC] was set. */
+        uint64_t reserved_1 : 1;
+        uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+        uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_msix_vecx_addr_s cn; */
+};
+typedef union bdk_slix_msix_vecx_addr bdk_slix_msix_vecx_addr_t;
+
+/* Return the NCB physical address of SLI(a)_MSIX_VEC(b)_ADDR.  [a] is the
+   SLI instance (CN88XX: a <= 1, otherwise a == 0); [b] is the vector index
+   (CN83XX: b <= 16, otherwise b <= 3).  Bad indices are reported through
+   __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
+        return 0x874010000000ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=16)))
+        return 0x874c00000000ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x874010000000ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+    __bdk_csr_fatal("SLIX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_SLIX_MSIX_VECX_ADDR(a,b) bdk_slix_msix_vecx_addr_t
+#define bustype_BDK_SLIX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MSIX_VECX_ADDR(a,b) "SLIX_MSIX_VECX_ADDR"
+#define device_bar_BDK_SLIX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SLIX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_SLIX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_msix_vec#_ctl
+ *
+ * SLI MSI-X Vector-Table Control and Data Register
+ * This register is the MSI-X vector table, indexed by the SLI_INT_VEC_E enumeration.
+ */
+union bdk_slix_msix_vecx_ctl
+{
+    uint64_t u;
+    struct bdk_slix_msix_vecx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_33_63 : 31;
+        uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+        uint64_t reserved_20_31 : 12;
+        uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+        uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+        uint64_t reserved_20_31 : 12;
+        uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts are sent to this vector. */
+        uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_msix_vecx_ctl_s cn; */
+};
+typedef union bdk_slix_msix_vecx_ctl bdk_slix_msix_vecx_ctl_t;
+
+/* Return the NCB physical address of SLI(a)_MSIX_VEC(b)_CTL.  Same index
+   ranges as BDK_SLIX_MSIX_VECX_ADDR; the register sits 8 bytes after the
+   corresponding vector-address register. */
+static inline uint64_t BDK_SLIX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=3)))
+        return 0x874010000008ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=16)))
+        return 0x874c00000008ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x1f);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+        return 0x874010000008ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+    __bdk_csr_fatal("SLIX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_SLIX_MSIX_VECX_CTL(a,b) bdk_slix_msix_vecx_ctl_t
+#define bustype_BDK_SLIX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_MSIX_VECX_CTL(a,b) "SLIX_MSIX_VECX_CTL"
+#define device_bar_BDK_SLIX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_SLIX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_SLIX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_s2m_ctl
+ *
+ * SLI S2M Control Register
+ * This register contains control functionality of the S2M attached to the SLI. This register
+ * impacts all MACs attached to the S2M.
+ */
+union bdk_slix_s2m_ctl
+{
+    uint64_t u;
+    struct bdk_slix_s2m_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t rd_flt : 1; /**< [ 14: 14](R/W) Read fault.
+                                 0 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will return
+                                 to the NCB/cores all-ones and non-fault.
+
+                                 1 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will return
+                                 to the NCB/cores all-zeros and fault. In the case of a read by a core, this fault will
+                                 cause a synchronous external abort in the core.
+
+                                 Config reads which are terminated by PCIe with an error (UR, etc), or config reads when
+                                 the PEM is disabled or link is down, will return to the NCB/cores all-ones and non-fault
+                                 regardless of this bit. */
+        uint64_t max_word : 4; /**< [ 13: 10](R/W) Maximum number of words. Specifies the maximum number of 8-byte words to merge into a
+                                 single write operation from the cores to the MAC. Legal values are 1 to 8, with 0 treated
+                                 as 16. */
+        uint64_t timer : 10; /**< [ 9: 0](R/W) Merge timer. When the SLI starts a core-to-MAC write, TIMER specifies the maximum wait, in
+                                 coprocessor-clock cycles, to merge additional write operations from the cores into one
+                                 large write. The values for this field range from 1 to 1024, with 0 treated as 1024. */
+#else /* Word 0 - Little Endian */
+        uint64_t timer : 10; /**< [ 9: 0](R/W) Merge timer. When the SLI starts a core-to-MAC write, TIMER specifies the maximum wait, in
+                                 coprocessor-clock cycles, to merge additional write operations from the cores into one
+                                 large write. The values for this field range from 1 to 1024, with 0 treated as 1024. */
+        uint64_t max_word : 4; /**< [ 13: 10](R/W) Maximum number of words. Specifies the maximum number of 8-byte words to merge into a
+                                 single write operation from the cores to the MAC. Legal values are 1 to 8, with 0 treated
+                                 as 16. */
+        uint64_t rd_flt : 1; /**< [ 14: 14](R/W) Read fault.
+                                 0 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will return
+                                 to the NCB/cores all-ones and non-fault.
+
+                                 1 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will return
+                                 to the NCB/cores all-zeros and fault. In the case of a read by a core, this fault will
+                                 cause a synchronous external abort in the core.
+
+                                 Config reads which are terminated by PCIe with an error (UR, etc), or config reads when
+                                 the PEM is disabled or link is down, will return to the NCB/cores all-ones and non-fault
+                                 regardless of this bit. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_slix_s2m_ctl_cn88xxp1
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_14_63 : 50;
+        uint64_t max_word : 4; /**< [ 13: 10](R/W) Maximum number of words. Specifies the maximum number of 8-byte words to merge into a
+                                 single write operation from the cores to the MAC. Legal values are 1 to 8, with 0 treated
+                                 as 16. */
+        uint64_t timer : 10; /**< [ 9: 0](R/W) Merge timer. When the SLI starts a core-to-MAC write, TIMER specifies the maximum wait, in
+                                 coprocessor-clock cycles, to merge additional write operations from the cores into one
+                                 large write. The values for this field range from 1 to 1024, with 0 treated as 1024. */
+#else /* Word 0 - Little Endian */
+        uint64_t timer : 10; /**< [ 9: 0](R/W) Merge timer. When the SLI starts a core-to-MAC write, TIMER specifies the maximum wait, in
+                                 coprocessor-clock cycles, to merge additional write operations from the cores into one
+                                 large write. The values for this field range from 1 to 1024, with 0 treated as 1024. */
+        uint64_t max_word : 4; /**< [ 13: 10](R/W) Maximum number of words. Specifies the maximum number of 8-byte words to merge into a
+                                 single write operation from the cores to the MAC. Legal values are 1 to 8, with 0 treated
+                                 as 16. */
+        uint64_t reserved_14_63 : 50;
+#endif /* Word 0 - End */
+    } cn88xxp1;
+    /* struct bdk_slix_s2m_ctl_s cn81xx; */
+    /* struct bdk_slix_s2m_ctl_s cn83xx; */
+    struct bdk_slix_s2m_ctl_cn88xxp2
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_15_63 : 49;
+        uint64_t rd_flt : 1; /**< [ 14: 14](R/W) Read fault.
+                                 0 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will return
+                                 to the NCB/cores all-ones and non-fault. This is compatible with CN88XX pass 1.0.
+
+                                 1 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will return
+                                 to the NCB/cores all-zeros and fault. In the case of a read by a core, this fault will
+                                 cause a synchronous external abort in the core.
+
+                                 Config reads which are terminated by PCIe with an error (UR, etc), or config reads when
+                                 the PEM is disabled or link is down, will return to the NCB/cores all-ones and non-fault
+                                 regardless of this bit. */
+        uint64_t max_word : 4; /**< [ 13: 10](R/W) Maximum number of words. Specifies the maximum number of 8-byte words to merge into a
+                                 single write operation from the cores to the MAC. Legal values are 1 to 8, with 0 treated
+                                 as 16. */
+        uint64_t timer : 10; /**< [ 9: 0](R/W) Merge timer. When the SLI starts a core-to-MAC write, TIMER specifies the maximum wait, in
+                                 coprocessor-clock cycles, to merge additional write operations from the cores into one
+                                 large write. The values for this field range from 1 to 1024, with 0 treated as 1024. */
+#else /* Word 0 - Little Endian */
+        uint64_t timer : 10; /**< [ 9: 0](R/W) Merge timer. When the SLI starts a core-to-MAC write, TIMER specifies the maximum wait, in
+                                 coprocessor-clock cycles, to merge additional write operations from the cores into one
+                                 large write. The values for this field range from 1 to 1024, with 0 treated as 1024. */
+        uint64_t max_word : 4; /**< [ 13: 10](R/W) Maximum number of words. Specifies the maximum number of 8-byte words to merge into a
+                                 single write operation from the cores to the MAC. Legal values are 1 to 8, with 0 treated
+                                 as 16. */
+        uint64_t rd_flt : 1; /**< [ 14: 14](R/W) Read fault.
+                                 0 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will return
+                                 to the NCB/cores all-ones and non-fault. This is compatible with CN88XX pass 1.0.
+
+                                 1 = A PCIe non-config read which is terminated by PCIe with an error (UR, etc) will return
+                                 to the NCB/cores all-zeros and fault. In the case of a read by a core, this fault will
+                                 cause a synchronous external abort in the core.
+
+                                 Config reads which are terminated by PCIe with an error (UR, etc), or config reads when
+                                 the PEM is disabled or link is down, will return to the NCB/cores all-ones and non-fault
+                                 regardless of this bit. */
+        uint64_t reserved_15_63 : 49;
+#endif /* Word 0 - End */
+    } cn88xxp2;
+};
+typedef union bdk_slix_s2m_ctl bdk_slix_s2m_ctl_t;
+
+/* Return the NCB physical address of SLI(a)_S2M_CTL.  CN88XX has two SLI
+   instances (a <= 1); CN81XX/CN83XX have one (a == 0).  An out-of-range
+   index is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_S2M_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_S2M_CTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+        return 0x874001002000ll + 0x1000000000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+        return 0x874001002000ll + 0x1000000000ll * ((a) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x874001002000ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("SLIX_S2M_CTL", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_SLIX_S2M_CTL(a) bdk_slix_s2m_ctl_t
+#define bustype_BDK_SLIX_S2M_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_S2M_CTL(a) "SLIX_S2M_CTL"
+#define device_bar_BDK_SLIX_S2M_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_S2M_CTL(a) (a)
+#define arguments_BDK_SLIX_S2M_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sli#_s2m_mac#_ctl
+ *
+ * SLI MAC Control Register
+ * This register controls the functionality of the SLI's S2M in regards to a MAC.
+ * Internal:
+ * In 78xx was SLI()_CTL_STATUS and SLI()_MAC_CREDIT_CNT.
+ *
+ * NOTE: CN81XX/CN88XX use the generic layout (fields R/W/H, credit maximums
+ * 0x80/0x10); CN83XX has its own layout with larger FIFO credit ranges
+ * (0xF4/0x20) -- see the cn83xx struct below.
+ */
+union bdk_slix_s2m_macx_ctl
+{
+    uint64_t u;
+    struct bdk_slix_s2m_macx_ctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63 : 32;
+        uint64_t ccnt : 8; /**< [ 31: 24](R/W/H) CPL-TLP FIFO credits. Legal values are 0x25 to 0x80. For diagnostic use only. */
+        uint64_t ncnt : 8; /**< [ 23: 16](R/W/H) NP-TLP FIFO credits. Legal values are 0x5 to 0x10. For diagnostic use only. */
+        uint64_t pcnt : 8; /**< [ 15: 8](R/W/H) P-TLP FIFO credits. Legal values are 0x25 to 0x80. For diagnostic use only. */
+        uint64_t tags : 8; /**< [ 7: 0](R/W/H) Number of tags available for MAC.
+                                 One tag is needed for each outbound TLP that requires a CPL TLP.
+                                 This field should only be written as part of a reset sequence and before issuing any read
+                                 operations, CFGs, or I/O transactions from the core(s). For diagnostic use only.
+                                 Legal values are 1 to 32. */
+#else /* Word 0 - Little Endian */
+        uint64_t tags : 8; /**< [ 7: 0](R/W/H) Number of tags available for MAC.
+                                 One tag is needed for each outbound TLP that requires a CPL TLP.
+                                 This field should only be written as part of a reset sequence and before issuing any read
+                                 operations, CFGs, or I/O transactions from the core(s). For diagnostic use only.
+                                 Legal values are 1 to 32. */
+        uint64_t pcnt : 8; /**< [ 15: 8](R/W/H) P-TLP FIFO credits. Legal values are 0x25 to 0x80. For diagnostic use only. */
+        uint64_t ncnt : 8; /**< [ 23: 16](R/W/H) NP-TLP FIFO credits. Legal values are 0x5 to 0x10. For diagnostic use only. */
+        uint64_t ccnt : 8; /**< [ 31: 24](R/W/H) CPL-TLP FIFO credits. Legal values are 0x25 to 0x80. For diagnostic use only. */
+        uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_slix_s2m_macx_ctl_s cn81xx; */
+    /* struct bdk_slix_s2m_macx_ctl_s cn88xx; */
+    struct bdk_slix_s2m_macx_ctl_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_32_63 : 32;
+        uint64_t ccnt : 8; /**< [ 31: 24](R/W) CPL-TLP FIFO credits. Legal values are 0x25 to 0xF4. For diagnostic use only. */
+        uint64_t ncnt : 8; /**< [ 23: 16](R/W) NP-TLP FIFO credits. Legal values are 0x5 to 0x20. For diagnostic use only. */
+        uint64_t pcnt : 8; /**< [ 15: 8](R/W) P-TLP FIFO credits. Legal values are 0x25 to 0xF4. For diagnostic use only. */
+        uint64_t tags : 8; /**< [ 7: 0](R/W) Number of tags available for MAC.
+                                 One tag is needed for each outbound TLP that requires a CPL TLP.
+                                 This field should only be written as part of a reset sequence and before issuing any read
+                                 operations, CFGs, or I/O transactions from the core(s). For diagnostic use only.
+                                 Legal values are 1 to 32. */
+#else /* Word 0 - Little Endian */
+        uint64_t tags : 8; /**< [ 7: 0](R/W) Number of tags available for MAC.
+                                 One tag is needed for each outbound TLP that requires a CPL TLP.
+                                 This field should only be written as part of a reset sequence and before issuing any read
+                                 operations, CFGs, or I/O transactions from the core(s). For diagnostic use only.
+                                 Legal values are 1 to 32. */
+        uint64_t pcnt : 8; /**< [ 15: 8](R/W) P-TLP FIFO credits. Legal values are 0x25 to 0xF4. For diagnostic use only. */
+        uint64_t ncnt : 8; /**< [ 23: 16](R/W) NP-TLP FIFO credits. Legal values are 0x5 to 0x20. For diagnostic use only. */
+        uint64_t ccnt : 8; /**< [ 31: 24](R/W) CPL-TLP FIFO credits. Legal values are 0x25 to 0xF4. For diagnostic use only. */
+        uint64_t reserved_32_63 : 32;
+#endif /* Word 0 - End */
+    } cn83xx;
+};
+typedef union bdk_slix_s2m_macx_ctl bdk_slix_s2m_macx_ctl_t;
+
+/* Return the NCB physical address of SLI(a)_S2M_MAC(b)_CTL.  [a] is the SLI
+   instance (CN88XX: a <= 1, otherwise a == 0); [b] is the MAC index
+   (CN83XX: b <= 3, otherwise b <= 2).  Bad indices are reported through
+   __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_S2M_MACX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_S2M_MACX_CTL(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=2)))
+        return 0x874001002080ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+        return 0x874001002080ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0x3);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=2)))
+        return 0x874001002080ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+    __bdk_csr_fatal("SLIX_S2M_MACX_CTL", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros consumed by the generic BDK_CSR_* accessors. */
+#define typedef_BDK_SLIX_S2M_MACX_CTL(a,b) bdk_slix_s2m_macx_ctl_t
+#define bustype_BDK_SLIX_S2M_MACX_CTL(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_S2M_MACX_CTL(a,b) "SLIX_S2M_MACX_CTL"
+#define device_bar_BDK_SLIX_S2M_MACX_CTL(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_S2M_MACX_CTL(a,b) (a)
+#define arguments_BDK_SLIX_S2M_MACX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_s2m_reg#_acc
+ *
+ * SLI Region Access Registers
+ * These registers contains address index and control bits for access to memory from cores.
+ * Indexed using {NCBO DST[3:0], NCBO Address[35:32]}.
+ */
+union bdk_slix_s2m_regx_acc
+{
+ uint64_t u;
+ struct bdk_slix_s2m_regx_acc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t ctype : 2; /**< [ 54: 53](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration (only 8, 16, 32-bit loads are supported). Note normally the ECAM
+ would be used in place of this CTYPE.
+ 0x2 = PCI I/O (Only 8, 16, 32-bit loads are supported).
+ 0x3 = Reserved. */
+ uint64_t zero : 1; /**< [ 52: 52](R/W) Causes all byte read operations to be zero-length read operations. Returns zeros to the
+ EXEC for all read data. */
+ uint64_t reserved_49_51 : 3;
+ uint64_t nmerge : 1; /**< [ 48: 48](R/W) When set, no write merging is allowed in this window. */
+ uint64_t esr : 2; /**< [ 47: 46](RO) Reserved. */
+ uint64_t esw : 2; /**< [ 45: 44](RO) Reserved. */
+ uint64_t wtype : 2; /**< [ 43: 42](R/W) Write type. ADDRTYPE\<1:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t rtype : 2; /**< [ 41: 40](R/W) Read type. ADDRTYPE\<1:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t reserved_32_39 : 8;
+ uint64_t ba : 32; /**< [ 31: 0](R/W) Bus address. Address bits\<63:32\> for read/write operations that use this region. */
+#else /* Word 0 - Little Endian */
+ uint64_t ba : 32; /**< [ 31: 0](R/W) Bus address. Address bits\<63:32\> for read/write operations that use this region. */
+ uint64_t reserved_32_39 : 8;
+ uint64_t rtype : 2; /**< [ 41: 40](R/W) Read type. ADDRTYPE\<1:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t wtype : 2; /**< [ 43: 42](R/W) Write type. ADDRTYPE\<1:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t esw : 2; /**< [ 45: 44](RO) Reserved. */
+ uint64_t esr : 2; /**< [ 47: 46](RO) Reserved. */
+ uint64_t nmerge : 1; /**< [ 48: 48](R/W) When set, no write merging is allowed in this window. */
+ uint64_t reserved_49_51 : 3;
+ uint64_t zero : 1; /**< [ 52: 52](R/W) Causes all byte read operations to be zero-length read operations. Returns zeros to the
+ EXEC for all read data. */
+ uint64_t ctype : 2; /**< [ 54: 53](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration (only 8, 16, 32-bit loads are supported). Note normally the ECAM
+ would be used in place of this CTYPE.
+ 0x2 = PCI I/O (Only 8, 16, 32-bit loads are supported).
+ 0x3 = Reserved. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_slix_s2m_regx_acc_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t ctype : 2; /**< [ 54: 53](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration (only 8, 16, 32-bit loads are supported). Note normally the ECAM
+ would be used in place of this CTYPE.
+ 0x2 = PCI I/O (Only 8, 16, 32-bit loads are supported).
+ 0x3 = Reserved. */
+ uint64_t zero : 1; /**< [ 52: 52](R/W) Causes all byte read operations to be zero-length read operations. Returns zeros to the
+ EXEC for all read data. */
+ uint64_t mac : 3; /**< [ 51: 49](R/W) The MAC that reads/writes to this subid are sent. */
+ uint64_t nmerge : 1; /**< [ 48: 48](R/W) When set, no write merging is allowed in this window. */
+ uint64_t esr : 2; /**< [ 47: 46](RO) Reserved. */
+ uint64_t esw : 2; /**< [ 45: 44](RO) Reserved. */
+ uint64_t wtype : 2; /**< [ 43: 42](R/W) Write type. ADDRTYPE\<1:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t rtype : 2; /**< [ 41: 40](R/W) Read type. ADDRTYPE\<1:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t reserved_32_39 : 8;
+ uint64_t ba : 32; /**< [ 31: 0](R/W) Bus address. Address bits\<63:32\> for read/write operations that use this region. */
+#else /* Word 0 - Little Endian */
+ uint64_t ba : 32; /**< [ 31: 0](R/W) Bus address. Address bits\<63:32\> for read/write operations that use this region. */
+ uint64_t reserved_32_39 : 8;
+ uint64_t rtype : 2; /**< [ 41: 40](R/W) Read type. ADDRTYPE\<1:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t wtype : 2; /**< [ 43: 42](R/W) Write type. ADDRTYPE\<1:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t esw : 2; /**< [ 45: 44](RO) Reserved. */
+ uint64_t esr : 2; /**< [ 47: 46](RO) Reserved. */
+ uint64_t nmerge : 1; /**< [ 48: 48](R/W) When set, no write merging is allowed in this window. */
+ uint64_t mac : 3; /**< [ 51: 49](R/W) The MAC that reads/writes to this subid are sent. */
+ uint64_t zero : 1; /**< [ 52: 52](R/W) Causes all byte read operations to be zero-length read operations. Returns zeros to the
+ EXEC for all read data. */
+ uint64_t ctype : 2; /**< [ 54: 53](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration (only 8, 16, 32-bit loads are supported). Note normally the ECAM
+ would be used in place of this CTYPE.
+ 0x2 = PCI I/O (Only 8, 16, 32-bit loads are supported).
+ 0x3 = Reserved. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_slix_s2m_regx_acc_cn81xx cn88xx; */
+ struct bdk_slix_s2m_regx_acc_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_55_63 : 9;
+ uint64_t ctype : 2; /**< [ 54: 53](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration (only 8, 16, 32-bit loads are supported). Note normally the ECAM
+ would be used in place of this CTYPE.
+ 0x2 = PCI I/O (Only 8, 16, 32-bit loads are supported).
+ 0x3 = Reserved. */
+ uint64_t zero : 1; /**< [ 52: 52](R/W) Causes all byte read operations to be zero-length read operations. Returns zeros to the
+ EXEC for all read data. */
+ uint64_t epf : 3; /**< [ 51: 49](R/W) The EPF that reads/writes to this subid are sent. */
+ uint64_t nmerge : 1; /**< [ 48: 48](R/W) When set, no write merging is allowed in this window. */
+ uint64_t esr : 2; /**< [ 47: 46](RO) Reserved. */
+ uint64_t esw : 2; /**< [ 45: 44](RO) Reserved. */
+ uint64_t wtype : 2; /**< [ 43: 42](R/W) Write type. ADDRTYPE\<1:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t rtype : 2; /**< [ 41: 40](R/W) Read type. ADDRTYPE\<1:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t reserved_32_39 : 8;
+ uint64_t ba : 32; /**< [ 31: 0](R/W) Bus address. Address bits\<63:32\> for read/write operations that use this region. */
+#else /* Word 0 - Little Endian */
+ uint64_t ba : 32; /**< [ 31: 0](R/W) Bus address. Address bits\<63:32\> for read/write operations that use this region. */
+ uint64_t reserved_32_39 : 8;
+ uint64_t rtype : 2; /**< [ 41: 40](R/W) Read type. ADDRTYPE\<1:0\> for read operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t wtype : 2; /**< [ 43: 42](R/W) Write type. ADDRTYPE\<1:0\> for write operations to this region.
+ ADDRTYPE\<0\> is the relaxed-order attribute.
+ ADDRTYPE\<1\> is the no-snoop attribute. */
+ uint64_t esw : 2; /**< [ 45: 44](RO) Reserved. */
+ uint64_t esr : 2; /**< [ 47: 46](RO) Reserved. */
+ uint64_t nmerge : 1; /**< [ 48: 48](R/W) When set, no write merging is allowed in this window. */
+ uint64_t epf : 3; /**< [ 51: 49](R/W) The EPF that reads/writes to this subid are sent. */
+ uint64_t zero : 1; /**< [ 52: 52](R/W) Causes all byte read operations to be zero-length read operations. Returns zeros to the
+ EXEC for all read data. */
+ uint64_t ctype : 2; /**< [ 54: 53](R/W) The command type to be generated:
+ 0x0 = PCI memory.
+ 0x1 = PCI configuration (only 8, 16, 32-bit loads are supported). Note normally the ECAM
+ would be used in place of this CTYPE.
+ 0x2 = PCI I/O (Only 8, 16, 32-bit loads are supported).
+ 0x3 = Reserved. */
+ uint64_t reserved_55_63 : 9;
+#endif /* Word 0 - End */
+ } cn83xx;
+};
+typedef union bdk_slix_s2m_regx_acc bdk_slix_s2m_regx_acc_t;
+
+/* Compute the physical CSR address of SLI(a)_S2M_REG(b)_ACC for the chip
+ * model detected at runtime. Out-of-range (a,b) combinations are reported
+ * via __bdk_csr_fatal(); that call is expected not to return (otherwise
+ * control would fall off the end of this non-void function — confirm it
+ * is declared noreturn). */
+static inline uint64_t BDK_SLIX_S2M_REGX_ACC(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_S2M_REGX_ACC(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a==0) && (b<=255)))
+ return 0x874001000000ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=255)))
+ return 0x874001000000ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0xff);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=255)))
+ return 0x874001000000ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xff);
+ __bdk_csr_fatal("SLIX_S2M_REGX_ACC", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_S2M_REGX_ACC(a,b) bdk_slix_s2m_regx_acc_t
+#define bustype_BDK_SLIX_S2M_REGX_ACC(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_S2M_REGX_ACC(a,b) "SLIX_S2M_REGX_ACC"
+#define device_bar_BDK_SLIX_S2M_REGX_ACC(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_S2M_REGX_ACC(a,b) (a)
+#define arguments_BDK_SLIX_S2M_REGX_ACC(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) sli#_s2m_reg#_acc2
+ *
+ * SLI Region Access 2 Registers
+ * See SLI()_LMAC_CONST0().
+ */
+union bdk_slix_s2m_regx_acc2
+{
+ uint64_t u;
+ /* Bitfield order is reversed between the big- and little-endian variants
+ * so each field lands on the same hardware bit positions either way. */
+ struct bdk_slix_s2m_regx_acc2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_16_63 : 48;
+ uint64_t vf_rsvd : 9; /**< [ 15: 7](RO) For expansion of the [VF] field for compatibility with other chips with larger
+ SLI()_LMAC_CONST1()[VFS]. */
+ uint64_t pvf : 7; /**< [ 6: 0](R/W) The PF/VF number. 0x0=PF, 0x1-0x40 is VF number (i.e 0x1=VF1).
+ Must be less than SLI()_LMAC_CONST1()[VFS]. */
+#else /* Word 0 - Little Endian */
+ uint64_t pvf : 7; /**< [ 6: 0](R/W) The PF/VF number. 0x0=PF, 0x1-0x40 is VF number (i.e 0x1=VF1).
+ Must be less than SLI()_LMAC_CONST1()[VFS]. */
+ uint64_t vf_rsvd : 9; /**< [ 15: 7](RO) For expansion of the [VF] field for compatibility with other chips with larger
+ SLI()_LMAC_CONST1()[VFS]. */
+ uint64_t reserved_16_63 : 48;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_s2m_regx_acc2_s cn; */
+};
+typedef union bdk_slix_s2m_regx_acc2 bdk_slix_s2m_regx_acc2_t;
+
+/* CSR address of SLI(a)_S2M_REG(b)_ACC2; only CN83XX has this register.
+ * Invalid arguments are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_S2M_REGX_ACC2(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_S2M_REGX_ACC2(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=255)))
+ return 0x874001005000ll + 0x1000000000ll * ((a) & 0x0) + 0x10ll * ((b) & 0xff);
+ __bdk_csr_fatal("SLIX_S2M_REGX_ACC2", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_S2M_REGX_ACC2(a,b) bdk_slix_s2m_regx_acc2_t
+#define bustype_BDK_SLIX_S2M_REGX_ACC2(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_S2M_REGX_ACC2(a,b) "SLIX_S2M_REGX_ACC2"
+#define device_bar_BDK_SLIX_S2M_REGX_ACC2(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_S2M_REGX_ACC2(a,b) (a)
+#define arguments_BDK_SLIX_S2M_REGX_ACC2(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_scratch_1
+ *
+ * SLI Scratch 1 Register
+ * These registers are general purpose 64-bit scratch registers for software use.
+ */
+union bdk_slix_scratch_1
+{
+ uint64_t u;
+ struct bdk_slix_scratch_1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) The value in this register is totally software defined. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) The value in this register is totally software defined. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_scratch_1_s cn; */
+};
+typedef union bdk_slix_scratch_1 bdk_slix_scratch_1_t;
+
+/* CSR address of SLI(a)_SCRATCH_1 for the running model; out-of-range
+ * SLI indices are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_SCRATCH_1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_SCRATCH_1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x874000001000ll + 0x1000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+ return 0x874000001000ll + 0x1000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x874000001000ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("SLIX_SCRATCH_1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_SCRATCH_1(a) bdk_slix_scratch_1_t
+#define bustype_BDK_SLIX_SCRATCH_1(a) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_SCRATCH_1(a) "SLIX_SCRATCH_1"
+#define device_bar_BDK_SLIX_SCRATCH_1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_SCRATCH_1(a) (a)
+#define arguments_BDK_SLIX_SCRATCH_1(a) (a),-1,-1,-1
+
+/**
+ * Register (PEXP_NCB) sli#_scratch_2
+ *
+ * SLI Scratch 2 Register
+ * These registers are general purpose 64-bit scratch registers for software use.
+ */
+union bdk_slix_scratch_2
+{
+ uint64_t u;
+ struct bdk_slix_scratch_2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) The value in this register is totally software defined. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 64; /**< [ 63: 0](R/W) The value in this register is totally software defined. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_scratch_2_s cn; */
+};
+typedef union bdk_slix_scratch_2 bdk_slix_scratch_2_t;
+
+/* CSR address of SLI(a)_SCRATCH_2 for the running model; out-of-range
+ * SLI indices are reported via __bdk_csr_fatal(). */
+static inline uint64_t BDK_SLIX_SCRATCH_2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_SCRATCH_2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x874000001010ll + 0x1000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+ return 0x874000001010ll + 0x1000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x874000001010ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("SLIX_SCRATCH_2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_SCRATCH_2(a) bdk_slix_scratch_2_t
+#define bustype_BDK_SLIX_SCRATCH_2(a) BDK_CSR_TYPE_PEXP_NCB
+#define basename_BDK_SLIX_SCRATCH_2(a) "SLIX_SCRATCH_2"
+#define device_bar_BDK_SLIX_SCRATCH_2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_SCRATCH_2(a) (a)
+#define arguments_BDK_SLIX_SCRATCH_2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) sli#_sctl
+ *
+ * SLI Secure Control Register
+ */
+union bdk_slix_sctl
+{
+ uint64_t u;
+ struct bdk_slix_sctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t scen : 1; /**< [ 0: 0](SR/W) Allow SLI window transactions to request secure-world accesses.
+
+ 0 = SLI()_WIN_RD_ADDR[SECEN], SLI()_WIN_WR_ADDR[SECEN] are ignored and treated
+ as if zero. Window transactions onto NCB are nonsecure, though the SMMU may
+ later promote them to secure.
+
+ 1 = SLI()_WIN_RD_ADDR[SECEN], SLI()_WIN_WR_ADDR[SECEN] are honored. Window
+ transactions may request nonsecure or secure world. This bit should not be set
+ in trusted-mode.
+
+ Resets to 0 when in trusted-mode (RST_BOOT[TRUSTED_MODE]), else resets to 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t scen : 1; /**< [ 0: 0](SR/W) Allow SLI window transactions to request secure-world accesses.
+
+ 0 = SLI()_WIN_RD_ADDR[SECEN], SLI()_WIN_WR_ADDR[SECEN] are ignored and treated
+ as if zero. Window transactions onto NCB are nonsecure, though the SMMU may
+ later promote them to secure.
+
+ 1 = SLI()_WIN_RD_ADDR[SECEN], SLI()_WIN_WR_ADDR[SECEN] are honored. Window
+ transactions may request nonsecure or secure world. This bit should not be set
+ in trusted-mode.
+
+ Resets to 0 when in trusted-mode (RST_BOOT[TRUSTED_MODE]), else resets to 1. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* The CN81XX/CN88XX variant differs from the generic layout only in the
+ * field names cited by the [SCEN] description (RD_SEC/WR_SEC vs SECEN);
+ * the bit layout itself is identical. */
+ struct bdk_slix_sctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t scen : 1; /**< [ 0: 0](SR/W) Allow SLI window transactions to request secure-world accesses.
+
+ 0 = SLI()_WIN_RD_ADDR[RD_SEC], SLI()_WIN_WR_ADDR[WR_SEC] are ignored and treated
+ as if zero. Window transactions onto NCB are nonsecure, though the SMMU may
+ later promote them to secure.
+
+ 1 = SLI()_WIN_RD_ADDR[RD_SEC], SLI()_WIN_WR_ADDR[WR_SEC] are honored. Window
+ transactions may request nonsecure or secure world. This bit should not be set
+ in trusted-mode.
+
+ Resets to 0 when in trusted-mode (RST_BOOT[TRUSTED_MODE]), else resets to 1. */
+#else /* Word 0 - Little Endian */
+ uint64_t scen : 1; /**< [ 0: 0](SR/W) Allow SLI window transactions to request secure-world accesses.
+
+ 0 = SLI()_WIN_RD_ADDR[RD_SEC], SLI()_WIN_WR_ADDR[WR_SEC] are ignored and treated
+ as if zero. Window transactions onto NCB are nonsecure, though the SMMU may
+ later promote them to secure.
+
+ 1 = SLI()_WIN_RD_ADDR[RD_SEC], SLI()_WIN_WR_ADDR[WR_SEC] are honored. Window
+ transactions may request nonsecure or secure world. This bit should not be set
+ in trusted-mode.
+
+ Resets to 0 when in trusted-mode (RST_BOOT[TRUSTED_MODE]), else resets to 1. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } cn81xx;
+ /* struct bdk_slix_sctl_cn81xx cn88xx; */
+ /* struct bdk_slix_sctl_s cn83xx; */
+};
+typedef union bdk_slix_sctl bdk_slix_sctl_t;
+
+/* CSR address of SLI(a)_SCTL. Note: on CN88XX this register only exists on
+ * pass 2 and later (CAVIUM_CN88XX_PASS2_X); other arguments are fatal. */
+static inline uint64_t BDK_SLIX_SCTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_SCTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x874001002010ll + 0x1000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a==0))
+ return 0x874001002010ll + 0x1000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS2_X) && (a<=1))
+ return 0x874001002010ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("SLIX_SCTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_SCTL(a) bdk_slix_sctl_t
+#define bustype_BDK_SLIX_SCTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_SLIX_SCTL(a) "SLIX_SCTL"
+#define device_bar_BDK_SLIX_SCTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SLIX_SCTL(a) (a)
+#define arguments_BDK_SLIX_SCTL(a) (a),-1,-1,-1
+
+/**
+ * Register (PEXP) sli#_win_rd_addr
+ *
+ * SLI Window Read Address Register
+ * This register contains the address to be read when SLI()_WIN_RD_DATA is read.
+ * Writing this register causes a read operation to take place.
+ * This register should not be used to read SLI_* registers.
+ */
+union bdk_slix_win_rd_addr
+{
+ uint64_t u;
+ struct bdk_slix_win_rd_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t secen : 1; /**< [ 63: 63](R/W) This request is a secure-world transaction. This is intended to be set only for
+ transactions during early boot when the SMMU is in bypass mode; after SMMU
+ initialization SMMU()_SDDR() may be used to control which SLI streams are secure.
+ NOTE(review): other register descriptions in this file say SMMU()_SSDR();
+ confirm the correct register name against the HRM.
+
+ If SLI()_SCTL[SECEN] = 0, this bit is ignored and transactions are always nonsecure
+ onto the NCB, though the SMMU may later promote them to secure. */
+ uint64_t reserved_51_62 : 12;
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command size.
+ 0x3 = Load 8 bytes.
+ 0x2 = Load 4 bytes.
+ 0x1 = Load 2 bytes.
+ 0x0 = Load 1 bytes. */
+ uint64_t rd_addr : 49; /**< [ 48: 0](R/W) The IOVA sent to the NCB for this load request. */
+#else /* Word 0 - Little Endian */
+ uint64_t rd_addr : 49; /**< [ 48: 0](R/W) The IOVA sent to the NCB for this load request. */
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command size.
+ 0x3 = Load 8 bytes.
+ 0x2 = Load 4 bytes.
+ 0x1 = Load 2 bytes.
+ 0x0 = Load 1 bytes. */
+ uint64_t reserved_51_62 : 12;
+ uint64_t secen : 1; /**< [ 63: 63](R/W) This request is a secure-world transaction. This is intended to be set only for
+ transactions during early boot when the SMMU is in bypass mode; after SMMU
+ initialization SMMU()_SDDR() may be used to control which SLI streams are secure.
+
+ If SLI()_SCTL[SECEN] = 0, this bit is ignored and transactions are always nonsecure
+ onto the NCB, though the SMMU may later promote them to secure. */
+#endif /* Word 0 - End */
+ } s;
+ /* CN88XX pass 1 lacks the [SECEN] bit; bit 63 is reserved there. */
+ struct bdk_slix_win_rd_addr_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_51_63 : 13;
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command size.
+ 0x3 = Load 8 bytes.
+ 0x2 = Load 4 bytes.
+ 0x1 = Load 2 bytes.
+ 0x0 = Load 1 bytes. */
+ uint64_t rd_addr : 49; /**< [ 48: 0](R/W) The IOVA sent to the NCB for this load request. */
+#else /* Word 0 - Little Endian */
+ uint64_t rd_addr : 49; /**< [ 48: 0](R/W) The IOVA sent to the NCB for this load request. */
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command size.
+ 0x3 = Load 8 bytes.
+ 0x2 = Load 4 bytes.
+ 0x1 = Load 2 bytes.
+ 0x0 = Load 1 bytes. */
+ uint64_t reserved_51_63 : 13;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_slix_win_rd_addr_s cn81xx; */
+ /* struct bdk_slix_win_rd_addr_s cn88xxp2; */
+};
+typedef union bdk_slix_win_rd_addr bdk_slix_win_rd_addr_t;
+
+/* CSR address of SLI(a)_WIN_RD_ADDR (CN81XX/CN88XX only; CN83XX uses the
+ * indexed SLI()_WIN_RD_ADDR() variant, BDK_SLIX_WIN_RD_ADDRX). */
+static inline uint64_t BDK_SLIX_WIN_RD_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_RD_ADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x10ll + 0x10000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x10ll + 0x10000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("SLIX_WIN_RD_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_RD_ADDR(a) bdk_slix_win_rd_addr_t
+#define bustype_BDK_SLIX_WIN_RD_ADDR(a) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_RD_ADDR(a) "SLIX_WIN_RD_ADDR"
+#define busnum_BDK_SLIX_WIN_RD_ADDR(a) (a)
+#define arguments_BDK_SLIX_WIN_RD_ADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PEXP) sli#_win_rd_addr#
+ *
+ * SLI Window Read Address Register
+ * This register contains the address to be read when SLI()_WIN_RD_DATA() is read.
+ * Writing this register causes a read operation to take place.
+ * This register should not be used to read SLI_* registers.
+ */
+union bdk_slix_win_rd_addrx
+{
+ uint64_t u;
+ struct bdk_slix_win_rd_addrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t secen : 1; /**< [ 63: 63](R/W) This request is a secure-world transaction. This is intended to be set only for
+ transactions during early boot when the SMMU is in bypass mode; after SMMU
+ initialization SMMU()_SSDR() may be used to control which SLI streams are secure.
+
+ If SLI()_SCTL[SCEN] = 0, this bit is ignored and transactions are always nonsecure
+ onto the NCB, though the SMMU may later promote them to secure. */
+ uint64_t reserved_51_62 : 12;
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command size.
+ 0x3 = Load 8 bytes.
+ 0x2 = Load 4 bytes.
+ 0x1 = Load 2 bytes.
+ 0x0 = Load 1 bytes. */
+ uint64_t rd_addr : 49; /**< [ 48: 0](R/W) The IOVA sent to the NCB for this load request. */
+#else /* Word 0 - Little Endian */
+ uint64_t rd_addr : 49; /**< [ 48: 0](R/W) The IOVA sent to the NCB for this load request. */
+ uint64_t ld_cmd : 2; /**< [ 50: 49](R/W) The load command size.
+ 0x3 = Load 8 bytes.
+ 0x2 = Load 4 bytes.
+ 0x1 = Load 2 bytes.
+ 0x0 = Load 1 bytes. */
+ uint64_t reserved_51_62 : 12;
+ uint64_t secen : 1; /**< [ 63: 63](R/W) This request is a secure-world transaction. This is intended to be set only for
+ transactions during early boot when the SMMU is in bypass mode; after SMMU
+ initialization SMMU()_SSDR() may be used to control which SLI streams are secure.
+
+ If SLI()_SCTL[SCEN] = 0, this bit is ignored and transactions are always nonsecure
+ onto the NCB, though the SMMU may later promote them to secure. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_win_rd_addrx_s cn; */
+};
+typedef union bdk_slix_win_rd_addrx bdk_slix_win_rd_addrx_t;
+
+/* CSR address of the indexed SLI(a)_WIN_RD_ADDR(b); CN83XX only, b<=3. */
+static inline uint64_t BDK_SLIX_WIN_RD_ADDRX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_RD_ADDRX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x2c010ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_WIN_RD_ADDRX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_RD_ADDRX(a,b) bdk_slix_win_rd_addrx_t
+#define bustype_BDK_SLIX_WIN_RD_ADDRX(a,b) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_RD_ADDRX(a,b) "SLIX_WIN_RD_ADDRX"
+#define busnum_BDK_SLIX_WIN_RD_ADDRX(a,b) (a)
+#define arguments_BDK_SLIX_WIN_RD_ADDRX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP) sli#_win_rd_data
+ *
+ * SLI Window Read Data Register
+ * This register contains the data returned by the read operation started by
+ * writing SLI()_WIN_RD_ADDR. (Original comment said "the address to be read",
+ * which was a copy-paste from the address register description.)
+ */
+union bdk_slix_win_rd_data
+{
+ uint64_t u;
+ struct bdk_slix_win_rd_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rd_data : 64; /**< [ 63: 0](RO/H) The read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t rd_data : 64; /**< [ 63: 0](RO/H) The read data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_win_rd_data_s cn; */
+};
+typedef union bdk_slix_win_rd_data bdk_slix_win_rd_data_t;
+
+/* CSR address of SLI(a)_WIN_RD_DATA (CN81XX/CN88XX only). */
+static inline uint64_t BDK_SLIX_WIN_RD_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_RD_DATA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x40ll + 0x10000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x40ll + 0x10000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("SLIX_WIN_RD_DATA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_RD_DATA(a) bdk_slix_win_rd_data_t
+#define bustype_BDK_SLIX_WIN_RD_DATA(a) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_RD_DATA(a) "SLIX_WIN_RD_DATA"
+#define busnum_BDK_SLIX_WIN_RD_DATA(a) (a)
+#define arguments_BDK_SLIX_WIN_RD_DATA(a) (a),-1,-1,-1
+
+/**
+ * Register (PEXP) sli#_win_rd_data#
+ *
+ * SLI Window Read Data Register
+ * This register contains the data returned by the read operation started by
+ * writing SLI()_WIN_RD_ADDR(). (Original comment said "the address to be
+ * read", a copy-paste from the address register description.)
+ */
+union bdk_slix_win_rd_datax
+{
+ uint64_t u;
+ struct bdk_slix_win_rd_datax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t rd_data : 64; /**< [ 63: 0](RO/H) The read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t rd_data : 64; /**< [ 63: 0](RO/H) The read data. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_win_rd_datax_s cn; */
+};
+typedef union bdk_slix_win_rd_datax bdk_slix_win_rd_datax_t;
+
+/* CSR address of the indexed SLI(a)_WIN_RD_DATA(b); CN83XX only, b<=3. */
+static inline uint64_t BDK_SLIX_WIN_RD_DATAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_RD_DATAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x2c040ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_WIN_RD_DATAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_RD_DATAX(a,b) bdk_slix_win_rd_datax_t
+#define bustype_BDK_SLIX_WIN_RD_DATAX(a,b) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_RD_DATAX(a,b) "SLIX_WIN_RD_DATAX"
+#define busnum_BDK_SLIX_WIN_RD_DATAX(a,b) (a)
+#define arguments_BDK_SLIX_WIN_RD_DATAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP) sli#_win_wr_addr
+ *
+ * SLI Window Write Address Register
+ * Contains the address to be written to when a write operation is started by writing
+ * SLI()_WIN_WR_DATA.
+ * This register should not be used to write SLI_* registers.
+ */
+union bdk_slix_win_wr_addr
+{
+ uint64_t u;
+ struct bdk_slix_win_wr_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t secen : 1; /**< [ 63: 63](R/W) This request is a secure-world transaction. This is intended to be set only for
+ transactions during early boot when the SMMU is in bypass mode; after SMMU
+ initialization SMMU()_SDDR() may be used to control which SLI streams are secure.
+
+ If SLI()_SCTL[SECEN] = 0, this bit is ignored and transactions are always nonsecure
+ onto the NCB, though the SMMU may later promote them to secure. */
+ uint64_t reserved_49_62 : 14;
+ uint64_t wr_addr : 46; /**< [ 48: 3](R/W) The IOVA sent to the NCB for this store request. */
+ uint64_t reserved_0_2 : 3;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_2 : 3;
+ uint64_t wr_addr : 46; /**< [ 48: 3](R/W) The IOVA sent to the NCB for this store request. */
+ uint64_t reserved_49_62 : 14;
+ uint64_t secen : 1; /**< [ 63: 63](R/W) This request is a secure-world transaction. This is intended to be set only for
+ transactions during early boot when the SMMU is in bypass mode; after SMMU
+ initialization SMMU()_SDDR() may be used to control which SLI streams are secure.
+
+ If SLI()_SCTL[SECEN] = 0, this bit is ignored and transactions are always nonsecure
+ onto the NCB, though the SMMU may later promote them to secure. */
+#endif /* Word 0 - End */
+ } s;
+ /* CN88XX pass 1 lacks the [SECEN] bit; bit 63 is reserved there. */
+ struct bdk_slix_win_wr_addr_cn88xxp1
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t wr_addr : 46; /**< [ 48: 3](R/W) The IOVA sent to the NCB for this store request. */
+ uint64_t reserved_0_2 : 3;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_2 : 3;
+ uint64_t wr_addr : 46; /**< [ 48: 3](R/W) The IOVA sent to the NCB for this store request. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn88xxp1;
+ /* struct bdk_slix_win_wr_addr_s cn81xx; */
+ /* struct bdk_slix_win_wr_addr_s cn88xxp2; */
+};
+typedef union bdk_slix_win_wr_addr bdk_slix_win_wr_addr_t;
+
+/* CSR address of SLI(a)_WIN_WR_ADDR (CN81XX/CN88XX only). */
+static inline uint64_t BDK_SLIX_WIN_WR_ADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_WR_ADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0ll + 0x10000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0ll + 0x10000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("SLIX_WIN_WR_ADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_WR_ADDR(a) bdk_slix_win_wr_addr_t
+#define bustype_BDK_SLIX_WIN_WR_ADDR(a) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_WR_ADDR(a) "SLIX_WIN_WR_ADDR"
+#define busnum_BDK_SLIX_WIN_WR_ADDR(a) (a)
+#define arguments_BDK_SLIX_WIN_WR_ADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (PEXP) sli#_win_wr_addr#
+ *
+ * SLI Window Write Address Register
+ * Contains the address to be written to when a write operation is started by writing
+ * SLI()_WIN_WR_DATA().
+ * This register should not be used to write SLI_* registers.
+ */
+union bdk_slix_win_wr_addrx
+{
+ uint64_t u;
+ struct bdk_slix_win_wr_addrx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t secen : 1; /**< [ 63: 63](R/W) This request is a secure-world transaction. This is intended to be set only for
+ transactions during early boot when the SMMU is in bypass mode; after SMMU
+ initialization SMMU()_SSDR() may be used to control which SLI streams are secure.
+
+ If SLI()_SCTL[SCEN] = 0, this bit is ignored and transactions are always nonsecure
+ onto the NCB, though the SMMU may later promote them to secure. */
+ uint64_t reserved_49_62 : 14;
+ uint64_t wr_addr : 46; /**< [ 48: 3](R/W) The IOVA sent to the NCB for this store request. */
+ uint64_t reserved_0_2 : 3;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_2 : 3;
+ uint64_t wr_addr : 46; /**< [ 48: 3](R/W) The IOVA sent to the NCB for this store request. */
+ uint64_t reserved_49_62 : 14;
+ uint64_t secen : 1; /**< [ 63: 63](R/W) This request is a secure-world transaction. This is intended to be set only for
+ transactions during early boot when the SMMU is in bypass mode; after SMMU
+ initialization SMMU()_SSDR() may be used to control which SLI streams are secure.
+
+ If SLI()_SCTL[SCEN] = 0, this bit is ignored and transactions are always nonsecure
+ onto the NCB, though the SMMU may later promote them to secure. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_win_wr_addrx_s cn; */
+};
+typedef union bdk_slix_win_wr_addrx bdk_slix_win_wr_addrx_t;
+
+/* CSR address of the indexed SLI(a)_WIN_WR_ADDR(b); CN83XX only, b<=3. */
+static inline uint64_t BDK_SLIX_WIN_WR_ADDRX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_WR_ADDRX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x2c000ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_WIN_WR_ADDRX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_WR_ADDRX(a,b) bdk_slix_win_wr_addrx_t
+#define bustype_BDK_SLIX_WIN_WR_ADDRX(a,b) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_WR_ADDRX(a,b) "SLIX_WIN_WR_ADDRX"
+#define busnum_BDK_SLIX_WIN_WR_ADDRX(a,b) (a)
+#define arguments_BDK_SLIX_WIN_WR_ADDRX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP) sli#_win_wr_data
+ *
+ * SLI Window Write Data Register
+ * This register contains the data to write to the address located in SLI()_WIN_WR_ADDR.
+ * Writing this register causes a write operation to take place.
+ */
+union bdk_slix_win_wr_data
+{
+ uint64_t u;
+ struct bdk_slix_win_wr_data_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wr_data : 64; /**< [ 63: 0](R/W) The data to be written. */
+#else /* Word 0 - Little Endian */
+ uint64_t wr_data : 64; /**< [ 63: 0](R/W) The data to be written. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_win_wr_data_s cn; */
+};
+typedef union bdk_slix_win_wr_data bdk_slix_win_wr_data_t;
+
+/* CSR address of SLI(a)_WIN_WR_DATA (CN81XX/CN88XX only). */
+static inline uint64_t BDK_SLIX_WIN_WR_DATA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_WR_DATA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x20ll + 0x10000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x20ll + 0x10000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("SLIX_WIN_WR_DATA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_WR_DATA(a) bdk_slix_win_wr_data_t
+#define bustype_BDK_SLIX_WIN_WR_DATA(a) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_WR_DATA(a) "SLIX_WIN_WR_DATA"
+#define busnum_BDK_SLIX_WIN_WR_DATA(a) (a)
+#define arguments_BDK_SLIX_WIN_WR_DATA(a) (a),-1,-1,-1
+
+/**
+ * Register (PEXP) sli#_win_wr_data#
+ *
+ * SLI Window Write Data Register
+ * This register contains the data to write to the address located in SLI()_WIN_WR_ADDR().
+ * Writing this register causes a write operation to take place.
+ */
+union bdk_slix_win_wr_datax
+{
+ uint64_t u;
+ struct bdk_slix_win_wr_datax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t wr_data : 64; /**< [ 63: 0](R/W) The data to be written. */
+#else /* Word 0 - Little Endian */
+ uint64_t wr_data : 64; /**< [ 63: 0](R/W) The data to be written. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_win_wr_datax_s cn; */
+};
+typedef union bdk_slix_win_wr_datax bdk_slix_win_wr_datax_t;
+
+static inline uint64_t BDK_SLIX_WIN_WR_DATAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_WR_DATAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x2c020ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_WIN_WR_DATAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_WR_DATAX(a,b) bdk_slix_win_wr_datax_t
+#define bustype_BDK_SLIX_WIN_WR_DATAX(a,b) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_WR_DATAX(a,b) "SLIX_WIN_WR_DATAX"
+#define busnum_BDK_SLIX_WIN_WR_DATAX(a,b) (a)
+#define arguments_BDK_SLIX_WIN_WR_DATAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (PEXP) sli#_win_wr_mask
+ *
+ * SLI Window Write Mask Register
+ * This register contains the mask for the data in SLI()_WIN_WR_DATA.
+ */
+union bdk_slix_win_wr_mask
+{
+ uint64_t u;
+ struct bdk_slix_win_wr_mask_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t wr_mask : 8; /**< [ 7: 0](R/W) The byte enables sent to the NCB for this store request. */
+#else /* Word 0 - Little Endian */
+ uint64_t wr_mask : 8; /**< [ 7: 0](R/W) The byte enables sent to the NCB for this store request. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_win_wr_mask_s cn; */
+};
+typedef union bdk_slix_win_wr_mask bdk_slix_win_wr_mask_t;
+
+static inline uint64_t BDK_SLIX_WIN_WR_MASK(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_WR_MASK(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a==0))
+ return 0x30ll + 0x10000000000ll * ((a) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x30ll + 0x10000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("SLIX_WIN_WR_MASK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_WR_MASK(a) bdk_slix_win_wr_mask_t
+#define bustype_BDK_SLIX_WIN_WR_MASK(a) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_WR_MASK(a) "SLIX_WIN_WR_MASK"
+#define busnum_BDK_SLIX_WIN_WR_MASK(a) (a)
+#define arguments_BDK_SLIX_WIN_WR_MASK(a) (a),-1,-1,-1
+
+/**
+ * Register (PEXP) sli#_win_wr_mask#
+ *
+ * SLI Window Write Mask Register
+ * This register contains the mask for the data in SLI()_WIN_WR_DATA().
+ */
+union bdk_slix_win_wr_maskx
+{
+ uint64_t u;
+ struct bdk_slix_win_wr_maskx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t wr_mask : 8; /**< [ 7: 0](R/W) The byte enables sent to the NCB for this store request. */
+#else /* Word 0 - Little Endian */
+ uint64_t wr_mask : 8; /**< [ 7: 0](R/W) The byte enables sent to the NCB for this store request. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_slix_win_wr_maskx_s cn; */
+};
+typedef union bdk_slix_win_wr_maskx bdk_slix_win_wr_maskx_t;
+
+static inline uint64_t BDK_SLIX_WIN_WR_MASKX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SLIX_WIN_WR_MASKX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a==0) && (b<=3)))
+ return 0x2c030ll + 0x1000000000ll * ((a) & 0x0) + 0x800000ll * ((b) & 0x3);
+ __bdk_csr_fatal("SLIX_WIN_WR_MASKX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_SLIX_WIN_WR_MASKX(a,b) bdk_slix_win_wr_maskx_t
+#define bustype_BDK_SLIX_WIN_WR_MASKX(a,b) BDK_CSR_TYPE_PEXP
+#define basename_BDK_SLIX_WIN_WR_MASKX(a,b) "SLIX_WIN_WR_MASKX"
+#define busnum_BDK_SLIX_WIN_WR_MASKX(a,b) (a)
+#define arguments_BDK_SLIX_WIN_WR_MASKX(a,b) (a),(b),-1,-1
+
+#endif /* __BDK_CSRS_SLI_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-smi.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-smi.h
new file mode 100644
index 0000000000..a77a2a7de6
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-smi.h
@@ -0,0 +1,551 @@
+#ifndef __BDK_CSRS_SMI_H__
+#define __BDK_CSRS_SMI_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium SMI.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration smi_bar_e
+ *
+ * SMI Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_SMI_BAR_E_SMI_PF_BAR0_CN8 (0x87e005000000ll)
+#define BDK_SMI_BAR_E_SMI_PF_BAR0_CN8_SIZE 0x800000ull
+#define BDK_SMI_BAR_E_SMI_PF_BAR0_CN9 (0x87e005000000ll)
+#define BDK_SMI_BAR_E_SMI_PF_BAR0_CN9_SIZE 0x100000ull
+
+/**
+ * Register (RSL) smi_#_clk
+ *
+ * SMI Clock Control Register
+ * This register determines the SMI timing characteristics.
+ * If software wants to change SMI CLK timing parameters ([SAMPLE]/[SAMPLE_HI]), software
+ * must delay the SMI_()_CLK CSR write by at least 512 coprocessor-clock cycles after the
+ * previous SMI operation is finished.
+ */
+union bdk_smi_x_clk
+{
+ uint64_t u;
+ struct bdk_smi_x_clk_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t mode : 1; /**< [ 24: 24](R/W) IEEE operating mode; 0 = Clause 22 compliant, 1 = Clause 45 compliant. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sample_hi : 5; /**< [ 20: 16](R/W) Sample (extended bits). Specifies in coprocessor clock cycles when to sample read data. */
+ uint64_t sample_mode : 1; /**< [ 15: 15](R/W) Read data sampling mode.
+ According to the 802.3 specification, on read operations, the STA transitions SMIn_MDC and
+ the PHY drives SMIn_MDIO with some delay relative to that edge. This is Edge1.
+ The STA then samples SMIn_MDIO on the next rising edge of SMIn_MDC. This is Edge2. The
+ read data can be sampled relative to either edge.
+ 0 = Sample time is relative to Edge2.
+ 1 = Sample time is relative to Edge1. */
+ uint64_t reserved_14 : 1;
+ uint64_t clk_idle : 1; /**< [ 13: 13](R/W) SMIn_MDC toggle. When set, this bit causes SMIn_MDC not to toggle on idle cycles. */
+ uint64_t preamble : 1; /**< [ 12: 12](R/W) Preamble. When this bit is set, the 32-bit preamble is sent first on SMI transactions.
+ This field must be set to 1 when [MODE] = 1 in order for the receiving PHY to correctly
+ frame the transaction. */
+ uint64_t sample : 4; /**< [ 11: 8](R/W) Sample read data. Specifies the number of coprocessor clock cycles after the rising edge
+ of SMIn_MDC to wait before sampling read data.
+
+ _ ([SAMPLE_HI],[SAMPLE]) \> 1
+
+ _ ([SAMPLE_HI],[SAMPLE]) + 3 \<= 2 * [PHASE] */
+ uint64_t phase : 8; /**< [ 7: 0](R/W) MDC clock phase. Specifies the number of coprocessor clock cycles that make up an SMIn_MDC
+ phase.
+
+ _ [PHASE] \> 2 */
+#else /* Word 0 - Little Endian */
+ uint64_t phase : 8; /**< [ 7: 0](R/W) MDC clock phase. Specifies the number of coprocessor clock cycles that make up an SMIn_MDC
+ phase.
+
+ _ [PHASE] \> 2 */
+ uint64_t sample : 4; /**< [ 11: 8](R/W) Sample read data. Specifies the number of coprocessor clock cycles after the rising edge
+ of SMIn_MDC to wait before sampling read data.
+
+ _ ([SAMPLE_HI],[SAMPLE]) \> 1
+
+ _ ([SAMPLE_HI],[SAMPLE]) + 3 \<= 2 * [PHASE] */
+ uint64_t preamble : 1; /**< [ 12: 12](R/W) Preamble. When this bit is set, the 32-bit preamble is sent first on SMI transactions.
+ This field must be set to 1 when [MODE] = 1 in order for the receiving PHY to correctly
+ frame the transaction. */
+ uint64_t clk_idle : 1; /**< [ 13: 13](R/W) SMIn_MDC toggle. When set, this bit causes SMIn_MDC not to toggle on idle cycles. */
+ uint64_t reserved_14 : 1;
+ uint64_t sample_mode : 1; /**< [ 15: 15](R/W) Read data sampling mode.
+ According to the 802.3 specification, on read operations, the STA transitions SMIn_MDC and
+ the PHY drives SMIn_MDIO with some delay relative to that edge. This is Edge1.
+ The STA then samples SMIn_MDIO on the next rising edge of SMIn_MDC. This is Edge2. The
+ read data can be sampled relative to either edge.
+ 0 = Sample time is relative to Edge2.
+ 1 = Sample time is relative to Edge1. */
+ uint64_t sample_hi : 5; /**< [ 20: 16](R/W) Sample (extended bits). Specifies in coprocessor clock cycles when to sample read data. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t mode : 1; /**< [ 24: 24](R/W) IEEE operating mode; 0 = Clause 22 compliant, 1 = Clause 45 compliant. */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_smi_x_clk_s cn8; */
+ struct bdk_smi_x_clk_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_25_63 : 39;
+ uint64_t mode : 1; /**< [ 24: 24](R/W) IEEE operating mode; 0 = Clause 22 compliant, 1 = Clause 45 compliant. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t sample_hi : 5; /**< [ 20: 16](R/W) Sample (extended bits). Specifies in coprocessor clock cycles when to sample read data. */
+ uint64_t sample_mode : 1; /**< [ 15: 15](R/W) Read data sampling mode.
+ According to the 802.3 specification, on read operations, the STA transitions SMIn_MDC and
+ the PHY drives SMIn_MDIO with some delay relative to that edge. This is Edge1.
+ The STA then samples SMIn_MDIO on the next rising edge of SMIn_MDC. This is Edge2. The
+ read data can be sampled relative to either edge.
+ 0 = Sample time is relative to Edge2.
+ 1 = Sample time is relative to Edge1. */
+ uint64_t reserved_14 : 1;
+ uint64_t clk_idle : 1; /**< [ 13: 13](R/W) SMIn_MDC toggle. When set, this bit causes SMIn_MDC not to toggle on idle cycles. */
+ uint64_t preamble : 1; /**< [ 12: 12](R/W) Preamble. When this bit is set, the 32-bit preamble is sent first on SMI transactions.
+ This field must be set to 1 when [MODE] = 1 in order for the receiving PHY to correctly
+ frame the transaction. */
+ uint64_t sample : 4; /**< [ 11: 8](R/W) Sample read data. Specifies the number of coprocessor clock cycles after the rising edge
+ of SMIn_MDC to wait before sampling read data.
+
+ _ ([SAMPLE_HI],[SAMPLE]) \> 1
+
+ _ ([SAMPLE_HI],[SAMPLE]) + 3 \<= 2 * [PHASE] */
+ uint64_t phase : 8; /**< [ 7: 0](R/W) MDC clock phase. Specifies the number of coprocessor clock cycles that make up an SMIn_MDC
+ phase.
+
+ _ [PHASE] \> 2
+
+ Internal:
+ FIXME number of 100MHz clocks or coproc clocks based on CSR that defaults to 100MHz. */
+#else /* Word 0 - Little Endian */
+ uint64_t phase : 8; /**< [ 7: 0](R/W) MDC clock phase. Specifies the number of coprocessor clock cycles that make up an SMIn_MDC
+ phase.
+
+ _ [PHASE] \> 2
+
+ Internal:
+ FIXME number of 100MHz clocks or coproc clocks based on CSR that defaults to 100MHz. */
+ uint64_t sample : 4; /**< [ 11: 8](R/W) Sample read data. Specifies the number of coprocessor clock cycles after the rising edge
+ of SMIn_MDC to wait before sampling read data.
+
+ _ ([SAMPLE_HI],[SAMPLE]) \> 1
+
+ _ ([SAMPLE_HI],[SAMPLE]) + 3 \<= 2 * [PHASE] */
+ uint64_t preamble : 1; /**< [ 12: 12](R/W) Preamble. When this bit is set, the 32-bit preamble is sent first on SMI transactions.
+ This field must be set to 1 when [MODE] = 1 in order for the receiving PHY to correctly
+ frame the transaction. */
+ uint64_t clk_idle : 1; /**< [ 13: 13](R/W) SMIn_MDC toggle. When set, this bit causes SMIn_MDC not to toggle on idle cycles. */
+ uint64_t reserved_14 : 1;
+ uint64_t sample_mode : 1; /**< [ 15: 15](R/W) Read data sampling mode.
+ According to the 802.3 specification, on read operations, the STA transitions SMIn_MDC and
+ the PHY drives SMIn_MDIO with some delay relative to that edge. This is Edge1.
+ The STA then samples SMIn_MDIO on the next rising edge of SMIn_MDC. This is Edge2. The
+ read data can be sampled relative to either edge.
+ 0 = Sample time is relative to Edge2.
+ 1 = Sample time is relative to Edge1. */
+ uint64_t sample_hi : 5; /**< [ 20: 16](R/W) Sample (extended bits). Specifies in coprocessor clock cycles when to sample read data. */
+ uint64_t reserved_21_23 : 3;
+ uint64_t mode : 1; /**< [ 24: 24](R/W) IEEE operating mode; 0 = Clause 22 compliant, 1 = Clause 45 compliant. */
+ uint64_t reserved_25_63 : 39;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_smi_x_clk bdk_smi_x_clk_t;
+
+static inline uint64_t BDK_SMI_X_CLK(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SMI_X_CLK(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e005003818ll + 0x80ll * ((a) & 0x1);
+ __bdk_csr_fatal("SMI_X_CLK", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SMI_X_CLK(a) bdk_smi_x_clk_t
+#define bustype_BDK_SMI_X_CLK(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_SMI_X_CLK(a) "SMI_X_CLK"
+#define device_bar_BDK_SMI_X_CLK(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SMI_X_CLK(a) (a)
+#define arguments_BDK_SMI_X_CLK(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) smi_#_clken
+ *
+ * SMI Clock Enable Register
+ * This register is to force conditional clock enable.
+ */
+union bdk_smi_x_clken
+{
+ uint64_t u;
+ struct bdk_smi_x_clken_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t clken : 1; /**< [ 0: 0](R/W) Force the conditional clocking within SMI to be always on. For diagnostic use only. */
+#else /* Word 0 - Little Endian */
+ uint64_t clken : 1; /**< [ 0: 0](R/W) Force the conditional clocking within SMI to be always on. For diagnostic use only. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_smi_x_clken_s cn; */
+};
+typedef union bdk_smi_x_clken bdk_smi_x_clken_t;
+
+static inline uint64_t BDK_SMI_X_CLKEN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SMI_X_CLKEN(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x87e005003830ll + 0x80ll * ((a) & 0x1);
+ __bdk_csr_fatal("SMI_X_CLKEN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SMI_X_CLKEN(a) bdk_smi_x_clken_t
+#define bustype_BDK_SMI_X_CLKEN(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_SMI_X_CLKEN(a) "SMI_X_CLKEN"
+#define device_bar_BDK_SMI_X_CLKEN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SMI_X_CLKEN(a) (a)
+#define arguments_BDK_SMI_X_CLKEN(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) smi_#_cmd
+ *
+ * SMI Command Control Register
+ * This register forces a read or write command to the PHY. Write operations to this register
+ * create SMI transactions. Software will poll (depending on the transaction type).
+ */
+union bdk_smi_x_cmd
+{
+ uint64_t u;
+ struct bdk_smi_x_cmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t phy_op : 2; /**< [ 17: 16](R/W) PHY opcode, depending on SMI_()_CLK[MODE] setting.
+ * If SMI_()_CLK[MODE] = 0 (\<=1Gbs / Clause 22):
+ 0 = Write operation, encoded in the frame as 01.
+ 1 = Read operation, encoded in the frame as 10.
+
+ * If SMI_()_CLK[MODE] = 1 (\>1Gbs / Clause 45):
+ 0x0 = Address.
+ 0x1 = Write.
+ 0x2 = Post-read-increment-address.
+ 0x3 = Read. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t phy_adr : 5; /**< [ 12: 8](R/W) PHY address. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t reg_adr : 5; /**< [ 4: 0](R/W) PHY register offset. */
+#else /* Word 0 - Little Endian */
+ uint64_t reg_adr : 5; /**< [ 4: 0](R/W) PHY register offset. */
+ uint64_t reserved_5_7 : 3;
+ uint64_t phy_adr : 5; /**< [ 12: 8](R/W) PHY address. */
+ uint64_t reserved_13_15 : 3;
+ uint64_t phy_op : 2; /**< [ 17: 16](R/W) PHY opcode, depending on SMI_()_CLK[MODE] setting.
+ * If SMI_()_CLK[MODE] = 0 (\<=1Gbs / Clause 22):
+ 0 = Write operation, encoded in the frame as 01.
+ 1 = Read operation, encoded in the frame as 10.
+
+ * If SMI_()_CLK[MODE] = 1 (\>1Gbs / Clause 45):
+ 0x0 = Address.
+ 0x1 = Write.
+ 0x2 = Post-read-increment-address.
+ 0x3 = Read. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_smi_x_cmd_s cn; */
+};
+typedef union bdk_smi_x_cmd bdk_smi_x_cmd_t;
+
+static inline uint64_t BDK_SMI_X_CMD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SMI_X_CMD(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e005003800ll + 0x80ll * ((a) & 0x1);
+ __bdk_csr_fatal("SMI_X_CMD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SMI_X_CMD(a) bdk_smi_x_cmd_t
+#define bustype_BDK_SMI_X_CMD(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_SMI_X_CMD(a) "SMI_X_CMD"
+#define device_bar_BDK_SMI_X_CMD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SMI_X_CMD(a) (a)
+#define arguments_BDK_SMI_X_CMD(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) smi_#_en
+ *
+ * SMI Enable Register
+ * Enables the SMI interface.
+ */
+union bdk_smi_x_en
+{
+ uint64_t u;
+ struct bdk_smi_x_en_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t en : 1; /**< [ 0: 0](R/W) SMI/MDIO interface enable:
+ 1 = Enable interface.
+ 0 = Disable interface: no transactions, no SMIn_MDC transitions. */
+#else /* Word 0 - Little Endian */
+ uint64_t en : 1; /**< [ 0: 0](R/W) SMI/MDIO interface enable:
+ 1 = Enable interface.
+ 0 = Disable interface: no transactions, no SMIn_MDC transitions. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_smi_x_en_s cn; */
+};
+typedef union bdk_smi_x_en bdk_smi_x_en_t;
+
+static inline uint64_t BDK_SMI_X_EN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SMI_X_EN(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e005003820ll + 0x80ll * ((a) & 0x1);
+ __bdk_csr_fatal("SMI_X_EN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SMI_X_EN(a) bdk_smi_x_en_t
+#define bustype_BDK_SMI_X_EN(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_SMI_X_EN(a) "SMI_X_EN"
+#define device_bar_BDK_SMI_X_EN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SMI_X_EN(a) (a)
+#define arguments_BDK_SMI_X_EN(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) smi_#_rd_dat
+ *
+ * SMI Read Data Register
+ * This register contains the data in a read operation.
+ */
+union bdk_smi_x_rd_dat
+{
+ uint64_t u;
+ struct bdk_smi_x_rd_dat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t pending : 1; /**< [ 17: 17](RO/H) Read transaction pending. Indicates that an SMI read transaction is in flight. */
+ uint64_t val : 1; /**< [ 16: 16](RO/H) Read data valid. Asserts when the read transaction completes. A read to this register clears [VAL]. */
+ uint64_t dat : 16; /**< [ 15: 0](RO/H) Read data. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 16; /**< [ 15: 0](RO/H) Read data. */
+ uint64_t val : 1; /**< [ 16: 16](RO/H) Read data valid. Asserts when the read transaction completes. A read to this register clears [VAL]. */
+ uint64_t pending : 1; /**< [ 17: 17](RO/H) Read transaction pending. Indicates that an SMI read transaction is in flight. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_smi_x_rd_dat_s cn; */
+};
+typedef union bdk_smi_x_rd_dat bdk_smi_x_rd_dat_t;
+
+static inline uint64_t BDK_SMI_X_RD_DAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SMI_X_RD_DAT(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e005003810ll + 0x80ll * ((a) & 0x1);
+ __bdk_csr_fatal("SMI_X_RD_DAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SMI_X_RD_DAT(a) bdk_smi_x_rd_dat_t
+#define bustype_BDK_SMI_X_RD_DAT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_SMI_X_RD_DAT(a) "SMI_X_RD_DAT"
+#define device_bar_BDK_SMI_X_RD_DAT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SMI_X_RD_DAT(a) (a)
+#define arguments_BDK_SMI_X_RD_DAT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) smi_#_wr_dat
+ *
+ * SMI Write Data Register
+ * This register provides the data for a write operation.
+ */
+union bdk_smi_x_wr_dat
+{
+ uint64_t u;
+ struct bdk_smi_x_wr_dat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_18_63 : 46;
+ uint64_t pending : 1; /**< [ 17: 17](RO/H) Write transaction pending. Indicates that an SMI write transaction is in flight. */
+ uint64_t val : 1; /**< [ 16: 16](RO/H) Write data valid. Asserts when the write transaction completes. A read to this
+ register clears [VAL]. */
+ uint64_t dat : 16; /**< [ 15: 0](R/W/H) Write data. */
+#else /* Word 0 - Little Endian */
+ uint64_t dat : 16; /**< [ 15: 0](R/W/H) Write data. */
+ uint64_t val : 1; /**< [ 16: 16](RO/H) Write data valid. Asserts when the write transaction completes. A read to this
+ register clears [VAL]. */
+ uint64_t pending : 1; /**< [ 17: 17](RO/H) Write transaction pending. Indicates that an SMI write transaction is in flight. */
+ uint64_t reserved_18_63 : 46;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_smi_x_wr_dat_s cn; */
+};
+typedef union bdk_smi_x_wr_dat bdk_smi_x_wr_dat_t;
+
+static inline uint64_t BDK_SMI_X_WR_DAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SMI_X_WR_DAT(unsigned long a)
+{
+ if (a<=1)
+ return 0x87e005003808ll + 0x80ll * ((a) & 0x1);
+ __bdk_csr_fatal("SMI_X_WR_DAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_SMI_X_WR_DAT(a) bdk_smi_x_wr_dat_t
+#define bustype_BDK_SMI_X_WR_DAT(a) BDK_CSR_TYPE_RSL
+#define basename_BDK_SMI_X_WR_DAT(a) "SMI_X_WR_DAT"
+#define device_bar_BDK_SMI_X_WR_DAT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_SMI_X_WR_DAT(a) (a)
+#define arguments_BDK_SMI_X_WR_DAT(a) (a),-1,-1,-1
+
+/**
+ * Register (RSL) smi_drv_ctl
+ *
+ * SMI Drive Strength Control Register
+ * Enables the SMI interface.
+ */
+union bdk_smi_drv_ctl
+{
+ uint64_t u;
+ struct bdk_smi_drv_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t pctl : 3; /**< [ 10: 8](R/W) PCTL drive strength control bits. Suggested values:
+ 0x4 = 60 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 30 ohm. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t nctl : 3; /**< [ 2: 0](R/W) NCTL drive strength control bits. Suggested values:
+ 0x4 = 60 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 30 ohm. */
+#else /* Word 0 - Little Endian */
+ uint64_t nctl : 3; /**< [ 2: 0](R/W) NCTL drive strength control bits. Suggested values:
+ 0x4 = 60 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 30 ohm. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t pctl : 3; /**< [ 10: 8](R/W) PCTL drive strength control bits. Suggested values:
+ 0x4 = 60 ohm.
+ 0x6 = 40 ohm.
+ 0x7 = 30 ohm. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_smi_drv_ctl_s cn; */
+};
+typedef union bdk_smi_drv_ctl bdk_smi_drv_ctl_t;
+
+#define BDK_SMI_DRV_CTL BDK_SMI_DRV_CTL_FUNC()
+static inline uint64_t BDK_SMI_DRV_CTL_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SMI_DRV_CTL_FUNC(void)
+{
+ return 0x87e005003828ll;
+}
+
+#define typedef_BDK_SMI_DRV_CTL bdk_smi_drv_ctl_t
+#define bustype_BDK_SMI_DRV_CTL BDK_CSR_TYPE_RSL
+#define basename_BDK_SMI_DRV_CTL "SMI_DRV_CTL"
+#define device_bar_BDK_SMI_DRV_CTL 0x0 /* PF_BAR0 */
+#define busnum_BDK_SMI_DRV_CTL 0
+#define arguments_BDK_SMI_DRV_CTL -1,-1,-1,-1
+
+/**
+ * Register (RSL) smi_drv_rsvd
+ *
+ * INTERNAL: SMI Drive Reserve Register
+ *
+ * Enables the SMI1 interface.
+ */
+union bdk_smi_drv_rsvd
+{
+ uint64_t u;
+ struct bdk_smi_drv_rsvd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_11_63 : 53;
+ uint64_t pctl : 3; /**< [ 10: 8](R/W) Reserved. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t nctl : 3; /**< [ 2: 0](R/W) Reserved. */
+#else /* Word 0 - Little Endian */
+ uint64_t nctl : 3; /**< [ 2: 0](R/W) Reserved. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t pctl : 3; /**< [ 10: 8](R/W) Reserved. */
+ uint64_t reserved_11_63 : 53;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_smi_drv_rsvd_s cn; */
+};
+typedef union bdk_smi_drv_rsvd bdk_smi_drv_rsvd_t;
+
+#define BDK_SMI_DRV_RSVD BDK_SMI_DRV_RSVD_FUNC()
+static inline uint64_t BDK_SMI_DRV_RSVD_FUNC(void) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_SMI_DRV_RSVD_FUNC(void)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+ return 0x87e0050038a8ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+ return 0x87e0050038a8ll;
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+ return 0x87e0050038a8ll;
+ __bdk_csr_fatal("SMI_DRV_RSVD", 0, 0, 0, 0, 0);
+}
+
+#define typedef_BDK_SMI_DRV_RSVD bdk_smi_drv_rsvd_t
+#define bustype_BDK_SMI_DRV_RSVD BDK_CSR_TYPE_RSL
+#define basename_BDK_SMI_DRV_RSVD "SMI_DRV_RSVD"
+#define device_bar_BDK_SMI_DRV_RSVD 0x0 /* PF_BAR0 */
+#define busnum_BDK_SMI_DRV_RSVD 0
+#define arguments_BDK_SMI_DRV_RSVD -1,-1,-1,-1
+
+#endif /* __BDK_CSRS_SMI_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-usbdrd.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-usbdrd.h
new file mode 100644
index 0000000000..fc28b58067
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-usbdrd.h
@@ -0,0 +1,14020 @@
+#ifndef __BDK_CSRS_USBDRD_H__
+#define __BDK_CSRS_USBDRD_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium USBDRD.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration uctl_dma_read_cmd_e
+ *
+ * USB UCTL DMA Read Command Enumeration
+ * Enumerate NCB inbound command selections for DMA read operations.
+ */
+#define BDK_UCTL_DMA_READ_CMD_E_LDI (0)
+#define BDK_UCTL_DMA_READ_CMD_E_LDT (1)
+#define BDK_UCTL_DMA_READ_CMD_E_LDY (2)
+
+/**
+ * Enumeration uctl_dma_write_cmd_e
+ *
+ * USB UCTL DMA Write Command Enumeration
+ * Enumerate NCB inbound command selections for DMA write operations.
+ */
+#define BDK_UCTL_DMA_WRITE_CMD_E_RSTP (1)
+#define BDK_UCTL_DMA_WRITE_CMD_E_STP (0)
+
+/**
+ * Enumeration uctl_endian_mode_e
+ *
+ * USB UCTL Endian-Mode Enumeration
+ * Enumerate endian mode selections.
+ */
+#define BDK_UCTL_ENDIAN_MODE_E_BIG (1)
+#define BDK_UCTL_ENDIAN_MODE_E_LITTLE (0)
+#define BDK_UCTL_ENDIAN_MODE_E_RSVD2 (2)
+#define BDK_UCTL_ENDIAN_MODE_E_RSVD3 (3)
+
+/**
+ * Enumeration uctl_xm_bad_dma_type_e
+ *
+ * USB UCTL XM Bad DMA Type Enumeration
+ * Enumerate type of DMA error seen.
+ */
+#define BDK_UCTL_XM_BAD_DMA_TYPE_E_ADDR_OOB (1)
+#define BDK_UCTL_XM_BAD_DMA_TYPE_E_LEN_GT_16 (2)
+#define BDK_UCTL_XM_BAD_DMA_TYPE_E_MULTIBEAT_BYTE (3)
+#define BDK_UCTL_XM_BAD_DMA_TYPE_E_MULTIBEAT_HALFWORD (4)
+#define BDK_UCTL_XM_BAD_DMA_TYPE_E_MULTIBEAT_QWORD (6)
+#define BDK_UCTL_XM_BAD_DMA_TYPE_E_MULTIBEAT_WORD (5)
+#define BDK_UCTL_XM_BAD_DMA_TYPE_E_NONE (0)
+
+/**
+ * Enumeration usbdrd_bar_e
+ *
+ * USB Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_USBDRD_BAR_E_USBDRDX_PF_BAR0(a) (0x868000000000ll + 0x1000000000ll * (a))
+#define BDK_USBDRD_BAR_E_USBDRDX_PF_BAR0_SIZE 0x200000ull
+#define BDK_USBDRD_BAR_E_USBDRDX_PF_BAR4(a) (0x868000200000ll + 0x1000000000ll * (a))
+#define BDK_USBDRD_BAR_E_USBDRDX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration usbdrd_int_vec_e
+ *
+ * USB MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_USBDRD_INT_VEC_E_UAHC_IMAN_IP (0)
+#define BDK_USBDRD_INT_VEC_E_UAHC_USBSTS_HSE (2)
+#define BDK_USBDRD_INT_VEC_E_UAHC_USBSTS_HSE_CLEAR (3)
+#define BDK_USBDRD_INT_VEC_E_UCTL_INTSTAT (1)
+#define BDK_USBDRD_INT_VEC_E_UCTL_RAS (4)
+
+/**
+ * Enumeration usbdrd_uahc_dgcmd_cmdtype_e
+ *
+ * USB UAHC Device Generic Command Enumeration
+ * Commands for USBDRD()_UAHC_DGCMD[CMDTYPE].
+ * Any command encodings that are not present are considered Reserved.
+ *
+ * Internal:
+ * Synopsys DWC_usb3 Databook v3.10a, section 6.3.1.6.1 for details.
+ */
+#define BDK_USBDRD_UAHC_DGCMD_CMDTYPE_E_ALL_FIFO_FLUSH (0xa)
+#define BDK_USBDRD_UAHC_DGCMD_CMDTYPE_E_RUN_SOC_BUS_LOOPBACK_TEST (0x10)
+#define BDK_USBDRD_UAHC_DGCMD_CMDTYPE_E_SELECTED_FIFO_FLUSH (9)
+#define BDK_USBDRD_UAHC_DGCMD_CMDTYPE_E_SET_ENDPOINT_NRDY (0xc)
+#define BDK_USBDRD_UAHC_DGCMD_CMDTYPE_E_SET_PERIODIC_PARAMETERS (2)
+#define BDK_USBDRD_UAHC_DGCMD_CMDTYPE_E_SET_SCRATCHPAD_BUFFER_ARRAY_ADDR_H (5)
+#define BDK_USBDRD_UAHC_DGCMD_CMDTYPE_E_SET_SCRATCHPAD_BUFFER_ARRAY_ADDR_L (4)
+#define BDK_USBDRD_UAHC_DGCMD_CMDTYPE_E_TRANSMIT_DEVICE_NOTIFICATION (7)
+
+/**
+ * Register (NCB) usbdrd#_bp_test0
+ *
+ * INTERNAL: USB Backpressure Test Register
+ */
+union bdk_usbdrdx_bp_test0
+{
+ uint64_t u;
+ struct bdk_usbdrdx_bp_test0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Limit the NCBI posted FIFO, backpressure doing posted requests to NCB GNT.
+ \<62\> = Limit the NCBI nonposted FIFO, backpressure doing nonposted requests to NCB GNT.
+ \<61\> = Limit the NCBI completion FIFO, backpressure doing completion requests to NCB GNT.
+ \<60\> = Limit the NCBI CSR completion FIFO, backpressure doing requests for CSR responses
+ to NCB GNT. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+#else /* Word 0 - Little Endian */
+ uint64_t lfsr_freq : 12; /**< [ 11: 0](R/W) Test LFSR update frequency in coprocessor-clocks minus one. */
+ uint64_t reserved_12_15 : 4;
+ uint64_t bp_cfg : 8; /**< [ 23: 16](R/W) Backpressure weight. For diagnostic use only.
+ Internal:
+ There are 2 backpressure configuration bits per enable, with the two bits
+ defined as 0x0=100% of the time, 0x1=75% of the time, 0x2=50% of the time,
+ 0x3=25% of the time.
+ \<23:22\> = Config 3.
+ \<21:20\> = Config 2.
+ \<19:18\> = Config 1.
+ \<17:16\> = Config 0. */
+ uint64_t reserved_24_59 : 36;
+ uint64_t enable : 4; /**< [ 63: 60](R/W) Enable test mode. For diagnostic use only.
+ Internal:
+ Once a bit is set, random backpressure is generated
+ at the corresponding point to allow for more frequent backpressure.
+ \<63\> = Limit the NCBI posted FIFO, backpressure doing posted requests to NCB GNT.
+ \<62\> = Limit the NCBI nonposted FIFO, backpressure doing nonposted requests to NCB GNT.
+ \<61\> = Limit the NCBI completion FIFO, backpressure doing completion requests to NCB GNT.
+ \<60\> = Limit the NCBI CSR completion FIFO, backpressure doing requests for CSR responses
+ to NCB GNT. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_bp_test0_s cn; */
+};
+typedef union bdk_usbdrdx_bp_test0 bdk_usbdrdx_bp_test0_t;
+
+static inline uint64_t BDK_USBDRDX_BP_TEST0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_BP_TEST0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100070ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_BP_TEST0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_BP_TEST0(a) bdk_usbdrdx_bp_test0_t
+#define bustype_BDK_USBDRDX_BP_TEST0(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_BP_TEST0(a) "USBDRDX_BP_TEST0"
+#define device_bar_BDK_USBDRDX_BP_TEST0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_BP_TEST0(a) (a)
+#define arguments_BDK_USBDRDX_BP_TEST0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_const
+ *
+ * USB Constants Register
+ */
+union bdk_usbdrdx_const
+{
+ uint64_t u;
+ struct bdk_usbdrdx_const_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_0_63 : 64;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_63 : 64;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_const_s cn; */
+};
+typedef union bdk_usbdrdx_const bdk_usbdrdx_const_t;
+
+static inline uint64_t BDK_USBDRDX_CONST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_CONST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100078ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_CONST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_CONST(a) bdk_usbdrdx_const_t
+#define bustype_BDK_USBDRDX_CONST(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_CONST(a) "USBDRDX_CONST"
+#define device_bar_BDK_USBDRDX_CONST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_CONST(a) (a)
+#define arguments_BDK_USBDRDX_CONST(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_msix_pba#
+ *
+ * USB MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table, the bit number is indexed by the USBDRD_INT_VEC_E
+ * enumeration.
+ */
+union bdk_usbdrdx_msix_pbax
+{
+ uint64_t u;
+ struct bdk_usbdrdx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated USBDRD()_MSIX_VEC()_CTL, enumerated by
+ USBDRD_INT_VEC_E.
+ Bits that have no associated USBDRD_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated USBDRD()_MSIX_VEC()_CTL, enumerated by
+ USBDRD_INT_VEC_E.
+ Bits that have no associated USBDRD_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_msix_pbax_s cn; */
+};
+typedef union bdk_usbdrdx_msix_pbax bdk_usbdrdx_msix_pbax_t;
+
+static inline uint64_t BDK_USBDRDX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x8680002f0000ll + 0x1000000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x8680002f0000ll + 0x1000000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+ return 0x8680002f0000ll + 0x1000000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBDRDX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_MSIX_PBAX(a,b) bdk_usbdrdx_msix_pbax_t
+#define bustype_BDK_USBDRDX_MSIX_PBAX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_MSIX_PBAX(a,b) "USBDRDX_MSIX_PBAX"
+#define device_bar_BDK_USBDRDX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_USBDRDX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_USBDRDX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbdrd#_msix_vec#_addr
+ *
+ * USB MSI-X Vector Table Address Registers
+ * This register is the MSI-X vector table, indexed by the USBDRD_INT_VEC_E enumeration.
+ */
+union bdk_usbdrdx_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_usbdrdx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_53_63 : 11;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) Address to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's USBDRD()_MSIX_VEC()_ADDR, USBDRD()_MSIX_VEC()_CTL, and
+ corresponding
+ bit of USBDRD()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_USBDRD()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's USBDRD()_MSIX_VEC()_ADDR, USBDRD()_MSIX_VEC()_CTL, and
+ corresponding
+ bit of USBDRD()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_USBDRD()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 51; /**< [ 52: 2](R/W) Address to use for MSI-X delivery of this vector. */
+ uint64_t reserved_53_63 : 11;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbdrdx_msix_vecx_addr_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) Address to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's USBDRD()_MSIX_VEC()_ADDR, USBDRD()_MSIX_VEC()_CTL, and
+ corresponding
+ bit of USBDRD()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_USBDRD()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's USBDRD()_MSIX_VEC()_ADDR, USBDRD()_MSIX_VEC()_CTL, and
+ corresponding
+ bit of USBDRD()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_USBDRD()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) Address to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_usbdrdx_msix_vecx_addr_s cn9; */
+};
+typedef union bdk_usbdrdx_msix_vecx_addr bdk_usbdrdx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_USBDRDX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x868000200000ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
+ return 0x868000200000ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=4)))
+ return 0x868000200000ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x7);
+ __bdk_csr_fatal("USBDRDX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_MSIX_VECX_ADDR(a,b) bdk_usbdrdx_msix_vecx_addr_t
+#define bustype_BDK_USBDRDX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_MSIX_VECX_ADDR(a,b) "USBDRDX_MSIX_VECX_ADDR"
+#define device_bar_BDK_USBDRDX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_USBDRDX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_USBDRDX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbdrd#_msix_vec#_ctl
+ *
+ * USB MSI-X Vector Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the USBDRD_INT_VEC_E enumeration.
+ */
+union bdk_usbdrdx_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_usbdrdx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 32; /**< [ 31: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbdrdx_msix_vecx_ctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_usbdrdx_msix_vecx_ctl_s cn9; */
+};
+typedef union bdk_usbdrdx_msix_vecx_ctl bdk_usbdrdx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_USBDRDX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x868000200008ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
+ return 0x868000200008ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=4)))
+ return 0x868000200008ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x7);
+ __bdk_csr_fatal("USBDRDX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_MSIX_VECX_CTL(a,b) bdk_usbdrdx_msix_vecx_ctl_t
+#define bustype_BDK_USBDRDX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_MSIX_VECX_CTL(a,b) "USBDRDX_MSIX_VECX_CTL"
+#define device_bar_BDK_USBDRDX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_USBDRDX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_USBDRDX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_caplength
+ *
+ * USB XHCI Capability Length Register
+ * This register is used as an offset to add to register base to find the beginning of the
+ * operational register
+ * space. For information on this register, refer to the xHCI Specification, v1.1, section 5.3.1.
+ */
+union bdk_usbdrdx_uahc_caplength
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_caplength_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t hciversion : 16; /**< [ 31: 16](RO) Host controller interface version number. */
+ uint32_t reserved_8_15 : 8;
+ uint32_t caplength : 8; /**< [ 7: 0](RO) Capability registers length. */
+#else /* Word 0 - Little Endian */
+ uint32_t caplength : 8; /**< [ 7: 0](RO) Capability registers length. */
+ uint32_t reserved_8_15 : 8;
+ uint32_t hciversion : 16; /**< [ 31: 16](RO) Host controller interface version number. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_caplength_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_caplength bdk_usbdrdx_uahc_caplength_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_CAPLENGTH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_CAPLENGTH(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000000000ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000000000ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000000000ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_CAPLENGTH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_CAPLENGTH(a) bdk_usbdrdx_uahc_caplength_t
+#define bustype_BDK_USBDRDX_UAHC_CAPLENGTH(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_CAPLENGTH(a) "USBDRDX_UAHC_CAPLENGTH"
+#define device_bar_BDK_USBDRDX_UAHC_CAPLENGTH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_CAPLENGTH(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_CAPLENGTH(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_config
+ *
+ * USB XHCI Configuration Register
+ * This register defines runtime xHC configuration parameters.
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.7.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_config
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_config_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t maxslotsen : 8; /**< [ 7: 0](R/W) Maximum device slots enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t maxslotsen : 8; /**< [ 7: 0](R/W) Maximum device slots enabled. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_config_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_config bdk_usbdrdx_uahc_config_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_CONFIG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000000058ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000000058ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000000058ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_CONFIG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_CONFIG(a) bdk_usbdrdx_uahc_config_t
+#define bustype_BDK_USBDRDX_UAHC_CONFIG(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_CONFIG(a) "USBDRDX_UAHC_CONFIG"
+#define device_bar_BDK_USBDRDX_UAHC_CONFIG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_CONFIG(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_CONFIG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_crcr
+ *
+ * USB XHCI Command Ring Control Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.5.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_crcr
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uahc_crcr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cmd_ring_ptr : 58; /**< [ 63: 6](WO) Command ring pointer. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t crr : 1; /**< [ 3: 3](RO/H) Command ring running. */
+ uint64_t ca : 1; /**< [ 2: 2](WO) Command abort. */
+ uint64_t cs : 1; /**< [ 1: 1](WO) Command stop. */
+ uint64_t rcs : 1; /**< [ 0: 0](WO) Ring cycle state. */
+#else /* Word 0 - Little Endian */
+ uint64_t rcs : 1; /**< [ 0: 0](WO) Ring cycle state. */
+ uint64_t cs : 1; /**< [ 1: 1](WO) Command stop. */
+ uint64_t ca : 1; /**< [ 2: 2](WO) Command abort. */
+ uint64_t crr : 1; /**< [ 3: 3](RO/H) Command ring running. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t cmd_ring_ptr : 58; /**< [ 63: 6](WO) Command ring pointer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_crcr_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_crcr bdk_usbdrdx_uahc_crcr_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_CRCR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_CRCR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000000038ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000000038ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000000038ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_CRCR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_CRCR(a) bdk_usbdrdx_uahc_crcr_t
+#define bustype_BDK_USBDRDX_UAHC_CRCR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_CRCR(a) "USBDRDX_UAHC_CRCR"
+#define device_bar_BDK_USBDRDX_UAHC_CRCR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_CRCR(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_CRCR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dalepena
+ *
+ * USB Device Active USB Endpoint Enable Register
+ * This register indicates whether a USB endpoint is active in a given configuration or
+ * interface.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST] or
+ * USBDRD()_UAHC_GCTL[CORESOFTRESET] or
+ * USBDRD()_UAHC_USBCMD[HCRST] or USBDRD()_UAHC_USBCMD[LHCRST] or
+ * USBDRD()_UAHC_DCTL[CSFTRST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.7
+ */
+union bdk_usbdrdx_uahc_dalepena
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_dalepena_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t usbactep : 32; /**< [ 31: 0](R/W) This field indicates if a USB endpoint is active in the current configuration
+ and interface. It applies to USB IN endpoints 0-15 and OUT endpoints 0-15,
+ with one bit for each of the 32 possible endpoints. Even numbers are for
+ USB OUT endpoints, and odd numbers are for USB IN endpoints, as
+ follows:
+
+ \<0\> = USB EP0-OUT.
+ \<1\> = USB EP0-IN.
+ \<2\> = USB EP1-OUT.
+ \<3\> = USB EP1-IN.
+
+ The entity programming this register must set bits 0 and 1 because they
+ enable control endpoints that map to physical endpoints (resources) after
+ USBReset.
+
+ Application software clears these bits for all endpoints (other than EP0-OUT
+ and EP0-IN) after detecting a USB reset. After receiving SetConfiguration
+ and SetInterface requests, the application must program endpoint registers
+ accordingly and set these bits.
+
+ Internal:
+ For more information, see 'Flexible Endpoint Mapping' on Synopsys DWC_usb3
+ Databook v2.80a, page 82. */
+#else /* Word 0 - Little Endian */
+ uint32_t usbactep : 32; /**< [ 31: 0](R/W) This field indicates if a USB endpoint is active in the current configuration
+ and interface. It applies to USB IN endpoints 0-15 and OUT endpoints 0-15,
+ with one bit for each of the 32 possible endpoints. Even numbers are for
+ USB OUT endpoints, and odd numbers are for USB IN endpoints, as
+ follows:
+
+ \<0\> = USB EP0-OUT.
+ \<1\> = USB EP0-IN.
+ \<2\> = USB EP1-OUT.
+ \<3\> = USB EP1-IN.
+
+ The entity programming this register must set bits 0 and 1 because they
+ enable control endpoints that map to physical endpoints (resources) after
+ USBReset.
+
+ Application software clears these bits for all endpoints (other than EP0-OUT
+ and EP0-IN) after detecting a USB reset. After receiving SetConfiguration
+ and SetInterface requests, the application must program endpoint registers
+ accordingly and set these bits.
+
+ Internal:
+ For more information, see 'Flexible Endpoint Mapping' on Synopsys DWC_usb3
+ Databook v2.80a, page 82. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_dalepena_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dalepena bdk_usbdrdx_uahc_dalepena_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DALEPENA(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DALEPENA(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c720ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c720ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c720ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_DALEPENA", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DALEPENA(a) bdk_usbdrdx_uahc_dalepena_t
+#define bustype_BDK_USBDRDX_UAHC_DALEPENA(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DALEPENA(a) "USBDRDX_UAHC_DALEPENA"
+#define device_bar_BDK_USBDRDX_UAHC_DALEPENA(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DALEPENA(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DALEPENA(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_db#
+ *
+ * USB XHCI Doorbell Registers
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.6.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ *
+ * Internal:
+ * xHCI spec, page 32: there are USBDRD()_UAHC_HCSPARAMS1[MAXSLOTS]+1 doorbell
+ * registers.
+ */
+union bdk_usbdrdx_uahc_dbx
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_dbx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dbstreamid : 16; /**< [ 31: 16](WO) Doorbell stream ID. */
+ uint32_t reserved_8_15 : 8;
+ uint32_t dbtarget : 8; /**< [ 7: 0](WO) Doorbell target. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbtarget : 8; /**< [ 7: 0](WO) Doorbell target. */
+ uint32_t reserved_8_15 : 8;
+ uint32_t dbstreamid : 16; /**< [ 31: 16](WO) Doorbell stream ID. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_dbx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dbx bdk_usbdrdx_uahc_dbx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DBX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DBX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=64)))
+ return 0x868000000480ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=64)))
+ return 0x868000000480ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x7f);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=64)))
+ return 0x868000000480ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x7f);
+ __bdk_csr_fatal("USBDRDX_UAHC_DBX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DBX(a,b) bdk_usbdrdx_uahc_dbx_t
+#define bustype_BDK_USBDRDX_UAHC_DBX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DBX(a,b) "USBDRDX_UAHC_DBX"
+#define device_bar_BDK_USBDRDX_UAHC_DBX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DBX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_DBX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dboff
+ *
+ * USB XHCI Doorbell Array Offset Register
+ * This register defines the offset of the doorbell array base address from the base. For
+ * information on this register, refer to the xHCI Specification, v1.1, section 5.3.7.
+ */
+union bdk_usbdrdx_uahc_dboff
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_dboff_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dboff : 30; /**< [ 31: 2](RO) Doorbell array offset. */
+ uint32_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_1 : 2;
+ uint32_t dboff : 30; /**< [ 31: 2](RO) Doorbell array offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_dboff_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dboff bdk_usbdrdx_uahc_dboff_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DBOFF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DBOFF(unsigned long a)
+{
+ /* All supported models share the same base address and stride; a single
+    combined check covers them. Invalid requests hit the fatal handler. */
+ if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+ CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+ return 0x868000000014ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_DBOFF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DBOFF(a) bdk_usbdrdx_uahc_dboff_t
+#define bustype_BDK_USBDRDX_UAHC_DBOFF(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DBOFF(a) "USBDRDX_UAHC_DBOFF"
+#define device_bar_BDK_USBDRDX_UAHC_DBOFF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DBOFF(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DBOFF(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_dcbaap
+ *
+ * USB XHCI Device Context Base-Address-Array Pointer Register
+ * The device context base address array pointer register identifies the base address of the
+ * device
+ * context base address array.
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.6.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_dcbaap
+{
+ uint64_t u;
+ /* Same fields declared for each byte order; only one branch compiles. */
+ struct bdk_usbdrdx_uahc_dcbaap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dcbaap : 58; /**< [ 63: 6](R/W) Device context base address array pointer. */
+ uint64_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_5 : 6;
+ uint64_t dcbaap : 58; /**< [ 63: 6](R/W) Device context base address array pointer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_dcbaap_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dcbaap bdk_usbdrdx_uahc_dcbaap_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DCBAAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DCBAAP(unsigned long a)
+{
+ /* All supported models share the same base address and stride; a single
+    combined check covers them. Invalid requests hit the fatal handler. */
+ if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+ CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+ return 0x868000000050ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_DCBAAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DCBAAP(a) bdk_usbdrdx_uahc_dcbaap_t
+#define bustype_BDK_USBDRDX_UAHC_DCBAAP(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_DCBAAP(a) "USBDRDX_UAHC_DCBAAP"
+#define device_bar_BDK_USBDRDX_UAHC_DCBAAP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DCBAAP(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DCBAAP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dcfg
+ *
+ * USB Device Configuration Register
+ * This register configures the core in device mode after power-on or after certain control
+ * commands or enumeration. Do not make changes to this register after initial programming.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.1.1.
+ */
+union bdk_usbdrdx_uahc_dcfg
+{
+ uint32_t u;
+ /* Same fields declared for each byte order; only one branch compiles. */
+ struct bdk_usbdrdx_uahc_dcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_24_31 : 8;
+ uint32_t ignorestreampp : 1; /**< [ 23: 23](R/W) This bit only affects stream-capable bulk endpoints.
+ When this bit is set to 0x0 and the controller receives a data packet with the
+ packet pending (PP) bit set to 0 for OUT endpoints, or it receives an ACK
+ with the NumP field set to 0 and PP set to 0 for IN endpoints, the core
+ attempts to search for another stream (CStream) to initiate to the host.
+
+ However, there are two situations where this behavior is not optimal:
+
+ * When the host is setting PP=0 even though it has not finished the
+ stream, or.
+
+ * When the endpoint on the device is configured with one transfer
+ resource and therefore does not have any other streams to initiate to the
+ host.
+
+ When this bit is set to 0x1, the core ignores the packet pending bit for the
+ purposes of stream selection and does not search for another stream when
+ it receives DP(PP=0) or ACK(NumP=0, PP=0). This can enhance the
+ performance when the device system bus bandwidth is low */
+ uint32_t lpmcap : 1; /**< [ 22: 22](R/W) LPM capable.
+ The application uses this bit to control the controller's core LPM
+ capabilities. If the core operates as a non-LPM-capable device, it cannot
+ respond to LPM transactions.
+ 0x0 = LPM capability is not enabled.
+ 0x1 = LPM capability is enabled. */
+ uint32_t nump : 5; /**< [ 21: 17](R/W) Number of receive buffers.
+ This bit indicates the number of receive buffers to be reported in the ACK
+ TP.
+ The DWC_usb3 controller uses this field if USBDRD()_UAHC_GRXTHRCFG[USBRXPKTCNTSEL]
+ is set to 0x0. The application can program this value based on RxFIFO size,
+ buffer sizes programmed in descriptors, and system latency.
+ For an OUT endpoint, this field controls the number of receive buffers
+ reported in the NumP field of the ACK TP transmitted by the core.
+
+ Internal:
+ Note: This bit is used in host mode when Debug Capability is enabled. */
+ uint32_t intrnum : 5; /**< [ 16: 12](R/W) Interrupt number.
+ Indicates interrupt/EventQ number on which non-endpoint-specific device-related
+ interrupts (see DEVT) are generated. */
+ uint32_t reserved_10_11 : 2;
+ uint32_t devaddr : 7; /**< [ 9: 3](R/W) Device address.
+ The application must perform the following:
+ * Program this field after every SetAddress request.
+ * Reset this field to zero after USB reset. */
+ uint32_t devspd : 3; /**< [ 2: 0](R/W) Device speed.
+ Indicates the speed at which the application requires the core to connect, or
+ the maximum speed the application can support. However, the actual bus
+ speed is determined only after the chirp sequence is completed, and is
+ based on the speed of the USB host to which the core is connected.
+ 0x0 = High-speed (USB 2.0 PHY clock is 30 MHz or 60 MHz).
+ 0x1 = Full-speed (USB 2.0 PHY clock is 30 MHz or 60 MHz).
+ 0x4 = SuperSpeed (USB 3.0 PHY clock is 125 MHz or 250 MHz). */
+#else /* Word 0 - Little Endian */
+ uint32_t devspd : 3; /**< [ 2: 0](R/W) Device speed.
+ Indicates the speed at which the application requires the core to connect, or
+ the maximum speed the application can support. However, the actual bus
+ speed is determined only after the chirp sequence is completed, and is
+ based on the speed of the USB host to which the core is connected.
+ 0x0 = High-speed (USB 2.0 PHY clock is 30 MHz or 60 MHz).
+ 0x1 = Full-speed (USB 2.0 PHY clock is 30 MHz or 60 MHz).
+ 0x4 = SuperSpeed (USB 3.0 PHY clock is 125 MHz or 250 MHz). */
+ uint32_t devaddr : 7; /**< [ 9: 3](R/W) Device address.
+ The application must perform the following:
+ * Program this field after every SetAddress request.
+ * Reset this field to zero after USB reset. */
+ uint32_t reserved_10_11 : 2;
+ uint32_t intrnum : 5; /**< [ 16: 12](R/W) Interrupt number.
+ Indicates interrupt/EventQ number on which non-endpoint-specific device-related
+ interrupts (see DEVT) are generated. */
+ uint32_t nump : 5; /**< [ 21: 17](R/W) Number of receive buffers.
+ This bit indicates the number of receive buffers to be reported in the ACK
+ TP.
+ The DWC_usb3 controller uses this field if USBDRD()_UAHC_GRXTHRCFG[USBRXPKTCNTSEL]
+ is set to 0x0. The application can program this value based on RxFIFO size,
+ buffer sizes programmed in descriptors, and system latency.
+ For an OUT endpoint, this field controls the number of receive buffers
+ reported in the NumP field of the ACK TP transmitted by the core.
+
+ Internal:
+ Note: This bit is used in host mode when Debug Capability is enabled. */
+ uint32_t lpmcap : 1; /**< [ 22: 22](R/W) LPM capable.
+ The application uses this bit to control the controller's core LPM
+ capabilities. If the core operates as a non-LPM-capable device, it cannot
+ respond to LPM transactions.
+ 0x0 = LPM capability is not enabled.
+ 0x1 = LPM capability is enabled. */
+ uint32_t ignorestreampp : 1; /**< [ 23: 23](R/W) This bit only affects stream-capable bulk endpoints.
+ When this bit is set to 0x0 and the controller receives a data packet with the
+ packet pending (PP) bit set to 0 for OUT endpoints, or it receives an ACK
+ with the NumP field set to 0 and PP set to 0 for IN endpoints, the core
+ attempts to search for another stream (CStream) to initiate to the host.
+
+ However, there are two situations where this behavior is not optimal:
+
+ * When the host is setting PP=0 even though it has not finished the
+ stream, or.
+
+ * When the endpoint on the device is configured with one transfer
+ resource and therefore does not have any other streams to initiate to the
+ host.
+
+ When this bit is set to 0x1, the core ignores the packet pending bit for the
+ purposes of stream selection and does not search for another stream when
+ it receives DP(PP=0) or ACK(NumP=0, PP=0). This can enhance the
+ performance when the device system bus bandwidth is low */
+ uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_dcfg_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dcfg bdk_usbdrdx_uahc_dcfg_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DCFG(unsigned long a)
+{
+ /* All supported models share the same base address and stride; a single
+    combined check covers them. Invalid requests hit the fatal handler. */
+ if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+ CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+ return 0x86800000c700ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_DCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DCFG(a) bdk_usbdrdx_uahc_dcfg_t
+#define bustype_BDK_USBDRDX_UAHC_DCFG(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DCFG(a) "USBDRDX_UAHC_DCFG"
+#define device_bar_BDK_USBDRDX_UAHC_DCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DCFG(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dctl
+ *
+ * USB Device Control Register
+ * This register controls device mode.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.1.2.
+ */
+union bdk_usbdrdx_uahc_dctl
+{
+ uint32_t u;
+ /* Same fields declared for each byte order; only one branch compiles. */
+ struct bdk_usbdrdx_uahc_dctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rs : 1; /**< [ 31: 31](R/W) Run/Stop.
+ The software writes 1 to this bit to start the device controller operation.
+ To stop the device controller operation, the software must remove any active
+ transfers and write 0 to this bit. When the controller is stopped, it sets the
+ USBDRD()_UAHC_DSTS[DEVCTRLHLT] bit when the core is idle and the lower layer finishes
+ the disconnect process.
+
+ The Run/Stop bit must be used in following cases as specified:
+
+ 1. After power-on reset and CSR initialization, the software must write 1 to this bit
+ to start the device controller. The controller does not signal connect to the host
+ until this bit is set.
+
+ 2. The software uses this bit to control the device controller to perform a soft
+ disconnect. When the software writes 0 to this bit, the host does not see that
+ the device is connected. The device controller stays in the disconnected state
+ until the software writes 1 to this bit. The minimum duration of keeping this bit
+ cleared is 30 ms in SuperSpeed and 10 ms in high-speed/full-speed/low-speed.
+
+ If the software attempts a connect after the soft disconnect or detects a
+ disconnect event, it must set USBDRD()_UAHC_DCTL[ULSTCHNGREQ] to
+ "Rx.Detect" before reasserting the Run/Stop bit.
+
+ Internal:
+ 3. When the USB or Link is in a lower power state and the Two Power Rails
+ configuration is selected, software writes 0 to this bit to indicate that it is going
+ to turn off the Core Power Rail. After the software turns on the Core Power Rail
+ again and re-initializes the device controller, it must set this bit to start the
+ device controller. For more details, see Low Power Operation on page 599. */
+ uint32_t csftrst : 1; /**< [ 30: 30](R/W1S/H) Core soft reset.
+ Resets the all clock domains as follows:
+ * Clears the interrupts and all the CSRs except the following registers:
+ GCTL, GUCTL, GSTS, GSNPSID, GGPIO, GUID, GUSB2PHYCFGn registers,
+ GUSB3PIPECTLn registers, DCFG, DCTL, DEVTEN, DSTS.
+
+ * All module state machines (except the SoC Bus Slave Unit) are reset to the
+ IDLE state, and all the TxFIFOs and the RxFIFO are flushed.
+
+ * Any transactions on the SoC bus Master are terminated as soon as possible,
+ after gracefully completing the last data phase of a SoC bus transfer. Any
+ transactions on the USB are terminated immediately.
+
+ The application can write this bit at any time to reset the core. This is a self-clearing
+ bit; the core clears this bit after all necessary logic is reset in the core,
+ which may take several clocks depending on the core's current state. Once this
+ bit is cleared, the software must wait at least 3 PHY clocks before accessing the
+ PHY domain (synchronization delay). Typically, software reset is used during
+ software development and also when you dynamically change the PHY selection
+ bits in the USB configuration registers listed above. When you change the PHY,
+ the corresponding clock for the PHY is selected and used in the PHY domain.
+ Once a new clock is selected, the PHY domain must be reset for proper
+ operation. */
+ uint32_t reserved_29 : 1;
+ uint32_t hird_thres : 5; /**< [ 28: 24](R/W) HIRD threshold.
+ The core asserts output signals utmi_l1_suspend_n and utmi_sleep_n on the basis of this
+ signal:
+
+ * The core asserts utmi_l1_suspend_n to put the PHY into Deep Low-Power
+ mode in L1 when both of the following are true:
+ - HIRD value is greater than or equal to the value in HIRD_Thres[3:0]
+ - HIRD_Thres[4] is set to 1'b1.
+
+ * The core asserts utmi_sleep_n on L1 when one of the following is true:
+ - If the HIRD value is less than HIRD_Thres[3:0] or
+ - HIRD_Thres[4] is set to 1'b0. */
+ uint32_t appl1res : 1; /**< [ 23: 23](R/W) LPM response programmed by application.
+ Handshake response to LPM token specified by device application. Response
+ depends on USBDRD()_UAHC_DCFG[LPMCAP].
+
+ LPMCAP is 0x0 - The core always responds with timeout (that is, no
+ response).
+
+ LPMCAP is 0x1 and this bit is 0:
+ The core responds with an ACK upon a successful LPM transaction,
+ which requires all of the following are satisfied:
+
+ * There are no PID/CRC5 errors in both the EXT token and the LPM token
+ (if not true, inactivity results in a timeout ERROR).
+
+ * A valid bLinkState = 0001B (L1) is received in the LPM transaction (else
+ STALL).
+
+ * No data is pending in the Transmit FIFO and OUT endpoints not in flow
+ controlled state (else NYET).
+
+ LPMCAP is 0x1 and this bit is 1:
+ The core responds with an ACK upon a successful LPM, independent
+ of transmit FIFO status and OUT endpoint flow control state. The LPM
+ transaction is successful if all of the following are satisfied:
+
+ * There are no PID/CRC5 errors in both the EXT token and the LPM token
+ (else ERROR).
+
+ * A valid bLinkState = 0001B (L1) is received in the LPM transaction (else
+ STALL). */
+ uint32_t reserved_20_22 : 3;
+ uint32_t keepconnect : 1; /**< [ 19: 19](WO) Always write 0.
+ Internal:
+ Writing this bit to 0x1 does nothing since we don't have hibernation feature. */
+ uint32_t l1hibernationen : 1; /**< [ 18: 18](WO) Always write 0.
+ Internal:
+ Writing this bit to 0x1 does nothing since we don't have hibernation feature. */
+ uint32_t crs : 1; /**< [ 17: 17](WO) Controller restore state.
+ This command is similar to the USBDRD()_UAHC_USBCMD[CRS] bit in host mode and
+ initiates the restore process. When software sets this bit to 1, the controller
+ immediately sets USBDRD()_UAHC_DSTS[RSS] to 1. When the controller has finished
+ the restore process, it sets USBDRD()_UAHC_DSTS[RSS] to 0.
+ Note: When read, this field always returns 0. */
+ uint32_t css : 1; /**< [ 16: 16](WO) Controller save state.
+ This command is similar to the USBDRD()_UAHC_USBCMD[CSS] bit in host mode and
+ initiates the restore process. When software sets this bit to 1, the controller
+ immediately sets USBDRD()_UAHC_DSTS[SSS] to 1. When the controller has finished
+ the save process, it sets USBDRD()_UAHC_DSTS[SSS] to 0.
+ Note: When read, this field always returns 0. */
+ uint32_t reserved_13_15 : 3;
+ uint32_t initu2ena : 1; /**< [ 12: 12](R/W) Initiate U2 enable.
+ 0 = May not initiate U2 (default).
+ 1 = May initiate U2.
+
+ On USB reset, hardware clears this bit to 0. Software sets this bit after receiving
+ SetFeature(U2_ENABLE), and clears this bit when ClearFeature(U2_ENABLE) is
+ received.
+
+ If USBDRD()_UAHC_DCTL[ACCEPTU2ENA] is 0, the link immediately exits U2 state. */
+ uint32_t acceptu2ena : 1; /**< [ 11: 11](R/W) Accept U2 enable.
+ 0 = Reject U2 except when Force_LinkPM_Accept bit is set (default).
+ 1 = Core accepts transition to U2 state if nothing is pending on the
+ application side.
+
+ On USB reset, hardware clears this bit to 0. Software sets this bit after receiving
+ a SetConfiguration command. */
+ uint32_t initu1ena : 1; /**< [ 10: 10](R/W) Initiate U1 enable.
+ 0 = May not initiate U1 (default).
+ 1 = May initiate U1.
+
+ On USB reset, hardware clears this bit to 0. Software sets this bit after receiving
+ SetFeature(U1_ENABLE), and clears this bit when ClearFeature(U1_ENABLE) is
+ received.
+
+ If USBDRD()_UAHC_DCTL[ACCEPTU1ENA] is 0, the link immediately exits U1 state. */
+ uint32_t acceptu1ena : 1; /**< [ 9: 9](R/W) Accept U1 enable.
+ 0 = Reject U1 except when Force_LinkPM_Accept bit is set (default)
+ 1 = Core accepts transition to U1 state if nothing is pending on the
+ application side.
+
+ On USB reset, hardware clears this bit to 0. Software sets this bit after receiving
+ a SetConfiguration command. */
+ uint32_t ulstchngreq : 4; /**< [ 8: 5](WO) USB/link state change request.
+ Software writes this field to issue a USB/link state change request. A change in
+ this field indicates a new request to the core. If software wants to issue the same
+ request back-to-back, it must write a 0 to this field between the two requests. The
+ result of the state change request is reflected in USBDRD()_UAHC_DSTS[USBLNKST].
+ These bits are self-cleared on the MAC Layer exiting suspended state.
+
+ If software is updating other fields of the USBDRD()_UAHC_DCTL register and not
+ intending to force any link state change, then it must write a 0 to this field.
+ SuperSpeed compliance mode is normally entered and controlled by the remote link
+ partner. Refer to the USB3 specification. Alternatively, you can force the local link
+ directly into Compliance mode, by resetting the SuperSpeed link with the
+ USBDRD()_UAHC_DCTL[RS] bit set to zero. If you then write 0xA to the ULSTCHNGREQ
+ field and 1 to USBDRD()_UAHC_DCTL[RS], the Link will go to compliance. Once you
+ are in compliance, you may alternately write 0x0 and 0xA to this field to advance
+ the compliance pattern.
+
+ In SS mode:
+ 0x0 = No action.
+ 0x4 = SS.Disabled.
+ 0x5 = Rx.Detect.
+ 0x6 = SS.Inactive.
+ 0x8 = Recovery.
+ 0xA = Compliance.
+ Others = Reserved.
+
+ In HS/FS/LS mode:
+
+ 0x8 = Remote wakeup request.
+ The remote wakeup request should be issued 2us after the device goes into
+ suspend state (USBDRD()_UAHC_DSTS[USBLNKST] is 0x3).
+ Others = Reserved. */
+ uint32_t tstctl : 4; /**< [ 4: 1](R/W) Test control.
+ 0x0 = Test mode disabled.
+ 0x1 = Test_J mode.
+ 0x2 = Test_K mode.
+ 0x3 = Test_SE0_NAK mode.
+ 0x4 = Test_Packet mode.
+ 0x5 = Test_Force_Enable.
+ Others = Reserved. */
+ uint32_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0 : 1;
+ uint32_t tstctl : 4; /**< [ 4: 1](R/W) Test control.
+ 0x0 = Test mode disabled.
+ 0x1 = Test_J mode.
+ 0x2 = Test_K mode.
+ 0x3 = Test_SE0_NAK mode.
+ 0x4 = Test_Packet mode.
+ 0x5 = Test_Force_Enable.
+ Others = Reserved. */
+ uint32_t ulstchngreq : 4; /**< [ 8: 5](WO) USB/link state change request.
+ Software writes this field to issue a USB/link state change request. A change in
+ this field indicates a new request to the core. If software wants to issue the same
+ request back-to-back, it must write a 0 to this field between the two requests. The
+ result of the state change request is reflected in USBDRD()_UAHC_DSTS[USBLNKST].
+ These bits are self-cleared on the MAC Layer exiting suspended state.
+
+ If software is updating other fields of the USBDRD()_UAHC_DCTL register and not
+ intending to force any link state change, then it must write a 0 to this field.
+ SuperSpeed compliance mode is normally entered and controlled by the remote link
+ partner. Refer to the USB3 specification. Alternatively, you can force the local link
+ directly into Compliance mode, by resetting the SuperSpeed link with the
+ USBDRD()_UAHC_DCTL[RS] bit set to zero. If you then write 0xA to the ULSTCHNGREQ
+ field and 1 to USBDRD()_UAHC_DCTL[RS], the Link will go to compliance. Once you
+ are in compliance, you may alternately write 0x0 and 0xA to this field to advance
+ the compliance pattern.
+
+ In SS mode:
+ 0x0 = No action.
+ 0x4 = SS.Disabled.
+ 0x5 = Rx.Detect.
+ 0x6 = SS.Inactive.
+ 0x8 = Recovery.
+ 0xA = Compliance.
+ Others = Reserved.
+
+ In HS/FS/LS mode:
+
+ 0x8 = Remote wakeup request.
+ The remote wakeup request should be issued 2us after the device goes into
+ suspend state (USBDRD()_UAHC_DSTS[USBLNKST] is 0x3).
+ Others = Reserved. */
+ uint32_t acceptu1ena : 1; /**< [ 9: 9](R/W) Accept U1 enable.
+ 0 = Reject U1 except when Force_LinkPM_Accept bit is set (default)
+ 1 = Core accepts transition to U1 state if nothing is pending on the
+ application side.
+
+ On USB reset, hardware clears this bit to 0. Software sets this bit after receiving
+ a SetConfiguration command. */
+ uint32_t initu1ena : 1; /**< [ 10: 10](R/W) Initiate U1 enable.
+ 0 = May not initiate U1 (default).
+ 1 = May initiate U1.
+
+ On USB reset, hardware clears this bit to 0. Software sets this bit after receiving
+ SetFeature(U1_ENABLE), and clears this bit when ClearFeature(U1_ENABLE) is
+ received.
+
+ If USBDRD()_UAHC_DCTL[ACCEPTU1ENA] is 0, the link immediately exits U1 state. */
+ uint32_t acceptu2ena : 1; /**< [ 11: 11](R/W) Accept U2 enable.
+ 0 = Reject U2 except when Force_LinkPM_Accept bit is set (default).
+ 1 = Core accepts transition to U2 state if nothing is pending on the
+ application side.
+
+ On USB reset, hardware clears this bit to 0. Software sets this bit after receiving
+ a SetConfiguration command. */
+ uint32_t initu2ena : 1; /**< [ 12: 12](R/W) Initiate U2 enable.
+ 0 = May not initiate U2 (default).
+ 1 = May initiate U2.
+
+ On USB reset, hardware clears this bit to 0. Software sets this bit after receiving
+ SetFeature(U2_ENABLE), and clears this bit when ClearFeature(U2_ENABLE) is
+ received.
+
+ If USBDRD()_UAHC_DCTL[ACCEPTU2ENA] is 0, the link immediately exits U2 state. */
+ uint32_t reserved_13_15 : 3;
+ uint32_t css : 1; /**< [ 16: 16](WO) Controller save state.
+ This command is similar to the USBDRD()_UAHC_USBCMD[CSS] bit in host mode and
+ initiates the restore process. When software sets this bit to 1, the controller
+ immediately sets USBDRD()_UAHC_DSTS[SSS] to 1. When the controller has finished
+ the save process, it sets USBDRD()_UAHC_DSTS[SSS] to 0.
+ Note: When read, this field always returns 0. */
+ uint32_t crs : 1; /**< [ 17: 17](WO) Controller restore state.
+ This command is similar to the USBDRD()_UAHC_USBCMD[CRS] bit in host mode and
+ initiates the restore process. When software sets this bit to 1, the controller
+ immediately sets USBDRD()_UAHC_DSTS[RSS] to 1. When the controller has finished
+ the restore process, it sets USBDRD()_UAHC_DSTS[RSS] to 0.
+ Note: When read, this field always returns 0. */
+ uint32_t l1hibernationen : 1; /**< [ 18: 18](WO) Always write 0.
+ Internal:
+ Writing this bit to 0x1 does nothing since we don't have hibernation feature. */
+ uint32_t keepconnect : 1; /**< [ 19: 19](WO) Always write 0.
+ Internal:
+ Writing this bit to 0x1 does nothing since we don't have hibernation feature. */
+ uint32_t reserved_20_22 : 3;
+ uint32_t appl1res : 1; /**< [ 23: 23](R/W) LPM response programmed by application.
+ Handshake response to LPM token specified by device application. Response
+ depends on USBDRD()_UAHC_DCFG[LPMCAP].
+
+ LPMCAP is 0x0 - The core always responds with timeout (that is, no
+ response).
+
+ LPMCAP is 0x1 and this bit is 0:
+ The core responds with an ACK upon a successful LPM transaction,
+ which requires all of the following are satisfied:
+
+ * There are no PID/CRC5 errors in both the EXT token and the LPM token
+ (if not true, inactivity results in a timeout ERROR).
+
+ * A valid bLinkState = 0001B (L1) is received in the LPM transaction (else
+ STALL).
+
+ * No data is pending in the Transmit FIFO and OUT endpoints not in flow
+ controlled state (else NYET).
+
+ LPMCAP is 0x1 and this bit is 1:
+ The core responds with an ACK upon a successful LPM, independent
+ of transmit FIFO status and OUT endpoint flow control state. The LPM
+ transaction is successful if all of the following are satisfied:
+
+ * There are no PID/CRC5 errors in both the EXT token and the LPM token
+ (else ERROR).
+
+ * A valid bLinkState = 0001B (L1) is received in the LPM transaction (else
+ STALL). */
+ uint32_t hird_thres : 5; /**< [ 28: 24](R/W) HIRD threshold.
+ The core asserts output signals utmi_l1_suspend_n and utmi_sleep_n on the basis of this
+ signal:
+
+ * The core asserts utmi_l1_suspend_n to put the PHY into Deep Low-Power
+ mode in L1 when both of the following are true:
+ - HIRD value is greater than or equal to the value in HIRD_Thres[3:0]
+ - HIRD_Thres[4] is set to 1'b1.
+
+ * The core asserts utmi_sleep_n on L1 when one of the following is true:
+ - If the HIRD value is less than HIRD_Thres[3:0] or
+ - HIRD_Thres[4] is set to 1'b0. */
+ uint32_t reserved_29 : 1;
+ uint32_t csftrst : 1; /**< [ 30: 30](R/W1S/H) Core soft reset.
+ Resets the all clock domains as follows:
+ * Clears the interrupts and all the CSRs except the following registers:
+ GCTL, GUCTL, GSTS, GSNPSID, GGPIO, GUID, GUSB2PHYCFGn registers,
+ GUSB3PIPECTLn registers, DCFG, DCTL, DEVTEN, DSTS.
+
+ * All module state machines (except the SoC Bus Slave Unit) are reset to the
+ IDLE state, and all the TxFIFOs and the RxFIFO are flushed.
+
+ * Any transactions on the SoC bus Master are terminated as soon as possible,
+ after gracefully completing the last data phase of a SoC bus transfer. Any
+ transactions on the USB are terminated immediately.
+
+ The application can write this bit at any time to reset the core. This is a self-clearing
+ bit; the core clears this bit after all necessary logic is reset in the core,
+ which may take several clocks depending on the core's current state. Once this
+ bit is cleared, the software must wait at least 3 PHY clocks before accessing the
+ PHY domain (synchronization delay). Typically, software reset is used during
+ software development and also when you dynamically change the PHY selection
+ bits in the USB configuration registers listed above. When you change the PHY,
+ the corresponding clock for the PHY is selected and used in the PHY domain.
+ Once a new clock is selected, the PHY domain must be reset for proper
+ operation. */
+ uint32_t rs : 1; /**< [ 31: 31](R/W) Run/Stop.
+ The software writes 1 to this bit to start the device controller operation.
+ To stop the device controller operation, the software must remove any active
+ transfers and write 0 to this bit. When the controller is stopped, it sets the
+ USBDRD()_UAHC_DSTS[DEVCTRLHLT] bit when the core is idle and the lower layer finishes
+ the disconnect process.
+
+ The Run/Stop bit must be used in following cases as specified:
+
+ 1. After power-on reset and CSR initialization, the software must write 1 to this bit
+ to start the device controller. The controller does not signal connect to the host
+ until this bit is set.
+
+ 2. The software uses this bit to control the device controller to perform a soft
+ disconnect. When the software writes 0 to this bit, the host does not see that
+ the device is connected. The device controller stays in the disconnected state
+ until the software writes 1 to this bit. The minimum duration of keeping this bit
+ cleared is 30 ms in SuperSpeed and 10 ms in high-speed/full-speed/low-speed.
+
+ If the software attempts a connect after the soft disconnect or detects a
+ disconnect event, it must set USBDRD()_UAHC_DCTL[ULSTCHNGREQ] to
+ "Rx.Detect" before reasserting the Run/Stop bit.
+
+ Internal:
+ 3. When the USB or Link is in a lower power state and the Two Power Rails
+ configuration is selected, software writes 0 to this bit to indicate that it is going
+ to turn off the Core Power Rail. After the software turns on the Core Power Rail
+ again and re-initializes the device controller, it must set this bit to start the
+ device controller. For more details, see Low Power Operation on page 599. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_dctl_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dctl bdk_usbdrdx_uahc_dctl_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DCTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DCTL(unsigned long a)
+{
+ /* All supported models share the same base address and stride; a single
+    combined check covers them. Invalid requests hit the fatal handler. */
+ if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+ CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+ return 0x86800000c704ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_DCTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DCTL(a) bdk_usbdrdx_uahc_dctl_t
+#define bustype_BDK_USBDRDX_UAHC_DCTL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DCTL(a) "USBDRDX_UAHC_DCTL"
+#define device_bar_BDK_USBDRDX_UAHC_DCTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DCTL(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DCTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_depcmd#
+ *
+ * USB Device Physical Endpoint-n Command Register
+ * This register enables software to issue physical endpoint-specific commands. This register
+ * contains command, control, and status fields relevant to the current generic command,
+ * while the USBDRD()_UAHC_DEPCMDPAR* registers provide command parameters and return
+ * status information.
+ *
+ * Several fields (including CMDTYPE) are write-only, so their read values are undefined. After
+ * power-on, prior to issuing the first endpoint command, the read value of this register is
+ * undefined. In particular, [CMDACT] may be set after power-on. In this case, it is safe
+ * to issue an endpoint command.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST] or
+ * USBDRD()_UAHC_GCTL[CORESOFTRESET] or
+ * USBDRD()_UAHC_USBCMD[HCRST] or USBDRD()_UAHC_USBCMD[LHCRST] or
+ * USBDRD()_UAHC_DCTL[CSFTRST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.12
+ */
+union bdk_usbdrdx_uahc_depcmdx
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_depcmdx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t commandparam : 16; /**< [ 31: 16](R/W) Command or event parameters.
+ When this register is written:
+
+ Command parameters:
+
+ For start transfer command:
+ _ - [31:16]: StreamID. The USB StreamID assigned to this transfer
+
+ For start transfer command applied to an isochronous endpoint:
+ _ - [31:16]: StartMicroFramNum: Indicates the (micro)frame number to
+ which the first TRB applies
+
+ For update transfer, end transfer, and start new configuration
+ commands:
+ _ - [22:16]: Transfer resource index (XferRscIdx). The hardware-assigned
+ transfer resource index for the transfer, which was returned
+ in response to the start transfer command. The application
+ software-assigned transfer resource index for a start new
+ configuration command.
+
+ When this register is read:
+
+ For XferNotReady, XferComplete, and stream events on bulk endpoints:
+ _ - [31:16]: StreamID. Applies only to bulk endpoints that support streams. This
+ indicates the StreamID of the transfer for which the event is
+ generated
+ For XferInProgress:
+ _ - [31:16]: Isochronous microframe number (IsocMicroFrameNum): Indicates the
+ microframe number of the beginning of the interval that generated
+ the XferInProgress event (debug purposes only)
+ For XferNotReady events on Isochronous Endpoints:
+ _ - [31:16]: Isochronous microframe number (IsocMicroFrameNum). Indicates the
+ microframe number during which the endpoint was not ready
+
+ Note: controller core represents USB bus time as a 14-bit value on the bus and also
+ in the DSTS register (USBDRD()_UAHC_DSTS[SOFFN]), but as a 16-bit value in the
+ XferNotReady event. Use the 16-bit value to interact with isochronous endpoints via
+ the StartXfer command. The extra two bits that the controller core produces will be
+ necessary for handling wrap-around conditions in the interaction between software
+ and hardware.
+
+ For all EPCmdCmplt events
+ _ - [27:24]: Command type. The command type that completed (Valid only in a DEPEVT
+ event. Undefined when read from the
+ USBDRD()_UAHC_DEPCMD()[COMMANDPARAM] field).
+
+ For EPCmdCmplt event in response to start transfer command:
+ _ - [22:16]: Transfer resource index (XferRscIdx). The internal hardware transfer
+ resource index assigned to this transfer. This index must be used in
+ all update transfer and end transfer commands. */
+ uint32_t cmdstatus : 4; /**< [ 15: 12](R/W) Command completion status.
+ Additional information about the completion of this command is available in
+ this field.
+
+ Within an XferNotReady event:
+ _ [15]: Indicates the reason why the XferNotReady event is generated:
+ _ [15] = 0: XferNotActive: Host initiated a transfer, but the requested transfer is not
+ present in the hardware.
+ _ [15] = 1: XferActive: Host initiated a transfer, the transfer is present, but no valid
+ TRBs
+ are available
+ _ [14]: Not Used
+ _ [13:12]: For control endpoints, indicates what stage was requested when the transfer was
+ not ready:
+ _ [13:12] = 0x1: Control data request
+ _ [13:12] = 0x2: Control status request
+
+ Within an XferComplete or XferInProgress event:
+ _ [15]: LST bit of the completed TRB (XferComplete only)
+ _ [15]: MissedIsoc: Indicates the interval did not complete successfully (XferInProgress
+ only)
+ _ [14]: IOC bit of the TRB that completed.
+ _ [13]: Indicates the TRB completed with a short packet reception or the last packet of an
+ isochronous interval
+ _ [12]: Reserved.
+ If the host aborts the data stage of a control transfer, software may receive a
+ XferComplete event with the EventStatus field equal to 0. This is a valid event
+ that must be processed as a part of the control transfer programming model.
+
+ Within a stream event:
+ _ [15:12] = 0x2: StreamNotFound: This stream event is issued when the stream-capable
+ endpoint
+ performed a search in its transfer resource cache, but could not find an active
+ and ready stream.
+ _ [15:12] = 0x1: StreamFound: This stream event is issued when the stream-capable endpoint
+ found
+ an active and ready stream in its transfer resource cache, and initiated traffic for
+ that stream to the host. The ID of the selected Stream is in the EventParam field.
+
+ In response to a start transfer command:
+ _ [15:12] = 0x2: Indicates expiry of the bus time reflected in the start transfer command.
+ _ [15:12] = 0x1: Indicates there is no transfer resource available on the endpoint.
+
+ In response to a set transfer resource (DEPXFERCFG) command:
+ _ [15:12] = 0x1: Indicates an error has occurred because software is requesting more
+ transfer
+ resources to be assigned than have been configured in the hardware.
+
+ In response to an end transfer command:
+ _ [15:12] = 0x1: Indicates an invalid transfer resource was specified.
+
+ Internal:
+ For abort handling, see also Synopsys DWC_usb3 Databook v2.80a, Section 8.4. */
+ uint32_t hipri_forcerm : 1; /**< [ 11: 11](R/W) HighPriority: Only valid for start transfer command.
+ ForceRM: Only valid for end transfer command. */
+ uint32_t cmdact : 1; /**< [ 10: 10](R/W) Software sets this bit to 1 to enable the device endpoint controller to
+ execute the generic command.
+ The device controller sets this bit to 0 when [CMDSTATUS] is valid and
+ the endpoint is ready to accept another command. This does not imply that
+ all the effects of the previously-issued command have taken place. */
+ uint32_t reserved_9 : 1;
+ uint32_t cmdioc : 1; /**< [ 8: 8](R/W) Command interrupt on complete.
+ When this bit is set, the device controller issues a generic endpoint
+ command complete event after executing the command. Note that this
+ interrupt is mapped to DEPCFG.IntrNum. When the DEPCFG command is
+ executed, the command interrupt on completion goes to the interrupt
+ pointed by the USBDRD()_UAHC_DCFG[INTRNUM] in the current command.
+ Note: This field must not be set to 1 if the USBDRD()_UAHC_DCTL[RS] field is 0. */
+ uint32_t reserved_4_7 : 4;
+ uint32_t cmdtyp : 4; /**< [ 3: 0](R/W) Command type.
+ Specifies the type of command the software driver is requesting the core to
+ perform.
+ 0x0 = Reserved.
+ 0x1 = Set endpoint configuration (64 or 96-bit parameter).
+ 0x2 = Set endpoint transfer resource configuration (32-bit parameter).
+ 0x3 = Get endpoint state (no parameter needed).
+ 0x4 = Set stall (no parameter needed).
+ 0x5 = Clear stall (see set stall, no parameter needed).
+ 0x6 = Start transfer (64-bit parameter).
+ 0x7 = Update transfer (no parameter needed).
+ 0x8 = End transfer (no parameter needed).
+ 0x9 = Start new configuration (no parameter needed). */
+#else /* Word 0 - Little Endian */
+ uint32_t cmdtyp : 4; /**< [ 3: 0](R/W) Command type.
+ Specifies the type of command the software driver is requesting the core to
+ perform.
+ 0x0 = Reserved.
+ 0x1 = Set endpoint configuration (64 or 96-bit parameter).
+ 0x2 = Set endpoint transfer resource configuration (32-bit parameter).
+ 0x3 = Get endpoint state (no parameter needed).
+ 0x4 = Set stall (no parameter needed).
+ 0x5 = Clear stall (see set stall, no parameter needed).
+ 0x6 = Start transfer (64-bit parameter).
+ 0x7 = Update transfer (no parameter needed).
+ 0x8 = End transfer (no parameter needed).
+ 0x9 = Start new configuration (no parameter needed). */
+ uint32_t reserved_4_7 : 4;
+ uint32_t cmdioc : 1; /**< [ 8: 8](R/W) Command interrupt on complete.
+ When this bit is set, the device controller issues a generic endpoint
+ command complete event after executing the command. Note that this
+ interrupt is mapped to DEPCFG.IntrNum. When the DEPCFG command is
+ executed, the command interrupt on completion goes to the interrupt
+ pointed by the USBDRD()_UAHC_DCFG[INTRNUM] in the current command.
+ Note: This field must not be set to 1 if the USBDRD()_UAHC_DCTL[RS] field is 0. */
+ uint32_t reserved_9 : 1;
+ uint32_t cmdact : 1; /**< [ 10: 10](R/W) Software sets this bit to 1 to enable the device endpoint controller to
+ execute the generic command.
+ The device controller sets this bit to 0 when [CMDSTATUS] is valid and
+ the endpoint is ready to accept another command. This does not imply that
+ all the effects of the previously-issued command have taken place. */
+ uint32_t hipri_forcerm : 1; /**< [ 11: 11](R/W) HighPriority: Only valid for start transfer command.
+ ForceRM: Only valid for end transfer command. */
+ uint32_t cmdstatus : 4; /**< [ 15: 12](R/W) Command completion status.
+ Additional information about the completion of this command is available in
+ this field.
+
+ Within an XferNotReady event:
+ _ [15]: Indicates the reason why the XferNotReady event is generated:
+ _ [15] = 0: XferNotActive: Host initiated a transfer, but the requested transfer is not
+ present in the hardware.
+ _ [15] = 1: XferActive: Host initiated a transfer, the transfer is present, but no valid
+ TRBs
+ are available
+ _ [14]: Not Used
+ _ [13:12]: For control endpoints, indicates what stage was requested when the transfer was
+ not ready:
+ _ [13:12] = 0x1: Control data request
+ _ [13:12] = 0x2: Control status request
+
+ Within an XferComplete or XferInProgress event:
+ _ [15]: LST bit of the completed TRB (XferComplete only)
+ _ [15]: MissedIsoc: Indicates the interval did not complete successfully (XferInProgress
+ only)
+ _ [14]: IOC bit of the TRB that completed.
+ _ [13]: Indicates the TRB completed with a short packet reception or the last packet of an
+ isochronous interval
+ _ [12]: Reserved.
+ If the host aborts the data stage of a control transfer, software may receive a
+ XferComplete event with the EventStatus field equal to 0. This is a valid event
+ that must be processed as a part of the control transfer programming model.
+
+ Within a stream event:
+ _ [15:12] = 0x2: StreamNotFound: This stream event is issued when the stream-capable
+ endpoint
+ performed a search in its transfer resource cache, but could not find an active
+ and ready stream.
+ _ [15:12] = 0x1: StreamFound: This stream event is issued when the stream-capable endpoint
+ found
+ an active and ready stream in its transfer resource cache, and initiated traffic for
+ that stream to the host. The ID of the selected Stream is in the EventParam field.
+
+ In response to a start transfer command:
+ _ [15:12] = 0x2: Indicates expiry of the bus time reflected in the start transfer command.
+ _ [15:12] = 0x1: Indicates there is no transfer resource available on the endpoint.
+
+ In response to a set transfer resource (DEPXFERCFG) command:
+ _ [15:12] = 0x1: Indicates an error has occurred because software is requesting more
+ transfer
+ resources to be assigned than have been configured in the hardware.
+
+ In response to an end transfer command:
+ _ [15:12] = 0x1: Indicates an invalid transfer resource was specified.
+
+ Internal:
+ For abort handling, see also Synopsys DWC_usb3 Databook v2.80a, Section 8.4. */
+ uint32_t commandparam : 16; /**< [ 31: 16](R/W) Command or event parameters.
+ When this register is written:
+
+ Command parameters:
+
+ For start transfer command:
+ _ - [31:16]: StreamID. The USB StreamID assigned to this transfer
+
+ For start transfer command applied to an isochronous endpoint:
+ _ - [31:16]: StartMicroFramNum: Indicates the (micro)frame number to
+ which the first TRB applies
+
+ For update transfer, end transfer, and start new configuration
+ commands:
+ _ - [22:16]: Transfer resource index (XferRscIdx). The hardware-assigned
+ transfer resource index for the transfer, which was returned
+ in response to the start transfer command. The application
+ software-assigned transfer resource index for a start new
+ configuration command.
+
+ When this register is read:
+
+ For XferNotReady, XferComplete, and stream events on bulk endpoints:
+ _ - [31:16]: StreamID. Applies only to bulk endpoints that support streams. This
+ indicates the StreamID of the transfer for which the event is
+ generated
+ For XferInProgress:
+ _ - [31:16]: Isochronous microframe number (IsocMicroFrameNum): Indicates the
+ microframe number of the beginning of the interval that generated
+ the XferInProgress event (debug purposes only)
+ For XferNotReady events on Isochronous Endpoints:
+ _ - [31:16]: Isochronous microframe number (IsocMicroFrameNum). Indicates the
+ microframe number during which the endpoint was not ready
+
+ Note: controller core represents USB bus time as a 14-bit value on the bus and also
+ in the DSTS register (USBDRD()_UAHC_DSTS[SOFFN]), but as a 16-bit value in the
+ XferNotReady event. Use the 16-bit value to interact with isochronous endpoints via
+ the StartXfer command. The extra two bits that the controller core produces will be
+ necessary for handling wrap-around conditions in the interaction between software
+ and hardware.
+
+ For all EPCmdCmplt events
+ _ - [27:24]: Command type. The command type that completed (Valid only in a DEPEVT
+ event. Undefined when read from the
+ USBDRD()_UAHC_DEPCMD()[COMMANDPARAM] field).
+
+ For EPCmdCmplt event in response to start transfer command:
+ _ - [22:16]: Transfer resource index (XferRscIdx). The internal hardware transfer
+ resource index assigned to this transfer. This index must be used in
+ all update transfer and end transfer commands. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_depcmdx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_depcmdx bdk_usbdrdx_uahc_depcmdx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DEPCMDX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DEPCMDX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=15)))
+ return 0x86800000c80cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=15)))
+ return 0x86800000c80cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=15)))
+ return 0x86800000c80cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ __bdk_csr_fatal("USBDRDX_UAHC_DEPCMDX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DEPCMDX(a,b) bdk_usbdrdx_uahc_depcmdx_t
+#define bustype_BDK_USBDRDX_UAHC_DEPCMDX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DEPCMDX(a,b) "USBDRDX_UAHC_DEPCMDX"
+#define device_bar_BDK_USBDRDX_UAHC_DEPCMDX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DEPCMDX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_DEPCMDX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_depcmdpar0_#
+ *
+ * USB Device Physical Endpoint-n Command Parameter 0 Register
+ * This register indicates the physical endpoint command parameter 0. It must be programmed
+ * before issuing the command.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST] or
+ * USBDRD()_UAHC_GCTL[CORESOFTRESET] or
+ * USBDRD()_UAHC_USBCMD[HCRST] or USBDRD()_UAHC_USBCMD[LHCRST] or
+ * USBDRD()_UAHC_DCTL[CSFTRST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.11
+ */
+union bdk_usbdrdx_uahc_depcmdpar0_x
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_depcmdpar0_x_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t param0 : 32; /**< [ 31: 0](R/W) Physical endpoint command parameter 0 */
+#else /* Word 0 - Little Endian */
+ uint32_t param0 : 32; /**< [ 31: 0](R/W) Physical endpoint command parameter 0 */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_depcmdpar0_x_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_depcmdpar0_x bdk_usbdrdx_uahc_depcmdpar0_x_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DEPCMDPAR0_X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DEPCMDPAR0_X(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=15)))
+ return 0x86800000c808ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=15)))
+ return 0x86800000c808ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=15)))
+ return 0x86800000c808ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ __bdk_csr_fatal("USBDRDX_UAHC_DEPCMDPAR0_X", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DEPCMDPAR0_X(a,b) bdk_usbdrdx_uahc_depcmdpar0_x_t
+#define bustype_BDK_USBDRDX_UAHC_DEPCMDPAR0_X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DEPCMDPAR0_X(a,b) "USBDRDX_UAHC_DEPCMDPAR0_X"
+#define device_bar_BDK_USBDRDX_UAHC_DEPCMDPAR0_X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DEPCMDPAR0_X(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_DEPCMDPAR0_X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_depcmdpar1_#
+ *
+ * USB Device Physical Endpoint-n Command Parameter 1 Register
+ * This register indicates the physical endpoint command parameter 1. It must be programmed
+ * before issuing the command.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST] or
+ * USBDRD()_UAHC_GCTL[CORESOFTRESET] or
+ * USBDRD()_UAHC_USBCMD[HCRST] or USBDRD()_UAHC_USBCMD[LHCRST] or
+ * USBDRD()_UAHC_DCTL[CSFTRST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.10
+ */
+union bdk_usbdrdx_uahc_depcmdpar1_x
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_depcmdpar1_x_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t param1 : 32; /**< [ 31: 0](R/W) Physical endpoint command parameter 1 */
+#else /* Word 0 - Little Endian */
+ uint32_t param1 : 32; /**< [ 31: 0](R/W) Physical endpoint command parameter 1 */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_depcmdpar1_x_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_depcmdpar1_x bdk_usbdrdx_uahc_depcmdpar1_x_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DEPCMDPAR1_X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DEPCMDPAR1_X(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=15)))
+ return 0x86800000c804ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=15)))
+ return 0x86800000c804ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=15)))
+ return 0x86800000c804ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ __bdk_csr_fatal("USBDRDX_UAHC_DEPCMDPAR1_X", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DEPCMDPAR1_X(a,b) bdk_usbdrdx_uahc_depcmdpar1_x_t
+#define bustype_BDK_USBDRDX_UAHC_DEPCMDPAR1_X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DEPCMDPAR1_X(a,b) "USBDRDX_UAHC_DEPCMDPAR1_X"
+#define device_bar_BDK_USBDRDX_UAHC_DEPCMDPAR1_X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DEPCMDPAR1_X(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_DEPCMDPAR1_X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_depcmdpar2_#
+ *
+ * USB Device Physical Endpoint-n Command Parameter 2 Register
+ * This register indicates the physical endpoint command parameter 2. It must be programmed
+ * before issuing the command.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST] or
+ * USBDRD()_UAHC_GCTL[CORESOFTRESET] or
+ * USBDRD()_UAHC_USBCMD[HCRST] or USBDRD()_UAHC_USBCMD[LHCRST] or
+ * USBDRD()_UAHC_DCTL[CSFTRST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.9
+ */
+union bdk_usbdrdx_uahc_depcmdpar2_x
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_depcmdpar2_x_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t param2 : 32; /**< [ 31: 0](R/W) Physical endpoint command parameter 2 */
+#else /* Word 0 - Little Endian */
+ uint32_t param2 : 32; /**< [ 31: 0](R/W) Physical endpoint command parameter 2 */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_depcmdpar2_x_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_depcmdpar2_x bdk_usbdrdx_uahc_depcmdpar2_x_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DEPCMDPAR2_X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DEPCMDPAR2_X(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=15)))
+ return 0x86800000c800ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=15)))
+ return 0x86800000c800ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=15)))
+ return 0x86800000c800ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ __bdk_csr_fatal("USBDRDX_UAHC_DEPCMDPAR2_X", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DEPCMDPAR2_X(a,b) bdk_usbdrdx_uahc_depcmdpar2_x_t
+#define bustype_BDK_USBDRDX_UAHC_DEPCMDPAR2_X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DEPCMDPAR2_X(a,b) "USBDRDX_UAHC_DEPCMDPAR2_X"
+#define device_bar_BDK_USBDRDX_UAHC_DEPCMDPAR2_X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DEPCMDPAR2_X(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_DEPCMDPAR2_X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dev_imod#
+ *
+ * USB Device Interrupt Moderation Register
+ * This register controls the interrupt moderation feature that allows the device software to
+ * throttle the interrupt rate.
+ */
+union bdk_usbdrdx_uahc_dev_imodx
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_dev_imodx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t device_imodc : 16; /**< [ 31: 16](R/W) Interrupt moderation down counter. Loaded with the [DEVICE_IMODI] value,
+ whenever the hardware interrupt(n) line is de-asserted from the asserted state,
+ counts down to 0, and stops. */
+ uint32_t device_imodi : 16; /**< [ 15: 0](R/W) Minimum inter-interrupt interval between events. The interval is
+ specified in terms of 250 ns increments. */
+#else /* Word 0 - Little Endian */
+ uint32_t device_imodi : 16; /**< [ 15: 0](R/W) Minimum inter-interrupt interval between events. The interval is
+ specified in terms of 250 ns increments. */
+ uint32_t device_imodc : 16; /**< [ 31: 16](R/W) Interrupt moderation down counter. Loaded with the [DEVICE_IMODI] value,
+ whenever the hardware interrupt(n) line is de-asserted from the asserted state,
+ counts down to 0, and stops. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_dev_imodx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dev_imodx bdk_usbdrdx_uahc_dev_imodx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DEV_IMODX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DEV_IMODX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=15)))
+ return 0x86800000ca00ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0xf);
+ __bdk_csr_fatal("USBDRDX_UAHC_DEV_IMODX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DEV_IMODX(a,b) bdk_usbdrdx_uahc_dev_imodx_t
+#define bustype_BDK_USBDRDX_UAHC_DEV_IMODX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DEV_IMODX(a,b) "USBDRDX_UAHC_DEV_IMODX"
+#define device_bar_BDK_USBDRDX_UAHC_DEV_IMODX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DEV_IMODX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_DEV_IMODX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_devten
+ *
+ * USB Device Event Enable Register
+ * This register controls the generation of device-specific events.
+ * If an enable bit is set to 0, the event will not be generated.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.3
+ */
+union bdk_usbdrdx_uahc_devten
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_devten_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_15_31 : 17;
+ uint32_t l1wkupevten : 1; /**< [ 14: 14](R/W) L1 resume detected event enable. */
+ uint32_t stopondisconnecten : 1; /**< [ 13: 13](RO/H) Stop on disconnect enable. NOTE(review): upstream text duplicated the [VNDRDEVTSTRCVEDEN] description; field name suggests stop-on-disconnect -- confirm against the Synopsys DWC_usb3 databook. */
+ uint32_t vndrdevtstrcveden : 1; /**< [ 12: 12](R/W) Vendor device test LMP received event. */
+ uint32_t reserved_10_11 : 2;
+ uint32_t errticerren : 1; /**< [ 9: 9](R/W) Erratic error event enable. */
+ uint32_t l1suspen : 1; /**< [ 8: 8](R/W) Reserved. */
+ uint32_t sofen : 1; /**< [ 7: 7](R/W) Start of (micro)frame enable.
+ For debug purposes only; normally software must disable this event. */
+ uint32_t u3l2l1suspen : 1; /**< [ 6: 6](R/W) U3/L2-L1 suspend event enable. */
+ uint32_t hibernationreqevten : 1; /**< [ 5: 5](R/W) This bit enables/disables the generation of the hibernation request event.
+ Internal:
+ Writing this bit to 0x1 does nothing since we don't have hibernation feature. */
+ uint32_t wkupevten : 1; /**< [ 4: 4](R/W) Resume/remote wakeup detected event enable. */
+ uint32_t ulstcngen : 1; /**< [ 3: 3](R/W) USB/link state change event enable. */
+ uint32_t connectdoneen : 1; /**< [ 2: 2](R/W) Connection done enable. */
+ uint32_t usbrsten : 1; /**< [ 1: 1](R/W) USB reset enable. */
+ uint32_t disconnevten : 1; /**< [ 0: 0](R/W) Disconnect detected event enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t disconnevten : 1; /**< [ 0: 0](R/W) Disconnect detected event enable. */
+ uint32_t usbrsten : 1; /**< [ 1: 1](R/W) USB reset enable. */
+ uint32_t connectdoneen : 1; /**< [ 2: 2](R/W) Connection done enable. */
+ uint32_t ulstcngen : 1; /**< [ 3: 3](R/W) USB/link state change event enable. */
+ uint32_t wkupevten : 1; /**< [ 4: 4](R/W) Resume/remote wakeup detected event enable. */
+ uint32_t hibernationreqevten : 1; /**< [ 5: 5](R/W) This bit enables/disables the generation of the hibernation request event.
+ Internal:
+ Writing this bit to 0x1 does nothing since we don't have hibernation feature. */
+ uint32_t u3l2l1suspen : 1; /**< [ 6: 6](R/W) U3/L2-L1 suspend event enable. */
+ uint32_t sofen : 1; /**< [ 7: 7](R/W) Start of (micro)frame enable.
+ For debug purposes only; normally software must disable this event. */
+ uint32_t l1suspen : 1; /**< [ 8: 8](R/W) Reserved. */
+ uint32_t errticerren : 1; /**< [ 9: 9](R/W) Erratic error event enable. */
+ uint32_t reserved_10_11 : 2;
+ uint32_t vndrdevtstrcveden : 1; /**< [ 12: 12](R/W) Vendor device test LMP received event. */
+ uint32_t stopondisconnecten : 1; /**< [ 13: 13](RO/H) Stop on disconnect enable. NOTE(review): upstream text duplicated the [VNDRDEVTSTRCVEDEN] description; field name suggests stop-on-disconnect -- confirm against the Synopsys DWC_usb3 databook. */
+ uint32_t l1wkupevten : 1; /**< [ 14: 14](R/W) L1 resume detected event enable. */
+ uint32_t reserved_15_31 : 17;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbdrdx_uahc_devten_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_13_31 : 19;
+ uint32_t vndrdevtstrcveden : 1; /**< [ 12: 12](R/W) Vendor device test LMP received event. */
+ uint32_t reserved_10_11 : 2;
+ uint32_t errticerren : 1; /**< [ 9: 9](R/W) Erratic error event enable. */
+ uint32_t reserved_8 : 1;
+ uint32_t sofen : 1; /**< [ 7: 7](R/W) Start of (micro)frame enable.
+ For debug purposes only; normally software must disable this event. */
+ uint32_t u3l2l1suspen : 1; /**< [ 6: 6](R/W) U3/L2-L1 suspend event enable. */
+ uint32_t hibernationreqevten : 1; /**< [ 5: 5](R/W) This bit enables/disables the generation of the hibernation request event.
+ Internal:
+ Writing this bit to 0x1 does nothing since we don't have hibernation feature. */
+ uint32_t wkupevten : 1; /**< [ 4: 4](R/W) Resume/remote wakeup detected event enable. */
+ uint32_t ulstcngen : 1; /**< [ 3: 3](R/W) USB/link state change event enable. */
+ uint32_t connectdoneen : 1; /**< [ 2: 2](R/W) Connection done enable. */
+ uint32_t usbrsten : 1; /**< [ 1: 1](R/W) USB reset enable. */
+ uint32_t disconnevten : 1; /**< [ 0: 0](R/W) Disconnect detected event enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t disconnevten : 1; /**< [ 0: 0](R/W) Disconnect detected event enable. */
+ uint32_t usbrsten : 1; /**< [ 1: 1](R/W) USB reset enable. */
+ uint32_t connectdoneen : 1; /**< [ 2: 2](R/W) Connection done enable. */
+ uint32_t ulstcngen : 1; /**< [ 3: 3](R/W) USB/link state change event enable. */
+ uint32_t wkupevten : 1; /**< [ 4: 4](R/W) Resume/remote wakeup detected event enable. */
+ uint32_t hibernationreqevten : 1; /**< [ 5: 5](R/W) This bit enables/disables the generation of the hibernation request event.
+ Internal:
+ Writing this bit to 0x1 does nothing since we don't have hibernation feature. */
+ uint32_t u3l2l1suspen : 1; /**< [ 6: 6](R/W) U3/L2-L1 suspend event enable. */
+ uint32_t sofen : 1; /**< [ 7: 7](R/W) Start of (micro)frame enable.
+ For debug purposes only; normally software must disable this event. */
+ uint32_t reserved_8 : 1;
+ uint32_t errticerren : 1; /**< [ 9: 9](R/W) Erratic error event enable. */
+ uint32_t reserved_10_11 : 2;
+ uint32_t vndrdevtstrcveden : 1; /**< [ 12: 12](R/W) Vendor device test LMP received event. */
+ uint32_t reserved_13_31 : 19;
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_usbdrdx_uahc_devten_s cn9; */
+};
+typedef union bdk_usbdrdx_uahc_devten bdk_usbdrdx_uahc_devten_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DEVTEN(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DEVTEN(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c708ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c708ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c708ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_DEVTEN", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DEVTEN(a) bdk_usbdrdx_uahc_devten_t
+#define bustype_BDK_USBDRDX_UAHC_DEVTEN(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DEVTEN(a) "USBDRDX_UAHC_DEVTEN"
+#define device_bar_BDK_USBDRDX_UAHC_DEVTEN(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DEVTEN(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DEVTEN(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dgcmd
+ *
+ * USB Device Generic Command Register
+ * This register enables software to program the core using a single generic command interface to
+ * send link management packets and notifications. This register contains command, control, and
+ * status fields relevant to the current generic command, while the USBDRD()_UAHC_DGCMDPAR
+ * register provides the command parameter.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST] or
+ * USBDRD()_UAHC_GCTL[CORESOFTRESET] or
+ * USBDRD()_UAHC_USBCMD[HCRST] or USBDRD()_UAHC_USBCMD[LHCRST] or
+ * USBDRD()_UAHC_DCTL[CSFTRST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.6
+ */
+union bdk_usbdrdx_uahc_dgcmd
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_dgcmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t cmdstatus : 1; /**< [ 15: 15](RO) Command status.
+ 0 = Indicates command success.
+ 1 = CmdErr - Indicates that the device controller encountered an error
+ while processing the command. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t cmdact : 1; /**< [ 10: 10](R/W1S/H) Command active.
+ The software sets this bit to 1 to enable the device controller to execute the
+ generic command.
+ The device controller sets this bit to 0 after executing the command. */
+ uint32_t reserved_9 : 1;
+ uint32_t cmdioc : 1; /**< [ 8: 8](WO) Command interrupt on complete.
+ When this bit is set, the device controller issues a generic command
+ completion event after executing the command. Note that this interrupt is
+ mapped to USBDRD()_UAHC_DCFG[INTRNUM].
+ Note: This field must not be set to 1 if the USBDRD()_UAHC_DCTL[RS] field is 0. */
+ uint32_t cmdtyp : 8; /**< [ 7: 0](WO) Specifies the type of command the software driver is requesting the core to
+ perform. See USBDRD_UAHC_DGCMD_CMDTYPE_E for encodings and usage. */
+#else /* Word 0 - Little Endian */
+ uint32_t cmdtyp : 8; /**< [ 7: 0](WO) Specifies the type of command the software driver is requesting the core to
+ perform. See USBDRD_UAHC_DGCMD_CMDTYPE_E for encodings and usage. */
+ uint32_t cmdioc : 1; /**< [ 8: 8](WO) Command interrupt on complete.
+ When this bit is set, the device controller issues a generic command
+ completion event after executing the command. Note that this interrupt is
+ mapped to USBDRD()_UAHC_DCFG[INTRNUM].
+ Note: This field must not set to 1 if the USBDRD()_UAHC_DCTL[RS] field is 0. */
+ uint32_t reserved_9 : 1;
+ uint32_t cmdact : 1; /**< [ 10: 10](R/W1S/H) Command active.
+ The software sets this bit to 1 to enable the device controller to execute the
+ generic command.
+ The device controller sets this bit to 0 after executing the command. */
+ uint32_t reserved_11_14 : 4;
+ uint32_t cmdstatus : 1; /**< [ 15: 15](RO) Command status.
+ 0 = Indicates command success.
+ 1 = CmdErr - Indicates that the device controller encountered an error
+ while processing the command. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_dgcmd_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dgcmd bdk_usbdrdx_uahc_dgcmd_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DGCMD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DGCMD(unsigned long a)
+{
+    /* Same base address and node stride on every supported model; only two
+       USB ports (a <= 1) exist. Out-of-range indices are fatal. */
+    if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x86800000c714ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_DGCMD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DGCMD(a) bdk_usbdrdx_uahc_dgcmd_t
+#define bustype_BDK_USBDRDX_UAHC_DGCMD(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DGCMD(a) "USBDRDX_UAHC_DGCMD"
+#define device_bar_BDK_USBDRDX_UAHC_DGCMD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DGCMD(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DGCMD(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dgcmdpar
+ *
+ * USB Device Generic Command Parameter Register
+ * This register indicates the device command parameter.
+ * This must be programmed before or along with USBDRD()_UAHC_DGCMD.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST] or
+ * USBDRD()_UAHC_GCTL[CORESOFTRESET] or
+ * USBDRD()_UAHC_USBCMD[HCRST] or USBDRD()_UAHC_USBCMD[LHCRST] or
+ * USBDRD()_UAHC_DCTL[CSFTRST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.5
+ */
+union bdk_usbdrdx_uahc_dgcmdpar
+{
+    /* Flat 32-bit access to the full register. */
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_dgcmdpar_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t param                 : 32; /**< [ 31:  0](R/W) Device generic command parameter.
+                                                                 Usage depends on which USBDRD()_UAHC_DGCMD[CMDTYPE] is used,
+                                                                 see usage notes in USBDRD_UAHC_DGCMD_CMDTYPE_E descriptions. */
+#else /* Word 0 - Little Endian */
+        uint32_t param                 : 32; /**< [ 31:  0](R/W) Device generic command parameter.
+                                                                 Usage depends on which USBDRD()_UAHC_DGCMD[CMDTYPE] is used,
+                                                                 see usage notes in USBDRD_UAHC_DGCMD_CMDTYPE_E descriptions. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_dgcmdpar_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dgcmdpar bdk_usbdrdx_uahc_dgcmdpar_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DGCMDPAR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DGCMDPAR(unsigned long a)
+{
+    /* Address is identical on CN81XX/CN83XX/CN9XXX; two ports only. */
+    if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x86800000c710ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_DGCMDPAR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DGCMDPAR(a) bdk_usbdrdx_uahc_dgcmdpar_t
+#define bustype_BDK_USBDRDX_UAHC_DGCMDPAR(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DGCMDPAR(a) "USBDRDX_UAHC_DGCMDPAR"
+#define device_bar_BDK_USBDRDX_UAHC_DGCMDPAR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DGCMDPAR(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DGCMDPAR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dnctrl
+ *
+ * USB XHCI Device Notification Control Register
+ * This register is used by software to enable or disable the reporting of the reception of
+ * specific USB device
+ * notification transaction packets.
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.4.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_dnctrl
+{
+    /* Flat 32-bit access to the full register. */
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_dnctrl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t n                     : 16; /**< [ 15:  0](R/W) Notification enable. */
+#else /* Word 0 - Little Endian */
+        uint32_t n                     : 16; /**< [ 15:  0](R/W) Notification enable. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_dnctrl_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dnctrl bdk_usbdrdx_uahc_dnctrl_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DNCTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DNCTRL(unsigned long a)
+{
+    /* Address is identical on CN81XX/CN83XX/CN9XXX; two ports only. */
+    if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x868000000034ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_DNCTRL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DNCTRL(a) bdk_usbdrdx_uahc_dnctrl_t
+#define bustype_BDK_USBDRDX_UAHC_DNCTRL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DNCTRL(a) "USBDRDX_UAHC_DNCTRL"
+#define device_bar_BDK_USBDRDX_UAHC_DNCTRL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DNCTRL(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DNCTRL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_dsts
+ *
+ * USB Device Status Register
+ * This register indicates the status of the device controller with respect to USB-related
+ * events.
+ *
+ * This register can be reset by IOI reset or USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.3.4
+ */
+union bdk_usbdrdx_uahc_dsts
+{
+    /* Flat 32-bit access to the full register. */
+    uint32_t u;
+    /* Field view; the two #if halves below describe the same bits, listed in
+       opposite order for each byte order — keep them in sync. */
+    struct bdk_usbdrdx_uahc_dsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_30_31        : 2;
+        uint32_t dcnrd                 : 1;  /**< [ 29: 29](RO/H) Device controller not ready.
+                                                                 Will always read-as-zero.
+
+                                                                 Internal:
+                                                                 Bit is only used with hibernation. */
+        uint32_t sre                   : 1;  /**< [ 28: 28](R/W1C/H) Save/restore error.
+                                                                 This bit is currently not supported. */
+        uint32_t reserved_26_27        : 2;
+        uint32_t rss                   : 1;  /**< [ 25: 25](RO) Restore state status.
+                                                                 This bit is similar to the USBDRD()_UAHC_USBSTS[RSS] in host mode.
+                                                                 When the controller has finished the restore process, it will complete the
+                                                                 command by setting RSS to 0.
+
+                                                                 Will always read-as-zero.
+
+                                                                 Internal:
+                                                                 Bit is only used with hibernation. */
+        uint32_t sss                   : 1;  /**< [ 24: 24](RO) Save state status.
+                                                                 This bit is similar to the USBDRD()_UAHC_USBSTS[SSS] in host mode.
+                                                                 When the controller has finished the save process, it will complete the
+                                                                 command by setting SSS to 0.
+
+                                                                 Will always read-as-zero.
+
+                                                                 Internal:
+                                                                 Bit is only used with hibernation. */
+        uint32_t coreidle              : 1;  /**< [ 23: 23](RO/H) Core idle.
+                                                                 The bit indicates that the core finished transferring all RxFIFO data to
+                                                                 system memory, writing out all completed descriptors, and all event counts
+                                                                 are zero.
+
+                                                                 Note: While testing for reset values, mask out the read value. This bit
+                                                                 represents the changing state of the core and does not hold a static value. */
+        uint32_t devctrlhlt            : 1;  /**< [ 22: 22](RO/H) Device controller halted.
+                                                                 When 1, the core does not generate device events.
+                                                                 - This bit is set to 0 when the USBDRD()_UAHC_DCTL[RS] register is set to 1.
+                                                                 - The core sets this bit to 1 when, after software sets USBDRD()_UAHC_DCTL[RS] to 0,
+                                                                 the core is
+                                                                 idle and the lower layer finishes the disconnect process. */
+        uint32_t usblnkst              : 4;  /**< [ 21: 18](RO/H) USB/link state.
+                                                                 In SuperSpeed mode, uses LTSSM State:
+                                                                 0x0 = U0.
+                                                                 0x1 = U1.
+                                                                 0x2 = U2.
+                                                                 0x3 = U3.
+                                                                 0x4 = SS_DIS.
+                                                                 0x5 = RX_DET.
+                                                                 0x6 = SS_INACT.
+                                                                 0x7 = POLL.
+                                                                 0x8 = RECOV.
+                                                                 0x9 = HRESET.
+                                                                 0xa = CMPLY.
+                                                                 0xb = LPBK.
+                                                                 0xf = Resume/Reset.
+                                                                 others: Reserved.
+
+                                                                 In high-speed/full-speed/low-speed mode:
+                                                                 0x0 = On state.
+                                                                 0x2 = Sleep (L1) state.
+                                                                 0x3 = Suspend (L2) state.
+                                                                 0x4 = Disconnected state (Default state).
+                                                                 0x5 = Early Suspend state.
+                                                                 others: Reserved.
+
+                                                                 The link state resume/reset indicates that the core received a resume or USB
+                                                                 reset request from the host while the link was in hibernation. Software must
+                                                                 write 0x8 (recovery) to the USBDRD()_UAHC_DCTL[ULSTCHNGREQ] field to acknowledge
+                                                                 the resume/reset request. */
+        uint32_t rxfifoempty           : 1;  /**< [ 17: 17](RO/H) RxFIFO empty indication. */
+        uint32_t soffn                 : 14; /**< [ 16:  3](RO/H) Frame/MicroFrame number of the received SOF.
+
+                                                                 When the core is operating at high-speed:
+                                                                 \<16:6\> = Frame number.
+                                                                 \<5:3\> = Microframe number.
+
+                                                                 When the core is operating at full-speed:
+                                                                 \<16:14\> = Not used, software can ignore these three bits.
+                                                                 \<13:3\> = Frame number. */
+        uint32_t connectspd            : 3;  /**< [  2:  0](RO/H) Connected speed.
+                                                                 Indicates the speed at which the controller core has come up after speed
+                                                                 detection through a chirp sequence.
+                                                                 0x0 = High-speed (PHY clock is running at 60 MHz).
+                                                                 0x1 = Full-speed (PHY clock is running at 60 MHz).
+                                                                 0x2 = Low-speed (not supported).
+                                                                 0x3 = Full-speed (PHY clock is running at 48 MHz).
+                                                                 0x4 = SuperSpeed (PHY clock is running at 125 or 250 MHz). */
+#else /* Word 0 - Little Endian */
+        uint32_t connectspd            : 3;  /**< [  2:  0](RO/H) Connected speed.
+                                                                 Indicates the speed at which the controller core has come up after speed
+                                                                 detection through a chirp sequence.
+                                                                 0x0 = High-speed (PHY clock is running at 60 MHz).
+                                                                 0x1 = Full-speed (PHY clock is running at 60 MHz).
+                                                                 0x2 = Low-speed (not supported).
+                                                                 0x3 = Full-speed (PHY clock is running at 48 MHz).
+                                                                 0x4 = SuperSpeed (PHY clock is running at 125 or 250 MHz). */
+        uint32_t soffn                 : 14; /**< [ 16:  3](RO/H) Frame/MicroFrame number of the received SOF.
+
+                                                                 When the core is operating at high-speed:
+                                                                 \<16:6\> = Frame number.
+                                                                 \<5:3\> = Microframe number.
+
+                                                                 When the core is operating at full-speed:
+                                                                 \<16:14\> = Not used, software can ignore these three bits.
+                                                                 \<13:3\> = Frame number. */
+        uint32_t rxfifoempty           : 1;  /**< [ 17: 17](RO/H) RxFIFO empty indication. */
+        uint32_t usblnkst              : 4;  /**< [ 21: 18](RO/H) USB/link state.
+                                                                 In SuperSpeed mode, uses LTSSM State:
+                                                                 0x0 = U0.
+                                                                 0x1 = U1.
+                                                                 0x2 = U2.
+                                                                 0x3 = U3.
+                                                                 0x4 = SS_DIS.
+                                                                 0x5 = RX_DET.
+                                                                 0x6 = SS_INACT.
+                                                                 0x7 = POLL.
+                                                                 0x8 = RECOV.
+                                                                 0x9 = HRESET.
+                                                                 0xa = CMPLY.
+                                                                 0xb = LPBK.
+                                                                 0xf = Resume/Reset.
+                                                                 others: Reserved.
+
+                                                                 In high-speed/full-speed/low-speed mode:
+                                                                 0x0 = On state.
+                                                                 0x2 = Sleep (L1) state.
+                                                                 0x3 = Suspend (L2) state.
+                                                                 0x4 = Disconnected state (Default state).
+                                                                 0x5 = Early Suspend state.
+                                                                 others: Reserved.
+
+                                                                 The link state resume/reset indicates that the core received a resume or USB
+                                                                 reset request from the host while the link was in hibernation. Software must
+                                                                 write 0x8 (recovery) to the USBDRD()_UAHC_DCTL[ULSTCHNGREQ] field to acknowledge
+                                                                 the resume/reset request. */
+        uint32_t devctrlhlt            : 1;  /**< [ 22: 22](RO/H) Device controller halted.
+                                                                 When 1, the core does not generate device events.
+                                                                 - This bit is set to 0 when the USBDRD()_UAHC_DCTL[RS] register is set to 1.
+                                                                 - The core sets this bit to 1 when, after software sets USBDRD()_UAHC_DCTL[RS] to 0,
+                                                                 the core is
+                                                                 idle and the lower layer finishes the disconnect process. */
+        uint32_t coreidle              : 1;  /**< [ 23: 23](RO/H) Core idle.
+                                                                 The bit indicates that the core finished transferring all RxFIFO data to
+                                                                 system memory, writing out all completed descriptors, and all event counts
+                                                                 are zero.
+
+                                                                 Note: While testing for reset values, mask out the read value. This bit
+                                                                 represents the changing state of the core and does not hold a static value. */
+        uint32_t sss                   : 1;  /**< [ 24: 24](RO) Save state status.
+                                                                 This bit is similar to the USBDRD()_UAHC_USBSTS[SSS] in host mode.
+                                                                 When the controller has finished the save process, it will complete the
+                                                                 command by setting SSS to 0.
+
+                                                                 Will always read-as-zero.
+
+                                                                 Internal:
+                                                                 Bit is only used with hibernation. */
+        uint32_t rss                   : 1;  /**< [ 25: 25](RO) Restore state status.
+                                                                 This bit is similar to the USBDRD()_UAHC_USBSTS[RSS] in host mode.
+                                                                 When the controller has finished the restore process, it will complete the
+                                                                 command by setting RSS to 0.
+
+                                                                 Will always read-as-zero.
+
+                                                                 Internal:
+                                                                 Bit is only used with hibernation. */
+        uint32_t reserved_26_27        : 2;
+        uint32_t sre                   : 1;  /**< [ 28: 28](R/W1C/H) Save/restore error.
+                                                                 This bit is currently not supported. */
+        uint32_t dcnrd                 : 1;  /**< [ 29: 29](RO/H) Device controller not ready.
+                                                                 Will always read-as-zero.
+
+                                                                 Internal:
+                                                                 Bit is only used with hibernation. */
+        uint32_t reserved_30_31        : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_dsts_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_dsts bdk_usbdrdx_uahc_dsts_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_DSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_DSTS(unsigned long a)
+{
+    /* Address is identical on CN81XX/CN83XX/CN9XXX; two ports only. */
+    if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x86800000c70cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_DSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_DSTS(a) bdk_usbdrdx_uahc_dsts_t
+#define bustype_BDK_USBDRDX_UAHC_DSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_DSTS(a) "USBDRDX_UAHC_DSTS"
+#define device_bar_BDK_USBDRDX_UAHC_DSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_DSTS(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_DSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_erdp#
+ *
+ * USB XHCI Event Ring Dequeue Pointer Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.5.2.3.3.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_erdpx
+{
+    /* Flat 64-bit access to the full register. */
+    uint64_t u;
+    struct bdk_usbdrdx_uahc_erdpx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t erdp                  : 60; /**< [ 63:  4](R/W) Event ring dequeue pointer bits \<63:4\>. */
+        uint64_t ehb                   : 1;  /**< [  3:  3](R/W1C/H) Event handler busy */
+        uint64_t desi                  : 3;  /**< [  2:  0](R/W) Dequeue ERST segment index. */
+#else /* Word 0 - Little Endian */
+        uint64_t desi                  : 3;  /**< [  2:  0](R/W) Dequeue ERST segment index. */
+        uint64_t ehb                   : 1;  /**< [  3:  3](R/W1C/H) Event handler busy */
+        uint64_t erdp                  : 60; /**< [ 63:  4](R/W) Event ring dequeue pointer bits \<63:4\>. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_erdpx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_erdpx bdk_usbdrdx_uahc_erdpx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_ERDPX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_ERDPX(unsigned long a, unsigned long b)
+{
+    /* Same address on all supported models: two ports, one interrupter (b==0). */
+    if ((a<=1) && (b==0) &&
+        (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x868000000478ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_ERDPX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_ERDPX(a,b) bdk_usbdrdx_uahc_erdpx_t
+#define bustype_BDK_USBDRDX_UAHC_ERDPX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_ERDPX(a,b) "USBDRDX_UAHC_ERDPX"
+#define device_bar_BDK_USBDRDX_UAHC_ERDPX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_ERDPX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_ERDPX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_erstba#
+ *
+ * USB XHCI Event-Ring Segment-Table Base-Address Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.5.2.3.2.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_erstbax
+{
+    /* Flat 64-bit access to the full register. */
+    uint64_t u;
+    struct bdk_usbdrdx_uahc_erstbax_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t erstba                : 58; /**< [ 63:  6](R/W) Event-ring segment-table base-address bits\<63:6\>. */
+        uint64_t reserved_0_5          : 6;
+#else /* Word 0 - Little Endian */
+        uint64_t reserved_0_5          : 6;
+        uint64_t erstba                : 58; /**< [ 63:  6](R/W) Event-ring segment-table base-address bits\<63:6\>. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_erstbax_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_erstbax bdk_usbdrdx_uahc_erstbax_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_ERSTBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_ERSTBAX(unsigned long a, unsigned long b)
+{
+    /* Same address on all supported models: two ports, one interrupter (b==0). */
+    if ((a<=1) && (b==0) &&
+        (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x868000000470ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_ERSTBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_ERSTBAX(a,b) bdk_usbdrdx_uahc_erstbax_t
+#define bustype_BDK_USBDRDX_UAHC_ERSTBAX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_ERSTBAX(a,b) "USBDRDX_UAHC_ERSTBAX"
+#define device_bar_BDK_USBDRDX_UAHC_ERSTBAX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_ERSTBAX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_ERSTBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_erstsz#
+ *
+ * USB XHCI Event-Ring Segment-Table Size Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.5.2.3.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_erstszx
+{
+    /* Flat 32-bit access to the full register. */
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_erstszx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t erstsz                : 16; /**< [ 15:  0](R/W) Event-ring segment-table size. */
+#else /* Word 0 - Little Endian */
+        uint32_t erstsz                : 16; /**< [ 15:  0](R/W) Event-ring segment-table size. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_erstszx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_erstszx bdk_usbdrdx_uahc_erstszx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_ERSTSZX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_ERSTSZX(unsigned long a, unsigned long b)
+{
+    /* Same address on all supported models: two ports, one interrupter (b==0). */
+    if ((a<=1) && (b==0) &&
+        (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x868000000468ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_ERSTSZX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_ERSTSZX(a,b) bdk_usbdrdx_uahc_erstszx_t
+#define bustype_BDK_USBDRDX_UAHC_ERSTSZX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_ERSTSZX(a,b) "USBDRDX_UAHC_ERSTSZX"
+#define device_bar_BDK_USBDRDX_UAHC_ERSTSZX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_ERSTSZX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_ERSTSZX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_gbuserraddr
+ *
+ * USB UAHC Bus-Error-Address Register
+ * When the AXI master bus returns error response, the SoC bus error is generated. In the host
+ * mode, the host_system_err port indicates this condition. In addition, it is also indicated in
+ * USBDRD()_UAHC_USBSTS[HSE]. Due to the nature of AXI, it is possible that multiple AXI
+ * transactions
+ * are active at a time. The host controller does not keep track of the start address of all
+ * outstanding transactions. Instead, it keeps track of the start address of the DMA transfer
+ * associated with all active transactions. It is this address that is reported in
+ * USBDRD()_UAHC_GBUSERRADDR when a bus error occurs. For example, if the host controller
+ * initiates
+ * a DMA
+ * transfer to write 1k of packet data starting at buffer address 0xABCD0000, and this DMA is
+ * broken up into multiple 256B bursts on the AXI, then if a bus error occurs on any of these
+ * associated AXI transfers, USBDRD()_UAHC_GBUSERRADDR reflects the DMA start address of
+ * 0xABCD0000
+ * regardless of which AXI transaction received the error.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.13.
+ */
+union bdk_usbdrdx_uahc_gbuserraddr
+{
+    /* Flat 64-bit access to the full register. */
+    uint64_t u;
+    struct bdk_usbdrdx_uahc_gbuserraddr_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t busaddr               : 64; /**< [ 63:  0](RO/H) Bus address. Contains the first bus address that encountered an SoC bus error. It is valid
+                                                                 when the USBDRD()_UAHC_GSTS[BUSERRADDRVLD] = 1. It can only be cleared by resetting the
+                                                                 core. */
+#else /* Word 0 - Little Endian */
+        uint64_t busaddr               : 64; /**< [ 63:  0](RO/H) Bus address. Contains the first bus address that encountered an SoC bus error. It is valid
+                                                                 when the USBDRD()_UAHC_GSTS[BUSERRADDRVLD] = 1. It can only be cleared by resetting the
+                                                                 core. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_gbuserraddr_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gbuserraddr bdk_usbdrdx_uahc_gbuserraddr_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GBUSERRADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GBUSERRADDR(unsigned long a)
+{
+    /* Address is identical on CN81XX/CN83XX/CN9XXX; two ports only. */
+    if ((a<=1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) || CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x86800000c130ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GBUSERRADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GBUSERRADDR(a) bdk_usbdrdx_uahc_gbuserraddr_t
+#define bustype_BDK_USBDRDX_UAHC_GBUSERRADDR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_GBUSERRADDR(a) "USBDRDX_UAHC_GBUSERRADDR"
+#define device_bar_BDK_USBDRDX_UAHC_GBUSERRADDR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GBUSERRADDR(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GBUSERRADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gctl
+ *
+ * USB UAHC Control Register
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.5.
+ */
+union bdk_usbdrdx_uahc_gctl
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pwrdnscale : 13; /**< [ 31: 19](R/W) Power down scale. The USB3 suspend-clock input replaces pipe3_rx_pclk as a clock source to
+ a small part of the USB3 core that operates when the SuperSpeed PHY is in its lowest power
+ (P3) state, and therefore does not provide a clock. This field specifies how many suspend-
+ clock periods fit into a 16 kHz clock period. When performing the division, round up the
+ remainder.
+
+ For example, when using an 32-bit PHY and 25-MHz suspend clock, PWRDNSCALE = 25000 kHz/16
+ kHz = 1563 (rounded up).
+
+ The minimum suspend-clock frequency is 32 KHz, and maximum suspend-clock frequency is 125
+ MHz.
+
+ The LTSSM uses suspend clock for 12-ms and 100-ms timers during suspend mode. According to
+ the USB 3.0 specification, the accuracy on these timers is 0% to +50%. 12 ms + 0~+50%
+ accuracy = 18 ms (Range is 12 ms - 18 ms)
+ 100 ms + 0~+50% accuracy = 150 ms (Range is 100 ms - 150 ms).
+
+ The suspend clock accuracy requirement is:
+ _ (12,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 12,000 and
+ 18,000
+ _ (100,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 100,000 and
+ 150,000
+
+ For example, if your suspend_clk frequency varies from 7.5 MHz to 10.5 MHz, then the value
+ needs to programmed is: power down scale = 10500/16 = 657 (rounded up; and fastest
+ frequency used). */
+ uint32_t masterfiltbypass : 1; /**< [ 18: 18](R/W) Master filter bypass. Not relevant for Cavium's configuration. */
+ uint32_t bypssetaddr : 1; /**< [ 17: 17](R/W) Bypass SetAddress in device mode.
+ Always set to 0.
+
+ Internal:
+ When set, core uses the value in USBDRD()_UAHC_DCFG[DEVADDR] directly
+ for comparing the device address tokens. In simulation, this can be used to avoid
+ sending a SET_ADDRESS command. */
+ uint32_t u2rstecn : 1; /**< [ 16: 16](R/W) If the SuperSpeed connection fails during POLL or LMP exchange, the device connects
+ at non-SuperSpeed mode. If this bit is set, then the device attempts three more times to
+ connect at SuperSpeed, even if it previously failed to operate in SuperSpeed mode.
+ This bit is only applicable in device mode. */
+ uint32_t frmscldwn : 2; /**< [ 15: 14](R/W) Frame scale down. Scales down device view of a SOF/USOF/ITP duration.
+ For SuperSpeed/high-speed mode:
+ 0x0 = Interval is 125 us.
+ 0x1 = Interval is 62.5 us.
+ 0x2 = Interval is 31.25 us.
+ 0x3 = Interval is 15.625 us.
+
+ For full speed mode, the scale down value is multiplied by 8. */
+ uint32_t prtcapdir : 2; /**< [ 13: 12](R/W) 0x1 = for Host configurations.
+ 0x2 = for Device configurations. */
+ uint32_t coresoftreset : 1; /**< [ 11: 11](R/W) Core soft reset: 1 = soft reset to core, 0 = no soft reset.
+ Clears the interrupts and all the USBDRD()_UAHC_* CSRs except the
+ following registers: USBDRD()_UAHC_GCTL, USBDRD()_UAHC_GUCTL, USBDRD()_UAHC_GSTS,
+ USBDRD()_UAHC_GRLSID, USBDRD()_UAHC_GGPIO, USBDRD()_UAHC_GUID,
+ USBDRD()_UAHC_GUSB2PHYCFG(),
+ USBDRD()_UAHC_GUSB3PIPECTL().
+
+ When you reset PHYs (using USBDRD()_UAHC_GUSB2PHYCFG() or
+ USBDRD()_UAHC_GUSB3PIPECTL()), you must keep the core in reset state until PHY
+ clocks are stable. This controls the bus, RAM, and MAC domain resets.
+
+ Internal:
+ Refer to Reset Generation on Synopsys Databook page 250.
+ Under soft reset, accesses to USBDRD()_UAHC_* CSRs other than USBDRD()_UAHC_GCTL may fail
+ (timeout).
+ This bit is for debug purposes only. Use USBDRD()_UAHC_USBCMD[HCRST] for soft reset. */
+ uint32_t sofitpsync : 1; /**< [ 10: 10](R/W) Synchronize ITP to reference clock. In host mode, if this bit is set to:
+ 0 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever
+ there is a SuperSpeed port that is not in Rx.Detect, SS.Disable, and U3 state.
+ 1 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever the
+ other non-SuperSpeed ports are not in suspended state.
+
+ This feature is useful because it saves power by suspending UTMI/ULPI when SuperSpeed only
+ is active and it helps resolve when the PHY does not transmit a host resume unless it is
+ placed in suspend state.
+ USBDRD()_UAHC_GUSB2PHYCFG()[SUSPHY] eventually decides to put the UTMI/ULPI PHY in to
+ suspend
+ state. In addition, when this bit is set to 1, the core generates ITP off of the REF_CLK-
+ based counter. Otherwise, ITP and SOF are generated off of UTMI/ULPI_CLK[0] based counter.
+
+ To program the reference clock period inside the core, refer to
+ USBDRD()_UAHC_GUCTL[REFCLKPER].
+
+ If you do not plan to ever use this feature or the
+ USBDRD()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL]
+ feature, the minimum frequency for the ref_clk can be as low as 32 KHz. You can connect
+ the
+ SUSPEND_CLK (as low as 32 KHz) to REF_CLK.
+
+ If you plan to enable hardware-based LPM (PORTPMSC[HLE] = 1), this feature cannot be used.
+ Turn off this feature by setting this bit to zero and use the
+ USBDRD()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL] feature.
+
+ If you set this bit to 1, the USBDRD()_UAHC_GUSB2PHYCFG()[U2_FREECLK_EXISTS] bit
+ must be set to 0. */
+ uint32_t u1u2timerscale : 1; /**< [ 9: 9](R/W) Disable U1/U2 timer scaledown. If set to 1, along with SCALEDOWN = 0x1, disables the scale
+ down of U1/U2 inactive timer values.
+ This is for simulation mode only. */
+ uint32_t debugattach : 1; /**< [ 8: 8](R/W) Debug attach. When this bit is set:
+ * SuperSpeed link proceeds directly to the polling-link state (USBDRD()_UAHC_DCTL[RS] = 1)
+ without checking remote termination.
+ * Link LFPS polling timeout is infinite.
+ * Polling timeout during TS1 is infinite (in case link is waiting for TXEQ to finish). */
+ uint32_t ramclksel : 2; /**< [ 7: 6](R/W) RAM clock select. Always keep set to 0x0. */
+ uint32_t scaledown : 2; /**< [ 5: 4](R/W) Scale-down mode. When scale-down mode is enabled for simulation, the core uses scaled-down
+ timing values, resulting in faster simulations. When scale-down mode is disabled, actual
+ timing values are used. This is required for hardware operation.
+
+ High-speed/full-speed/low-speed modes:
+ 0x0 = Disables all scale-downs. Actual timing values are used.
+ 0x1 = Enables scale-down of all timing values. These include:
+ * Speed enumeration.
+ * HNP/SRP.
+ * Suspend and resume.
+
+ 0x2 = N/A
+ 0x3 = Enables bits \<0\> and \<1\> scale-down timing values.
+
+ SuperSpeed mode:
+ 0x0 = Disables all scale-downs. Actual timing values are used.
+ 0x1 = Enables scaled down SuperSpeed timing and repeat values including:
+ * Number of TxEq training sequences reduce to eight.
+ * LFPS polling burst time reduce to 100 ns.
+ * LFPS warm reset receive reduce to 30 us.
+
+ Internal:
+ Refer to the rtl_vip_scaledown_mapping.xls file under \<workspace\>/sim/SoC_sim
+ directory for the complete list.
+ 0x2 = No TxEq training sequences are sent. Overrides bit\<4\>.
+ 0x3 = Enables bits\<0\> and \<1\> scale-down timing values. */
+ uint32_t disscramble : 1; /**< [ 3: 3](R/W) Disable scrambling. Transmit request to link partner on next transition to recovery or polling. */
+ uint32_t u2exit_lfps : 1; /**< [ 2: 2](R/W) LFPS U2 exit.
+ 0 = The link treats 248 ns LFPS as a valid U2 exit.
+ 1 = The link waits for 8 us of LFPS before it detects a valid U2 exit.
+
+ This bit is added to improve interoperability with a third party host controller. This
+ host controller in U2 state while performing receiver detection generates an LFPS glitch
+ of about 4s duration. This causes the device to exit from U2 state because the LFPS filter
+ value is 248 ns. With the new functionality enabled, the device can stay in U2 while
+ ignoring this glitch from the host controller. */
+ uint32_t gblhibernationen : 1; /**< [ 1: 1](RO) This bit enables hibernation at the global level. */
+ uint32_t dsblclkgtng : 1; /**< [ 0: 0](R/W) Disable clock gating. When set to 1 and the core is in low power mode, internal clock
+ gating is disabled, which means the clocks are always running. This bit can be set to 1
+ after power-up reset. */
+#else /* Word 0 - Little Endian */
+ uint32_t dsblclkgtng : 1; /**< [ 0: 0](R/W) Disable clock gating. When set to 1 and the core is in low power mode, internal clock
+ gating is disabled, which means the clocks are always running. This bit can be set to 1
+ after power-up reset. */
+ uint32_t gblhibernationen : 1; /**< [ 1: 1](RO) This bit enables hibernation at the global level. */
+ uint32_t u2exit_lfps : 1; /**< [ 2: 2](R/W) LFPS U2 exit.
+ 0 = The link treats 248 ns LFPS as a valid U2 exit.
+ 1 = The link waits for 8 us of LFPS before it detects a valid U2 exit.
+
+ This bit is added to improve interoperability with a third party host controller. This
+ host controller in U2 state while performing receiver detection generates an LFPS glitch
+ of about 4s duration. This causes the device to exit from U2 state because the LFPS filter
+ value is 248 ns. With the new functionality enabled, the device can stay in U2 while
+ ignoring this glitch from the host controller. */
+ uint32_t disscramble : 1; /**< [ 3: 3](R/W) Disable scrambling. Transmit request to link partner on next transition to recovery or polling. */
+ uint32_t scaledown : 2; /**< [ 5: 4](R/W) Scale-down mode. When scale-down mode is enabled for simulation, the core uses scaled-down
+ timing values, resulting in faster simulations. When scale-down mode is disabled, actual
+ timing values are used. This is required for hardware operation.
+
+ High-speed/full-speed/low-speed modes:
+ 0x0 = Disables all scale-downs. Actual timing values are used.
+ 0x1 = Enables scale-down of all timing values. These include:
+ * Speed enumeration.
+ * HNP/SRP.
+ * Suspend and resume.
+
+ 0x2 = N/A
+ 0x3 = Enables bits \<0\> and \<1\> scale-down timing values.
+
+ SuperSpeed mode:
+ 0x0 = Disables all scale-downs. Actual timing values are used.
+ 0x1 = Enables scaled down SuperSpeed timing and repeat values including:
+ * Number of TxEq training sequences reduce to eight.
+ * LFPS polling burst time reduce to 100 ns.
+ * LFPS warm reset receive reduce to 30 us.
+
+ Internal:
+ Refer to the rtl_vip_scaledown_mapping.xls file under \<workspace\>/sim/SoC_sim
+ directory for the complete list.
+ 0x2 = No TxEq training sequences are sent. Overrides bit\<4\>.
+ 0x3 = Enables bits\<0\> and \<1\> scale-down timing values. */
+ uint32_t ramclksel : 2; /**< [ 7: 6](R/W) RAM clock select. Always keep set to 0x0. */
+ uint32_t debugattach : 1; /**< [ 8: 8](R/W) Debug attach. When this bit is set:
+ * SuperSpeed link proceeds directly to the polling-link state (USBDRD()_UAHC_DCTL[RS] = 1)
+ without checking remote termination.
+ * Link LFPS polling timeout is infinite.
+ * Polling timeout during TS1 is infinite (in case link is waiting for TXEQ to finish). */
+ uint32_t u1u2timerscale : 1; /**< [ 9: 9](R/W) Disable U1/U2 timer scaledown. If set to 1, along with SCALEDOWN = 0x1, disables the scale
+ down of U1/U2 inactive timer values.
+ This is for simulation mode only. */
+ uint32_t sofitpsync : 1; /**< [ 10: 10](R/W) Synchronize ITP to reference clock. In host mode, if this bit is set to:
+ 0 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever
+ there is a SuperSpeed port that is not in Rx.Detect, SS.Disable, and U3 state.
+ 1 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever the
+ other non-SuperSpeed ports are not in suspended state.
+
+ This feature is useful because it saves power by suspending UTMI/ULPI when SuperSpeed only
+ is active and it helps resolve when the PHY does not transmit a host resume unless it is
+ placed in suspend state.
+ USBDRD()_UAHC_GUSB2PHYCFG()[SUSPHY] eventually decides to put the UTMI/ULPI PHY in to
+ suspend
+ state. In addition, when this bit is set to 1, the core generates ITP off of the REF_CLK-
+ based counter. Otherwise, ITP and SOF are generated off of UTMI/ULPI_CLK[0] based counter.
+
+ To program the reference clock period inside the core, refer to
+ USBDRD()_UAHC_GUCTL[REFCLKPER].
+
+ If you do not plan to ever use this feature or the
+ USBDRD()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL]
+ feature, the minimum frequency for the ref_clk can be as low as 32 KHz. You can connect
+ the
+ SUSPEND_CLK (as low as 32 KHz) to REF_CLK.
+
+ If you plan to enable hardware-based LPM (PORTPMSC[HLE] = 1), this feature cannot be used.
+ Turn off this feature by setting this bit to zero and use the
+ USBDRD()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL] feature.
+
+ If you set this bit to 1, the USBDRD()_UAHC_GUSB2PHYCFG()[U2_FREECLK_EXISTS] bit
+ must be set to 0. */
+ uint32_t coresoftreset : 1; /**< [ 11: 11](R/W) Core soft reset: 1 = soft reset to core, 0 = no soft reset.
+ Clears the interrupts and all the USBDRD()_UAHC_* CSRs except the
+ following registers: USBDRD()_UAHC_GCTL, USBDRD()_UAHC_GUCTL, USBDRD()_UAHC_GSTS,
+ USBDRD()_UAHC_GRLSID, USBDRD()_UAHC_GGPIO, USBDRD()_UAHC_GUID,
+ USBDRD()_UAHC_GUSB2PHYCFG(),
+ USBDRD()_UAHC_GUSB3PIPECTL().
+
+ When you reset PHYs (using USBDRD()_UAHC_GUSB2PHYCFG() or
+ USBDRD()_UAHC_GUSB3PIPECTL()), you must keep the core in reset state until PHY
+ clocks are stable. This controls the bus, RAM, and MAC domain resets.
+
+ Internal:
+ Refer to Reset Generation on Synopsys Databook page 250.
+ Under soft reset, accesses to USBDRD()_UAHC_* CSRs other than USBDRD()_UAHC_GCTL may fail
+ (timeout).
+ This bit is for debug purposes only. Use USBDRD()_UAHC_USBCMD[HCRST] for soft reset. */
+ uint32_t prtcapdir : 2; /**< [ 13: 12](R/W) 0x1 = for Host configurations.
+ 0x2 = for Device configurations. */
+ uint32_t frmscldwn : 2; /**< [ 15: 14](R/W) Frame scale down. Scales down device view of a SOF/USOF/ITP duration.
+ For SuperSpeed/high-speed mode:
+ 0x0 = Interval is 125 us.
+ 0x1 = Interval is 62.5 us.
+ 0x2 = Interval is 31.25 us.
+ 0x3 = Interval is 15.625 us.
+
+ For full speed mode, the scale down value is multiplied by 8. */
+ uint32_t u2rstecn : 1; /**< [ 16: 16](R/W) If the SuperSpeed connection fails during POLL or LMP exchange, the device connects
+ at non-SuperSpeed mode. If this bit is set, then the device attempts three more times to
+ connect at SuperSpeed, even if it previously failed to operate in SuperSpeed mode.
+ This bit is only applicable in device mode. */
+ uint32_t bypssetaddr : 1; /**< [ 17: 17](R/W) Bypass SetAddress in device mode.
+ Always set to 0.
+
+ Internal:
+ When set, core uses the value in USBDRD()_UAHC_DCFG[DEVADDR] directly
+ for comparing the device address tokens. In simulation, this can be used to avoid
+ sending a SET_ADDRESS command. */
+ uint32_t masterfiltbypass : 1; /**< [ 18: 18](R/W) Master filter bypass. Not relevant for Cavium's configuration. */
+ uint32_t pwrdnscale : 13; /**< [ 31: 19](R/W) Power down scale. The USB3 suspend-clock input replaces pipe3_rx_pclk as a clock source to
+ a small part of the USB3 core that operates when the SuperSpeed PHY is in its lowest power
+ (P3) state, and therefore does not provide a clock. This field specifies how many suspend-
+ clock periods fit into a 16 kHz clock period. When performing the division, round up the
+ remainder.
+
+ For example, when using an 32-bit PHY and 25-MHz suspend clock, PWRDNSCALE = 25000 kHz/16
+ kHz = 1563 (rounded up).
+
+ The minimum suspend-clock frequency is 32 KHz, and maximum suspend-clock frequency is 125
+ MHz.
+
+ The LTSSM uses suspend clock for 12-ms and 100-ms timers during suspend mode. According to
+ the USB 3.0 specification, the accuracy on these timers is 0% to +50%. 12 ms + 0~+50%
+ accuracy = 18 ms (Range is 12 ms - 18 ms)
+ 100 ms + 0~+50% accuracy = 150 ms (Range is 100 ms - 150 ms).
+
+ The suspend clock accuracy requirement is:
+ _ (12,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 12,000 and
+ 18,000
+ _ (100,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 100,000 and
+ 150,000
+
+ For example, if your suspend_clk frequency varies from 7.5 MHz to 10.5 MHz, then the value
+ needs to programmed is: power down scale = 10500/16 = 657 (rounded up; and fastest
+ frequency used). */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbdrdx_uahc_gctl_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t pwrdnscale : 13; /**< [ 31: 19](R/W) Power down scale. The USB3 suspend-clock input replaces pipe3_rx_pclk as a clock source to
+ a small part of the USB3 core that operates when the SuperSpeed PHY is in its lowest power
+ (P3) state, and therefore does not provide a clock. This field specifies how many suspend-
+ clock periods fit into a 16 kHz clock period. When performing the division, round up the
+ remainder.
+
+ For example, when using an 32-bit PHY and 25-MHz suspend clock, PWRDNSCALE = 25000 kHz/16
+ kHz = 1563 (rounded up).
+
+ The minimum suspend-clock frequency is 32 KHz, and maximum suspend-clock frequency is 125
+ MHz.
+
+ The LTSSM uses suspend clock for 12-ms and 100-ms timers during suspend mode. According to
+ the USB 3.0 specification, the accuracy on these timers is 0% to +50%. 12 ms + 0~+50%
+ accuracy = 18 ms (Range is 12 ms - 18 ms)
+ 100 ms + 0~+50% accuracy = 150 ms (Range is 100 ms - 150 ms).
+
+ The suspend clock accuracy requirement is:
+ _ (12,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 12,000 and
+ 18,000
+ _ (100,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 100,000 and
+ 150,000
+
+ For example, if your suspend_clk frequency varies from 7.5 MHz to 10.5 MHz, then the value
+ needs to programmed is: power down scale = 10500/16 = 657 (rounded up; and fastest
+ frequency used). */
+ uint32_t masterfiltbypass : 1; /**< [ 18: 18](R/W) Master filter bypass. Not relevant for Cavium's configuration. */
+ uint32_t bypssetaddr : 1; /**< [ 17: 17](R/W) Bypass SetAddress in device mode.
+ Always set to 0.
+
+ Internal:
+ When set, core uses the value in USBDRD()_UAHC_DCFG[DEVADDR] directly
+ for comparing the device address tokens. In simulation, this can be used to avoid
+ sending a SET_ADDRESS command. */
+ uint32_t u2rstecn : 1; /**< [ 16: 16](R/W) If the SuperSpeed connection fails during POLL or LMP exchange, the device connects
+ at non-SuperSpeed mode. If this bit is set, then the device attempts three more times to
+ connect at SuperSpeed, even if it previously failed to operate in SuperSpeed mode.
+ This bit is only applicable in device mode. */
+ uint32_t frmscldwn : 2; /**< [ 15: 14](R/W) Frame scale down. Scales down device view of a SOF/USOF/ITP duration.
+ For SuperSpeed/high-speed mode:
+ 0x0 = Interval is 125 us.
+ 0x1 = Interval is 62.5 us.
+ 0x2 = Interval is 31.25 us.
+ 0x3 = Interval is 15.625 us.
+
+ For full speed mode, the scale down value is multiplied by 8. */
+ uint32_t prtcapdir : 2; /**< [ 13: 12](R/W) 0x1 = for Host configurations.
+ 0x2 = for Device configurations. */
+ uint32_t coresoftreset : 1; /**< [ 11: 11](R/W) Core soft reset: 1 = soft reset to core, 0 = no soft reset.
+ Clears the interrupts and all the USBDRD()_UAHC_* CSRs except the
+ following registers: USBDRD()_UAHC_GCTL, USBDRD()_UAHC_GUCTL, USBDRD()_UAHC_GSTS,
+ USBDRD()_UAHC_GRLSID, USBDRD()_UAHC_GGPIO, USBDRD()_UAHC_GUID,
+ USBDRD()_UAHC_GUSB2PHYCFG(),
+ USBDRD()_UAHC_GUSB3PIPECTL().
+
+ When you reset PHYs (using USBDRD()_UAHC_GUSB2PHYCFG() or
+ USBDRD()_UAHC_GUSB3PIPECTL()), you must keep the core in reset state until PHY
+ clocks are stable. This controls the bus, RAM, and MAC domain resets.
+
+ Internal:
+ Refer to Reset Generation on Synopsys Databook page 250.
+ Under soft reset, accesses to USBDRD()_UAHC_* CSRs other than USBDRD()_UAHC_GCTL may fail
+ (timeout).
+ This bit is for debug purposes only. Use USBDRD()_UAHC_USBCMD[HCRST] for soft reset. */
+ uint32_t sofitpsync : 1; /**< [ 10: 10](R/W) Synchronize ITP to reference clock. In host mode, if this bit is set to:
+ 0 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever
+ there is a SuperSpeed port that is not in Rx.Detect, SS.Disable, and U3 state.
+ 1 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever the
+ other non-SuperSpeed ports are not in suspended state.
+
+ This feature is useful because it saves power by suspending UTMI/ULPI when SuperSpeed only
+ is active and it helps resolve when the PHY does not transmit a host resume unless it is
+ placed in suspend state.
+ USBDRD()_UAHC_GUSB2PHYCFG()[SUSPHY] eventually decides to put the UTMI/ULPI PHY in to
+ suspend
+ state. In addition, when this bit is set to 1, the core generates ITP off of the REF_CLK-
+ based counter. Otherwise, ITP and SOF are generated off of UTMI/ULPI_CLK[0] based counter.
+
+ To program the reference clock period inside the core, refer to
+ USBDRD()_UAHC_GUCTL[REFCLKPER].
+
+ If you do not plan to ever use this feature or the
+ USBDRD()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL]
+ feature, the minimum frequency for the ref_clk can be as low as 32 KHz. You can connect
+ the
+ SUSPEND_CLK (as low as 32 KHz) to REF_CLK.
+
+ If you plan to enable hardware-based LPM (PORTPMSC[HLE] = 1), this feature cannot be used.
+ Turn off this feature by setting this bit to zero and use the
+ USBDRD()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL] feature.
+
+ If you set this bit to 1, the USBDRD()_UAHC_GUSB2PHYCFG()[U2_FREECLK_EXISTS] bit
+ must be set to 0. */
+ uint32_t u1u2timerscale : 1; /**< [ 9: 9](R/W) Disable U1/U2 timer scaledown. If set to 1, along with SCALEDOWN = 0x1, disables the scale
+ down of U1/U2 inactive timer values.
+ This is for simulation mode only. */
+ uint32_t debugattach : 1; /**< [ 8: 8](R/W) Debug attach. When this bit is set:
+ * SuperSpeed link proceeds directly to the polling-link state (USBDRD()_UAHC_DCTL[RS] = 1)
+ without checking remote termination.
+ * Link LFPS polling timeout is infinite.
+ * Polling timeout during TS1 is infinite (in case link is waiting for TXEQ to finish). */
+ uint32_t ramclksel : 2; /**< [ 7: 6](R/W) RAM clock select. Always keep set to 0x0. */
+ uint32_t scaledown : 2; /**< [ 5: 4](R/W) Scale-down mode. When scale-down mode is enabled for simulation, the core uses scaled-down
+ timing values, resulting in faster simulations. When scale-down mode is disabled, actual
+ timing values are used. This is required for hardware operation.
+
+ High-speed/full-speed/low-speed modes:
+ 0x0 = Disables all scale-downs. Actual timing values are used.
+ 0x1 = Enables scale-down of all timing values. These include:
+ * Speed enumeration.
+ * HNP/SRP.
+ * Suspend and resume.
+
+ 0x2 = N/A
+ 0x3 = Enables bits \<0\> and \<1\> scale-down timing values.
+
+ SuperSpeed mode:
+ 0x0 = Disables all scale-downs. Actual timing values are used.
+ 0x1 = Enables scaled down SuperSpeed timing and repeat values including:
+ * Number of TxEq training sequences reduce to eight.
+ * LFPS polling burst time reduce to 100 ns.
+ * LFPS warm reset receive reduce to 30 us.
+
+ Internal:
+ Refer to the rtl_vip_scaledown_mapping.xls file under \<workspace\>/sim/SoC_sim
+ directory for the complete list.
+ 0x2 = No TxEq training sequences are sent. Overrides bit\<4\>.
+ 0x3 = Enables bits\<0\> and \<1\> scale-down timing values. */
+ uint32_t disscramble : 1; /**< [ 3: 3](R/W) Disable scrambling. Transmit request to link partner on next transition to recovery or polling. */
+ uint32_t u2exit_lfps : 1; /**< [ 2: 2](R/W) LFPS U2 exit.
+ 0 = The link treats 248 ns LFPS as a valid U2 exit.
+ 1 = The link waits for 8 us of LFPS before it detects a valid U2 exit.
+
+ This bit is added to improve interoperability with a third party host controller. This
+ host controller in U2 state while performing receiver detection generates an LFPS glitch
+ of about 4s duration. This causes the device to exit from U2 state because the LFPS filter
+ value is 248 ns. With the new functionality enabled, the device can stay in U2 while
+ ignoring this glitch from the host controller. */
+ uint32_t reserved_1 : 1;
+ uint32_t dsblclkgtng : 1; /**< [ 0: 0](R/W) Disable clock gating. When set to 1 and the core is in low power mode, internal clock
+ gating is disabled, which means the clocks are always running. This bit can be set to 1
+ after power-up reset. */
+#else /* Word 0 - Little Endian */
+ uint32_t dsblclkgtng : 1; /**< [ 0: 0](R/W) Disable clock gating. When set to 1 and the core is in low power mode, internal clock
+ gating is disabled, which means the clocks are always running. This bit can be set to 1
+ after power-up reset. */
+ uint32_t reserved_1 : 1;
+ uint32_t u2exit_lfps : 1; /**< [ 2: 2](R/W) LFPS U2 exit.
+ 0 = The link treats 248 ns LFPS as a valid U2 exit.
+ 1 = The link waits for 8 us of LFPS before it detects a valid U2 exit.
+
+ This bit is added to improve interoperability with a third party host controller. This
+ host controller in U2 state while performing receiver detection generates an LFPS glitch
+ of about 4s duration. This causes the device to exit from U2 state because the LFPS filter
+ value is 248 ns. With the new functionality enabled, the device can stay in U2 while
+ ignoring this glitch from the host controller. */
+ uint32_t disscramble : 1; /**< [ 3: 3](R/W) Disable scrambling. Transmit request to link partner on next transition to recovery or polling. */
+ uint32_t scaledown : 2; /**< [ 5: 4](R/W) Scale-down mode. When scale-down mode is enabled for simulation, the core uses scaled-down
+ timing values, resulting in faster simulations. When scale-down mode is disabled, actual
+ timing values are used. This is required for hardware operation.
+
+ High-speed/full-speed/low-speed modes:
+ 0x0 = Disables all scale-downs. Actual timing values are used.
+ 0x1 = Enables scale-down of all timing values. These include:
+ * Speed enumeration.
+ * HNP/SRP.
+ * Suspend and resume.
+
+ 0x2 = N/A
+ 0x3 = Enables bits \<0\> and \<1\> scale-down timing values.
+
+ SuperSpeed mode:
+ 0x0 = Disables all scale-downs. Actual timing values are used.
+ 0x1 = Enables scaled down SuperSpeed timing and repeat values including:
+ * Number of TxEq training sequences reduce to eight.
+ * LFPS polling burst time reduce to 100 ns.
+ * LFPS warm reset receive reduce to 30 us.
+
+ Internal:
+ Refer to the rtl_vip_scaledown_mapping.xls file under \<workspace\>/sim/SoC_sim
+ directory for the complete list.
+ 0x2 = No TxEq training sequences are sent. Overrides bit\<4\>.
+ 0x3 = Enables bits\<0\> and \<1\> scale-down timing values. */
+ uint32_t ramclksel : 2; /**< [ 7: 6](R/W) RAM clock select. Always keep set to 0x0. */
+ uint32_t debugattach : 1; /**< [ 8: 8](R/W) Debug attach. When this bit is set:
+ * SuperSpeed link proceeds directly to the polling-link state (USBDRD()_UAHC_DCTL[RS] = 1)
+ without checking remote termination.
+ * Link LFPS polling timeout is infinite.
+ * Polling timeout during TS1 is infinite (in case link is waiting for TXEQ to finish). */
+ uint32_t u1u2timerscale : 1; /**< [ 9: 9](R/W) Disable U1/U2 timer scaledown. If set to 1, along with SCALEDOWN = 0x1, disables the scale
+ down of U1/U2 inactive timer values.
+ This is for simulation mode only. */
+ uint32_t sofitpsync : 1; /**< [ 10: 10](R/W) Synchronize ITP to reference clock. In host mode, if this bit is set to:
+ 0 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever
+ there is a SuperSpeed port that is not in Rx.Detect, SS.Disable, and U3 state.
+ 1 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever the
+ other non-SuperSpeed ports are not in suspended state.
+
+ This feature is useful because it saves power by suspending UTMI/ULPI when SuperSpeed only
+ is active and it helps resolve when the PHY does not transmit a host resume unless it is
+ placed in suspend state.
+ USBDRD()_UAHC_GUSB2PHYCFG()[SUSPHY] eventually decides to put the UTMI/ULPI PHY in to
+ suspend
+ state. In addition, when this bit is set to 1, the core generates ITP off of the REF_CLK-
+ based counter. Otherwise, ITP and SOF are generated off of UTMI/ULPI_CLK[0] based counter.
+
+ To program the reference clock period inside the core, refer to
+ USBDRD()_UAHC_GUCTL[REFCLKPER].
+
+ If you do not plan to ever use this feature or the
+ USBDRD()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL]
+ feature, the minimum frequency for the ref_clk can be as low as 32 KHz. You can connect
+ the
+ SUSPEND_CLK (as low as 32 KHz) to REF_CLK.
+
+ If you plan to enable hardware-based LPM (PORTPMSC[HLE] = 1), this feature cannot be used.
+ Turn off this feature by setting this bit to zero and use the
+ USBDRD()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL] feature.
+
+ If you set this bit to 1, the USBDRD()_UAHC_GUSB2PHYCFG()[U2_FREECLK_EXISTS] bit
+ must be set to 0. */
+ uint32_t coresoftreset : 1; /**< [ 11: 11](R/W) Core soft reset: 1 = soft reset to core, 0 = no soft reset.
+ Clears the interrupts and all the USBDRD()_UAHC_* CSRs except the
+ following registers: USBDRD()_UAHC_GCTL, USBDRD()_UAHC_GUCTL, USBDRD()_UAHC_GSTS,
+ USBDRD()_UAHC_GRLSID, USBDRD()_UAHC_GGPIO, USBDRD()_UAHC_GUID,
+ USBDRD()_UAHC_GUSB2PHYCFG(),
+ USBDRD()_UAHC_GUSB3PIPECTL().
+
+ When you reset PHYs (using USBDRD()_UAHC_GUSB2PHYCFG() or
+ USBDRD()_UAHC_GUSB3PIPECTL()), you must keep the core in reset state until PHY
+ clocks are stable. This controls the bus, RAM, and MAC domain resets.
+
+ Internal:
+ Refer to Reset Generation on Synopsys Databook page 250.
+ Under soft reset, accesses to USBDRD()_UAHC_* CSRs other than USBDRD()_UAHC_GCTL may fail
+ (timeout).
+ This bit is for debug purposes only. Use USBDRD()_UAHC_USBCMD[HCRST] for soft reset. */
+ uint32_t prtcapdir : 2; /**< [ 13: 12](R/W) 0x1 = for Host configurations.
+ 0x2 = for Device configurations. */
+ uint32_t frmscldwn : 2; /**< [ 15: 14](R/W) Frame scale down. Scales down device view of a SOF/USOF/ITP duration.
+ For SuperSpeed/high-speed mode:
+ 0x0 = Interval is 125 us.
+ 0x1 = Interval is 62.5 us.
+ 0x2 = Interval is 31.25 us.
+ 0x3 = Interval is 15.625 us.
+
+ For full speed mode, the scale down value is multiplied by 8. */
+ uint32_t u2rstecn : 1; /**< [ 16: 16](R/W) If the SuperSpeed connection fails during POLL or LMP exchange, the device connects
+ at non-SuperSpeed mode. If this bit is set, then the device attempts three more times to
+ connect at SuperSpeed, even if it previously failed to operate in SuperSpeed mode.
+ This bit is only applicable in device mode. */
+ uint32_t bypssetaddr : 1; /**< [ 17: 17](R/W) Bypass SetAddress in device mode.
+ Always set to 0.
+
+ Internal:
+ When set, core uses the value in USBDRD()_UAHC_DCFG[DEVADDR] directly
+ for comparing the device address tokens. In simulation, this can be used to avoid
+ sending a SET_ADDRESS command. */
+ uint32_t masterfiltbypass : 1; /**< [ 18: 18](R/W) Master filter bypass. Not relevant for Cavium's configuration. */
+ uint32_t pwrdnscale : 13; /**< [ 31: 19](R/W) Power down scale. The USB3 suspend-clock input replaces pipe3_rx_pclk as a clock source to
+ a small part of the USB3 core that operates when the SuperSpeed PHY is in its lowest power
+ (P3) state, and therefore does not provide a clock. This field specifies how many suspend-
+ clock periods fit into a 16 kHz clock period. When performing the division, round up the
+ remainder.
+
+ For example, when using an 32-bit PHY and 25-MHz suspend clock, PWRDNSCALE = 25000 kHz/16
+ kHz = 1563 (rounded up).
+
+ The minimum suspend-clock frequency is 32 KHz, and maximum suspend-clock frequency is 125
+ MHz.
+
+ The LTSSM uses suspend clock for 12-ms and 100-ms timers during suspend mode. According to
+ the USB 3.0 specification, the accuracy on these timers is 0% to +50%. 12 ms + 0~+50%
+ accuracy = 18 ms (Range is 12 ms - 18 ms)
+ 100 ms + 0~+50% accuracy = 150 ms (Range is 100 ms - 150 ms).
+
+ The suspend clock accuracy requirement is:
+ _ (12,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 12,000 and
+ 18,000
+ _ (100,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 100,000 and
+ 150,000
+
+ For example, if your suspend_clk frequency varies from 7.5 MHz to 10.5 MHz, then the value
+ needs to programmed is: power down scale = 10500/16 = 657 (rounded up; and fastest
+ frequency used). */
+#endif /* Word 0 - End */
+ } cn8;
+ /* struct bdk_usbdrdx_uahc_gctl_s cn9; */
+};
+typedef union bdk_usbdrdx_uahc_gctl bdk_usbdrdx_uahc_gctl_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GCTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GCTL(unsigned long a)
+{
+ /* The GCTL CSR sits at the same NCB address on every supported part
+    (CN81XX, CN83XX, CN9XXX); index [a] selects one of two controllers. */
+ int model_ok = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+               CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+               CAVIUM_IS_MODEL(CAVIUM_CN9XXX);
+ if (model_ok && (a <= 1))
+ return 0x86800000c110ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GCTL", 1, a, 0, 0, 0);
+}
+
+/* Per-CSR accessor metadata (C type, bus type, printable name, BAR, bus
+   number, argument list) — presumably consumed by the generic BDK_CSR_*
+   access macros; confirm against bdk-csr.h. */
+#define typedef_BDK_USBDRDX_UAHC_GCTL(a) bdk_usbdrdx_uahc_gctl_t
+#define bustype_BDK_USBDRDX_UAHC_GCTL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GCTL(a) "USBDRDX_UAHC_GCTL"
+#define device_bar_BDK_USBDRDX_UAHC_GCTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GCTL(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GCTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gdbgbmu
+ *
+ * USB UAHC BMU Debug Register
+ * See description in USBDRD()_UAHC_GDBGFIFOSPACE.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.30
+ */
+union bdk_usbdrdx_uahc_gdbgbmu
+{
+ /* [U] is the raw 32-bit register word; [S] decodes the BCU/DCU/CCU
+    debug fields.  Bitfield order is reversed per endianness so the
+    hardware bit positions stay fixed. */
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gdbgbmu_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bmu_bcu_dbg : 24; /**< [ 31: 8](RO/H) BMU_BCU debug information. */
+ uint32_t bmu_dcu_dbg : 4; /**< [ 7: 4](RO/H) BMU_DCU debug information. */
+ uint32_t bmu_ccu_dbg : 4; /**< [ 3: 0](RO/H) BMU_CCU debug information. */
+#else /* Word 0 - Little Endian */
+ uint32_t bmu_ccu_dbg : 4; /**< [ 3: 0](RO/H) BMU_CCU debug information. */
+ uint32_t bmu_dcu_dbg : 4; /**< [ 7: 4](RO/H) BMU_DCU debug information. */
+ uint32_t bmu_bcu_dbg : 24; /**< [ 31: 8](RO/H) BMU_BCU debug information. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gdbgbmu_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gdbgbmu bdk_usbdrdx_uahc_gdbgbmu_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGBMU(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGBMU(unsigned long a)
+{
+ /* Address map is identical on CN81XX, CN83XX and CN9XXX. */
+ int supported = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                CAVIUM_IS_MODEL(CAVIUM_CN9XXX);
+ if (supported && (a <= 1))
+ return 0x86800000c16cll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GDBGBMU", 1, a, 0, 0, 0);
+}
+
+/* Per-CSR accessor metadata for USBDRDX_UAHC_GDBGBMU — presumably consumed
+   by the generic BDK_CSR_* access macros; confirm against bdk-csr.h. */
+#define typedef_BDK_USBDRDX_UAHC_GDBGBMU(a) bdk_usbdrdx_uahc_gdbgbmu_t
+#define bustype_BDK_USBDRDX_UAHC_GDBGBMU(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GDBGBMU(a) "USBDRDX_UAHC_GDBGBMU"
+#define device_bar_BDK_USBDRDX_UAHC_GDBGBMU(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GDBGBMU(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GDBGBMU(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_gdbgepinfo
+ *
+ * USB UAHC Endpoint Information Debug Register
+ * See description in USBDRD()_UAHC_GDBGFIFOSPACE.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ */
+union bdk_usbdrdx_uahc_gdbgepinfo
+{
+ /* Single full-width 64-bit field, so the layout is the same for both
+    endiannesses (both #if branches below are identical). */
+ uint64_t u;
+ struct bdk_usbdrdx_uahc_gdbgepinfo_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t endpt_dbg : 64; /**< [ 63: 0](RO/H) Endpoint debug information. */
+#else /* Word 0 - Little Endian */
+ uint64_t endpt_dbg : 64; /**< [ 63: 0](RO/H) Endpoint debug information. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gdbgepinfo_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gdbgepinfo bdk_usbdrdx_uahc_gdbgepinfo_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGEPINFO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGEPINFO(unsigned long a)
+{
+ /* Same CSR address on all three supported model families. */
+ int supported = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                CAVIUM_IS_MODEL(CAVIUM_CN9XXX);
+ if (supported && (a <= 1))
+ return 0x86800000c178ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GDBGEPINFO", 1, a, 0, 0, 0);
+}
+
+/* Per-CSR accessor metadata for USBDRDX_UAHC_GDBGEPINFO — note the 64-bit
+   NCB bus type (not NCB32b) matching the uint64_t register width above.
+   Presumably consumed by the generic BDK_CSR_* macros; confirm in bdk-csr.h. */
+#define typedef_BDK_USBDRDX_UAHC_GDBGEPINFO(a) bdk_usbdrdx_uahc_gdbgepinfo_t
+#define bustype_BDK_USBDRDX_UAHC_GDBGEPINFO(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_GDBGEPINFO(a) "USBDRDX_UAHC_GDBGEPINFO"
+#define device_bar_BDK_USBDRDX_UAHC_GDBGEPINFO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GDBGEPINFO(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GDBGEPINFO(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gdbgfifospace
+ *
+ * USB UAHC Debug FIFO Space Available Register
+ * This register is for debug purposes. It provides debug information on the internal status and
+ * state machines. Global debug registers have design-specific information, and are used for
+ * debugging purposes. These registers are not intended to be used by the customer. If any debug assistance
+ * is needed for the silicon, contact customer support with a dump of these registers.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.27
+ * INTERNAL: Contact Synopsys directly.
+ */
+union bdk_usbdrdx_uahc_gdbgfifospace
+{
+ /* [U] is the raw 32-bit register word; [S] decodes the FIFO-select and
+    space-available fields.  [SELECT] also doubles as the port selector
+    for USBDRD()_UAHC_GDBGLTSSM reads (see field description). */
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gdbgfifospace_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t spaceavailable : 16; /**< [ 31: 16](RO/H) Space available in the selected FIFO. */
+ uint32_t reserved_9_15 : 7;
+ uint32_t select : 9; /**< [ 8: 0](R/W) FIFO/queue select/port-select.
+ FIFO/queue select: \<7:5\> indicates the FIFO/queue type; \<4:0\> indicates the FIFO/queue
+ number.
+ For example, 0x21 refers to RxFIFO_1, and 0x5E refers to TxReqQ_30.
+ 0x1F-0x0: TxFIFO_31 to TxFIFO_0.
+ 0x3F-0x20: RxFIFO_31 to RxFIFO_0.
+ 0x5F-0x40: TxReqQ_31 to TxReqQ_0.
+ 0x7F-0x60: RxReqQ_31 to RxReqQ_0.
+ 0x9F-0x80: RxInfoQ_31 to RxInfoQ_0.
+ 0xA0: DescFetchQ.
+ 0xA1: EventQ.
+ 0xA2: ProtocolStatusQ.
+
+ Port-select: \<3:0\> selects the port-number when accessing USBDRD()_UAHC_GDBGLTSSM. */
+#else /* Word 0 - Little Endian */
+ uint32_t select : 9; /**< [ 8: 0](R/W) FIFO/queue select/port-select.
+ FIFO/queue select: \<7:5\> indicates the FIFO/queue type; \<4:0\> indicates the FIFO/queue
+ number.
+ For example, 0x21 refers to RxFIFO_1, and 0x5E refers to TxReqQ_30.
+ 0x1F-0x0: TxFIFO_31 to TxFIFO_0.
+ 0x3F-0x20: RxFIFO_31 to RxFIFO_0.
+ 0x5F-0x40: TxReqQ_31 to TxReqQ_0.
+ 0x7F-0x60: RxReqQ_31 to RxReqQ_0.
+ 0x9F-0x80: RxInfoQ_31 to RxInfoQ_0.
+ 0xA0: DescFetchQ.
+ 0xA1: EventQ.
+ 0xA2: ProtocolStatusQ.
+
+ Port-select: \<3:0\> selects the port-number when accessing USBDRD()_UAHC_GDBGLTSSM. */
+ uint32_t reserved_9_15 : 7;
+ uint32_t spaceavailable : 16; /**< [ 31: 16](RO/H) Space available in the selected FIFO. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gdbgfifospace_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gdbgfifospace bdk_usbdrdx_uahc_gdbgfifospace_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGFIFOSPACE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGFIFOSPACE(unsigned long a)
+{
+ /* One shared address formula for CN81XX/CN83XX/CN9XXX; [a] picks the
+    controller instance (0 or 1). */
+ int supported = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                CAVIUM_IS_MODEL(CAVIUM_CN9XXX);
+ if (supported && (a <= 1))
+ return 0x86800000c160ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GDBGFIFOSPACE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GDBGFIFOSPACE(a) bdk_usbdrdx_uahc_gdbgfifospace_t
+#define bustype_BDK_USBDRDX_UAHC_GDBGFIFOSPACE(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GDBGFIFOSPACE(a) "USBDRDX_UAHC_GDBGFIFOSPACE"
+#define device_bar_BDK_USBDRDX_UAHC_GDBGFIFOSPACE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GDBGFIFOSPACE(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GDBGFIFOSPACE(a) (a),-1,-1,-1
+
/**
 * Register (NCB32b) usbdrd#_uahc_gdbglnmcc
 *
 * USB UAHC LNMCC Debug Register
 * See description in USBDRD()_UAHC_GDBGFIFOSPACE.
 *
 * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
 *
 * Internal:
 * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.29
 */
union bdk_usbdrdx_uahc_gdbglnmcc
{
    uint32_t u; /* Whole-register (32-bit) view. */
    struct bdk_usbdrdx_uahc_gdbglnmcc_s
    {
        /* The same fields appear in both halves of the #if below, in reverse
           order, so bit positions match regardless of host endianness. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_9_31 : 23;
        uint32_t lnmcc_berc : 9; /**< [ 8: 0](RO/H) This field indicates the bit-error-rate information for the port selected in
                                      USBDRD()_UAHC_GDBGFIFOSPACE[SELECT] (port-select).
                                      This field is for debug purposes only. */
#else /* Word 0 - Little Endian */
        uint32_t lnmcc_berc : 9; /**< [ 8: 0](RO/H) This field indicates the bit-error-rate information for the port selected in
                                      USBDRD()_UAHC_GDBGFIFOSPACE[SELECT] (port-select).
                                      This field is for debug purposes only. */
        uint32_t reserved_9_31 : 23;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbdrdx_uahc_gdbglnmcc_s cn; */
};
typedef union bdk_usbdrdx_uahc_gdbglnmcc bdk_usbdrdx_uahc_gdbglnmcc_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGLNMCC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGLNMCC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c168ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c168ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c168ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GDBGLNMCC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GDBGLNMCC(a) bdk_usbdrdx_uahc_gdbglnmcc_t
+#define bustype_BDK_USBDRDX_UAHC_GDBGLNMCC(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GDBGLNMCC(a) "USBDRDX_UAHC_GDBGLNMCC"
+#define device_bar_BDK_USBDRDX_UAHC_GDBGLNMCC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GDBGLNMCC(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GDBGLNMCC(a) (a),-1,-1,-1
+
/**
 * Register (NCB32b) usbdrd#_uahc_gdbglsp
 *
 * USB UAHC LSP Debug Register
 * See description in USBDRD()_UAHC_GDBGFIFOSPACE.
 *
 * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
 */
union bdk_usbdrdx_uahc_gdbglsp
{
    uint32_t u; /* Whole-register (32-bit) view. */
    struct bdk_usbdrdx_uahc_gdbglsp_s
    {
        /* Single full-width field, so the layout is the same either way;
           both #if branches are kept for consistency with the other CSRs. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t lsp_dbg : 32; /**< [ 31: 0](RO/H) LSP debug information. */
#else /* Word 0 - Little Endian */
        uint32_t lsp_dbg : 32; /**< [ 31: 0](RO/H) LSP debug information. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbdrdx_uahc_gdbglsp_s cn; */
};
typedef union bdk_usbdrdx_uahc_gdbglsp bdk_usbdrdx_uahc_gdbglsp_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGLSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGLSP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c174ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c174ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c174ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GDBGLSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GDBGLSP(a) bdk_usbdrdx_uahc_gdbglsp_t
+#define bustype_BDK_USBDRDX_UAHC_GDBGLSP(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GDBGLSP(a) "USBDRDX_UAHC_GDBGLSP"
+#define device_bar_BDK_USBDRDX_UAHC_GDBGLSP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GDBGLSP(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GDBGLSP(a) (a),-1,-1,-1
+
/**
 * Register (NCB32b) usbdrd#_uahc_gdbglspmux
 *
 * USB UAHC LSP Multiplexer Debug Register
 * See description in USBDRD()_UAHC_GDBGFIFOSPACE.
 *
 * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
 *
 * Internal:
 * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.21
 * INTERNAL: This register is for Synopsys internal use only.
 */
union bdk_usbdrdx_uahc_gdbglspmux
{
    uint32_t u; /* Whole-register (32-bit) view. */
    struct bdk_usbdrdx_uahc_gdbglspmux_s
    {
        /* The same fields appear in both halves of the #if below, in reverse
           order, so bit positions match regardless of host endianness. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_24_31 : 8;
        uint32_t latraceportmuxselect : 8; /**< [ 23: 16](R/W) logic_analyzer_trace port multiplexer select. Only bits\<21:16\> are used. For details on
                                                how the mux controls the debug traces, refer to the Verilog file.
                                                A value of 0x3F drives 0s on the logic_analyzer_trace signal. If you plan to OR (instead
                                                using a mux) this signal with other trace signals in your system to generate a common
                                                trace signal, you can use this feature. */
        uint32_t endbc : 1; /**< [ 15: 15](R/W) Enable debugging of the debug capability LSP. Use HOSTSELECT to select the DbC LSP debug
                                 information presented in the GDBGLSP register.

                                 Internal:
                                 Note this can only be used if DebugCapabaility was enabled at compile. */
        uint32_t reserved_14 : 1;
        uint32_t hostselect : 14; /**< [ 13: 0](R/W) Host select. Selects the LSP debug information presented in USBDRD()_UAHC_GDBGLSP. */
#else /* Word 0 - Little Endian */
        uint32_t hostselect : 14; /**< [ 13: 0](R/W) Host select. Selects the LSP debug information presented in USBDRD()_UAHC_GDBGLSP. */
        uint32_t reserved_14 : 1;
        uint32_t endbc : 1; /**< [ 15: 15](R/W) Enable debugging of the debug capability LSP. Use HOSTSELECT to select the DbC LSP debug
                                 information presented in the GDBGLSP register.

                                 Internal:
                                 Note this can only be used if DebugCapabaility was enabled at compile. */
        uint32_t latraceportmuxselect : 8; /**< [ 23: 16](R/W) logic_analyzer_trace port multiplexer select. Only bits\<21:16\> are used. For details on
                                                how the mux controls the debug traces, refer to the Verilog file.
                                                A value of 0x3F drives 0s on the logic_analyzer_trace signal. If you plan to OR (instead
                                                using a mux) this signal with other trace signals in your system to generate a common
                                                trace signal, you can use this feature. */
        uint32_t reserved_24_31 : 8;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbdrdx_uahc_gdbglspmux_s cn; */
};
typedef union bdk_usbdrdx_uahc_gdbglspmux bdk_usbdrdx_uahc_gdbglspmux_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGLSPMUX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGLSPMUX(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c170ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c170ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c170ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GDBGLSPMUX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GDBGLSPMUX(a) bdk_usbdrdx_uahc_gdbglspmux_t
+#define bustype_BDK_USBDRDX_UAHC_GDBGLSPMUX(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GDBGLSPMUX(a) "USBDRDX_UAHC_GDBGLSPMUX"
+#define device_bar_BDK_USBDRDX_UAHC_GDBGLSPMUX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GDBGLSPMUX(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GDBGLSPMUX(a) (a),-1,-1,-1
+
/**
 * Register (NCB32b) usbdrd#_uahc_gdbgltssm
 *
 * USB UAHC LTSSM Debug Register
 * In multiport host configuration, the port number is defined by
 * USBDRD()_UAHC_GDBGFIFOSPACE[SELECT]\<3:0\>. Value of this register may change immediately after
 * reset.
 * See description in USBDRD()_UAHC_GDBGFIFOSPACE.
 *
 * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
 *
 * Internal:
 * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.28
 */
union bdk_usbdrdx_uahc_gdbgltssm
{
    uint32_t u; /* Whole-register (32-bit) view. */
    struct bdk_usbdrdx_uahc_gdbgltssm_s
    {
        /* The same fields appear in both halves of the #if below, in reverse
           order, so bit positions match regardless of host endianness. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_27_31 : 5;
        uint32_t ltdbtimeout : 1; /**< [ 26: 26](RO/H) LTDB timeout. */
        uint32_t ltdblinkstate : 4; /**< [ 25: 22](RO/H) LTDB link state. */
        uint32_t ltdbsubstate : 4; /**< [ 21: 18](RO/H) LTDB substate. */
        uint32_t debugpipestatus : 18; /**< [ 17: 0](RO/H) Debug PIPE status.
                                            _ \<17\> Elastic buffer mode.
                                            _ \<16\> TX elec idle.
                                            _ \<15\> RX polarity.
                                            _ \<14\> TX Detect RX/loopback.
                                            _ \<13:11\> LTSSM PHY command state.
                                            _ 0x0 = PHY_IDLE (PHY command state is in IDLE. No PHY request is pending.)
                                            _ 0x1 = PHY_DET (Request to start receiver detection).
                                            _ 0x2 = PHY_DET_3 (Wait for Phy_Status (receiver detection)).
                                            _ 0x3 = PHY_PWR_DLY (delay Pipe3_PowerDown P0 -\> P1/P2/P3 request).
                                            _ 0x4 = PHY_PWR_A (delay for internal logic).
                                            _ 0x5 = PHY_PWR_B (wait for Phy_Status(Power-state change request)).

                                            _ \<10:9\> Power down.
                                            _ \<8\> RxEq train.
                                            _ \<7:6\> TX de-emphasis.
                                            _ \<5:3\> LTSSM clock state.
                                            _ 0x0 = CLK_NORM (PHY is in non-P3 state and PCLK is running).
                                            _ 0x1 = CLK_TO_P3 (P3 entry request to PHY).
                                            _ 0x2 = CLK_WAIT1 (wait for Phy_Status (P3 request)).
                                            _ 0x3 = CLK_P3 (PHY is in P3 and PCLK is not running).
                                            _ 0x4 = CLK_TO_P0 (P3 exit request to PHY).
                                            _ 0x5 = CLK_WAIT2 (Wait for Phy_Status (P3 exit request)).

                                            _ \<2\> TX swing.
                                            _ \<1\> RX termination.
                                            _ \<0\> TX 1s/0s. */
#else /* Word 0 - Little Endian */
        uint32_t debugpipestatus : 18; /**< [ 17: 0](RO/H) Debug PIPE status.
                                            _ \<17\> Elastic buffer mode.
                                            _ \<16\> TX elec idle.
                                            _ \<15\> RX polarity.
                                            _ \<14\> TX Detect RX/loopback.
                                            _ \<13:11\> LTSSM PHY command state.
                                            _ 0x0 = PHY_IDLE (PHY command state is in IDLE. No PHY request is pending.)
                                            _ 0x1 = PHY_DET (Request to start receiver detection).
                                            _ 0x2 = PHY_DET_3 (Wait for Phy_Status (receiver detection)).
                                            _ 0x3 = PHY_PWR_DLY (delay Pipe3_PowerDown P0 -\> P1/P2/P3 request).
                                            _ 0x4 = PHY_PWR_A (delay for internal logic).
                                            _ 0x5 = PHY_PWR_B (wait for Phy_Status(Power-state change request)).

                                            _ \<10:9\> Power down.
                                            _ \<8\> RxEq train.
                                            _ \<7:6\> TX de-emphasis.
                                            _ \<5:3\> LTSSM clock state.
                                            _ 0x0 = CLK_NORM (PHY is in non-P3 state and PCLK is running).
                                            _ 0x1 = CLK_TO_P3 (P3 entry request to PHY).
                                            _ 0x2 = CLK_WAIT1 (wait for Phy_Status (P3 request)).
                                            _ 0x3 = CLK_P3 (PHY is in P3 and PCLK is not running).
                                            _ 0x4 = CLK_TO_P0 (P3 exit request to PHY).
                                            _ 0x5 = CLK_WAIT2 (Wait for Phy_Status (P3 exit request)).

                                            _ \<2\> TX swing.
                                            _ \<1\> RX termination.
                                            _ \<0\> TX 1s/0s. */
        uint32_t ltdbsubstate : 4; /**< [ 21: 18](RO/H) LTDB substate. */
        uint32_t ltdblinkstate : 4; /**< [ 25: 22](RO/H) LTDB link state. */
        uint32_t ltdbtimeout : 1; /**< [ 26: 26](RO/H) LTDB timeout. */
        uint32_t reserved_27_31 : 5;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbdrdx_uahc_gdbgltssm_s cn; */
};
typedef union bdk_usbdrdx_uahc_gdbgltssm bdk_usbdrdx_uahc_gdbgltssm_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGLTSSM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GDBGLTSSM(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c164ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c164ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c164ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GDBGLTSSM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GDBGLTSSM(a) bdk_usbdrdx_uahc_gdbgltssm_t
+#define bustype_BDK_USBDRDX_UAHC_GDBGLTSSM(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GDBGLTSSM(a) "USBDRDX_UAHC_GDBGLTSSM"
+#define device_bar_BDK_USBDRDX_UAHC_GDBGLTSSM(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GDBGLTSSM(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GDBGLTSSM(a) (a),-1,-1,-1
+
/**
 * Register (NCB32b) usbdrd#_uahc_gdmahlratio
 *
 * USB UAHC DMA High/Low Ratio Register
 * This register specifies the relative priority of the SuperSpeed FIFOs with respect to the
 * high-speed/full-speed/low-speed FIFOs. The DMA arbiter prioritizes the high-speed/full-speed
 * /low-speed round-robin arbiter group every DMA high-low priority ratio grants as indicated in
 * the register separately for TX and RX.
 *
 * To illustrate, consider that all FIFOs are requesting access simultaneously, and the ratio is
 * 4. SuperSpeed gets priority for four packets, high-speed/full-speed/low-speed gets priority
 * for one packet, SuperSpeed gets priority for four packets, high-speed/full-speed/low-speed
 * gets priority for one packet, and so on.
 *
 * If FIFOs from both speed groups are not requesting access simultaneously then:
 * * If SuperSpeed got grants four out of the last four times, then high-speed/full-speed/
 * low-speed get the priority on any future request.
 * * If high-speed/full-speed/low-speed got the grant last time, SuperSpeed gets the priority on
 * the next request.
 *
 * If there is a valid request on either SuperSpeed or high-speed/full-speed/low-speed, a grant
 * is always awarded; there is no idle.
 *
 * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
 *
 * Internal:
 * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.63
 */
union bdk_usbdrdx_uahc_gdmahlratio
{
    uint32_t u; /* Whole-register (32-bit) view. */
    struct bdk_usbdrdx_uahc_gdmahlratio_s
    {
        /* The same fields appear in both halves of the #if below, in reverse
           order, so bit positions match regardless of host endianness. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_13_31 : 19;
        uint32_t rx_ratio : 5; /**< [ 12: 8](R/W) Speed ratio for RX arbitration. */
        uint32_t reserved_5_7 : 3;
        uint32_t tx_ratio : 5; /**< [ 4: 0](R/W) Speed ratio for TX arbitration. */
#else /* Word 0 - Little Endian */
        uint32_t tx_ratio : 5; /**< [ 4: 0](R/W) Speed ratio for TX arbitration. */
        uint32_t reserved_5_7 : 3;
        uint32_t rx_ratio : 5; /**< [ 12: 8](R/W) Speed ratio for RX arbitration. */
        uint32_t reserved_13_31 : 19;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbdrdx_uahc_gdmahlratio_s cn; */
};
typedef union bdk_usbdrdx_uahc_gdmahlratio bdk_usbdrdx_uahc_gdmahlratio_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GDMAHLRATIO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GDMAHLRATIO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c624ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c624ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c624ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GDMAHLRATIO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GDMAHLRATIO(a) bdk_usbdrdx_uahc_gdmahlratio_t
+#define bustype_BDK_USBDRDX_UAHC_GDMAHLRATIO(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GDMAHLRATIO(a) "USBDRDX_UAHC_GDMAHLRATIO"
+#define device_bar_BDK_USBDRDX_UAHC_GDMAHLRATIO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GDMAHLRATIO(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GDMAHLRATIO(a) (a),-1,-1,-1
+
/**
 * Register (NCB) usbdrd#_uahc_gevntadr#
 *
 * USB UAHC Global Event Buffer Address Register
 * This register holds the event buffer DMA address pointer. Software must initialize this
 * address once during power-on initialization. Software must not change the value of this
 * register after it is initialized.
 * Software must only use the GEVNTCOUNTn register for event processing. The lower n bits of the
 * address must be USBDRD()_UAHC_GEVNTSIZ()[EVNTSIZ]-aligned.
 *
 * This register can be reset by IOI reset,
 * or USBDRD()_UCTL_CTL[UAHC_RST],
 * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
 * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
 *
 * Internal:
 * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.53.
 */
union bdk_usbdrdx_uahc_gevntadrx
{
    uint64_t u; /* Whole-register (64-bit) view. */
    struct bdk_usbdrdx_uahc_gevntadrx_s
    {
        /* Single full-width field, so the layout is the same either way;
           both #if branches are kept for consistency with the other CSRs. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t evntadr : 64; /**< [ 63: 0](R/W/H) Holds the start address of the external memory
                                    for the event buffer. During operation, hardware does not update
                                    this address. */
#else /* Word 0 - Little Endian */
        uint64_t evntadr : 64; /**< [ 63: 0](R/W/H) Holds the start address of the external memory
                                    for the event buffer. During operation, hardware does not update
                                    this address. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbdrdx_uahc_gevntadrx_s cn; */
};
typedef union bdk_usbdrdx_uahc_gevntadrx bdk_usbdrdx_uahc_gevntadrx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GEVNTADRX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GEVNTADRX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x86800000c400ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x86800000c400ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+ return 0x86800000c400ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBDRDX_UAHC_GEVNTADRX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GEVNTADRX(a,b) bdk_usbdrdx_uahc_gevntadrx_t
+#define bustype_BDK_USBDRDX_UAHC_GEVNTADRX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_GEVNTADRX(a,b) "USBDRDX_UAHC_GEVNTADRX"
+#define device_bar_BDK_USBDRDX_UAHC_GEVNTADRX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GEVNTADRX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_GEVNTADRX(a,b) (a),(b),-1,-1
+
/**
 * Register (NCB32b) usbdrd#_uahc_gevntcount#
 *
 * USB UAHC Global Event Buffer Count Register
 * This register holds the number of valid bytes in the event buffer. During initialization,
 * software must initialize the count by writing 0 to the event count field. Each time the
 * hardware writes a new event to the event buffer, it increments this count. Most events
 * are four bytes, but some events may span over multiple four byte entries. Whenever the
 * count is greater than zero, the hardware raises the corresponding interrupt
 * line (depending on the USBDRD()_UAHC_GEVNTSIZ()[EVNTINTMASK]). On an interrupt, software
 * processes one or more events out of the event buffer. Afterwards, software must write the
 * event count field with the number of bytes it processed.
 *
 * Clock crossing delays may result in the interrupt's continual assertion after software
 * acknowledges the last event. Therefore, when the interrupt line is asserted, software must
 * read the GEVNTCOUNT register and only process events if the GEVNTCOUNT is greater than 0.
 *
 * This register can be reset by IOI reset,
 * or USBDRD()_UCTL_CTL[UAHC_RST],
 * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
 * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
 *
 * Internal:
 * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.57
 */
union bdk_usbdrdx_uahc_gevntcountx
{
    uint32_t u; /* Whole-register (32-bit) view. */
    struct bdk_usbdrdx_uahc_gevntcountx_s
    {
        /* The same fields appear in both halves of the #if below, in reverse
           order, so bit positions match regardless of host endianness. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_16_31 : 16;
        uint32_t evntcount : 16; /**< [ 15: 0](R/W/H) When read, returns the number of valid events in the event buffer (in bytes).
                                      When written, hardware decrements the count by the value written.
                                      The interrupt line remains high when count is not 0. */
#else /* Word 0 - Little Endian */
        uint32_t evntcount : 16; /**< [ 15: 0](R/W/H) When read, returns the number of valid events in the event buffer (in bytes).
                                      When written, hardware decrements the count by the value written.
                                      The interrupt line remains high when count is not 0. */
        uint32_t reserved_16_31 : 16;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbdrdx_uahc_gevntcountx_s cn; */
};
typedef union bdk_usbdrdx_uahc_gevntcountx bdk_usbdrdx_uahc_gevntcountx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GEVNTCOUNTX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GEVNTCOUNTX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x86800000c40cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x86800000c40cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+ return 0x86800000c40cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBDRDX_UAHC_GEVNTCOUNTX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GEVNTCOUNTX(a,b) bdk_usbdrdx_uahc_gevntcountx_t
+#define bustype_BDK_USBDRDX_UAHC_GEVNTCOUNTX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GEVNTCOUNTX(a,b) "USBDRDX_UAHC_GEVNTCOUNTX"
+#define device_bar_BDK_USBDRDX_UAHC_GEVNTCOUNTX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GEVNTCOUNTX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_GEVNTCOUNTX(a,b) (a),(b),-1,-1
+
/**
 * Register (NCB32b) usbdrd#_uahc_gevntsiz#
 *
 * USB UAHC Global Event Buffer Size Register
 * This register holds the event buffer size and the event interrupt mask bit. During power-on
 * initialization, software must initialize the size with the number of bytes allocated for
 * the event buffer. The event interrupt mask will mask the interrupt, but events are still
 * queued. After configuration, software must preserve the event buffer size value when
 * changing the event interrupt mask.
 *
 * This register can be reset by IOI reset,
 * or USBDRD()_UCTL_CTL[UAHC_RST],
 * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
 * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
 *
 * Internal:
 * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.56
 */
union bdk_usbdrdx_uahc_gevntsizx
{
    uint32_t u; /* Whole-register (32-bit) view. */
    struct bdk_usbdrdx_uahc_gevntsizx_s
    {
        /* The same fields appear in both halves of the #if below, in reverse
           order, so bit positions match regardless of host endianness. */
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t evntintmask : 1; /**< [ 31: 31](R/W) When set to 1, this prevents the interrupt from being generated.
                                       However, even when the mask is set, the events are queued. */
        uint32_t reserved_16_30 : 15;
        uint32_t evntsiz : 16; /**< [ 15: 0](R/W) Holds the size of the event buffer in bytes; must be a multiple of
                                    four. This is programmed by software once during initialization.
                                    The minimum size of the event buffer is 32 bytes. */
#else /* Word 0 - Little Endian */
        uint32_t evntsiz : 16; /**< [ 15: 0](R/W) Holds the size of the event buffer in bytes; must be a multiple of
                                    four. This is programmed by software once during initialization.
                                    The minimum size of the event buffer is 32 bytes. */
        uint32_t reserved_16_30 : 15;
        uint32_t evntintmask : 1; /**< [ 31: 31](R/W) When set to 1, this prevents the interrupt from being generated.
                                       However, even when the mask is set, the events are queued. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbdrdx_uahc_gevntsizx_s cn; */
};
typedef union bdk_usbdrdx_uahc_gevntsizx bdk_usbdrdx_uahc_gevntsizx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GEVNTSIZX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GEVNTSIZX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x86800000c408ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x86800000c408ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+ return 0x86800000c408ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBDRDX_UAHC_GEVNTSIZX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GEVNTSIZX(a,b) bdk_usbdrdx_uahc_gevntsizx_t
+#define bustype_BDK_USBDRDX_UAHC_GEVNTSIZX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GEVNTSIZX(a,b) "USBDRDX_UAHC_GEVNTSIZX"
+#define device_bar_BDK_USBDRDX_UAHC_GEVNTSIZX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GEVNTSIZX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_GEVNTSIZX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gfladj
+ *
+ * USB UAHC Global Frame Length Adjustment Register
+ * This register provides options for the software to control the core behavior with respect to
+ * SOF (start of frame) and ITP (isochronous timestamp packet) timers and frame timer
+ * functionality. It provides the option to override the sideband signal fladj_30mhz_reg. In
+ * addition, it enables running SOF or ITP frame timer counters completely off of the REF_CLK.
+ * This facilitates hardware LPM in host mode with the SOF or ITP counters being run off of the
+ * REF_CLK signal.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.64
+ */
+union bdk_usbdrdx_uahc_gfladj
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gfladj_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t gfladj_refclk_240mhzdecr_pls1 : 1;/**< [ 31: 31](R/W) This field indicates that the decrement value that the controller applies for each REF_CLK
+ must be GFLADJ_REFCLK_240MHZ_DECR and GFLADJ_REFCLK_240MHZ_DECR +1 alternatively on each
+ REF_CLK. Set this bit to 1 only if [GFLADJ_REFCLK_LPM_SEL] is set to 1 and the fractional
+ component of 240/ref_frequency is greater than or equal to 0.5.
+
+ Example:
+
+ If the REF_CLK is 19.2 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 52.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = (240/19.2) = 12.5.
+ * [GFLADJ_REFCLK_240MHZDECR_PLS1] = 1.
+
+ If the REF_CLK is 24 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 41.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = (240/24) = 10.
+ * [GFLADJ_REFCLK_240MHZDECR_PLS1] = 0. */
+ uint32_t gfladj_refclk_240mhz_decr : 7;/**< [ 30: 24](R/W) This field indicates the decrement value that the controller applies for each REF_CLK in
+ order to derive a frame timer in terms of a 240-MHz clock. This field must be programmed
+ to a nonzero value only if [GFLADJ_REFCLK_LPM_SEL] is set to 1.
+
+ The value is derived as follows:
+ _ [GFLADJ_REFCLK_240MHZ_DECR] = 240/ref_clk_frequency
+
+ Examples:
+
+ If the REF_CLK is 24 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 41.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/24 = 10.
+
+ If the REF_CLK is 48 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 20.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/48 = 5.
+
+ If the REF_CLK is 17 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 58.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/17 = 14. */
+ uint32_t gfladj_refclk_lpm_sel : 1; /**< [ 23: 23](R/W) This bit enables the functionality of running SOF/ITP counters on the REF_CLK.
+ This bit must not be set to 1 if USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1. Similarly, if
+ [GFLADJ_REFCLK_LPM_SEL] = 1, USBDRD()_UAHC_GCTL[SOFITPSYNC] must not be set to 1.
+ When [GFLADJ_REFCLK_LPM_SEL] = 1 the overloading of the suspend control of the USB 2.0
+ first
+ port PHY (UTMI) with USB 3.0 port states is removed. Note that the REF_CLK frequencies
+ supported in this mode are 16/17/19.2/20/24/39.7/40 MHz.
+
+ Internal:
+ The utmi_clk[0] signal of the core must be connected to the FREECLK of the PHY.
+ If you set this bit to 1, USBDRD()_UAHC_GUSB2PHYCFG()[U2_FREECLK_EXISTS] must be set to 0. */
+ uint32_t reserved_22 : 1;
+ uint32_t gfladj_refclk_fladj : 14; /**< [ 21: 8](R/W) This field indicates the frame length adjustment to be applied when SOF/ITP counter is
+ running off of the REF_CLK. This register value is used to adjust:.
+ * ITP interval when USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1
+ * both SOF and ITP interval when [GFLADJ_REFCLK_LPM_SEL] = 1.
+
+ This field must be programmed to a nonzero value only if [GFLADJ_REFCLK_LPM_SEL] = 1 or
+ USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1.
+
+ The value is derived as below:
+
+ _ FLADJ_REF_CLK_FLADJ = ((125000/ref_clk_period_integer) - (125000/ref_clk_period)) *
+ ref_clk_period
+
+ where,
+ * the ref_clk_period_integer is the integer value of the REF_CLK period got by truncating
+ the decimal (fractional) value that is programmed in USBDRD()_UAHC_GUCTL[REFCLKPER].
+ * the ref_clk_period is the REF_CLK period including the fractional value.
+
+ Examples:
+
+ If the REF_CLK is 24 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 41.
+ * GLADJ_REFCLK_FLADJ = ((125000/41) -
+ (125000/41.6666)) * 41.6666 = 2032 (ignoring the fractional value).
+
+ If the REF_CLK is 48 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 20.
+ * GLADJ_REFCLK_FLADJ = ((125000/20) -
+ (125000/20.8333)) * 20.8333 = 5208 (ignoring the fractional value). */
+ uint32_t gfladj_30mhz_reg_sel : 1; /**< [ 7: 7](R/W) This field selects whether to use the input signal fladj_30mhz_reg or the [GFLADJ_30MHZ]
+ to
+ adjust the frame length for the SOF/ITP. When this bit is set to, 1, the controller uses
+ [GFLADJ_30MHZ] value 0x0, the controller uses the input signal fladj_30mhz_reg value. */
+ uint32_t reserved_6 : 1;
+ uint32_t gfladj_30mhz : 6; /**< [ 5: 0](R/W) This field indicates the value that is used for frame length adjustment instead of
+ considering from the sideband input signal fladj_30mhz_reg. This enables post-silicon
+ frame length adjustment in case the input signal fladj_30mhz_reg is connected to a wrong
+ value or is not valid. The controller uses this value if [GFLADJ_30MHZ_REG_SEL] = 1 and
+ the
+ SOF/ITP counters are running off of UTMI(ULPI) clock ([GFLADJ_REFCLK_LPM_SEL] = 0 and
+ USBDRD()_UAHC_GCTL[SOFITPSYNC] is 1 or 0). For details on how to set this value, refer to
+ section 5.2.4 Frame Length Adjustment Register (FLADJ) of the xHCI Specification. */
+#else /* Word 0 - Little Endian */
+ uint32_t gfladj_30mhz : 6; /**< [ 5: 0](R/W) This field indicates the value that is used for frame length adjustment instead of
+ considering from the sideband input signal fladj_30mhz_reg. This enables post-silicon
+ frame length adjustment in case the input signal fladj_30mhz_reg is connected to a wrong
+ value or is not valid. The controller uses this value if [GFLADJ_30MHZ_REG_SEL] = 1 and
+ the
+ SOF/ITP counters are running off of UTMI(ULPI) clock ([GFLADJ_REFCLK_LPM_SEL] = 0 and
+ USBDRD()_UAHC_GCTL[SOFITPSYNC] is 1 or 0). For details on how to set this value, refer to
+ section 5.2.4 Frame Length Adjustment Register (FLADJ) of the xHCI Specification. */
+ uint32_t reserved_6 : 1;
+ uint32_t gfladj_30mhz_reg_sel : 1; /**< [ 7: 7](R/W) This field selects whether to use the input signal fladj_30mhz_reg or the [GFLADJ_30MHZ]
+ to
+ adjust the frame length for the SOF/ITP. When this bit is set to, 1, the controller uses
+ [GFLADJ_30MHZ] value 0x0, the controller uses the input signal fladj_30mhz_reg value. */
+ uint32_t gfladj_refclk_fladj : 14; /**< [ 21: 8](R/W) This field indicates the frame length adjustment to be applied when SOF/ITP counter is
+ running off of the REF_CLK. This register value is used to adjust:.
+ * ITP interval when USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1
+ * both SOF and ITP interval when [GFLADJ_REFCLK_LPM_SEL] = 1.
+
+ This field must be programmed to a nonzero value only if [GFLADJ_REFCLK_LPM_SEL] = 1 or
+ USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1.
+
+ The value is derived as below:
+
+ _ FLADJ_REF_CLK_FLADJ = ((125000/ref_clk_period_integer) - (125000/ref_clk_period)) *
+ ref_clk_period
+
+ where,
+ * the ref_clk_period_integer is the integer value of the REF_CLK period got by truncating
+ the decimal (fractional) value that is programmed in USBDRD()_UAHC_GUCTL[REFCLKPER].
+ * the ref_clk_period is the REF_CLK period including the fractional value.
+
+ Examples:
+
+ If the REF_CLK is 24 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 41.
+ * GLADJ_REFCLK_FLADJ = ((125000/41) -
+ (125000/41.6666)) * 41.6666 = 2032 (ignoring the fractional value).
+
+ If the REF_CLK is 48 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 20.
+ * GLADJ_REFCLK_FLADJ = ((125000/20) -
+ (125000/20.8333)) * 20.8333 = 5208 (ignoring the fractional value). */
+ uint32_t reserved_22 : 1;
+ uint32_t gfladj_refclk_lpm_sel : 1; /**< [ 23: 23](R/W) This bit enables the functionality of running SOF/ITP counters on the REF_CLK.
+ This bit must not be set to 1 if USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1. Similarly, if
+ [GFLADJ_REFCLK_LPM_SEL] = 1, USBDRD()_UAHC_GCTL[SOFITPSYNC] must not be set to 1.
+ When [GFLADJ_REFCLK_LPM_SEL] = 1 the overloading of the suspend control of the USB 2.0
+ first
+ port PHY (UTMI) with USB 3.0 port states is removed. Note that the REF_CLK frequencies
+ supported in this mode are 16/17/19.2/20/24/39.7/40 MHz.
+
+ Internal:
+ The utmi_clk[0] signal of the core must be connected to the FREECLK of the PHY.
+ If you set this bit to 1, USBDRD()_UAHC_GUSB2PHYCFG()[U2_FREECLK_EXISTS] must be set to 0. */
+ uint32_t gfladj_refclk_240mhz_decr : 7;/**< [ 30: 24](R/W) This field indicates the decrement value that the controller applies for each REF_CLK in
+ order to derive a frame timer in terms of a 240-MHz clock. This field must be programmed
+ to a nonzero value only if [GFLADJ_REFCLK_LPM_SEL] is set to 1.
+
+ The value is derived as follows:
+ _ [GFLADJ_REFCLK_240MHZ_DECR] = 240/ref_clk_frequency
+
+ Examples:
+
+ If the REF_CLK is 24 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 41.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/24 = 10.
+
+ If the REF_CLK is 48 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 20.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/48 = 5.
+
+ If the REF_CLK is 17 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 58.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/17 = 14. */
+ uint32_t gfladj_refclk_240mhzdecr_pls1 : 1;/**< [ 31: 31](R/W) This field indicates that the decrement value that the controller applies for each REF_CLK
+ must be GFLADJ_REFCLK_240MHZ_DECR and GFLADJ_REFCLK_240MHZ_DECR +1 alternatively on each
+ REF_CLK. Set this bit to 1 only if [GFLADJ_REFCLK_LPM_SEL] is set to 1 and the fractional
+ component of 240/ref_frequency is greater than or equal to 0.5.
+
+ Example:
+
+ If the REF_CLK is 19.2 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 52.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = (240/19.2) = 12.5.
+ * [GFLADJ_REFCLK_240MHZDECR_PLS1] = 1.
+
+ If the REF_CLK is 24 MHz then:
+ * USBDRD()_UAHC_GUCTL[REFCLKPER] = 41.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = (240/24) = 10.
+ * [GFLADJ_REFCLK_240MHZDECR_PLS1] = 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gfladj_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gfladj bdk_usbdrdx_uahc_gfladj_t;
+
+/* Return the physical CSR address of USBDRD(a)_UAHC_GFLADJ.
+   Index a selects the USBDRD instance (0 or 1); the two instances are
+   strided 0x1000000000 bytes apart.  Supported only for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; any other model/index combination reports a
+   fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_GFLADJ(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GFLADJ(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c630ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c630ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c630ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GFLADJ", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GFLADJ(a) bdk_usbdrdx_uahc_gfladj_t
+#define bustype_BDK_USBDRDX_UAHC_GFLADJ(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GFLADJ(a) "USBDRDX_UAHC_GFLADJ"
+#define device_bar_BDK_USBDRDX_UAHC_GFLADJ(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GFLADJ(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GFLADJ(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ggpio
+ *
+ * USB UAHC Core General-Purpose I/O Register
+ * The application can use this register for general purpose input and output ports or for
+ * debugging.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.10
+ */
+/* Bit layout of USBDRD()_UAHC_GGPIO: [15:0] GPI inputs (read-only, tied
+   to 0) and [31:16] GPO outputs (R/W, not wired to anything — usable as
+   scratch).  Both endian orderings describe the same word. */
+union bdk_usbdrdx_uahc_ggpio
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ggpio_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t gpo : 16; /**< [ 31: 16](R/W) General purpose output. These outputs are not connected to anything. Can be used as scratch. */
+        uint32_t gpi : 16; /**< [ 15: 0](RO) General purpose input. These inputs are tied 0x0. */
+#else /* Word 0 - Little Endian */
+        uint32_t gpi : 16; /**< [ 15: 0](RO) General purpose input. These inputs are tied 0x0. */
+        uint32_t gpo : 16; /**< [ 31: 16](R/W) General purpose output. These outputs are not connected to anything. Can be used as scratch. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ggpio_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ggpio bdk_usbdrdx_uahc_ggpio_t;
+
+/* CSR address of USBDRD(a)_UAHC_GGPIO; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GGPIO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GGPIO(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c124ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c124ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c124ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GGPIO", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GGPIO(a) bdk_usbdrdx_uahc_ggpio_t
+#define bustype_BDK_USBDRDX_UAHC_GGPIO(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GGPIO(a) "USBDRDX_UAHC_GGPIO"
+#define device_bar_BDK_USBDRDX_UAHC_GGPIO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GGPIO(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GGPIO(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams0
+ *
+ * USB UAHC Hardware Parameters Register 0
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.19
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 0): core mode, bus interface types, and bus widths. */
+union bdk_usbdrdx_uahc_ghwparams0
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t awidth : 8; /**< [ 31: 24](RO) USB core bus-address width. */
+        uint32_t sdwidth : 8; /**< [ 23: 16](RO) USB core bus slave-data width. */
+        uint32_t mdwidth : 8; /**< [ 15: 8](RO) USB core bus master-data width. */
+        uint32_t sbus_type : 2; /**< [ 7: 6](RO) USB core bus slave type: AXI. */
+        uint32_t mbus_type : 3; /**< [ 5: 3](RO) USB core bus master type: AXI. */
+        uint32_t mode : 3; /**< [ 2: 0](RO) Operation mode: 0x2: Dual-role device. */
+#else /* Word 0 - Little Endian */
+        uint32_t mode : 3; /**< [ 2: 0](RO) Operation mode: 0x2: Dual-role device. */
+        uint32_t mbus_type : 3; /**< [ 5: 3](RO) USB core bus master type: AXI. */
+        uint32_t sbus_type : 2; /**< [ 7: 6](RO) USB core bus slave type: AXI. */
+        uint32_t mdwidth : 8; /**< [ 15: 8](RO) USB core bus master-data width. */
+        uint32_t sdwidth : 8; /**< [ 23: 16](RO) USB core bus slave-data width. */
+        uint32_t awidth : 8; /**< [ 31: 24](RO) USB core bus-address width. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams0_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams0 bdk_usbdrdx_uahc_ghwparams0_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS0; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c140ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c140ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c140ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS0", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS0(a) bdk_usbdrdx_uahc_ghwparams0_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS0(a) "USBDRDX_UAHC_GHWPARAMS0"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS0(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams1
+ *
+ * USB UAHC Hardware Parameters Register 1
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.20
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 1): clock-domain relationships, power-optimization options, RAM
+   configuration, and native-interface port widths. */
+union bdk_usbdrdx_uahc_ghwparams1
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t en_dbc : 1; /**< [ 31: 31](RAZ) Enable debug capability. */
+        uint32_t rm_opt_features : 1; /**< [ 30: 30](RO) Remove optional features. */
+        uint32_t sync_rst : 1; /**< [ 29: 29](RO) Synchronous reset coding. */
+        uint32_t ram_bus_clks_sync : 1; /**< [ 28: 28](RO) RAM_CLK and BUS_CLK are synchronous.
+                                                                 Internal:
+                                                                 (appears to be orthogonal from the
+                                                                 RAM_CLK_TO_BUS_CLK parameter) */
+        uint32_t mac_ram_clks_sync : 1; /**< [ 27: 27](RO) MAC3_CLK and RAM_CLK are synchronous. */
+        uint32_t mac_phy_clks_sync : 1; /**< [ 26: 26](RO) MAC3_CLK and PHY_CLK are synchronous. */
+        uint32_t en_pwropt : 2; /**< [ 25: 24](RO) Power optimization mode:
+                                                                 bit\<0\> = Clock-gating feature available.
+                                                                 bit\<1\> = Hibernation feature available. */
+        uint32_t spram_typ : 1; /**< [ 23: 23](RO) SRAM type: one-port RAMs. */
+        uint32_t num_rams : 2; /**< [ 22: 21](RO) Number of RAMs. */
+        uint32_t device_num_int : 6; /**< [ 20: 15](RO) Number of event buffers (and interrupts) in device-mode (unsupported). */
+        uint32_t aspacewidth : 3; /**< [ 14: 12](RO) Native interface address-space port width. */
+        uint32_t reqinfowidth : 3; /**< [ 11: 9](RO) Native interface request/response-info port width. */
+        uint32_t datainfowidth : 3; /**< [ 8: 6](RO) Native interface data-info port width. */
+        uint32_t burstwidth_m1 : 3; /**< [ 5: 3](RO) Width minus one of AXI length field. */
+        uint32_t idwidth_m1 : 3; /**< [ 2: 0](RO) Width minus one of AXI ID field. */
+#else /* Word 0 - Little Endian */
+        uint32_t idwidth_m1 : 3; /**< [ 2: 0](RO) Width minus one of AXI ID field. */
+        uint32_t burstwidth_m1 : 3; /**< [ 5: 3](RO) Width minus one of AXI length field. */
+        uint32_t datainfowidth : 3; /**< [ 8: 6](RO) Native interface data-info port width. */
+        uint32_t reqinfowidth : 3; /**< [ 11: 9](RO) Native interface request/response-info port width. */
+        uint32_t aspacewidth : 3; /**< [ 14: 12](RO) Native interface address-space port width. */
+        uint32_t device_num_int : 6; /**< [ 20: 15](RO) Number of event buffers (and interrupts) in device-mode (unsupported). */
+        uint32_t num_rams : 2; /**< [ 22: 21](RO) Number of RAMs. */
+        uint32_t spram_typ : 1; /**< [ 23: 23](RO) SRAM type: one-port RAMs. */
+        uint32_t en_pwropt : 2; /**< [ 25: 24](RO) Power optimization mode:
+                                                                 bit\<0\> = Clock-gating feature available.
+                                                                 bit\<1\> = Hibernation feature available. */
+        uint32_t mac_phy_clks_sync : 1; /**< [ 26: 26](RO) MAC3_CLK and PHY_CLK are synchronous. */
+        uint32_t mac_ram_clks_sync : 1; /**< [ 27: 27](RO) MAC3_CLK and RAM_CLK are synchronous. */
+        uint32_t ram_bus_clks_sync : 1; /**< [ 28: 28](RO) RAM_CLK and BUS_CLK are synchronous.
+                                                                 Internal:
+                                                                 (appears to be orthogonal from the
+                                                                 RAM_CLK_TO_BUS_CLK parameter) */
+        uint32_t sync_rst : 1; /**< [ 29: 29](RO) Synchronous reset coding. */
+        uint32_t rm_opt_features : 1; /**< [ 30: 30](RO) Remove optional features. */
+        uint32_t en_dbc : 1; /**< [ 31: 31](RAZ) Enable debug capability. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams1_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams1 bdk_usbdrdx_uahc_ghwparams1_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS1; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c144ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c144ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c144ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS1", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS1(a) bdk_usbdrdx_uahc_ghwparams1_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS1(a) "USBDRDX_UAHC_GHWPARAMS1"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS1(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams2
+ *
+ * USB UAHC Core GHW Parameters Register 2
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.21
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 2): the 32-bit user ID baked into the core at synthesis. */
+union bdk_usbdrdx_uahc_ghwparams2
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t userid : 32; /**< [ 31: 0](RO) User ID. */
+#else /* Word 0 - Little Endian */
+        uint32_t userid : 32; /**< [ 31: 0](RO) User ID. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams2_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams2 bdk_usbdrdx_uahc_ghwparams2_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS2; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c148ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c148ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c148ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS2", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS2(a) bdk_usbdrdx_uahc_ghwparams2_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS2(a) "USBDRDX_UAHC_GHWPARAMS2"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS2(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams3
+ *
+ * USB UAHC GHW Parameters Register 3
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.22
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 3): PHY interface selections, endpoint counts, and transfer
+   resource capacity. */
+union bdk_usbdrdx_uahc_ghwparams3
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_31 : 1;
+        uint32_t cache_total_xfer_resources : 8;/**< [ 30: 23](RO) Maximum number of transfer resources in the core. */
+        uint32_t num_in_eps : 5; /**< [ 22: 18](RO) Maximum number of device-mode IN endpoints active. */
+        uint32_t num_eps : 6; /**< [ 17: 12](RO) Number of device-mode single-directional endpoints. */
+        uint32_t ulpi_carkit : 1; /**< [ 11: 11](RO) ULPI carkit is not supported. */
+        uint32_t vendor_ctl_interface : 1; /**< [ 10: 10](RO) UTMI+ PHY vendor control interface enabled. */
+        uint32_t reserved_8_9 : 2;
+        uint32_t hsphy_dwidth : 2; /**< [ 7: 6](RO) Data width of the UTMI+ PHY interface: 0x2 = 8-or-16 bits. */
+        uint32_t fsphy_interface : 2; /**< [ 5: 4](RO) USB 1.1 full-speed serial transceiver interface. */
+        uint32_t hsphy_interface : 2; /**< [ 3: 2](RO) High-speed PHY interface: 0x1 = UTMI+. */
+        uint32_t ssphy_interface : 2; /**< [ 1: 0](RO) SuperSpeed PHY interface: 0x1 = PIPE3. */
+#else /* Word 0 - Little Endian */
+        uint32_t ssphy_interface : 2; /**< [ 1: 0](RO) SuperSpeed PHY interface: 0x1 = PIPE3. */
+        uint32_t hsphy_interface : 2; /**< [ 3: 2](RO) High-speed PHY interface: 0x1 = UTMI+. */
+        uint32_t fsphy_interface : 2; /**< [ 5: 4](RO) USB 1.1 full-speed serial transceiver interface. */
+        uint32_t hsphy_dwidth : 2; /**< [ 7: 6](RO) Data width of the UTMI+ PHY interface: 0x2 = 8-or-16 bits. */
+        uint32_t reserved_8_9 : 2;
+        uint32_t vendor_ctl_interface : 1; /**< [ 10: 10](RO) UTMI+ PHY vendor control interface enabled. */
+        uint32_t ulpi_carkit : 1; /**< [ 11: 11](RO) ULPI carkit is not supported. */
+        uint32_t num_eps : 6; /**< [ 17: 12](RO) Number of device-mode single-directional endpoints. */
+        uint32_t num_in_eps : 5; /**< [ 22: 18](RO) Maximum number of device-mode IN endpoints active. */
+        uint32_t cache_total_xfer_resources : 8;/**< [ 30: 23](RO) Maximum number of transfer resources in the core. */
+        uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams3_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams3 bdk_usbdrdx_uahc_ghwparams3_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS3; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c14cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c14cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c14cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS3", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS3(a) bdk_usbdrdx_uahc_ghwparams3_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS3(a) "USBDRDX_UAHC_GHWPARAMS3"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS3(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams4
+ *
+ * USB UAHC GHW Parameters Register 4
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.23
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 4): BMU buffer depths, hibernation scratchpads, and isochronous
+   support flags. */
+union bdk_usbdrdx_uahc_ghwparams4
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams4_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t bmu_lsp_depth : 4; /**< [ 31: 28](RO) Depth of the BMU-LSP status buffer. */
+        uint32_t bmu_ptl_depth_m1 : 4; /**< [ 27: 24](RO) Depth of the BMU-PTL source/sink buffers minus 1. */
+        uint32_t en_isoc_supt : 1; /**< [ 23: 23](RO) Isochronous support enabled. */
+        uint32_t reserved_22 : 1;
+        uint32_t ext_buff_control : 1; /**< [ 21: 21](RO) Enables device external buffer control sideband controls. */
+        uint32_t num_ss_usb_instances : 4; /**< [ 20: 17](RO) Number of SuperSpeed bus instances. */
+        uint32_t hiber_scratchbufs : 4; /**< [ 16: 13](RO) Number of hibernation scratchpad buffers. */
+        uint32_t reserved_6_12 : 7;
+        uint32_t cache_trbs_per_transfer : 6;/**< [ 5: 0](RO) Number of TRBs per transfer that can be cached. */
+#else /* Word 0 - Little Endian */
+        uint32_t cache_trbs_per_transfer : 6;/**< [ 5: 0](RO) Number of TRBs per transfer that can be cached. */
+        uint32_t reserved_6_12 : 7;
+        uint32_t hiber_scratchbufs : 4; /**< [ 16: 13](RO) Number of hibernation scratchpad buffers. */
+        uint32_t num_ss_usb_instances : 4; /**< [ 20: 17](RO) Number of SuperSpeed bus instances. */
+        uint32_t ext_buff_control : 1; /**< [ 21: 21](RO) Enables device external buffer control sideband controls. */
+        uint32_t reserved_22 : 1;
+        uint32_t en_isoc_supt : 1; /**< [ 23: 23](RO) Isochronous support enabled. */
+        uint32_t bmu_ptl_depth_m1 : 4; /**< [ 27: 24](RO) Depth of the BMU-PTL source/sink buffers minus 1. */
+        uint32_t bmu_lsp_depth : 4; /**< [ 31: 28](RO) Depth of the BMU-LSP status buffer. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams4_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams4 bdk_usbdrdx_uahc_ghwparams4_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS4; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS4(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c150ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c150ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c150ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS4", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS4(a) bdk_usbdrdx_uahc_ghwparams4_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS4(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS4(a) "USBDRDX_UAHC_GHWPARAMS4"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS4(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS4(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS4(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams5
+ *
+ * USB UAHC GHW Parameters Register 5
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.24
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 5): BMU request/descriptor queue and buffer depths. */
+union bdk_usbdrdx_uahc_ghwparams5
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams5_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_28_31 : 4;
+        uint32_t dfq_fifo_depth : 6; /**< [ 27: 22](RO) Size of the BMU descriptor fetch-request queue. */
+        uint32_t dwq_fifo_depth : 6; /**< [ 21: 16](RO) Size of the BMU descriptor write queue. */
+        uint32_t txq_fifo_depth : 6; /**< [ 15: 10](RO) Size of the BMU TX request queue. */
+        uint32_t rxq_fifo_depth : 6; /**< [ 9: 4](RO) Size of the BMU RX request queue. */
+        uint32_t bmu_busgm_depth : 4; /**< [ 3: 0](RO) Depth of the BMU-BUSGM source/sink buffers. */
+#else /* Word 0 - Little Endian */
+        uint32_t bmu_busgm_depth : 4; /**< [ 3: 0](RO) Depth of the BMU-BUSGM source/sink buffers. */
+        uint32_t rxq_fifo_depth : 6; /**< [ 9: 4](RO) Size of the BMU RX request queue. */
+        uint32_t txq_fifo_depth : 6; /**< [ 15: 10](RO) Size of the BMU TX request queue. */
+        uint32_t dwq_fifo_depth : 6; /**< [ 21: 16](RO) Size of the BMU descriptor write queue. */
+        uint32_t dfq_fifo_depth : 6; /**< [ 27: 22](RO) Size of the BMU descriptor fetch-request queue. */
+        uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams5_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams5 bdk_usbdrdx_uahc_ghwparams5_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS5; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS5(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c154ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c154ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c154ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS5", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS5(a) bdk_usbdrdx_uahc_ghwparams5_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS5(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS5(a) "USBDRDX_UAHC_GHWPARAMS5"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS5(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS5(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS5(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams6
+ *
+ * USB UAHC GHW Parameters Register 6
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.25
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 6): OTG/battery-charging feature flags, FPGA/debug options, and
+   RAM0 depth. */
+union bdk_usbdrdx_uahc_ghwparams6
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams6_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ram0_depth : 16; /**< [ 31: 16](RO) RAM0 depth. */
+        uint32_t en_bus_filters : 1; /**< [ 15: 15](RO) VBus filters support. */
+        uint32_t en_bc : 1; /**< [ 14: 14](RO) Enable battery charging support. */
+        uint32_t en_otg_ss : 1; /**< [ 13: 13](RO) Enable OTG SuperSpeed support. */
+        uint32_t en_adp : 1; /**< [ 12: 12](RO) Enable ADP support. */
+        uint32_t hnp_support : 1; /**< [ 11: 11](RO) HNP support. */
+        uint32_t srp_support : 1; /**< [ 10: 10](RO) SRP support. */
+        uint32_t reserved_8_9 : 2;
+        uint32_t en_fpga : 1; /**< [ 7: 7](RO) Enable FPGA implementation. */
+        uint32_t en_dbg_ports : 1; /**< [ 6: 6](RO) Enable debug ports for FGPA. */
+        uint32_t psq_fifo_depth : 6; /**< [ 5: 0](RO) Size of the BMU protocol status queue. */
+#else /* Word 0 - Little Endian */
+        uint32_t psq_fifo_depth : 6; /**< [ 5: 0](RO) Size of the BMU protocol status queue. */
+        uint32_t en_dbg_ports : 1; /**< [ 6: 6](RO) Enable debug ports for FGPA. */
+        uint32_t en_fpga : 1; /**< [ 7: 7](RO) Enable FPGA implementation. */
+        uint32_t reserved_8_9 : 2;
+        uint32_t srp_support : 1; /**< [ 10: 10](RO) SRP support. */
+        uint32_t hnp_support : 1; /**< [ 11: 11](RO) HNP support. */
+        uint32_t en_adp : 1; /**< [ 12: 12](RO) Enable ADP support. */
+        uint32_t en_otg_ss : 1; /**< [ 13: 13](RO) Enable OTG SuperSpeed support. */
+        uint32_t en_bc : 1; /**< [ 14: 14](RO) Enable battery charging support. */
+        uint32_t en_bus_filters : 1; /**< [ 15: 15](RO) VBus filters support. */
+        uint32_t ram0_depth : 16; /**< [ 31: 16](RO) RAM0 depth. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams6_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams6 bdk_usbdrdx_uahc_ghwparams6_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS6; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS6(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS6(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c158ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c158ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c158ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS6", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS6(a) bdk_usbdrdx_uahc_ghwparams6_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS6(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS6(a) "USBDRDX_UAHC_GHWPARAMS6"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS6(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS6(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS6(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams7
+ *
+ * USB UAHC GHW Parameters Register 7
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.26
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 7): RAM1 and RAM2 depths. */
+union bdk_usbdrdx_uahc_ghwparams7
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams7_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ram2_depth : 16; /**< [ 31: 16](RO) RAM2 depth. */
+        uint32_t ram1_depth : 16; /**< [ 15: 0](RO) RAM1 depth. */
+#else /* Word 0 - Little Endian */
+        uint32_t ram1_depth : 16; /**< [ 15: 0](RO) RAM1 depth. */
+        uint32_t ram2_depth : 16; /**< [ 31: 16](RO) RAM2 depth. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams7_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams7 bdk_usbdrdx_uahc_ghwparams7_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS7; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS7(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS7(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c15cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c15cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c15cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS7", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS7(a) bdk_usbdrdx_uahc_ghwparams7_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS7(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS7(a) "USBDRDX_UAHC_GHWPARAMS7"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS7(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS7(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS7(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_ghwparams8
+ *
+ * USB UAHC GHW Parameters Register 8
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.20a, section 6.2.3.9.
+ */
+/* Read-only snapshot of the Synopsys DWC_usb3 compile-time configuration
+   (word 8): Dcache depth information.  Note this CSR lives at offset
+   0xc600, apart from the GHWPARAMS0-7 block at 0xc140-0xc15c. */
+union bdk_usbdrdx_uahc_ghwparams8
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_ghwparams8_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dcache_depth_info : 32; /**< [ 31: 0](RO) Dcache depth. */
+#else /* Word 0 - Little Endian */
+        uint32_t dcache_depth_info : 32; /**< [ 31: 0](RO) Dcache depth. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_ghwparams8_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_ghwparams8 bdk_usbdrdx_uahc_ghwparams8_t;
+
+/* CSR address of USBDRD(a)_UAHC_GHWPARAMS8; valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX, otherwise fatal. */
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS8(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GHWPARAMS8(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000c600ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000c600ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000c600ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GHWPARAMS8", 1, a, 0, 0, 0);
+}
+
+/* Per-register metadata consumed by the generic BDK_CSR_* accessor macros. */
+#define typedef_BDK_USBDRDX_UAHC_GHWPARAMS8(a) bdk_usbdrdx_uahc_ghwparams8_t
+#define bustype_BDK_USBDRDX_UAHC_GHWPARAMS8(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GHWPARAMS8(a) "USBDRDX_UAHC_GHWPARAMS8"
+#define device_bar_BDK_USBDRDX_UAHC_GHWPARAMS8(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GHWPARAMS8(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GHWPARAMS8(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gpmsts
+ *
+ * USB UAHC Global Power Management Status Register
+ * This debug register gives information on which event caused the hibernation exit. These
+ * registers are for debug purposes. They provide debug information on the internal status and
+ * state machines. Global debug registers have design-specific information, and are used by for
+ * debugging purposes. These registers are not intended to be used by the customer. If any debug
+ * assistance is needed for the silicon, contact customer support with a dump of these registers.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.6
+ * INTERNAL: Contact Synopsys directly.
+ */
+union bdk_usbdrdx_uahc_gpmsts
+{
+    uint32_t u; /* Whole register as a single 32-bit value */
+    struct bdk_usbdrdx_uahc_gpmsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t portsel : 4; /**< [ 31: 28](WO) This field selects the port number. Always 0x0. */
+        uint32_t reserved_17_27 : 11;
+        uint32_t u3wakeup : 5; /**< [ 16: 12](RO/H) This field gives the USB 3.0 port wakeup conditions.
+            bit\<12\> = Overcurrent detected.
+            bit\<13\> = Resume detected.
+            bit\<14\> = Connect detected.
+            bit\<15\> = Disconnect detected.
+            bit\<16\> = Last connection state. */
+        uint32_t reserved_10_11 : 2;
+        uint32_t u2wakeup : 10; /**< [ 9: 0](RO/H) This field indicates the USB 2.0 port wakeup conditions.
+            bit\<0\> = Overcurrent detected.
+            bit\<1\> = Resume detected.
+            bit\<2\> = Connect detected.
+            bit\<3\> = Disconnect detected.
+            bit\<4\> = Last connection state.
+            bit\<5\> = ID change detected.
+            bit\<6\> = SRP request detected.
+            bit\<7\> = ULPI interrupt detected.
+            bit\<8\> = USB reset detected.
+            bit\<9\> = Resume detected changed. */
+#else /* Word 0 - Little Endian */
+        uint32_t u2wakeup : 10; /**< [ 9: 0](RO/H) This field indicates the USB 2.0 port wakeup conditions.
+            bit\<0\> = Overcurrent detected.
+            bit\<1\> = Resume detected.
+            bit\<2\> = Connect detected.
+            bit\<3\> = Disconnect detected.
+            bit\<4\> = Last connection state.
+            bit\<5\> = ID change detected.
+            bit\<6\> = SRP request detected.
+            bit\<7\> = ULPI interrupt detected.
+            bit\<8\> = USB reset detected.
+            bit\<9\> = Resume detected changed. */
+        uint32_t reserved_10_11 : 2;
+        uint32_t u3wakeup : 5; /**< [ 16: 12](RO/H) This field gives the USB 3.0 port wakeup conditions.
+            bit\<12\> = Overcurrent detected.
+            bit\<13\> = Resume detected.
+            bit\<14\> = Connect detected.
+            bit\<15\> = Disconnect detected.
+            bit\<16\> = Last connection state. */
+        uint32_t reserved_17_27 : 11;
+        uint32_t portsel : 4; /**< [ 31: 28](WO) This field selects the port number. Always 0x0. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_gpmsts_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gpmsts bdk_usbdrdx_uahc_gpmsts_t;
+
+/* Resolve the physical CSR address of USBDRD(a)_UAHC_GPMSTS for USB
+   instance a (0..1). All supported models map this CSR at the same base
+   address; address bit 36 selects the controller instance. */
+static inline uint64_t BDK_USBDRDX_UAHC_GPMSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GPMSTS(unsigned long a)
+{
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+        return 0x86800000c114ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GPMSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GPMSTS(a) bdk_usbdrdx_uahc_gpmsts_t
+#define bustype_BDK_USBDRDX_UAHC_GPMSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GPMSTS(a) "USBDRDX_UAHC_GPMSTS"
+#define device_bar_BDK_USBDRDX_UAHC_GPMSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GPMSTS(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GPMSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_gprtbimap
+ *
+ * USB UAHC SuperSpeed Port-to-Bus Instance Mapping Register
+ * This register specifies the SuperSpeed USB instance number to which each USB 3.0 port is
+ * connected. By default, USB 3.0 ports are evenly distributed among all SuperSpeed USB
+ * instances. Software can program this register to specify how USB 3.0 ports are connected to
+ * SuperSpeed USB instances. The UAHC only implements one SuperSpeed bus-instance, so this
+ * register should always be 0.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.16
+ */
+union bdk_usbdrdx_uahc_gprtbimap
+{
+    uint64_t u; /* Whole register as a single 64-bit value */
+    struct bdk_usbdrdx_uahc_gprtbimap_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63 : 60;
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) SuperSpeed USB instance number for port 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) SuperSpeed USB instance number for port 1. */
+        uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_gprtbimap_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gprtbimap bdk_usbdrdx_uahc_gprtbimap_t;
+
+/* Resolve the physical CSR address of USBDRD(a)_UAHC_GPRTBIMAP for USB
+   instance a (0..1). All supported models map this CSR at the same base
+   address; address bit 36 selects the controller instance. */
+static inline uint64_t BDK_USBDRDX_UAHC_GPRTBIMAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GPRTBIMAP(unsigned long a)
+{
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+        return 0x86800000c138ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GPRTBIMAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GPRTBIMAP(a) bdk_usbdrdx_uahc_gprtbimap_t
+#define bustype_BDK_USBDRDX_UAHC_GPRTBIMAP(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_GPRTBIMAP(a) "USBDRDX_UAHC_GPRTBIMAP"
+#define device_bar_BDK_USBDRDX_UAHC_GPRTBIMAP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GPRTBIMAP(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GPRTBIMAP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_gprtbimap_fs
+ *
+ * USB UAHC Full/Low-Speed Port-to-Bus Instance Mapping Register
+ * This register specifies the full-speed/low-speed USB instance number to which each USB 1.1
+ * port is connected. By default, USB 1.1 ports are evenly distributed among all full-speed/
+ * low-speed USB instances. Software can program this register to specify how USB 1.1 ports are
+ * connected to full-speed/low-speed USB instances. The UAHC only implements one full-speed/
+ * low-speed bus-instance, so this register should always be 0x0.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.2.3.
+ */
+union bdk_usbdrdx_uahc_gprtbimap_fs
+{
+    uint64_t u; /* Whole register as a single 64-bit value */
+    struct bdk_usbdrdx_uahc_gprtbimap_fs_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63 : 60;
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) Full-speed USB instance number for port 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) Full-speed USB instance number for port 1. */
+        uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_gprtbimap_fs_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gprtbimap_fs bdk_usbdrdx_uahc_gprtbimap_fs_t;
+
+/* Resolve the physical CSR address of USBDRD(a)_UAHC_GPRTBIMAP_FS for USB
+   instance a (0..1). All supported models map this CSR at the same base
+   address; address bit 36 selects the controller instance. */
+static inline uint64_t BDK_USBDRDX_UAHC_GPRTBIMAP_FS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GPRTBIMAP_FS(unsigned long a)
+{
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+        return 0x86800000c188ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GPRTBIMAP_FS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GPRTBIMAP_FS(a) bdk_usbdrdx_uahc_gprtbimap_fs_t
+#define bustype_BDK_USBDRDX_UAHC_GPRTBIMAP_FS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_GPRTBIMAP_FS(a) "USBDRDX_UAHC_GPRTBIMAP_FS"
+#define device_bar_BDK_USBDRDX_UAHC_GPRTBIMAP_FS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GPRTBIMAP_FS(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GPRTBIMAP_FS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uahc_gprtbimap_hs
+ *
+ * USB UAHC High-Speed Port-to-Bus Instance Mapping Register
+ * This register specifies the high-speed USB instance number to which each USB 2.0 port is
+ * connected. By default, USB 2.0 ports are evenly distributed among all high-speed USB
+ * instances. Software can program this register to specify how USB 2.0 ports are connected to
+ * high-speed USB instances. The UAHC only implements one high-speed bus-instance, so this
+ * register should always be 0.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.2.2.
+ */
+union bdk_usbdrdx_uahc_gprtbimap_hs
+{
+    uint64_t u; /* Whole register as a single 64-bit value */
+    struct bdk_usbdrdx_uahc_gprtbimap_hs_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63 : 60;
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) High-speed USB instance number for port 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) High-speed USB instance number for port 1. */
+        uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_gprtbimap_hs_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gprtbimap_hs bdk_usbdrdx_uahc_gprtbimap_hs_t;
+
+/* Resolve the physical CSR address of USBDRD(a)_UAHC_GPRTBIMAP_HS for USB
+   instance a (0..1). All supported models map this CSR at the same base
+   address; address bit 36 selects the controller instance. */
+static inline uint64_t BDK_USBDRDX_UAHC_GPRTBIMAP_HS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GPRTBIMAP_HS(unsigned long a)
+{
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+        return 0x86800000c180ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GPRTBIMAP_HS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GPRTBIMAP_HS(a) bdk_usbdrdx_uahc_gprtbimap_hs_t
+#define bustype_BDK_USBDRDX_UAHC_GPRTBIMAP_HS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UAHC_GPRTBIMAP_HS(a) "USBDRDX_UAHC_GPRTBIMAP_HS"
+#define device_bar_BDK_USBDRDX_UAHC_GPRTBIMAP_HS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GPRTBIMAP_HS(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GPRTBIMAP_HS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_grlsid
+ *
+ * USB UAHC Release ID Register
+ * This is a read-only register that contains the release number of the core.
+ * Internal:
+ * Original name: GSNPSID = Synopsys ID.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.9.
+ */
+union bdk_usbdrdx_uahc_grlsid
+{
+    uint32_t u; /* Whole register as a single 32-bit value */
+    struct bdk_usbdrdx_uahc_grlsid_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t releaseid : 32; /**< [ 31: 0](RO) Software can use this register to configure release-specific features in the driver.
+            Internal:
+            Synopsys ID
+            * SynopsysID[31:16] indicates Core Identification Number. 0x5533 is ASCII for
+            U3 (DWC_usb3).
+            * SynopsysID[15:0] indicates the release number. Current Release is 2.50a. */
+#else /* Word 0 - Little Endian */
+        uint32_t releaseid : 32; /**< [ 31: 0](RO) Software can use this register to configure release-specific features in the driver.
+            Internal:
+            Synopsys ID
+            * SynopsysID[31:16] indicates Core Identification Number. 0x5533 is ASCII for
+            U3 (DWC_usb3).
+            * SynopsysID[15:0] indicates the release number. Current Release is 2.50a. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_grlsid_s cn8; */
+    /* CN9XXX layout is identical; only the documented release number (3.10a) differs. */
+    struct bdk_usbdrdx_uahc_grlsid_cn9
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t releaseid : 32; /**< [ 31: 0](RO) Software can use this register to configure release-specific features in the driver.
+            Internal:
+            Synopsys ID
+            * SynopsysID[31:16] indicates Core Identification Number. 0x5533 is ASCII for
+            U3 (DWC_usb3).
+            * SynopsysID[15:0] indicates the release number. Current Release is 3.10a. */
+#else /* Word 0 - Little Endian */
+        uint32_t releaseid : 32; /**< [ 31: 0](RO) Software can use this register to configure release-specific features in the driver.
+            Internal:
+            Synopsys ID
+            * SynopsysID[31:16] indicates Core Identification Number. 0x5533 is ASCII for
+            U3 (DWC_usb3).
+            * SynopsysID[15:0] indicates the release number. Current Release is 3.10a. */
+#endif /* Word 0 - End */
+    } cn9;
+};
+typedef union bdk_usbdrdx_uahc_grlsid bdk_usbdrdx_uahc_grlsid_t;
+
+/* Resolve the physical CSR address of USBDRD(a)_UAHC_GRLSID for USB
+   instance a (0..1). All supported models map this CSR at the same base
+   address; address bit 36 selects the controller instance. */
+static inline uint64_t BDK_USBDRDX_UAHC_GRLSID(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GRLSID(unsigned long a)
+{
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+        return 0x86800000c120ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GRLSID", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GRLSID(a) bdk_usbdrdx_uahc_grlsid_t
+#define bustype_BDK_USBDRDX_UAHC_GRLSID(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GRLSID(a) "USBDRDX_UAHC_GRLSID"
+#define device_bar_BDK_USBDRDX_UAHC_GRLSID(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GRLSID(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GRLSID(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_grxfifoprihst
+ *
+ * USB UAHC RxFIFOs DMA Priority Register
+ * This register specifies the relative DMA priority level among the host RxFIFOs (one per USB
+ * bus instance) within the associated speed group (SuperSpeed or high-speed/full-speed/
+ * low-speed). When multiple RxFIFOs compete for DMA service at a given time, the RXDMA arbiter
+ * grants access on a packet-basis in the following manner:
+ *
+ * Among the FIFOs in the same speed group (SuperSpeed or high-speed/full-speed/low-speed):
+ * * High-priority RxFIFOs are granted access using round-robin arbitration.
+ * * Low-priority RxFIFOs are granted access using round-robin arbitration only after high-
+ * priority
+ * RxFIFOs have no further processing to do (i.e., either the RXQs are empty or the corresponding
+ * RxFIFOs do not have the required data).
+ *
+ * The RX DMA arbiter prioritizes the SuperSpeed group or high-speed/full-speed/low-speed group
+ * according to the ratio programmed in
+ * USBDRD()_UAHC_GDMAHLRATIO.
+ *
+ * For scatter-gather packets, the arbiter grants successive DMA requests to the same FIFO until
+ * the entire packet is completed. The register size corresponds to the number of configured USB
+ * bus instances; for example, in the default configuration, there are three USB bus instances (one
+ * SuperSpeed, one high-speed, and one full-speed/low-speed).
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.60
+ */
+union bdk_usbdrdx_uahc_grxfifoprihst
+{
+    uint32_t u; /* Whole register as a single 32-bit value */
+    struct bdk_usbdrdx_uahc_grxfifoprihst_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_3_31 : 29;
+        uint32_t rx_priority : 3; /**< [ 2: 0](R/W) Each register bit[n] controls the priority (1 = high, 0 = low) of RxFIFO[n] within a speed group. */
+#else /* Word 0 - Little Endian */
+        uint32_t rx_priority : 3; /**< [ 2: 0](R/W) Each register bit[n] controls the priority (1 = high, 0 = low) of RxFIFO[n] within a speed group. */
+        uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_grxfifoprihst_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_grxfifoprihst bdk_usbdrdx_uahc_grxfifoprihst_t;
+
+/* Resolve the physical CSR address of USBDRD(a)_UAHC_GRXFIFOPRIHST for USB
+   instance a (0..1). All supported models map this CSR at the same base
+   address; address bit 36 selects the controller instance. */
+static inline uint64_t BDK_USBDRDX_UAHC_GRXFIFOPRIHST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GRXFIFOPRIHST(unsigned long a)
+{
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+        return 0x86800000c61cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GRXFIFOPRIHST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GRXFIFOPRIHST(a) bdk_usbdrdx_uahc_grxfifoprihst_t
+#define bustype_BDK_USBDRDX_UAHC_GRXFIFOPRIHST(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GRXFIFOPRIHST(a) "USBDRDX_UAHC_GRXFIFOPRIHST"
+#define device_bar_BDK_USBDRDX_UAHC_GRXFIFOPRIHST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GRXFIFOPRIHST(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GRXFIFOPRIHST(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_grxfifosiz#
+ *
+ * USB UAHC RX FIFO Size Register
+ * The application can program the internal RAM start address/depth of the each RxFIFO as shown
+ * below. It is recommended that software use the default value. In Host mode, per-port registers
+ * are implemented. One register per FIFO.
+ *
+ * Reset values = 0:{0x0000_0084} 1:{0x0084_0104} 2:{0x0188_0180}.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.6.2.
+ * INTERNAL: For more information, see the BMU section in Block Descriptions on Synopsys Databook
+ * page 238.
+ */
+union bdk_usbdrdx_uahc_grxfifosizx
+{
+    uint32_t u; /* Whole register as a single 32-bit value */
+    struct bdk_usbdrdx_uahc_grxfifosizx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t rxfstaddr : 16; /**< [ 31: 16](R/W) RxFIFOn RAM start address. This field contains the memory start address for RxFIFOn. The
+            reset value is derived from configuration parameters. */
+        uint32_t rxfdep : 16; /**< [ 15: 0](R/W) RxFIFOn depth. This value is in terms of RX RAM data width.
+            minimum value = 0x20, maximum value = 0x4000.
+
+            Internal:
+            For more information, see the Hardware Integration chapter of the Synopsys
+            Databook.
+            The reset value is derived from configuration parameters. */
+#else /* Word 0 - Little Endian */
+        uint32_t rxfdep : 16; /**< [ 15: 0](R/W) RxFIFOn depth. This value is in terms of RX RAM data width.
+            minimum value = 0x20, maximum value = 0x4000.
+
+            Internal:
+            For more information, see the Hardware Integration chapter of the Synopsys
+            Databook.
+            The reset value is derived from configuration parameters. */
+        uint32_t rxfstaddr : 16; /**< [ 31: 16](R/W) RxFIFOn RAM start address. This field contains the memory start address for RxFIFOn. The
+            reset value is derived from configuration parameters. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_grxfifosizx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_grxfifosizx bdk_usbdrdx_uahc_grxfifosizx_t;
+
+/* Resolve the physical CSR address of USBDRD(a)_UAHC_GRXFIFOSIZ(b) for USB
+   instance a (0..1) and FIFO b (0..2). All supported models map this CSR at
+   the same base address; address bit 36 selects the controller instance and
+   the FIFO index is a 4-byte stride (b is masked with 0x3 although only
+   0..2 are accepted above). */
+static inline uint64_t BDK_USBDRDX_UAHC_GRXFIFOSIZX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GRXFIFOSIZX(unsigned long a, unsigned long b)
+{
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && ((a<=1) && (b<=2)))
+        return 0x86800000c380ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x3);
+    __bdk_csr_fatal("USBDRDX_UAHC_GRXFIFOSIZX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GRXFIFOSIZX(a,b) bdk_usbdrdx_uahc_grxfifosizx_t
+#define bustype_BDK_USBDRDX_UAHC_GRXFIFOSIZX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GRXFIFOSIZX(a,b) "USBDRDX_UAHC_GRXFIFOSIZX"
+#define device_bar_BDK_USBDRDX_UAHC_GRXFIFOSIZX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GRXFIFOSIZX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_GRXFIFOSIZX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_grxthrcfg
+ *
+ * USB UAHC RX Threshold Control Register
+ * In a normal case, an RX burst starts as soon as 1-packet space is available. This works well
+ * as long as the system bus is faster than the USB3.0 bus (a 1024-bytes packet takes ~2.2 us on
+ * the USB bus in SuperSpeed mode). If the system bus latency is larger than 2.2 us to access a
+ * 1024-byte packet, then starting a burst on 1-packet condition leads to an early abort of the
+ * burst causing unnecessary performance reduction. This register allows the configuration of
+ * threshold and burst size control. This feature is enabled by USBRXPKTCNTSEL.
+ *
+ * Receive Path:
+ * * The RX threshold is controlled by USBRXPKTCNT and the RX burst size is controlled by
+ * USBMAXRXBURSTSIZE.
+ * * Selecting optimal RxFIFO size, RX threshold, and RX burst size avoids RX burst aborts due
+ * to overrun if the system bus is slower than USB. Once in a while overrun is OK, and there is
+ * no functional issue.
+ * * Some devices do not support terminating ACK retry. With these devices, host cannot set ACK=0
+ * and Retry=0 and do retry later and you have to retry immediately. For such devices, minimize
+ * retry due to underrun. Setting threshold and burst size guarantees this.
+ * A larger RX threshold affects the performance since the scheduler is idle during this time.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.4.
+ */
+union bdk_usbdrdx_uahc_grxthrcfg
+{
+    uint32_t u; /* Whole register as a single 32-bit value */
+    struct bdk_usbdrdx_uahc_grxthrcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_30_31 : 2;
+        uint32_t usbrxpktcntsel : 1; /**< [ 29: 29](R/W) USB receive-packet-count enable. Enables/disables the USB reception multipacket
+            thresholding:
+            0 = the core can only start reception on the USB when the RxFIFO has space for at least
+            one packet.
+            1 = the core can only start reception on the USB when the RxFIFO has space for at least
+            USBRXPKTCNT amount of packets.
+            This mode is only used for SuperSpeed.
+
+            In device mode, setting this bit to 1 also enables the functionality of reporting
+            NUMP in the ACK TP based on the RxFIFO space instead of reporting a fixed NUMP derived
+            from USBDRD()_UAHC_DCFG[NUMP]. */
+        uint32_t reserved_28 : 1;
+        uint32_t usbrxpktcnt : 4; /**< [ 27: 24](R/W) USB receive-packet count. In host-mode, specifies space (in number of packets) that must
+            be available in the RxFIFO before the core can start the corresponding USB RX transaction
+            (burst).
+
+            This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0xF.
+            This field must be \<= [USBMAXRXBURSTSIZE]. */
+        uint32_t usbmaxrxburstsize : 5; /**< [ 23: 19](R/W) USB maximum receive-burst size. Specifies the maximum bulk IN burst the core
+            should do. When the system bus is slower than the USB, RxFIFO can overrun during a long
+            burst.
+
+            Program a smaller value to this field to limit the RX burst size that the core can do. It
+            only applies to SuperSpeed Bulk, Isochronous, and Interrupt IN endpoints.
+            This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0x10. */
+        uint32_t reserved_0_18 : 19;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_18 : 19;
+        uint32_t usbmaxrxburstsize : 5; /**< [ 23: 19](R/W) USB maximum receive-burst size. Specifies the maximum bulk IN burst the core
+            should do. When the system bus is slower than the USB, RxFIFO can overrun during a long
+            burst.
+
+            Program a smaller value to this field to limit the RX burst size that the core can do. It
+            only applies to SuperSpeed Bulk, Isochronous, and Interrupt IN endpoints.
+            This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0x10. */
+        uint32_t usbrxpktcnt : 4; /**< [ 27: 24](R/W) USB receive-packet count. In host-mode, specifies space (in number of packets) that must
+            be available in the RxFIFO before the core can start the corresponding USB RX transaction
+            (burst).
+
+            This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0xF.
+            This field must be \<= [USBMAXRXBURSTSIZE]. */
+        uint32_t reserved_28 : 1;
+        uint32_t usbrxpktcntsel : 1; /**< [ 29: 29](R/W) USB receive-packet-count enable. Enables/disables the USB reception multipacket
+            thresholding:
+            0 = the core can only start reception on the USB when the RxFIFO has space for at least
+            one packet.
+            1 = the core can only start reception on the USB when the RxFIFO has space for at least
+            USBRXPKTCNT amount of packets.
+            This mode is only used for SuperSpeed.
+
+            In device mode, setting this bit to 1 also enables the functionality of reporting
+            NUMP in the ACK TP based on the RxFIFO space instead of reporting a fixed NUMP derived
+            from USBDRD()_UAHC_DCFG[NUMP]. */
+        uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+    } s;
+    /* Chip-specific layout: identical to the generic layout except that the
+       low reserved bits \<18:0\> are subdivided into smaller reserved fields. */
+    struct bdk_usbdrdx_uahc_grxthrcfg_cn
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_30_31 : 2;
+        uint32_t usbrxpktcntsel : 1; /**< [ 29: 29](R/W) USB receive-packet-count enable. Enables/disables the USB reception multipacket
+            thresholding:
+            0 = the core can only start reception on the USB when the RxFIFO has space for at least
+            one packet.
+            1 = the core can only start reception on the USB when the RxFIFO has space for at least
+            USBRXPKTCNT amount of packets.
+            This mode is only used for SuperSpeed.
+
+            In device mode, setting this bit to 1 also enables the functionality of reporting
+            NUMP in the ACK TP based on the RxFIFO space instead of reporting a fixed NUMP derived
+            from USBDRD()_UAHC_DCFG[NUMP]. */
+        uint32_t reserved_28 : 1;
+        uint32_t usbrxpktcnt : 4; /**< [ 27: 24](R/W) USB receive-packet count. In host-mode, specifies space (in number of packets) that must
+            be available in the RxFIFO before the core can start the corresponding USB RX transaction
+            (burst).
+
+            This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0xF.
+            This field must be \<= [USBMAXRXBURSTSIZE]. */
+        uint32_t usbmaxrxburstsize : 5; /**< [ 23: 19](R/W) USB maximum receive-burst size. Specifies the maximum bulk IN burst the core
+            should do. When the system bus is slower than the USB, RxFIFO can overrun during a long
+            burst.
+
+            Program a smaller value to this field to limit the RX burst size that the core can do. It
+            only applies to SuperSpeed Bulk, Isochronous, and Interrupt IN endpoints.
+            This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0x10. */
+        uint32_t reserved_16_18 : 3;
+        uint32_t reserved_15 : 1;
+        uint32_t reserved_11_14 : 4;
+        uint32_t reserved_0_10 : 11;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_10 : 11;
+        uint32_t reserved_11_14 : 4;
+        uint32_t reserved_15 : 1;
+        uint32_t reserved_16_18 : 3;
+        uint32_t usbmaxrxburstsize : 5; /**< [ 23: 19](R/W) USB maximum receive-burst size. Specifies the maximum bulk IN burst the core
+            should do. When the system bus is slower than the USB, RxFIFO can overrun during a long
+            burst.
+
+            Program a smaller value to this field to limit the RX burst size that the core can do. It
+            only applies to SuperSpeed Bulk, Isochronous, and Interrupt IN endpoints.
+            This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0x10. */
+        uint32_t usbrxpktcnt : 4; /**< [ 27: 24](R/W) USB receive-packet count. In host-mode, specifies space (in number of packets) that must
+            be available in the RxFIFO before the core can start the corresponding USB RX transaction
+            (burst).
+
+            This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0xF.
+            This field must be \<= [USBMAXRXBURSTSIZE]. */
+        uint32_t reserved_28 : 1;
+        uint32_t usbrxpktcntsel : 1; /**< [ 29: 29](R/W) USB receive-packet-count enable. Enables/disables the USB reception multipacket
+            thresholding:
+            0 = the core can only start reception on the USB when the RxFIFO has space for at least
+            one packet.
+            1 = the core can only start reception on the USB when the RxFIFO has space for at least
+            USBRXPKTCNT amount of packets.
+            This mode is only used for SuperSpeed.
+
+            In device mode, setting this bit to 1 also enables the functionality of reporting
+            NUMP in the ACK TP based on the RxFIFO space instead of reporting a fixed NUMP derived
+            from USBDRD()_UAHC_DCFG[NUMP]. */
+        uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+    } cn;
+};
+typedef union bdk_usbdrdx_uahc_grxthrcfg bdk_usbdrdx_uahc_grxthrcfg_t;
+
+/* Resolve the physical CSR address of USBDRD(a)_UAHC_GRXTHRCFG for USB
+   instance a (0..1). All supported models map this CSR at the same base
+   address; address bit 36 selects the controller instance. */
+static inline uint64_t BDK_USBDRDX_UAHC_GRXTHRCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GRXTHRCFG(unsigned long a)
+{
+    if ((CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)) && (a<=1))
+        return 0x86800000c10cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_GRXTHRCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GRXTHRCFG(a) bdk_usbdrdx_uahc_grxthrcfg_t
+#define bustype_BDK_USBDRDX_UAHC_GRXTHRCFG(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GRXTHRCFG(a) "USBDRDX_UAHC_GRXTHRCFG"
+#define device_bar_BDK_USBDRDX_UAHC_GRXTHRCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GRXTHRCFG(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GRXTHRCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gsbuscfg0
+ *
+ * USB UAHC Bus Configuration Register 0
+ * This register can be used to configure the core after power-on or a change in mode of
+ * operation. This register mainly contains AXI system-related configuration parameters. Do not
+ * change this register after the initial programming. The application must program this register
+ * before starting any transactions on AXI. When [INCRBRSTENA] is enabled, it has the highest
+ * priority over other burst lengths. The core always performs the largest burst when enabled.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * The AXI cache signals are not connected in Cavium's hookup, so the *REQINFO fields
+ * can be ignored.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v3.10a, section 6.2.1
+ */
+union bdk_usbdrdx_uahc_gsbuscfg0
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gsbuscfg0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t datrdreqinfo : 4; /**< [ 31: 28](R/W) AXI-cache for data-read operations. Always set to 0x0. */
+ uint32_t desrdreqinfo : 4; /**< [ 27: 24](R/W) AXI-cache for descriptor-read operations. Always set to 0x0. */
+ uint32_t datwrreqinfo : 4; /**< [ 23: 20](R/W) AXI-cache for data-write operations. Always set to 0x0. */
+ uint32_t deswrreqinfo : 4; /**< [ 19: 16](R/W) AXI-cache for descriptor-write operations. Always set to 0x0. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t datbigend : 1; /**< [ 11: 11](R/W) Data access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBDRD()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t descbigend : 1; /**< [ 10: 10](R/W) Descriptor access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBDRD()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t incr256brstena : 1; /**< [ 7: 7](R/W) INCR256 burst-type enable. Always set to 0. */
+ uint32_t incr128brstena : 1; /**< [ 6: 6](R/W) INCR128 burst-type enable. Always set to 0. */
+ uint32_t incr64brstena : 1; /**< [ 5: 5](R/W) INCR64 burst-type enable. Always set to 0. */
+ uint32_t incr32brstena : 1; /**< [ 4: 4](R/W) INCR32 burst-type enable. Always set to 0. */
+ uint32_t incr16brstena : 1; /**< [ 3: 3](R/W) INCR16 burst-type enable. Allows the AXI master to generate INCR 16-beat bursts. */
+ uint32_t incr8brstena : 1; /**< [ 2: 2](R/W) INCR8 burst-type enable. Allows the AXI master to generate INCR eight-beat bursts. */
+ uint32_t incr4brstena : 1; /**< [ 1: 1](R/W) INCR4 burst-type enable. Allows the AXI master to generate INCR four-beat bursts. */
+ uint32_t incrbrstena : 1; /**< [ 0: 0](R/W) Undefined-length INCR burst-type enable.
+ This bit determines the set of burst lengths to be utilized by the master interface. It
+ works in conjunction with the GSBUSCFG0[7:1] enables (INCR*BRSTENA).
+
+ If disabled, the AXI master will use only the burst lengths
+ 1, 4, 8, 16 (assuming the INCR*BRSTENA are set to their reset values).
+
+ If enabled, the AXI master uses any length less than or equal to the largest-enabled burst
+ length based on the INCR*BRSTENA fields. */
+#else /* Word 0 - Little Endian */
+ uint32_t incrbrstena : 1; /**< [ 0: 0](R/W) Undefined-length INCR burst-type enable.
+ This bit determines the set of burst lengths to be utilized by the master interface. It
+ works in conjunction with the GSBUSCFG0[7:1] enables (INCR*BRSTENA).
+
+ If disabled, the AXI master will use only the burst lengths
+ 1, 4, 8, 16 (assuming the INCR*BRSTENA are set to their reset values).
+
+ If enabled, the AXI master uses any length less than or equal to the largest-enabled burst
+ length based on the INCR*BRSTENA fields. */
+ uint32_t incr4brstena : 1; /**< [ 1: 1](R/W) INCR4 burst-type enable. Allows the AXI master to generate INCR four-beat bursts. */
+ uint32_t incr8brstena : 1; /**< [ 2: 2](R/W) INCR8 burst-type enable. Allows the AXI master to generate INCR eight-beat bursts. */
+ uint32_t incr16brstena : 1; /**< [ 3: 3](R/W) INCR16 burst-type enable. Allows the AXI master to generate INCR 16-beat bursts. */
+ uint32_t incr32brstena : 1; /**< [ 4: 4](R/W) INCR32 burst-type enable. Always set to 0. */
+ uint32_t incr64brstena : 1; /**< [ 5: 5](R/W) INCR64 burst-type enable. Always set to 0. */
+ uint32_t incr128brstena : 1; /**< [ 6: 6](R/W) INCR128 burst-type enable. Always set to 0. */
+ uint32_t incr256brstena : 1; /**< [ 7: 7](R/W) INCR256 burst-type enable. Always set to 0. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t descbigend : 1; /**< [ 10: 10](R/W) Descriptor access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBDRD()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t datbigend : 1; /**< [ 11: 11](R/W) Data access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBDRD()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t deswrreqinfo : 4; /**< [ 19: 16](R/W) AXI-cache for descriptor-write operations. Always set to 0x0. */
+ uint32_t datwrreqinfo : 4; /**< [ 23: 20](R/W) AXI-cache for data-write operations. Always set to 0x0. */
+ uint32_t desrdreqinfo : 4; /**< [ 27: 24](R/W) AXI-cache for descriptor-read operations. Always set to 0x0. */
+ uint32_t datrdreqinfo : 4; /**< [ 31: 28](R/W) AXI-cache for data-read operations. Always set to 0x0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gsbuscfg0_s cn8; */
+ struct bdk_usbdrdx_uahc_gsbuscfg0_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t datrdreqinfo : 4; /**< [ 31: 28](R/W) AXI-cache for data-read operations. Always set to 0x0. */
+ uint32_t desrdreqinfo : 4; /**< [ 27: 24](R/W) AXI-cache for descriptor-read operations. Always set to 0x0. */
+ uint32_t datwrreqinfo : 4; /**< [ 23: 20](R/W) AXI-cache for data-write operations. Always set to 0x0. */
+ uint32_t deswrreqinfo : 4; /**< [ 19: 16](R/W) AXI-cache for descriptor-write operations. Always set to 0x0. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t datbigend : 1; /**< [ 11: 11](R/W) Data access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBDRD()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t descbigend : 1; /**< [ 10: 10](R/W) Descriptor access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBDRD()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t incr256brstena : 1; /**< [ 7: 7](R/W) INCR256 burst-type enable. Always set to 0. */
+ uint32_t incr128brstena : 1; /**< [ 6: 6](R/W) INCR128 burst-type enable. Always set to 0. */
+ uint32_t incr64brstena : 1; /**< [ 5: 5](R/W) INCR64 burst-type enable. Always set to 0. */
+ uint32_t incr32brstena : 1; /**< [ 4: 4](R/W) INCR32 burst-type enable. Always set to 0. */
+ uint32_t incr16brstena : 1; /**< [ 3: 3](R/W) INCR16 burst-type enable. Always set to 0. */
+ uint32_t incr8brstena : 1; /**< [ 2: 2](R/W) INCR8 burst-type enable. Allows the AXI master to generate INCR eight-beat bursts. */
+ uint32_t incr4brstena : 1; /**< [ 1: 1](R/W) INCR4 burst-type enable. Allows the AXI master to generate INCR four-beat bursts. */
+ uint32_t incrbrstena : 1; /**< [ 0: 0](R/W) Undefined-length INCR burst-type enable.
+ This bit determines the set of burst lengths to be utilized by the master interface. It
+ works in conjunction with the GSBUSCFG0[7:1] enables (INCR*BRSTENA).
+
+ If disabled, the AXI master will use only the burst lengths
+ 1, 4, 8, 16 (assuming the INCR*BRSTENA are set to their reset values).
+
+ If enabled, the AXI master uses any length less than or equal to the largest-enabled burst
+ length based on the INCR*BRSTENA fields. */
+#else /* Word 0 - Little Endian */
+ uint32_t incrbrstena : 1; /**< [ 0: 0](R/W) Undefined-length INCR burst-type enable.
+ This bit determines the set of burst lengths to be utilized by the master interface. It
+ works in conjunction with the GSBUSCFG0[7:1] enables (INCR*BRSTENA).
+
+ If disabled, the AXI master will use only the burst lengths
+ 1, 4, 8, 16 (assuming the INCR*BRSTENA are set to their reset values).
+
+ If enabled, the AXI master uses any length less than or equal to the largest-enabled burst
+ length based on the INCR*BRSTENA fields. */
+ uint32_t incr4brstena : 1; /**< [ 1: 1](R/W) INCR4 burst-type enable. Allows the AXI master to generate INCR four-beat bursts. */
+ uint32_t incr8brstena : 1; /**< [ 2: 2](R/W) INCR8 burst-type enable. Allows the AXI master to generate INCR eight-beat bursts. */
+ uint32_t incr16brstena : 1; /**< [ 3: 3](R/W) INCR16 burst-type enable. Always set to 0. */
+ uint32_t incr32brstena : 1; /**< [ 4: 4](R/W) INCR32 burst-type enable. Always set to 0. */
+ uint32_t incr64brstena : 1; /**< [ 5: 5](R/W) INCR64 burst-type enable. Always set to 0. */
+ uint32_t incr128brstena : 1; /**< [ 6: 6](R/W) INCR128 burst-type enable. Always set to 0. */
+ uint32_t incr256brstena : 1; /**< [ 7: 7](R/W) INCR256 burst-type enable. Always set to 0. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t descbigend : 1; /**< [ 10: 10](R/W) Descriptor access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBDRD()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t datbigend : 1; /**< [ 11: 11](R/W) Data access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBDRD()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t deswrreqinfo : 4; /**< [ 19: 16](R/W) AXI-cache for descriptor-write operations. Always set to 0x0. */
+ uint32_t datwrreqinfo : 4; /**< [ 23: 20](R/W) AXI-cache for data-write operations. Always set to 0x0. */
+ uint32_t desrdreqinfo : 4; /**< [ 27: 24](R/W) AXI-cache for descriptor-read operations. Always set to 0x0. */
+ uint32_t datrdreqinfo : 4; /**< [ 31: 28](R/W) AXI-cache for data-read operations. Always set to 0x0. */
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_usbdrdx_uahc_gsbuscfg0 bdk_usbdrdx_uahc_gsbuscfg0_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GSBUSCFG0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GSBUSCFG0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c100ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c100ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c100ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GSBUSCFG0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GSBUSCFG0(a) bdk_usbdrdx_uahc_gsbuscfg0_t
+#define bustype_BDK_USBDRDX_UAHC_GSBUSCFG0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GSBUSCFG0(a) "USBDRDX_UAHC_GSBUSCFG0"
+#define device_bar_BDK_USBDRDX_UAHC_GSBUSCFG0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GSBUSCFG0(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GSBUSCFG0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gsbuscfg1
+ *
+ * USB UAHC Bus Configuration Register 1
+ * This register can be used to configure the core after power-on or a change in mode of
+ * operation. This register mainly contains AXI system-related configuration parameters. Do not
+ * change this register after the initial programming. The application must program this register
+ * before starting any transactions on AXI.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.2.
+ */
+union bdk_usbdrdx_uahc_gsbuscfg1
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gsbuscfg1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_13_31 : 19;
+ uint32_t en1kpage : 1; /**< [ 12: 12](R/W) 1K page-boundary enable.
+ 0 = Break transfers at the 4K page boundary (default).
+ 1 = Break transfers at the 1K page boundary. */
+ uint32_t pipetranslimit : 4; /**< [ 11: 8](R/W) AXI pipelined transfers burst-request limit. Controls the number of outstanding pipelined
+ transfers requests the AXI master will push to the AXI slave. Once the AXI master reaches
+ this limit, it does not make more requests on the AXI ARADDR and AWADDR buses until the
+ associated data phases complete. This field is encoded as follows:
+ 0x0 = 1 request. 0x8 = 9 requests.
+ 0x1 = 2 requests. 0x9 = 10 requests.
+ 0x2 = 3 requests. 0xA = 11 requests.
+ 0x3 = 4 requests. 0xB = 12 requests.
+ 0x4 = 5 requests. 0xC = 13 requests.
+ 0x5 = 6 requests. 0xD = 14 requests.
+ 0x6 = 7 requests. 0xE = 15 requests.
+ 0x7 = 8 requests. 0xF = 16 requests. */
+ uint32_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_7 : 8;
+ uint32_t pipetranslimit : 4; /**< [ 11: 8](R/W) AXI pipelined transfers burst-request limit. Controls the number of outstanding pipelined
+ transfers requests the AXI master will push to the AXI slave. Once the AXI master reaches
+ this limit, it does not make more requests on the AXI ARADDR and AWADDR buses until the
+ associated data phases complete. This field is encoded as follows:
+ 0x0 = 1 request. 0x8 = 9 requests.
+ 0x1 = 2 requests. 0x9 = 10 requests.
+ 0x2 = 3 requests. 0xA = 11 requests.
+ 0x3 = 4 requests. 0xB = 12 requests.
+ 0x4 = 5 requests. 0xC = 13 requests.
+ 0x5 = 6 requests. 0xD = 14 requests.
+ 0x6 = 7 requests. 0xE = 15 requests.
+ 0x7 = 8 requests. 0xF = 16 requests. */
+ uint32_t en1kpage : 1; /**< [ 12: 12](R/W) 1K page-boundary enable.
+ 0 = Break transfers at the 4K page boundary (default).
+ 1 = Break transfers at the 1K page boundary. */
+ uint32_t reserved_13_31 : 19;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gsbuscfg1_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gsbuscfg1 bdk_usbdrdx_uahc_gsbuscfg1_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GSBUSCFG1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GSBUSCFG1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c104ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c104ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c104ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GSBUSCFG1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GSBUSCFG1(a) bdk_usbdrdx_uahc_gsbuscfg1_t
+#define bustype_BDK_USBDRDX_UAHC_GSBUSCFG1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GSBUSCFG1(a) "USBDRDX_UAHC_GSBUSCFG1"
+#define device_bar_BDK_USBDRDX_UAHC_GSBUSCFG1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GSBUSCFG1(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GSBUSCFG1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gsts
+ *
+ * USB UAHC Core Status Register
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.7
+ */
+union bdk_usbdrdx_uahc_gsts
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cbelt : 12; /**< [ 31: 20](RO/H) Current BELT value. In host mode, indicates the minimum value of all received device BELT
+ values and the BELT value that is set by the set latency tolerance value command. */
+ uint32_t reserved_8_19 : 12;
+ uint32_t host_ip : 1; /**< [ 7: 7](RO/H) Host interrupt pending. Indicates that there is a pending interrupt pertaining to xHC in
+ the host-event queue. */
+ uint32_t device_ip : 1; /**< [ 6: 6](RO/H) Device interrupt pending. Indicates that there is a pending interrupt pertaining to
+ peripheral (device) operation in the Device event queue. */
+ uint32_t csrtimeout : 1; /**< [ 5: 5](R/W1C/H) CSR timeout. When set to 1, indicates that software performed a write or read operation to
+ a core register that could not be completed within 0xFFFF controller-clock cycles. */
+ uint32_t buserraddrvld : 1; /**< [ 4: 4](R/W1C/H) Bus-error address valid. Indicates that USBDRD()_UAHC_GBUSERRADDR is valid and reports the
+ first bus address that encounters a bus error. */
+ uint32_t reserved_2_3 : 2;
+ uint32_t curmod : 2; /**< [ 1: 0](RO/H) Current mode of operation. 0x0 for device, 0x1 for host. */
+#else /* Word 0 - Little Endian */
+ uint32_t curmod : 2; /**< [ 1: 0](RO/H) Current mode of operation. 0x0 for device, 0x1 for host. */
+ uint32_t reserved_2_3 : 2;
+ uint32_t buserraddrvld : 1; /**< [ 4: 4](R/W1C/H) Bus-error address valid. Indicates that USBDRD()_UAHC_GBUSERRADDR is valid and reports the
+ first bus address that encounters a bus error. */
+ uint32_t csrtimeout : 1; /**< [ 5: 5](R/W1C/H) CSR timeout. When set to 1, indicates that software performed a write or read operation to
+ a core register that could not be completed within 0xFFFF controller-clock cycles. */
+ uint32_t device_ip : 1; /**< [ 6: 6](RO/H) Device interrupt pending. Indicates that there is a pending interrupt pertaining to
+ peripheral (device) operation in the Device event queue. */
+ uint32_t host_ip : 1; /**< [ 7: 7](RO/H) Host interrupt pending. Indicates that there is a pending interrupt pertaining to xHC in
+ the host-event queue. */
+ uint32_t reserved_8_19 : 12;
+ uint32_t cbelt : 12; /**< [ 31: 20](RO/H) Current BELT value. In host mode, indicates the minimum value of all received device BELT
+ values and the BELT value that is set by the set latency tolerance value command. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gsts_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gsts bdk_usbdrdx_uahc_gsts_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GSTS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c118ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c118ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c118ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GSTS(a) bdk_usbdrdx_uahc_gsts_t
+#define bustype_BDK_USBDRDX_UAHC_GSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GSTS(a) "USBDRDX_UAHC_GSTS"
+#define device_bar_BDK_USBDRDX_UAHC_GSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GSTS(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gtxfifopridev
+ *
+ * USB UAHC TxFIFOs DMA Priority Register
+ * This register specifies the relative DMA priority level among the host TxFIFOs (one per USB
+ * bus instance) within the associated speed group (SuperSpeed or high-speed/full-speed/
+ * low-speed). When multiple TxFIFOs compete for DMA service at a given time, the TXDMA arbiter
+ * grants access on a packet-basis in the following manner:
+ *
+ * Among the FIFOs in the same speed group (SuperSpeed or high-speed/full-speed/low-speed):
+ *
+ * * High-priority TxFIFOs are granted access using round-robin arbitration.
+ * * Low-priority TxFIFOs are granted access using round-robin arbitration only after high-
+ * priority
+ * TxFIFOs have no further processing to do (i.e., either the TXQs are empty or the corresponding
+ * TxFIFOs do not have the required data).
+ *
+ * The TX DMA arbiter prioritizes the SuperSpeed group or high-speed/full-speed/low-speed group
+ * according to the ratio programmed in
+ * USBDRD()_UAHC_GDMAHLRATIO.
+ *
+ * For scatter-gather packets, the arbiter grants successive DMA requests to the same FIFO until
+ * the entire packet is completed. The register size corresponds to the number of configured USB
+ * bus instances; for example, in the default configuration, there are three USB bus instances (one
+ * SuperSpeed, one high-speed, and one full-speed/low-speed).
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.59
+ */
+union bdk_usbdrdx_uahc_gtxfifopridev
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gtxfifopridev_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t tx_priority : 3; /**< [ 2: 0](R/W) Each register bit n controls the priority (1 = high, 0 = low) of TxFIFO\<n\> within a speed
+ group. */
+#else /* Word 0 - Little Endian */
+ uint32_t tx_priority : 3; /**< [ 2: 0](R/W) Each register bit n controls the priority (1 = high, 0 = low) of TxFIFO\<n\> within a speed
+ group. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gtxfifopridev_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gtxfifopridev bdk_usbdrdx_uahc_gtxfifopridev_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GTXFIFOPRIDEV(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GTXFIFOPRIDEV(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c610ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c610ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c610ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GTXFIFOPRIDEV", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GTXFIFOPRIDEV(a) bdk_usbdrdx_uahc_gtxfifopridev_t
+#define bustype_BDK_USBDRDX_UAHC_GTXFIFOPRIDEV(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GTXFIFOPRIDEV(a) "USBDRDX_UAHC_GTXFIFOPRIDEV"
+#define device_bar_BDK_USBDRDX_UAHC_GTXFIFOPRIDEV(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GTXFIFOPRIDEV(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GTXFIFOPRIDEV(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gtxfifoprihst
+ *
+ * USB UAHC Global Host TxFIFO DMA Priority Register
+ * This register specifies the relative DMA priority level among the Host TxFIFOs (one per USB
+ * bus instance) within the associated speed group (SuperSpeed or HighSpeed/FullSpeed/LowSpeed).
+ * When multiple TxFIFOs compete for DMA service at a given time, the TXDMA arbiter grants access
+ * on a packet-basis in the following manner:
+ *
+ * 1. Among the FIFOs in the same speed group (SuperSpeed or high-speed/full-speed/low-speed):
+ *
+ * _ a. High-priority TxFIFOs are granted access using round-robin arbitration
+ *
+ * _ b. Low-priority TxFIFOs are granted access using round-robin arbitration only after the
+ * high priority TxFIFOs have no further processing to do (i.e., either the TXQs are empty
+ *       or the corresponding TxFIFOs are full).
+ *
+ * 2. The TX DMA arbiter prioritizes the SuperSpeed group or high-speed/full-speed/low-speed
+ * group according to the ratio programmed in USBDRD()_UAHC_GDMAHLRATIO.
+ *
+ * For scatter-gather packets, the arbiter grants successive DMA requests to the same FIFO until
+ * the entire packet is completed.
+ * The register size corresponds to the number of configured USB bus instances; for example, in
+ * the default configuration, there are three USB bus instances (one SuperSpeed, one
+ * high-speed, and one full-speed/low-speed).
+ *
+ * This register can be reset by IOI reset or with USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.60
+ */
+union bdk_usbdrdx_uahc_gtxfifoprihst
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gtxfifoprihst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t tx_priority : 3; /**< [ 2: 0](R/W) Each register bit n controls the priority (1 = high, 0 = low) of TxFIFO\<n\> within a speed
+ group. */
+#else /* Word 0 - Little Endian */
+ uint32_t tx_priority : 3; /**< [ 2: 0](R/W) Each register bit n controls the priority (1 = high, 0 = low) of TxFIFO\<n\> within a speed
+ group. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gtxfifoprihst_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gtxfifoprihst bdk_usbdrdx_uahc_gtxfifoprihst_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GTXFIFOPRIHST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GTXFIFOPRIHST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c618ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c618ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c618ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_GTXFIFOPRIHST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GTXFIFOPRIHST(a) bdk_usbdrdx_uahc_gtxfifoprihst_t
+#define bustype_BDK_USBDRDX_UAHC_GTXFIFOPRIHST(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GTXFIFOPRIHST(a) "USBDRDX_UAHC_GTXFIFOPRIHST"
+#define device_bar_BDK_USBDRDX_UAHC_GTXFIFOPRIHST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GTXFIFOPRIHST(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GTXFIFOPRIHST(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gtxfifosiz#
+ *
+ * USB UAHC TX FIFO Size Registers
+ * This register holds the internal RAM start address/depth of each TxFIFO implemented. Unless
+ * packet size/buffer size for each endpoint is different and application-specific, it is
+ * recommended that the software use the default value. One register per FIFO.
+ *
+ * Reset values = 0:{0x0000_0082} 1:{0x0082_0103} 2:{0x0185_0205}.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.6.1.
+ * INTERNAL: For more information, refer to the BMU section in Block Descriptions on Synopsys
+ * Databook page 238.
+ */
+union bdk_usbdrdx_uahc_gtxfifosizx
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gtxfifosizx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t txfstaddr : 16; /**< [ 31: 16](R/W) Transmit FIFOn RAM start address. Contains the memory start address for TxFIFOn. The reset
+ value is derived from configuration parameters. */
+ uint32_t txfdep : 16; /**< [ 15: 0](R/W) TxFIFOn depth. This value is in terms of TX RAM data width.
+ minimum value = 0x20, maximum value = 0x8000.
+
+ Internal:
+ For more information, see the Hardware Integration chapter of the Synopsys
+ Databook.
+ The reset value derived from configuration parameters. */
+#else /* Word 0 - Little Endian */
+ uint32_t txfdep : 16; /**< [ 15: 0](R/W) TxFIFOn depth. This value is in terms of TX RAM data width.
+ minimum value = 0x20, maximum value = 0x8000.
+
+ Internal:
+ For more information, see the Hardware Integration chapter of the Synopsys
+ Databook.
+ The reset value derived from configuration parameters. */
+ uint32_t txfstaddr : 16; /**< [ 31: 16](R/W) Transmit FIFOn RAM start address. Contains the memory start address for TxFIFOn. The reset
+ value is derived from configuration parameters. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gtxfifosizx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gtxfifosizx bdk_usbdrdx_uahc_gtxfifosizx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GTXFIFOSIZX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GTXFIFOSIZX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=3)))
+ return 0x86800000c300ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=3)))
+ return 0x86800000c300ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x3);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=3)))
+ return 0x86800000c300ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x3);
+ __bdk_csr_fatal("USBDRDX_UAHC_GTXFIFOSIZX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_GTXFIFOSIZX(a,b) bdk_usbdrdx_uahc_gtxfifosizx_t
+#define bustype_BDK_USBDRDX_UAHC_GTXFIFOSIZX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GTXFIFOSIZX(a,b) "USBDRDX_UAHC_GTXFIFOSIZX"
+#define device_bar_BDK_USBDRDX_UAHC_GTXFIFOSIZX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GTXFIFOSIZX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_GTXFIFOSIZX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gtxthrcfg
+ *
+ * USB UAHC TX Threshold Control Register
+ * In a normal case, a TX burst starts as soon as one packet is prefetched. This works well as
+ * long as the system bus is faster than the USB3.0 bus (a 1024-bytes packet takes ~2.2 us on the
+ * USB bus in SuperSpeed mode). If the system bus latency is larger than 2.2 us to access a
+ * 1024-byte packet, then starting a burst on 1-packet condition leads to an early abort of the
+ * burst causing unnecessary performance reduction. This register allows the configuration of
+ * threshold and burst size control. This feature is enabled by [USBTXPKTCNTSEL].
+ *
+ * Transmit path:
+ * * The TX threshold is controlled by [USBTXPKTCNT], and the TX burst size is controlled by
+ * [USBMAXTXBURSTSIZE].
+ * * Selecting optimal TxFIFO size, TX threshold, and TX burst size avoids TX burst aborts due
+ * to an underrun if the system bus is slower than USB. Once in a while an underrun is OK, and
+ * there is no functional issue.
+ * * A larger threshold affects the performance, since the scheduler is idle during this time.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.3
+ */
+union bdk_usbdrdx_uahc_gtxthrcfg
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gtxthrcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t usbtxpktcntsel : 1; /**< [ 29: 29](R/W) USB transmit packet-count enable. Enables/disables the USB transmission multipacket
+ thresholding:
+ 0 = USB transmission multipacket thresholding is disabled, the core can only start
+ transmission on the USB after the entire packet has been fetched into the corresponding
+ TxFIFO.
+ 1 = USB transmission multipacket thresholding is enabled. The core can only start
+ transmission on the USB after USBTXPKTCNT amount of packets for the USB transaction
+ (burst) are already in the corresponding TxFIFO.
+ This mode is only used for SuperSpeed. */
+ uint32_t reserved_28 : 1;
+ uint32_t usbtxpktcnt : 4; /**< [ 27: 24](R/W) USB transmit-packet count. Specifies the number of packets that must be in the TxFIFO
+ before the core can start transmission for the corresponding USB transaction (burst). This
+ field is only valid when [USBTXPKTCNTSEL] = 1. Valid values are from 0x1 to 0xF.
+ This field must be \<= [USBMAXTXBURSTSIZE]. */
+ uint32_t usbmaxtxburstsize : 8; /**< [ 23: 16](R/W) USB maximum TX burst size. When [USBTXPKTCNTSEL] = 1, this field specifies the
+ maximum bulk OUT burst the core should do. When the system bus is slower than
+ the USB, TxFIFO can underrun during a long burst. Program a smaller value to
+ this field to limit the TX burst size that the core can do. It only applies to
+ SuperSpeed bulk, isochronous, and interrupt OUT endpoints in the host
+ mode. Valid values are from 0x1 to 0x10. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t usbmaxtxburstsize : 8; /**< [ 23: 16](R/W) USB maximum TX burst size. When [USBTXPKTCNTSEL] = 1, this field specifies the
+ maximum bulk OUT burst the core should do. When the system bus is slower than
+ the USB, TxFIFO can underrun during a long burst. Program a smaller value to
+ this field to limit the TX burst size that the core can do. It only applies to
+ SuperSpeed bulk, isochronous, and interrupt OUT endpoints in the host
+ mode. Valid values are from 0x1 to 0x10. */
+ uint32_t usbtxpktcnt : 4; /**< [ 27: 24](R/W) USB transmit-packet count. Specifies the number of packets that must be in the TxFIFO
+ before the core can start transmission for the corresponding USB transaction (burst). This
+ field is only valid when [USBTXPKTCNTSEL] = 1. Valid values are from 0x1 to 0xF.
+ This field must be \<= [USBMAXTXBURSTSIZE]. */
+ uint32_t reserved_28 : 1;
+ uint32_t usbtxpktcntsel : 1; /**< [ 29: 29](R/W) USB transmit packet-count enable. Enables/disables the USB transmission multipacket
+ thresholding:
+ 0 = USB transmission multipacket thresholding is disabled, the core can only start
+ transmission on the USB after the entire packet has been fetched into the corresponding
+ TxFIFO.
+ 1 = USB transmission multipacket thresholding is enabled. The core can only start
+ transmission on the USB after USBTXPKTCNT amount of packets for the USB transaction
+ (burst) are already in the corresponding TxFIFO.
+ This mode is only used for SuperSpeed. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbdrdx_uahc_gtxthrcfg_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t usbtxpktcntsel : 1; /**< [ 29: 29](R/W) USB transmit packet-count enable. Enables/disables the USB transmission multipacket
+ thresholding:
+ 0 = USB transmission multipacket thresholding is disabled, the core can only start
+ transmission on the USB after the entire packet has been fetched into the corresponding
+ TxFIFO.
+ 1 = USB transmission multipacket thresholding is enabled. The core can only start
+ transmission on the USB after USBTXPKTCNT amount of packets for the USB transaction
+ (burst) are already in the corresponding TxFIFO.
+ This mode is only used for SuperSpeed. */
+ uint32_t reserved_28 : 1;
+ uint32_t usbtxpktcnt : 4; /**< [ 27: 24](R/W) USB transmit-packet count. Specifies the number of packets that must be in the TxFIFO
+ before the core can start transmission for the corresponding USB transaction (burst). This
+ field is only valid when [USBTXPKTCNTSEL] = 1. Valid values are from 0x1 to 0xF.
+ This field must be \<= [USBMAXTXBURSTSIZE]. */
+ uint32_t usbmaxtxburstsize : 8; /**< [ 23: 16](R/W) USB maximum TX burst size. When [USBTXPKTCNTSEL] = 1, this field specifies the
+ maximum bulk OUT burst the core should do. When the system bus is slower than
+ the USB, TxFIFO can underrun during a long burst. Program a smaller value to
+ this field to limit the TX burst size that the core can do. It only applies to
+ SuperSpeed bulk, isochronous, and interrupt OUT endpoints in the host
+ mode. Valid values are from 0x1 to 0x10. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t reserved_10_13 : 4;
+ uint32_t reserved_0_9 : 10;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_9 : 10;
+ uint32_t reserved_10_13 : 4;
+ uint32_t reserved_14_15 : 2;
+ uint32_t usbmaxtxburstsize : 8; /**< [ 23: 16](R/W) USB maximum TX burst size. When [USBTXPKTCNTSEL] = 1, this field specifies the
+ maximum bulk OUT burst the core should do. When the system bus is slower than
+ the USB, TxFIFO can underrun during a long burst. Program a smaller value to
+ this field to limit the TX burst size that the core can do. It only applies to
+ SuperSpeed bulk, isochronous, and interrupt OUT endpoints in the host
+ mode. Valid values are from 0x1 to 0x10. */
+ uint32_t usbtxpktcnt : 4; /**< [ 27: 24](R/W) USB transmit-packet count. Specifies the number of packets that must be in the TxFIFO
+ before the core can start transmission for the corresponding USB transaction (burst). This
+ field is only valid when [USBTXPKTCNTSEL] = 1. Valid values are from 0x1 to 0xF.
+ This field must be \<= [USBMAXTXBURSTSIZE]. */
+ uint32_t reserved_28 : 1;
+ uint32_t usbtxpktcntsel : 1; /**< [ 29: 29](R/W) USB transmit packet-count enable. Enables/disables the USB transmission multipacket
+ thresholding:
+ 0 = USB transmission multipacket thresholding is disabled, the core can only start
+ transmission on the USB after the entire packet has been fetched into the corresponding
+ TxFIFO.
+ 1 = USB transmission multipacket thresholding is enabled. The core can only start
+ transmission on the USB after USBTXPKTCNT amount of packets for the USB transaction
+ (burst) are already in the corresponding TxFIFO.
+ This mode is only used for SuperSpeed. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_usbdrdx_uahc_gtxthrcfg bdk_usbdrdx_uahc_gtxthrcfg_t;
+
+/* Return the physical address of USBDRD(a)_UAHC_GTXTHRCFG.
+ * Valid for controller index a <= 1 on CN81XX, CN83XX and CN9XXX; the
+ * per-controller stride is 0x1000000000. */
+static inline uint64_t BDK_USBDRDX_UAHC_GTXTHRCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GTXTHRCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c108ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c108ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c108ll + 0x1000000000ll * ((a) & 0x1);
+ /* Unknown model or out-of-range index: report a fatal CSR lookup error. */
+ __bdk_csr_fatal("USBDRDX_UAHC_GTXTHRCFG", 1, a, 0, 0, 0);
+}
+
+/* Register metadata (C type, bus type, name, BAR, bus number, index arguments). */
+#define typedef_BDK_USBDRDX_UAHC_GTXTHRCFG(a) bdk_usbdrdx_uahc_gtxthrcfg_t
+#define bustype_BDK_USBDRDX_UAHC_GTXTHRCFG(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GTXTHRCFG(a) "USBDRDX_UAHC_GTXTHRCFG"
+#define device_bar_BDK_USBDRDX_UAHC_GTXTHRCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GTXTHRCFG(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GTXTHRCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_guctl
+ *
+ * USB UAHC Core User-Control Register
+ * This register provides a few options for the software to control the core behavior in the host
+ * mode. Most of the options are used to improve host interoperability with different devices.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.12
+ */
+union bdk_usbdrdx_uahc_guctl
+{
+ uint32_t u; /* Whole-register (32-bit) access */
+ struct bdk_usbdrdx_uahc_guctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t refclkper : 10; /**< [ 31: 22](R/W) Reference-clock period. Indicates (in terms of ns) the period of REF_CLK. The default
+ value is set to 0x8
+ (8 ns/125 MHz). This field must be updated during power on initialization if
+ USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1 or USBDRD()_UAHC_GFLADJ [GFLADJ_REFCLK_LPM_SEL] = 1.
+ The
+ programmable maximum value 62 ns, and the minimum value is 8 ns. You use a reference clock
+ with a period that is a integer multiple, so that ITP can meet the jitter margin of 32 ns.
+ The allowable REF_CLK frequencies whose period is not integer multiples are
+ 16/17/19.2/24/39.7 MHz.
+
+ This field should not be set to 0x0 at any time. If you do not plan to use this feature,
+ then you need to set this field to 0x8, the default value. */
+ uint32_t noextrdl : 1; /**< [ 21: 21](R/W) No extra delay between SOF and the first packet.
+ Some high-speed devices misbehave when the host sends a packet immediately after an SOF.
+ However, adding an extra delay between an SOF and the first packet can reduce the USB data
+ rate and performance.
+
+ This bit is used to control whether the host should wait for 2 us before it sends the
+ first packet after a SOF, or not. You can set this bit to 1 to improve the performance if
+ those problematic devices are not a concern in your host environment.
+ 0 = host waits for 2 us after an SOF before it sends the first USB packet.
+ 1 = host does not wait after an SOF before it sends the first USB packet. */
+ uint32_t psqextrressp : 3; /**< [ 20: 18](R/W) PSQ extra reserved space. This is a debug feature, and is not intended for normal usage.
+ This parameter specifies how much additional space in the PSQ (protocol-status queue) must
+ be reserved before the U3PTL initiates a new USB transaction and burst beats. */
+ uint32_t sprsctrltransen : 1; /**< [ 17: 17](R/W) Sparse control transaction enable. Some devices are slow in responding to control
+ transfers. Scheduling multiple transactions in one microframe/frame can cause these
+ devices to misbehave. If this bit is set to 1, the host controller schedules transactions
+ for a control transfer in different microframes/frames. */
+ uint32_t resbwhseps : 1; /**< [ 16: 16](R/W) Reserving 85% bandwidth for high-speed periodic EPs. By default, host controller reserves
+ 80% of the bandwidth for periodic EPs. If this bit is set, the bandwidth is relaxed to 85%
+ to accommodate two high-speed, high-bandwidth ISOC EPs.
+
+ USB 2.0 required 80% bandwidth allocated for ISOC traffic. If two high bandwidth ISOC
+ devices (HD webcams) are connected, and if each requires 1024-bytes * 3 packets per
+ microframe, then the bandwidth required is around 82%. If this bit is set to 1, it is
+ possible to connect two webcams of 1024 bytes * 3 payload per microframe each. Otherwise,
+ you may have to reduce the resolution of the webcams. */
+ uint32_t cmdevaddr : 1; /**< [ 15: 15](R/W) Compliance mode for device address. When set to 1, slot ID can have different value than
+ device address if max_slot_enabled \< 128.
+ 0 = Device address is equal to slot ID.
+ 1 = Increment device address on each address device command.
+
+ The xHCI compliance requires this bit to be set to 1. The 0 mode is for debug purpose
+ only. This allows you to easily identify a device connected to a port in the Lecroy or
+ Eliisys trace during hardware debug.
+
+ This bit is used in host mode only. */
+ uint32_t usbdrdstinautoretryen : 1; /**< [ 14: 14](R/W) Host IN auto-retry enable. When set, this field enables the auto-retry feature. For IN
+ transfers (non-isochronous) that encounter data packets with CRC errors or internal
+ overrun scenarios, the auto-retry feature causes the host core to reply to the device with
+ a non-terminating retry ACK (i.e. an ACK transaction packet with Retry = 1 and NumP != 0).
+ If the auto-retry feature is disabled (default), the core responds with a terminating
+ retry ACK (i.e. an ACK transaction packet with Retry = 1 and NumP = 0). */
+ uint32_t enoverlapchk : 1; /**< [ 13: 13](R/W) Enable check for LFPS overlap during remote Ux Exit. If this bit is set to:
+ 0 = When the link exists U1/U2/U3 because of a remote exit, it does not look for an LFPS
+ overlap.
+ 1 = The SuperSpeed link, when exiting U1/U2/U3, waits for either the remote link LFPS or
+ TS1/TS2 training symbols before it confirms that the LFPS handshake is complete. This is
+ done to handle the case where the LFPS glitch causes the link to start exiting from the
+ low power state. Looking for the LFPS overlap makes sure that the link partner also sees
+ the LFPS. */
+ uint32_t extcapsupten : 1; /**< [ 12: 12](R/W) External extended capability support enable. If disabled, a read
+ USBDRD()_UAHC_SUPTPRT3_DW0
+ [NEXTCAPPTR] returns 0 in the next capability pointer field. This indicates there are no
+ more capabilities. If enabled, a read to USBDRD()_UAHC_SUPTPRT3_DW0[NEXTCAPPTR] returns 4
+ in
+ the
+ next capability pointer field.
+ Always set to 0x0. */
+ uint32_t insrtextrfsbodi : 1; /**< [ 11: 11](R/W) Insert extra delay between full-speed bulk OUT transactions. Some full-speed devices are
+ slow to receive bulk OUT data and can get stuck when there are consecutive bulk OUT
+ transactions with short inter-transaction delays. This bit is used to control whether the
+ host inserts extra delay between consecutive bulk OUT transactions to a full-speed
+ endpoint.
+ 0 = Host does not insert extra delay.
+ Setting this bit to 1 reduces the bulk OUT transfer performance for most of the full-speed
+ devices.
+ 1 = Host inserts about 12 us extra delay between consecutive bulk OUT transactions to an
+ full-speed endpoint to work around the device issue. */
+ uint32_t dtct : 2; /**< [ 10: 9](R/W) Device timeout coarse tuning. This field determines how long the host waits for a response
+ from device before considering a timeout.
+ The core first checks the [DTCT] value. If it is 0, then the timeout value is defined by
+ the
+ [DTFT]. If it is nonzero, then it uses the following timeout values:
+ 0x0 = 0 us; use [DTFT] value instead.
+ 0x1 = 500 us.
+ 0x2 = 1.5 ms.
+ 0x3 = 6.5 ms. */
+ uint32_t dtft : 9; /**< [ 8: 0](R/W) Device timeout fine tuning. This field determines how long the host waits for a response
+ from a device before considering a timeout. For [DTFT] to take effect, [DTCT] must be set
+ to
+ 0x0.
+ The [DTFT] value specifies the number of 125 MHz clock cycles * 256 to count before
+ considering a device timeout. For the 125 MHz clock cycles (8 ns period), this is
+ calculated as follows:
+ _ [DTFT value] * 256 * 8 (ns)
+ 0x2 = 2 * 256 * 8 -\> 4 us.
+ 0x5 = 5 * 256 * 8 -\> 10 us.
+ 0xA = 10 * 256 * 8 -\> 20 us.
+ 0x10 = 16 * 256 * 8 -\> 32 us.
+ 0x19 = 25 * 256 * 8 -\> 51 us.
+ 0x31 = 49 * 256 * 8 -\> 100 us.
+ 0x62 = 98 * 256 * 8 -\> 200 us. */
+#else /* Word 0 - Little Endian */
+ uint32_t dtft : 9; /**< [ 8: 0](R/W) Device timeout fine tuning. This field determines how long the host waits for a response
+ from a device before considering a timeout. For [DTFT] to take effect, [DTCT] must be set
+ to
+ 0x0.
+ The [DTFT] value specifies the number of 125 MHz clock cycles * 256 to count before
+ considering a device timeout. For the 125 MHz clock cycles (8 ns period), this is
+ calculated as follows:
+ _ [DTFT value] * 256 * 8 (ns)
+ 0x2 = 2 * 256 * 8 -\> 4 us.
+ 0x5 = 5 * 256 * 8 -\> 10 us.
+ 0xA = 10 * 256 * 8 -\> 20 us.
+ 0x10 = 16 * 256 * 8 -\> 32 us.
+ 0x19 = 25 * 256 * 8 -\> 51 us.
+ 0x31 = 49 * 256 * 8 -\> 100 us.
+ 0x62 = 98 * 256 * 8 -\> 200 us. */
+ uint32_t dtct : 2; /**< [ 10: 9](R/W) Device timeout coarse tuning. This field determines how long the host waits for a response
+ from device before considering a timeout.
+ The core first checks the [DTCT] value. If it is 0, then the timeout value is defined by
+ the
+ [DTFT]. If it is nonzero, then it uses the following timeout values:
+ 0x0 = 0 us; use [DTFT] value instead.
+ 0x1 = 500 us.
+ 0x2 = 1.5 ms.
+ 0x3 = 6.5 ms. */
+ uint32_t insrtextrfsbodi : 1; /**< [ 11: 11](R/W) Insert extra delay between full-speed bulk OUT transactions. Some full-speed devices are
+ slow to receive bulk OUT data and can get stuck when there are consecutive bulk OUT
+ transactions with short inter-transaction delays. This bit is used to control whether the
+ host inserts extra delay between consecutive bulk OUT transactions to a full-speed
+ endpoint.
+ 0 = Host does not insert extra delay.
+ Setting this bit to 1 reduces the bulk OUT transfer performance for most of the full-speed
+ devices.
+ 1 = Host inserts about 12 us extra delay between consecutive bulk OUT transactions to an
+ full-speed endpoint to work around the device issue. */
+ uint32_t extcapsupten : 1; /**< [ 12: 12](R/W) External extended capability support enable. If disabled, a read
+ USBDRD()_UAHC_SUPTPRT3_DW0
+ [NEXTCAPPTR] returns 0 in the next capability pointer field. This indicates there are no
+ more capabilities. If enabled, a read to USBDRD()_UAHC_SUPTPRT3_DW0[NEXTCAPPTR] returns 4
+ in
+ the
+ next capability pointer field.
+ Always set to 0x0. */
+ uint32_t enoverlapchk : 1; /**< [ 13: 13](R/W) Enable check for LFPS overlap during remote Ux Exit. If this bit is set to:
+ 0 = When the link exists U1/U2/U3 because of a remote exit, it does not look for an LFPS
+ overlap.
+ 1 = The SuperSpeed link, when exiting U1/U2/U3, waits for either the remote link LFPS or
+ TS1/TS2 training symbols before it confirms that the LFPS handshake is complete. This is
+ done to handle the case where the LFPS glitch causes the link to start exiting from the
+ low power state. Looking for the LFPS overlap makes sure that the link partner also sees
+ the LFPS. */
+ uint32_t usbdrdstinautoretryen : 1; /**< [ 14: 14](R/W) Host IN auto-retry enable. When set, this field enables the auto-retry feature. For IN
+ transfers (non-isochronous) that encounter data packets with CRC errors or internal
+ overrun scenarios, the auto-retry feature causes the host core to reply to the device with
+ a non-terminating retry ACK (i.e. an ACK transaction packet with Retry = 1 and NumP != 0).
+ If the auto-retry feature is disabled (default), the core responds with a terminating
+ retry ACK (i.e. an ACK transaction packet with Retry = 1 and NumP = 0). */
+ uint32_t cmdevaddr : 1; /**< [ 15: 15](R/W) Compliance mode for device address. When set to 1, slot ID can have different value than
+ device address if max_slot_enabled \< 128.
+ 0 = Device address is equal to slot ID.
+ 1 = Increment device address on each address device command.
+
+ The xHCI compliance requires this bit to be set to 1. The 0 mode is for debug purpose
+ only. This allows you to easily identify a device connected to a port in the Lecroy or
+ Eliisys trace during hardware debug.
+
+ This bit is used in host mode only. */
+ uint32_t resbwhseps : 1; /**< [ 16: 16](R/W) Reserving 85% bandwidth for high-speed periodic EPs. By default, host controller reserves
+ 80% of the bandwidth for periodic EPs. If this bit is set, the bandwidth is relaxed to 85%
+ to accommodate two high-speed, high-bandwidth ISOC EPs.
+
+ USB 2.0 required 80% bandwidth allocated for ISOC traffic. If two high bandwidth ISOC
+ devices (HD webcams) are connected, and if each requires 1024-bytes * 3 packets per
+ microframe, then the bandwidth required is around 82%. If this bit is set to 1, it is
+ possible to connect two webcams of 1024 bytes * 3 payload per microframe each. Otherwise,
+ you may have to reduce the resolution of the webcams. */
+ uint32_t sprsctrltransen : 1; /**< [ 17: 17](R/W) Sparse control transaction enable. Some devices are slow in responding to control
+ transfers. Scheduling multiple transactions in one microframe/frame can cause these
+ devices to misbehave. If this bit is set to 1, the host controller schedules transactions
+ for a control transfer in different microframes/frames. */
+ uint32_t psqextrressp : 3; /**< [ 20: 18](R/W) PSQ extra reserved space. This is a debug feature, and is not intended for normal usage.
+ This parameter specifies how much additional space in the PSQ (protocol-status queue) must
+ be reserved before the U3PTL initiates a new USB transaction and burst beats. */
+ uint32_t noextrdl : 1; /**< [ 21: 21](R/W) No extra delay between SOF and the first packet.
+ Some high-speed devices misbehave when the host sends a packet immediately after an SOF.
+ However, adding an extra delay between an SOF and the first packet can reduce the USB data
+ rate and performance.
+
+ This bit is used to control whether the host should wait for 2 us before it sends the
+ first packet after a SOF, or not. You can set this bit to 1 to improve the performance if
+ those problematic devices are not a concern in your host environment.
+ 0 = host waits for 2 us after an SOF before it sends the first USB packet.
+ 1 = host does not wait after an SOF before it sends the first USB packet. */
+ uint32_t refclkper : 10; /**< [ 31: 22](R/W) Reference-clock period. Indicates (in terms of ns) the period of REF_CLK. The default
+ value is set to 0x8
+ (8 ns/125 MHz). This field must be updated during power on initialization if
+ USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1 or USBDRD()_UAHC_GFLADJ [GFLADJ_REFCLK_LPM_SEL] = 1.
+ The
+ programmable maximum value 62 ns, and the minimum value is 8 ns. You use a reference clock
+ with a period that is a integer multiple, so that ITP can meet the jitter margin of 32 ns.
+ The allowable REF_CLK frequencies whose period is not integer multiples are
+ 16/17/19.2/24/39.7 MHz.
+
+ This field should not be set to 0x0 at any time. If you do not plan to use this feature,
+ then you need to set this field to 0x8, the default value. */
+#endif /* Word 0 - End */
+ } s; /* Per-field view; same layout is used for every supported chip model */
+ /* struct bdk_usbdrdx_uahc_guctl_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_guctl bdk_usbdrdx_uahc_guctl_t;
+
+/* Return the physical address of USBDRD(a)_UAHC_GUCTL.
+ * Valid for controller index a <= 1 on CN81XX, CN83XX and CN9XXX; the
+ * per-controller stride is 0x1000000000. */
+static inline uint64_t BDK_USBDRDX_UAHC_GUCTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GUCTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c12cll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c12cll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c12cll + 0x1000000000ll * ((a) & 0x1);
+ /* Unknown model or out-of-range index: report a fatal CSR lookup error. */
+ __bdk_csr_fatal("USBDRDX_UAHC_GUCTL", 1, a, 0, 0, 0);
+}
+
+/* Register metadata (C type, bus type, name, BAR, bus number, index arguments). */
+#define typedef_BDK_USBDRDX_UAHC_GUCTL(a) bdk_usbdrdx_uahc_guctl_t
+#define bustype_BDK_USBDRDX_UAHC_GUCTL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GUCTL(a) "USBDRDX_UAHC_GUCTL"
+#define device_bar_BDK_USBDRDX_UAHC_GUCTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GUCTL(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GUCTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_guctl1
+ *
+ * USB UAHC Global User Control Register 1
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.8
+ */
+union bdk_usbdrdx_uahc_guctl1
+{
+ uint32_t u; /* Whole-register (32-bit) access */
+ struct bdk_usbdrdx_uahc_guctl1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_18_31 : 14;
+ uint32_t parkmode_disable_ss : 1; /**< [ 17: 17](R/W) This bit is for debug purpose only.
+ When this bit is set to 1 all SS bus instances in park mode are
+ disabled. */
+ uint32_t parkmode_disable_hs : 1; /**< [ 16: 16](R/W) When this bit is set to 1 all HS bus instances park mode are
+ disabled. */
+ uint32_t parkmode_disable_fsls : 1; /**< [ 15: 15](R/W) When this bit is set to 1 all FS/LS bus instances park mode are
+ disabled. */
+ uint32_t reserved_9_14 : 6;
+ uint32_t l1_susp_thrld_en_for_host : 1;/**< [ 8: 8](R/W) The controller puts the PHY into deep low-power mode in L1 when both of the
+ following are true:
+
+ * The HIRD/BESL value used is greater than or equal to the
+ value in [L1_SUSP_THRLD_FOR_HOST].
+ * The [L1_SUSP_THRLD_EN_FOR_HOST] bit is set.
+
+ The controller the UTMI PHY transitions to shallow low-power
+ mode in L1 by powering down necessary blocks when one of the
+ following is true:
+
+ * The HIRD/BESL value used is less than the value in
+ [L1_SUSP_THRLD_FOR_HOST].
+ * [L1_SUSP_THRLD_EN_FOR_HOST] is clear. */
+ uint32_t l1_susp_thrld_for_host : 4; /**< [ 7: 4](R/W) This field is effective only when the [L1_SUSP_THRLD_EN_FOR_HOST] is set to 1. */
+ uint32_t helden : 1; /**< [ 3: 3](R/W) When this bit is set to 1, it enables the exit latency delta (ELD)
+ support defined in the xHCI 1.0 Errata. */
+ uint32_t hparchkdisable : 1; /**< [ 2: 2](R/W) When this bit is set to 0 (by default), the xHC checks that the input
+ slot/EP context fields comply to the xHCI Specification. Upon
+ detection of a parameter error during command execution, the
+ xHC generates an event TRB with completion code indicating
+ 'PARAMETER ERROR'.
+ When the bit is set to 1, the xHC does not perform parameter
+ checks and does not generate 'PARAMETER ERROR' completion
+ code. */
+ uint32_t ovrld_l1_susp_com : 1; /**< [ 1: 1](R/W) Always set to 0. */
+ uint32_t loa_filter_en : 1; /**< [ 0: 0](R/W) If this bit is set, the USB 2.0 port babble is checked at least three consecutive times
+ before the port is disabled. This prevents false triggering of the babble condition when
+ using low quality cables. */
+#else /* Word 0 - Little Endian */
+ uint32_t loa_filter_en : 1; /**< [ 0: 0](R/W) If this bit is set, the USB 2.0 port babble is checked at least three consecutive times
+ before the port is disabled. This prevents false triggering of the babble condition when
+ using low quality cables. */
+ uint32_t ovrld_l1_susp_com : 1; /**< [ 1: 1](R/W) Always set to 0. */
+ uint32_t hparchkdisable : 1; /**< [ 2: 2](R/W) When this bit is set to 0 (by default), the xHC checks that the input
+ slot/EP context fields comply to the xHCI Specification. Upon
+ detection of a parameter error during command execution, the
+ xHC generates an event TRB with completion code indicating
+ 'PARAMETER ERROR'.
+ When the bit is set to 1, the xHC does not perform parameter
+ checks and does not generate 'PARAMETER ERROR' completion
+ code. */
+ uint32_t helden : 1; /**< [ 3: 3](R/W) When this bit is set to 1, it enables the exit latency delta (ELD)
+ support defined in the xHCI 1.0 Errata. */
+ uint32_t l1_susp_thrld_for_host : 4; /**< [ 7: 4](R/W) This field is effective only when the [L1_SUSP_THRLD_EN_FOR_HOST] is set to 1. */
+ uint32_t l1_susp_thrld_en_for_host : 1;/**< [ 8: 8](R/W) The controller puts the PHY into deep low-power mode in L1 when both of the
+ following are true:
+
+ * The HIRD/BESL value used is greater than or equal to the
+ value in [L1_SUSP_THRLD_FOR_HOST].
+ * The [L1_SUSP_THRLD_EN_FOR_HOST] bit is set.
+
+ The controller the UTMI PHY transitions to shallow low-power
+ mode in L1 by powering down necessary blocks when one of the
+ following is true:
+
+ * The HIRD/BESL value used is less than the value in
+ [L1_SUSP_THRLD_FOR_HOST].
+ * [L1_SUSP_THRLD_EN_FOR_HOST] is clear. */
+ uint32_t reserved_9_14 : 6;
+ uint32_t parkmode_disable_fsls : 1; /**< [ 15: 15](R/W) When this bit is set to 1 all FS/LS bus instances park mode are
+ disabled. */
+ uint32_t parkmode_disable_hs : 1; /**< [ 16: 16](R/W) When this bit is set to 1 all HS bus instances park mode are
+ disabled. */
+ uint32_t parkmode_disable_ss : 1; /**< [ 17: 17](R/W) This bit is for debug purpose only.
+ When this bit is set to 1 all SS bus instances in park mode are
+ disabled. */
+ uint32_t reserved_18_31 : 14;
+#endif /* Word 0 - End */
+ } s; /* Per-field view; same layout is used for every supported chip model */
+ /* struct bdk_usbdrdx_uahc_guctl1_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_guctl1 bdk_usbdrdx_uahc_guctl1_t;
+
+/* Return the physical address of USBDRD(a)_UAHC_GUCTL1.
+ * Valid for controller index a <= 1 on CN81XX, CN83XX and CN9XXX; the
+ * per-controller stride is 0x1000000000. */
+static inline uint64_t BDK_USBDRDX_UAHC_GUCTL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GUCTL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c11cll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c11cll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c11cll + 0x1000000000ll * ((a) & 0x1);
+ /* Unknown model or out-of-range index: report a fatal CSR lookup error. */
+ __bdk_csr_fatal("USBDRDX_UAHC_GUCTL1", 1, a, 0, 0, 0);
+}
+
+/* Register metadata (C type, bus type, name, BAR, bus number, index arguments). */
+#define typedef_BDK_USBDRDX_UAHC_GUCTL1(a) bdk_usbdrdx_uahc_guctl1_t
+#define bustype_BDK_USBDRDX_UAHC_GUCTL1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GUCTL1(a) "USBDRDX_UAHC_GUCTL1"
+#define device_bar_BDK_USBDRDX_UAHC_GUCTL1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GUCTL1(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GUCTL1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_guctl2
+ *
+ * UAHC Global User Control Register 2
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.44.
+ */
+union bdk_usbdrdx_uahc_guctl2
+{
+ uint32_t u; /* Whole-register (32-bit) access */
+ struct bdk_usbdrdx_uahc_guctl2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_15_31 : 17;
+ uint32_t rst_actbitlater : 1; /**< [ 14: 14](R/W) Enable clearing of the command active bit for the ENDXFER command. */
+ uint32_t reserved_13 : 1;
+ uint32_t enableepcacheevict : 1; /**< [ 12: 12](R/W) Enable evicting endpoint cache after flow control for bulk endpoints. */
+ uint32_t disablecfc : 1; /**< [ 11: 11](R/W) Disable xHCI errata feature contiguous frame ID capability. */
+ uint32_t rxpingduration : 6; /**< [ 10: 5](R/W) Receive ping maximum duration. */
+ uint32_t txpingduration : 5; /**< [ 4: 0](R/W) Transmit ping maximum duration. */
+#else /* Word 0 - Little Endian */
+ uint32_t txpingduration : 5; /**< [ 4: 0](R/W) Transmit ping maximum duration. */
+ uint32_t rxpingduration : 6; /**< [ 10: 5](R/W) Receive ping maximum duration. */
+ uint32_t disablecfc : 1; /**< [ 11: 11](R/W) Disable xHCI errata feature contiguous frame ID capability. */
+ uint32_t enableepcacheevict : 1; /**< [ 12: 12](R/W) Enable evicting endpoint cache after flow control for bulk endpoints. */
+ uint32_t reserved_13 : 1;
+ uint32_t rst_actbitlater : 1; /**< [ 14: 14](R/W) Enable clearing of the command active bit for the ENDXFER command. */
+ uint32_t reserved_15_31 : 17;
+#endif /* Word 0 - End */
+ } s; /* Per-field view; same layout is used for every supported chip model */
+ /* struct bdk_usbdrdx_uahc_guctl2_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_guctl2 bdk_usbdrdx_uahc_guctl2_t;
+
+/* Return the physical address of USBDRD(a)_UAHC_GUCTL2.
+ * Unlike the other UAHC registers in this group, this register only
+ * exists on CN9XXX (controller index a <= 1). */
+static inline uint64_t BDK_USBDRDX_UAHC_GUCTL2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GUCTL2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c19cll + 0x1000000000ll * ((a) & 0x1);
+ /* Unknown model or out-of-range index: report a fatal CSR lookup error. */
+ __bdk_csr_fatal("USBDRDX_UAHC_GUCTL2", 1, a, 0, 0, 0);
+}
+
+/* Register metadata (C type, bus type, name, BAR, bus number, index arguments). */
+#define typedef_BDK_USBDRDX_UAHC_GUCTL2(a) bdk_usbdrdx_uahc_guctl2_t
+#define bustype_BDK_USBDRDX_UAHC_GUCTL2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GUCTL2(a) "USBDRDX_UAHC_GUCTL2"
+#define device_bar_BDK_USBDRDX_UAHC_GUCTL2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GUCTL2(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GUCTL2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_guid
+ *
+ * USB UAHC Core User ID Register
+ * This is a read/write register containing the User ID. The power-on value for this register is
+ * specified as the user identification register. This register can be used in the following
+ * ways:
+ * * To store the version or revision of your system.
+ * * To store hardware configurations that are outside of the core.
+ * * As a scratch register.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v3.10a, section 6.2.11.
+ */
+union bdk_usbdrdx_uahc_guid
+{
+ uint32_t u; /* Whole-register (32-bit) access */
+ struct bdk_usbdrdx_uahc_guid_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t userid : 32; /**< [ 31: 0](R/W) User ID. Application-programmable ID field. */
+#else /* Word 0 - Little Endian */
+ uint32_t userid : 32; /**< [ 31: 0](R/W) User ID. Application-programmable ID field. */
+#endif /* Word 0 - End */
+ } s; /* Single full-width field; layout is endian-independent */
+ /* struct bdk_usbdrdx_uahc_guid_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_guid bdk_usbdrdx_uahc_guid_t;
+
+/* Return the physical address of USBDRD(a)_UAHC_GUID.
+ * Valid for controller index a <= 1 on CN81XX, CN83XX and CN9XXX; the
+ * per-controller stride is 0x1000000000. */
+static inline uint64_t BDK_USBDRDX_UAHC_GUID(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GUID(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000c128ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000c128ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000c128ll + 0x1000000000ll * ((a) & 0x1);
+ /* Unknown model or out-of-range index: report a fatal CSR lookup error. */
+ __bdk_csr_fatal("USBDRDX_UAHC_GUID", 1, a, 0, 0, 0);
+}
+
+/* Register metadata (C type, bus type, name, BAR, bus number, index arguments). */
+#define typedef_BDK_USBDRDX_UAHC_GUID(a) bdk_usbdrdx_uahc_guid_t
+#define bustype_BDK_USBDRDX_UAHC_GUID(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GUID(a) "USBDRDX_UAHC_GUID"
+#define device_bar_BDK_USBDRDX_UAHC_GUID(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GUID(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_GUID(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gusb2i2cctl#
+ *
+ * USB UAHC USB2 I2C Control Register
+ * This register is reserved for future use.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.5.2.
+ */
+union bdk_usbdrdx_uahc_gusb2i2cctlx
+{
+ uint32_t u; /* Whole-register (32-bit) access */
+ struct bdk_usbdrdx_uahc_gusb2i2cctlx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_0_31 : 32;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_31 : 32;
+#endif /* Word 0 - End */
+ } s; /* Entirely reserved; no usable fields in this configuration */
+ /* struct bdk_usbdrdx_uahc_gusb2i2cctlx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_gusb2i2cctlx bdk_usbdrdx_uahc_gusb2i2cctlx_t;
+
+/* Return the physical address of USBDRD(a)_UAHC_GUSB2I2CCTL(b).
+ * Valid for controller index a <= 1 and (single) PHY index b == 0 on
+ * CN81XX, CN83XX and CN9XXX; per-controller stride is 0x1000000000. */
+static inline uint64_t BDK_USBDRDX_UAHC_GUSB2I2CCTLX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GUSB2I2CCTLX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x86800000c240ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x86800000c240ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+ return 0x86800000c240ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x0);
+ /* Unknown model or out-of-range indices: report a fatal CSR lookup error. */
+ __bdk_csr_fatal("USBDRDX_UAHC_GUSB2I2CCTLX", 2, a, b, 0, 0);
+}
+
+/* Register metadata (C type, bus type, name, BAR, bus number, index arguments). */
+#define typedef_BDK_USBDRDX_UAHC_GUSB2I2CCTLX(a,b) bdk_usbdrdx_uahc_gusb2i2cctlx_t
+#define bustype_BDK_USBDRDX_UAHC_GUSB2I2CCTLX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GUSB2I2CCTLX(a,b) "USBDRDX_UAHC_GUSB2I2CCTLX"
+#define device_bar_BDK_USBDRDX_UAHC_GUSB2I2CCTLX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GUSB2I2CCTLX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_GUSB2I2CCTLX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gusb2phycfg#
+ *
+ * USB UAHC USB2 PHY-Configuration Register
+ * This register is used to configure the core after power-on. It contains USB 2.0 and USB 2.0
+ * PHY-related configuration parameters. The application must program this register before
+ * starting any transactions on either the SoC bus or the USB. Per-port registers are
+ * implemented.
+ *
+ * Do not make changes to this register after the initial programming.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.5.1.
+ */
+union bdk_usbdrdx_uahc_gusb2phycfgx
+{
+    /* Whole-register view: the raw 32-bit CSR value. */
+    uint32_t u;
+    /* Generic per-field view. Bitfield declaration order encodes the bit
+       positions, mirrored between the big- and little-endian branches;
+       do not reorder fields. */
+    struct bdk_usbdrdx_uahc_gusb2phycfgx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t physoftrst            : 1;  /**< [ 31: 31](R/W) PHY soft reset. Causes the usb2phy_reset signal to be asserted to reset a UTMI PHY. */
+        uint32_t u2_freeclk_exists     : 1;  /**< [ 30: 30](R/W) Specifies whether your USB 2.0 PHY provides a free-running PHY clock, which is active when
+                                                                 the clock control input is active. If your USB 2.0 PHY provides a free-running PHY clock,
+                                                                 it must be connected to the utmi_clk[0] input. The remaining utmi_clk[n] must be connected
+                                                                 to the respective port clocks. The core uses the Port-0 clock for generating the internal
+                                                                 mac2 clock.
+                                                                 0 = USB 2.0 free clock does not exist.
+                                                                 1 = USB 2.0 free clock exists.
+
+                                                                 This field must be set to zero if you enable ITP generation based on the REF_CLK
+                                                                 counter, USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1, or USBDRD()_UAHC_GFLADJ
+                                                                 [GFLADJ_REFCLK_LPM_SEL] =
+                                                                 1. */
+        uint32_t ulpi_lpm_with_opmode_chk : 1;/**< [ 29: 29](R/W) Support the LPM over ULPI without NOPID token to the ULPI PHY. Always 0x0. */
+        uint32_t hsic_con_width_adj    : 2;  /**< [ 28: 27](RO) This bit is used in the HSIC device mode of operation. Always 0x0 */
+        uint32_t inv_sel_hsic          : 1;  /**< [ 26: 26](RO) The application driver uses this bit to control the HSIC enable/disable function. */
+        uint32_t reserved_19_25        : 7;
+        uint32_t ulpiextvbusindicator  : 1;  /**< [ 18: 18](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiextvbusdrv        : 1;  /**< [ 17: 17](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiclksusm           : 1;  /**< [ 16: 16](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiautores           : 1;  /**< [ 15: 15](R/W) Reserved (unused in this configuration). */
+        uint32_t reserved_14           : 1;
+        uint32_t usbtrdtim             : 4;  /**< [ 13: 10](R/W) USB 2.0 turnaround time. Sets the turnaround time in PHY clock cycles. Specifies the
+                                                                 response time for a MAC request to the packet FIFO controller (PFC) to fetch data from the
+                                                                 DFIFO (SPRAM).
+                                                                 USB turnaround time is a critical certification criteria when using long cables and five
+                                                                 hub levels.
+                                                                 When the MAC interface is 8-bit UTMI+/ULPI, the required values for this field is 0x9. */
+        uint32_t xcvrdly               : 1;  /**< [  9:  9](R/W) Transceiver delay.
+                                                                 Enables a delay between the assertion of the UTMI transceiver select signal (for
+                                                                 high-speed) and the assertion of the TxValid signal during a high-speed chirp.
+                                                                 When this bit is set to 1, a delay of approximately 2.5 us is introduced from
+                                                                 the time when the transceiver select is set to 0x0, to the time when the TxValid
+                                                                 is driven to 0 for sending the chirp-K. This delay is required for some UTMI PHYs.
+                                                                 This bit is only valid in device mode. */
+        uint32_t enblslpm              : 1;  /**< [  8:  8](R/W) Enable utmi_sleep_n and utmi_l1_suspend_n. The application uses this field to control
+                                                                 utmi_sleep_n and utmi_l1_suspend_n assertion to the PHY in the L1 state.
+                                                                 0 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is not transferred to the
+                                                                 external PHY.
+                                                                 1 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is transferred to the
+                                                                 external PHY.
+
+                                                                 When hardware LPM is enabled, this bit should be set high for Port0. */
+        uint32_t physel                : 1;  /**< [  7:  7](WO) USB 2.0 high-speed PHY or USB 1.1 full-speed serial transceiver select. */
+        uint32_t susphy                : 1;  /**< [  6:  6](R/W) Suspend USB2.0 high-speed/full-speed/low-speed PHY. When set, USB2.0 PHY enters suspend
+                                                                 mode if suspend conditions are valid. */
+        uint32_t fsintf                : 1;  /**< [  5:  5](RO) Full-speed serial-interface select. Always reads as 0x0. */
+        uint32_t ulpi_utmi_sel         : 1;  /**< [  4:  4](RO) ULPI or UTMI+ select. Always reads as 0x0, indicating UTMI+. */
+        uint32_t phyif                 : 1;  /**< [  3:  3](R/W) PHY interface width: 1 = 16-bit, 0 = 8-bit.
+                                                                 All the enabled 2.0 ports should have the same clock frequency as Port0 clock frequency
+                                                                 (utmi_clk[0]).
+                                                                 The UTMI 8-bit and 16-bit modes cannot be used together for different ports at the same
+                                                                 time (i.e., all the ports should be in 8-bit mode, or all of them should be in 16-bit
+                                                                 mode). */
+        uint32_t toutcal               : 3;  /**< [  2:  0](R/W) High-speed/full-speed timeout calibration.
+                                                                 The number of PHY clock cycles, as indicated by the application in this field, is
+                                                                 multiplied by a bit-time factor; this factor is added to the high-speed/full-speed
+                                                                 interpacket timeout duration in the core to account for additional delays introduced by
+                                                                 the PHY. This might be required, since the delay introduced by the PHY in generating the
+                                                                 linestate condition can vary among PHYs.
+
+                                                                 The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit
+                                                                 times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit
+                                                                 times. The application must program this field based on the speed of connection.
+
+                                                                 The number of bit times added per PHY clock are:
+                                                                 * High-speed operation:
+                                                                 _ one 30-MHz PHY clock = 16 bit times.
+                                                                 _ one 60-MHz PHY clock = 8 bit times.
+
+                                                                 * Full-speed operation:
+                                                                 _ one 30-MHz PHY clock = 0.4 bit times.
+                                                                 _ one 60-MHz PHY clock = 0.2 bit times.
+                                                                 _ one 48-MHz PHY clock = 0.25 bit times. */
+#else /* Word 0 - Little Endian */
+        uint32_t toutcal               : 3;  /**< [  2:  0](R/W) High-speed/full-speed timeout calibration.
+                                                                 The number of PHY clock cycles, as indicated by the application in this field, is
+                                                                 multiplied by a bit-time factor; this factor is added to the high-speed/full-speed
+                                                                 interpacket timeout duration in the core to account for additional delays introduced by
+                                                                 the PHY. This might be required, since the delay introduced by the PHY in generating the
+                                                                 linestate condition can vary among PHYs.
+
+                                                                 The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit
+                                                                 times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit
+                                                                 times. The application must program this field based on the speed of connection.
+
+                                                                 The number of bit times added per PHY clock are:
+                                                                 * High-speed operation:
+                                                                 _ one 30-MHz PHY clock = 16 bit times.
+                                                                 _ one 60-MHz PHY clock = 8 bit times.
+
+                                                                 * Full-speed operation:
+                                                                 _ one 30-MHz PHY clock = 0.4 bit times.
+                                                                 _ one 60-MHz PHY clock = 0.2 bit times.
+                                                                 _ one 48-MHz PHY clock = 0.25 bit times. */
+        uint32_t phyif                 : 1;  /**< [  3:  3](R/W) PHY interface width: 1 = 16-bit, 0 = 8-bit.
+                                                                 All the enabled 2.0 ports should have the same clock frequency as Port0 clock frequency
+                                                                 (utmi_clk[0]).
+                                                                 The UTMI 8-bit and 16-bit modes cannot be used together for different ports at the same
+                                                                 time (i.e., all the ports should be in 8-bit mode, or all of them should be in 16-bit
+                                                                 mode). */
+        uint32_t ulpi_utmi_sel         : 1;  /**< [  4:  4](RO) ULPI or UTMI+ select. Always reads as 0x0, indicating UTMI+. */
+        uint32_t fsintf                : 1;  /**< [  5:  5](RO) Full-speed serial-interface select. Always reads as 0x0. */
+        uint32_t susphy                : 1;  /**< [  6:  6](R/W) Suspend USB2.0 high-speed/full-speed/low-speed PHY. When set, USB2.0 PHY enters suspend
+                                                                 mode if suspend conditions are valid. */
+        uint32_t physel                : 1;  /**< [  7:  7](WO) USB 2.0 high-speed PHY or USB 1.1 full-speed serial transceiver select. */
+        uint32_t enblslpm              : 1;  /**< [  8:  8](R/W) Enable utmi_sleep_n and utmi_l1_suspend_n. The application uses this field to control
+                                                                 utmi_sleep_n and utmi_l1_suspend_n assertion to the PHY in the L1 state.
+                                                                 0 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is not transferred to the
+                                                                 external PHY.
+                                                                 1 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is transferred to the
+                                                                 external PHY.
+
+                                                                 When hardware LPM is enabled, this bit should be set high for Port0. */
+        uint32_t xcvrdly               : 1;  /**< [  9:  9](R/W) Transceiver delay.
+                                                                 Enables a delay between the assertion of the UTMI transceiver select signal (for
+                                                                 high-speed) and the assertion of the TxValid signal during a high-speed chirp.
+                                                                 When this bit is set to 1, a delay of approximately 2.5 us is introduced from
+                                                                 the time when the transceiver select is set to 0x0, to the time when the TxValid
+                                                                 is driven to 0 for sending the chirp-K. This delay is required for some UTMI PHYs.
+                                                                 This bit is only valid in device mode. */
+        uint32_t usbtrdtim             : 4;  /**< [ 13: 10](R/W) USB 2.0 turnaround time. Sets the turnaround time in PHY clock cycles. Specifies the
+                                                                 response time for a MAC request to the packet FIFO controller (PFC) to fetch data from the
+                                                                 DFIFO (SPRAM).
+                                                                 USB turnaround time is a critical certification criteria when using long cables and five
+                                                                 hub levels.
+                                                                 When the MAC interface is 8-bit UTMI+/ULPI, the required values for this field is 0x9. */
+        uint32_t reserved_14           : 1;
+        uint32_t ulpiautores           : 1;  /**< [ 15: 15](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiclksusm           : 1;  /**< [ 16: 16](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiextvbusdrv        : 1;  /**< [ 17: 17](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiextvbusindicator  : 1;  /**< [ 18: 18](R/W) Reserved (unused in this configuration). */
+        uint32_t reserved_19_25        : 7;
+        uint32_t inv_sel_hsic          : 1;  /**< [ 26: 26](RO) The application driver uses this bit to control the HSIC enable/disable function. */
+        uint32_t hsic_con_width_adj    : 2;  /**< [ 28: 27](RO) This bit is used in the HSIC device mode of operation. Always 0x0 */
+        uint32_t ulpi_lpm_with_opmode_chk : 1;/**< [ 29: 29](R/W) Support the LPM over ULPI without NOPID token to the ULPI PHY. Always 0x0. */
+        uint32_t u2_freeclk_exists     : 1;  /**< [ 30: 30](R/W) Specifies whether your USB 2.0 PHY provides a free-running PHY clock, which is active when
+                                                                 the clock control input is active. If your USB 2.0 PHY provides a free-running PHY clock,
+                                                                 it must be connected to the utmi_clk[0] input. The remaining utmi_clk[n] must be connected
+                                                                 to the respective port clocks. The core uses the Port-0 clock for generating the internal
+                                                                 mac2 clock.
+                                                                 0 = USB 2.0 free clock does not exist.
+                                                                 1 = USB 2.0 free clock exists.
+
+                                                                 This field must be set to zero if you enable ITP generation based on the REF_CLK
+                                                                 counter, USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1, or USBDRD()_UAHC_GFLADJ
+                                                                 [GFLADJ_REFCLK_LPM_SEL] =
+                                                                 1. */
+        uint32_t physoftrst            : 1;  /**< [ 31: 31](R/W) PHY soft reset. Causes the usb2phy_reset signal to be asserted to reset a UTMI PHY. */
+#endif /* Word 0 - End */
+    } s;
+    /* CN-specific view: identical bit assignments to _s above, except the
+       7-bit reserved_19_25 hole is declared as two fields (reserved_25 and
+       reserved_19_24). */
+    struct bdk_usbdrdx_uahc_gusb2phycfgx_cn
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t physoftrst            : 1;  /**< [ 31: 31](R/W) PHY soft reset. Causes the usb2phy_reset signal to be asserted to reset a UTMI PHY. */
+        uint32_t u2_freeclk_exists     : 1;  /**< [ 30: 30](R/W) Specifies whether your USB 2.0 PHY provides a free-running PHY clock, which is active when
+                                                                 the clock control input is active. If your USB 2.0 PHY provides a free-running PHY clock,
+                                                                 it must be connected to the utmi_clk[0] input. The remaining utmi_clk[n] must be connected
+                                                                 to the respective port clocks. The core uses the Port-0 clock for generating the internal
+                                                                 mac2 clock.
+                                                                 0 = USB 2.0 free clock does not exist.
+                                                                 1 = USB 2.0 free clock exists.
+
+                                                                 This field must be set to zero if you enable ITP generation based on the REF_CLK
+                                                                 counter, USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1, or USBDRD()_UAHC_GFLADJ
+                                                                 [GFLADJ_REFCLK_LPM_SEL] =
+                                                                 1. */
+        uint32_t ulpi_lpm_with_opmode_chk : 1;/**< [ 29: 29](R/W) Support the LPM over ULPI without NOPID token to the ULPI PHY. Always 0x0. */
+        uint32_t hsic_con_width_adj    : 2;  /**< [ 28: 27](RO) This bit is used in the HSIC device mode of operation. Always 0x0 */
+        uint32_t inv_sel_hsic          : 1;  /**< [ 26: 26](RO) The application driver uses this bit to control the HSIC enable/disable function. */
+        uint32_t reserved_25           : 1;
+        uint32_t reserved_19_24        : 6;
+        uint32_t ulpiextvbusindicator  : 1;  /**< [ 18: 18](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiextvbusdrv        : 1;  /**< [ 17: 17](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiclksusm           : 1;  /**< [ 16: 16](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiautores           : 1;  /**< [ 15: 15](R/W) Reserved (unused in this configuration). */
+        uint32_t reserved_14           : 1;
+        uint32_t usbtrdtim             : 4;  /**< [ 13: 10](R/W) USB 2.0 turnaround time. Sets the turnaround time in PHY clock cycles. Specifies the
+                                                                 response time for a MAC request to the packet FIFO controller (PFC) to fetch data from the
+                                                                 DFIFO (SPRAM).
+                                                                 USB turnaround time is a critical certification criteria when using long cables and five
+                                                                 hub levels.
+                                                                 When the MAC interface is 8-bit UTMI+/ULPI, the required values for this field is 0x9. */
+        uint32_t xcvrdly               : 1;  /**< [  9:  9](R/W) Transceiver delay.
+                                                                 Enables a delay between the assertion of the UTMI transceiver select signal (for
+                                                                 high-speed) and the assertion of the TxValid signal during a high-speed chirp.
+                                                                 When this bit is set to 1, a delay of approximately 2.5 us is introduced from
+                                                                 the time when the transceiver select is set to 0x0, to the time when the TxValid
+                                                                 is driven to 0 for sending the chirp-K. This delay is required for some UTMI PHYs.
+                                                                 This bit is only valid in device mode. */
+        uint32_t enblslpm              : 1;  /**< [  8:  8](R/W) Enable utmi_sleep_n and utmi_l1_suspend_n. The application uses this field to control
+                                                                 utmi_sleep_n and utmi_l1_suspend_n assertion to the PHY in the L1 state.
+                                                                 0 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is not transferred to the
+                                                                 external PHY.
+                                                                 1 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is transferred to the
+                                                                 external PHY.
+
+                                                                 When hardware LPM is enabled, this bit should be set high for Port0. */
+        uint32_t physel                : 1;  /**< [  7:  7](WO) USB 2.0 high-speed PHY or USB 1.1 full-speed serial transceiver select. */
+        uint32_t susphy                : 1;  /**< [  6:  6](R/W) Suspend USB2.0 high-speed/full-speed/low-speed PHY. When set, USB2.0 PHY enters suspend
+                                                                 mode if suspend conditions are valid. */
+        uint32_t fsintf                : 1;  /**< [  5:  5](RO) Full-speed serial-interface select. Always reads as 0x0. */
+        uint32_t ulpi_utmi_sel         : 1;  /**< [  4:  4](RO) ULPI or UTMI+ select. Always reads as 0x0, indicating UTMI+. */
+        uint32_t phyif                 : 1;  /**< [  3:  3](R/W) PHY interface width: 1 = 16-bit, 0 = 8-bit.
+                                                                 All the enabled 2.0 ports should have the same clock frequency as Port0 clock frequency
+                                                                 (utmi_clk[0]).
+                                                                 The UTMI 8-bit and 16-bit modes cannot be used together for different ports at the same
+                                                                 time (i.e., all the ports should be in 8-bit mode, or all of them should be in 16-bit
+                                                                 mode). */
+        uint32_t toutcal               : 3;  /**< [  2:  0](R/W) High-speed/full-speed timeout calibration.
+                                                                 The number of PHY clock cycles, as indicated by the application in this field, is
+                                                                 multiplied by a bit-time factor; this factor is added to the high-speed/full-speed
+                                                                 interpacket timeout duration in the core to account for additional delays introduced by
+                                                                 the PHY. This might be required, since the delay introduced by the PHY in generating the
+                                                                 linestate condition can vary among PHYs.
+
+                                                                 The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit
+                                                                 times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit
+                                                                 times. The application must program this field based on the speed of connection.
+
+                                                                 The number of bit times added per PHY clock are:
+                                                                 * High-speed operation:
+                                                                 _ one 30-MHz PHY clock = 16 bit times.
+                                                                 _ one 60-MHz PHY clock = 8 bit times.
+
+                                                                 * Full-speed operation:
+                                                                 _ one 30-MHz PHY clock = 0.4 bit times.
+                                                                 _ one 60-MHz PHY clock = 0.2 bit times.
+                                                                 _ one 48-MHz PHY clock = 0.25 bit times. */
+#else /* Word 0 - Little Endian */
+        uint32_t toutcal               : 3;  /**< [  2:  0](R/W) High-speed/full-speed timeout calibration.
+                                                                 The number of PHY clock cycles, as indicated by the application in this field, is
+                                                                 multiplied by a bit-time factor; this factor is added to the high-speed/full-speed
+                                                                 interpacket timeout duration in the core to account for additional delays introduced by
+                                                                 the PHY. This might be required, since the delay introduced by the PHY in generating the
+                                                                 linestate condition can vary among PHYs.
+
+                                                                 The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit
+                                                                 times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit
+                                                                 times. The application must program this field based on the speed of connection.
+
+                                                                 The number of bit times added per PHY clock are:
+                                                                 * High-speed operation:
+                                                                 _ one 30-MHz PHY clock = 16 bit times.
+                                                                 _ one 60-MHz PHY clock = 8 bit times.
+
+                                                                 * Full-speed operation:
+                                                                 _ one 30-MHz PHY clock = 0.4 bit times.
+                                                                 _ one 60-MHz PHY clock = 0.2 bit times.
+                                                                 _ one 48-MHz PHY clock = 0.25 bit times. */
+        uint32_t phyif                 : 1;  /**< [  3:  3](R/W) PHY interface width: 1 = 16-bit, 0 = 8-bit.
+                                                                 All the enabled 2.0 ports should have the same clock frequency as Port0 clock frequency
+                                                                 (utmi_clk[0]).
+                                                                 The UTMI 8-bit and 16-bit modes cannot be used together for different ports at the same
+                                                                 time (i.e., all the ports should be in 8-bit mode, or all of them should be in 16-bit
+                                                                 mode). */
+        uint32_t ulpi_utmi_sel         : 1;  /**< [  4:  4](RO) ULPI or UTMI+ select. Always reads as 0x0, indicating UTMI+. */
+        uint32_t fsintf                : 1;  /**< [  5:  5](RO) Full-speed serial-interface select. Always reads as 0x0. */
+        uint32_t susphy                : 1;  /**< [  6:  6](R/W) Suspend USB2.0 high-speed/full-speed/low-speed PHY. When set, USB2.0 PHY enters suspend
+                                                                 mode if suspend conditions are valid. */
+        uint32_t physel                : 1;  /**< [  7:  7](WO) USB 2.0 high-speed PHY or USB 1.1 full-speed serial transceiver select. */
+        uint32_t enblslpm              : 1;  /**< [  8:  8](R/W) Enable utmi_sleep_n and utmi_l1_suspend_n. The application uses this field to control
+                                                                 utmi_sleep_n and utmi_l1_suspend_n assertion to the PHY in the L1 state.
+                                                                 0 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is not transferred to the
+                                                                 external PHY.
+                                                                 1 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is transferred to the
+                                                                 external PHY.
+
+                                                                 When hardware LPM is enabled, this bit should be set high for Port0. */
+        uint32_t xcvrdly               : 1;  /**< [  9:  9](R/W) Transceiver delay.
+                                                                 Enables a delay between the assertion of the UTMI transceiver select signal (for
+                                                                 high-speed) and the assertion of the TxValid signal during a high-speed chirp.
+                                                                 When this bit is set to 1, a delay of approximately 2.5 us is introduced from
+                                                                 the time when the transceiver select is set to 0x0, to the time when the TxValid
+                                                                 is driven to 0 for sending the chirp-K. This delay is required for some UTMI PHYs.
+                                                                 This bit is only valid in device mode. */
+        uint32_t usbtrdtim             : 4;  /**< [ 13: 10](R/W) USB 2.0 turnaround time. Sets the turnaround time in PHY clock cycles. Specifies the
+                                                                 response time for a MAC request to the packet FIFO controller (PFC) to fetch data from the
+                                                                 DFIFO (SPRAM).
+                                                                 USB turnaround time is a critical certification criteria when using long cables and five
+                                                                 hub levels.
+                                                                 When the MAC interface is 8-bit UTMI+/ULPI, the required values for this field is 0x9. */
+        uint32_t reserved_14           : 1;
+        uint32_t ulpiautores           : 1;  /**< [ 15: 15](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiclksusm           : 1;  /**< [ 16: 16](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiextvbusdrv        : 1;  /**< [ 17: 17](R/W) Reserved (unused in this configuration). */
+        uint32_t ulpiextvbusindicator  : 1;  /**< [ 18: 18](R/W) Reserved (unused in this configuration). */
+        uint32_t reserved_19_24        : 6;
+        uint32_t reserved_25           : 1;
+        uint32_t inv_sel_hsic          : 1;  /**< [ 26: 26](RO) The application driver uses this bit to control the HSIC enable/disable function. */
+        uint32_t hsic_con_width_adj    : 2;  /**< [ 28: 27](RO) This bit is used in the HSIC device mode of operation. Always 0x0 */
+        uint32_t ulpi_lpm_with_opmode_chk : 1;/**< [ 29: 29](R/W) Support the LPM over ULPI without NOPID token to the ULPI PHY. Always 0x0. */
+        uint32_t u2_freeclk_exists     : 1;  /**< [ 30: 30](R/W) Specifies whether your USB 2.0 PHY provides a free-running PHY clock, which is active when
+                                                                 the clock control input is active. If your USB 2.0 PHY provides a free-running PHY clock,
+                                                                 it must be connected to the utmi_clk[0] input. The remaining utmi_clk[n] must be connected
+                                                                 to the respective port clocks. The core uses the Port-0 clock for generating the internal
+                                                                 mac2 clock.
+                                                                 0 = USB 2.0 free clock does not exist.
+                                                                 1 = USB 2.0 free clock exists.
+
+                                                                 This field must be set to zero if you enable ITP generation based on the REF_CLK
+                                                                 counter, USBDRD()_UAHC_GCTL[SOFITPSYNC] = 1, or USBDRD()_UAHC_GFLADJ
+                                                                 [GFLADJ_REFCLK_LPM_SEL] =
+                                                                 1. */
+        uint32_t physoftrst            : 1;  /**< [ 31: 31](R/W) PHY soft reset. Causes the usb2phy_reset signal to be asserted to reset a UTMI PHY. */
+#endif /* Word 0 - End */
+    } cn;
+};
+typedef union bdk_usbdrdx_uahc_gusb2phycfgx bdk_usbdrdx_uahc_gusb2phycfgx_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_GUSB2PHYCFGX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GUSB2PHYCFGX(unsigned long a, unsigned long b)
+{
+    /* Physical address of USBDRD(a)_UAHC_GUSB2PHYCFG(b). The base address and
+       stride are identical on CN81XX, CN83XX and CN9XXX: USB controller index
+       a in 0..1, single register instance (b must be 0). Invalid indices or an
+       unknown model report a fatal CSR error. */
+    if ((a <= 1) && (b == 0) &&
+        (CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+         CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x86800000c200ll + 0x1000000000ll * (a & 0x1) + 4ll * (b & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_GUSB2PHYCFGX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata for USBDRDX_UAHC_GUSB2PHYCFGX consumed by the generic
+   BDK_CSR_* helper macros: C type, bus/access width, printable name, PCI BAR,
+   bus number, and the (a,b,-1,-1) argument packing for error reporting. */
+#define typedef_BDK_USBDRDX_UAHC_GUSB2PHYCFGX(a,b) bdk_usbdrdx_uahc_gusb2phycfgx_t
+#define bustype_BDK_USBDRDX_UAHC_GUSB2PHYCFGX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GUSB2PHYCFGX(a,b) "USBDRDX_UAHC_GUSB2PHYCFGX"
+#define device_bar_BDK_USBDRDX_UAHC_GUSB2PHYCFGX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GUSB2PHYCFGX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_GUSB2PHYCFGX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_gusb3pipectl#
+ *
+ * USB UAHC USB3 Pipe-Control Register
+ * This register is used to configure the core after power-on. It contains USB 3.0 and USB 3.0
+ * PHY-related configuration parameters. The application must program this register before
+ * starting any transactions on either the SoC bus or the USB. Per-port registers are
+ * implemented.
+ *
+ * Do not make changes to this register after the initial programming.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.5.4.
+ */
+union bdk_usbdrdx_uahc_gusb3pipectlx
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_gusb3pipectlx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t physoftrst : 1; /**< [ 31: 31](R/W) USB3 PHY soft reset (PHYSoftRst). When set to 1, initiates a PHY soft reset. After setting
+ this bit to 1, the software needs to clear this bit. */
+ uint32_t hstprtcmpl : 1; /**< [ 30: 30](R/W) Host port compliance. Setting this bit to 1 enables placing the SuperSpeed port link into
+ a compliance state, which allows testing of the PIPE PHY compliance patterns without
+ having to have a test fixture on the USB 3.0 cable. By default, this bit should be set to
+ 0.
+
+ In compliance-lab testing, the SuperSpeed port link enters compliance after failing the
+ first polling sequence after power on. Set this bit to 0 when you run compliance tests.
+
+ The sequence for using this functionality is as follows:
+ * Disconnect any plugged-in devices.
+ * Set USBDRD()_UAHC_USBCMD[HCRST] = 1 or power-on-chip reset.
+ * Set USBDRD()_UAHC_PORTSC()[PP] = 0.
+ * Set HSTPRTCMPL = 1. This places the link into compliance state.
+
+ To advance the compliance pattern, follow this sequence (toggle HSTPRTCMPL):
+ * Set HSTPRTCMPL = 0.
+ * Set HSTPRTCMPL = 1. This advances the link to the next compliance pattern.
+
+ To exit from the compliance state, set USBDRD()_UAHC_USBCMD[HCRST] = 1 or power-on-chip
+ reset. */
+ uint32_t u2ssinactp3ok : 1; /**< [ 29: 29](R/W) P3 OK for U2/SS.Inactive:
+ 0 = During link state U2/SS.Inactive, put PHY in P2 (default).
+ 1 = During link state U2/SS.Inactive, put PHY in P3. */
+ uint32_t disrxdetp3 : 1; /**< [ 28: 28](R/W) Disables receiver detection in P3. If PHY is in P3 and the core needs to perform receiver
+ detection:
+ 0 = Core performs receiver detection in P3 (default).
+ 1 = Core changes the PHY power state to P2 and then performs receiver detection. After
+ receiver detection, core changes PHY power state to P3. */
+ uint32_t ux_exit_in_px : 1; /**< [ 27: 27](R/W) UX exit in Px:
+ 0 = Core does U1/U2/U3 exit in PHY power state P0 (default behavior).
+ 1 = Core does U1/U2/U3 exit in PHY power state P1/P2/P3 respectively.
+
+ This bit is added for SuperSpeed PHY workaround where SuperSpeed PHY injects a glitch on
+ pipe3_RxElecIdle while receiving Ux exit LFPS, and pipe3_PowerDown change is in progress.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t ping_enchance_en : 1; /**< [ 26: 26](R/W) Ping enhancement enable. When set to 1, the downstream-port U1-ping-receive timeout
+ becomes 500 ms instead of 300 ms. Minimum Ping.LFPS receive duration is 8 ns (one mac3_clk
+ cycle). This field is valid for the downstream port only.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t u1u2exitfail_to_recov : 1; /**< [ 25: 25](R/W) U1U2exit fail to recovery. When set to 1, and U1/U2 LFPS handshake fails, the LTSSM
+ transitions from U1/U2 to recovery instead of SS.inactive.
+ If recovery fails, then the LTSSM can enter SS.Inactive. This is an enhancement only. It
+ prevents interoperability issue if the remote link does not do the proper handshake. */
+ uint32_t request_p1p2p3 : 1; /**< [ 24: 24](R/W) Always request P1/P2/P3 for U1/U2/U3.
+ 0 = if immediate Ux exit (remotely initiated, or locally initiated) happens, the core does
+ not request P1/P2/P3 power state change.
+ 1 = the core always requests PHY power change from P0 to P1/P2/P3 during U0 to U1/U2/U3
+ transition.
+
+ Internal:
+ Note: This bit should be set to 1 for Synopsys PHY. For third-party SuperSpeed
+ PHY, check with your PHY vendor. */
+ uint32_t startrxdetu3rxdet : 1; /**< [ 23: 23](WO) Start receiver detection in U3/Rx.Detect.
+ If DISRXDETU3RXDET is set to 1 during reset, and the link is in U3 or Rx.Detect state, the
+ core starts receiver detection on rising edge of this bit.
+ This bit is valid for downstream ports only, and this feature must not be enabled for
+ normal operation.
+
+ Internal:
+ If have to use this feature, contact Synopsys. */
+ uint32_t disrxdetu3rxdet : 1; /**< [ 22: 22](R/W) Disable receiver detection in U3/Rx.Detect. When set to 1, the core does not do receiver
+ detection in U3 or Rx.Detect state. If STARTRXDETU3RXDET is set to 1 during reset,
+ receiver detection starts manually.
+ This bit is valid for downstream ports only, and this feature must not be enabled for
+ normal operation.
+
+ Internal:
+ If have to use this feature, contact Synopsys. */
+ uint32_t delaypx : 3; /**< [ 21: 19](R/W) Delay P1P2P3. Delay P0 to P1/P2/P3 request when entering U1/U2/U3 until (DELAYPX * 8)
+ 8B10B error occurs, or Pipe3_RxValid drops to 0.
+ DELAYPXTRANSENTERUX must reset to 1 to enable this functionality.
+
+ Internal:
+ Should always be 0x1 for a Synopsys PHY. */
+ uint32_t delaypxtransenterux : 1; /**< [ 18: 18](R/W) Delay PHY power change from P0 to P1/P2/P3 when link state changing from U0 to U1/U2/U3
+ respectively.
+ 0 = when entering U1/U2/U3, transition to P1/P2/P3 without checking for Pipe3_RxElecIlde
+ and pipe3_RxValid.
+ 1 = when entering U1/U2/U3, delay the transition to P1/P2/P3 until the pipe3 signals,
+ Pipe3_RxElecIlde is 1 and pipe3_RxValid is 0.
+
+ Internal:
+ Note: This bit should be set to 1 for Synopsys PHY. It is also used by third-
+ party SuperSpeed PHY. */
+ uint32_t suspend_en : 1; /**< [ 17: 17](R/W) Suspend USB3.0 SuperSpeed PHY (Suspend_en). When set to 1, and if suspend conditions are
+ valid, the USB 3.0 PHY enters suspend mode. */
+ uint32_t datwidth : 2; /**< [ 16: 15](RO) PIPE data width.
+ 0x0 = 32 bits.
+ 0x1 = 16 bits.
+ 0x2 = 8 bits.
+ 0x3 = reserved.
+
+ One clock cycle after reset, these bits receive the value seen on the pipe3_DataBusWidth.
+ This will always be 0x0.
+
+ Internal:
+ The simulation testbench uses the coreConsultant parameter to configure the VIP.
+ INTERNAL: These bits in the coreConsultant parameter should match your PHY data width and
+ the pipe3_DataBusWidth port. */
+ uint32_t abortrxdetinu2 : 1; /**< [ 14: 14](R/W) Abort RX Detect in U2. When set to 1, and the link state is U2, the core aborts receiver
+ detection if it receives U2 exit LFPS from the remote link partner.
+
+ This bit is for downstream port only.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t skiprxdet : 1; /**< [ 13: 13](R/W) Skip RX detect. When set to 1, the core skips RX detection if pipe3_RxElecIdle is low.
+ Skip is defined as waiting for the appropriate timeout, then repeating the operation. */
+ uint32_t lfpsp0algn : 1; /**< [ 12: 12](R/W) LFPS P0 align. When set to 1:
+ * Core deasserts LFPS transmission on the clock edge that it requests PHY power state
+ 0 when exiting U1, U2, or U3 low power states. Otherwise, LFPS transmission is asserted
+ one clock earlier.
+ * Core requests symbol transmission two pipe3_rx_pclks periods after the PHY asserts
+ PhyStatus as a result of the PHY switching from P1 or P2 state to P0 state.
+ For USB 3.0 host, this is not required. */
+ uint32_t p3p2tranok : 1; /**< [ 11: 11](R/W) P3 P2 transitions OK.
+ 0 = P0 is always entered as an intermediate state during transitions between P2 and P3, as
+ defined in the PIPE3 specification.
+ 1 = the core transitions directly from PHY power state P2 to P3 or from state P3 to P2.
+
+ According to PIPE3 specification, any direct transition between P3 and P2 is illegal.
+
+ Internal:
+ This bit is used only for some non-Synopsys PHYs that cannot do LFPS in P3.
+ INTERNAL: Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t p3exsigp2 : 1; /**< [ 10: 10](R/W) P3 exit signal in P2. When set to 1, the core always changes the PHY power state to P2,
+ before attempting a U3 exit handshake.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t lfpsfilt : 1; /**< [ 9: 9](R/W) LFPS filter. When set to 1, filter LFPS reception with pipe3_RxValid in PHY power state
+ P0, ignore LFPS reception from the PHY unless both pipe3_Rxelecidle and pipe3_RxValid are
+ deasserted. */
+ uint32_t rxdet2polllfpsctrl : 1; /**< [ 8: 8](R/W) RX_DETECT to polling.
+ 0 = Enables a 400 us delay to start polling LFPS after RX_DETECT. This allows VCM offset
+ to settle to a proper level.
+ 1 = Disables the 400 us delay to start polling LFPS after RX_DETECT. */
+ uint32_t ssicen : 1; /**< [ 7: 7](R/W) SSIC is not supported. This bit must be set to 0. */
+ uint32_t txswing : 1; /**< [ 6: 6](R/W) TX swing. Refer to the PIPE3 specification. */
+ uint32_t txmargin : 3; /**< [ 5: 3](R/W) TX margin. Refer to the PIPE3 specification, table 5-3. */
+ uint32_t txdeemphasis : 2; /**< [ 2: 1](R/W) TX deemphasis. The value driven to the PHY is controlled by the LTSSM during USB3
+ compliance mode. Refer to the PIPE3 specification, table 5-3.
+
+ Use the following values for the appropriate level of de-emphasis (From pipe3 spec):
+ 0x0 = -6 dB de-emphasis, use USBDRD()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_6DB].
+ 0x1 = -3.5 dB de-emphasis, use USBDRD()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_3P5DB].
+ 0x2 = No de-emphasis.
+ 0x3 = Reserved. */
+ uint32_t elasticbuffermode : 1; /**< [ 0: 0](R/W) Elastic buffer mode. Refer to the PIPE3 specification, table 5-3. */
+#else /* Word 0 - Little Endian */
+ uint32_t elasticbuffermode : 1; /**< [ 0: 0](R/W) Elastic buffer mode. Refer to the PIPE3 specification, table 5-3. */
+ uint32_t txdeemphasis : 2; /**< [ 2: 1](R/W) TX deemphasis. The value driven to the PHY is controlled by the LTSSM during USB3
+ compliance mode. Refer to the PIPE3 specification, table 5-3.
+
+ Use the following values for the appropriate level of de-emphasis (From pipe3 spec):
+ 0x0 = -6 dB de-emphasis, use USBDRD()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_6DB].
+ 0x1 = -3.5 dB de-emphasis, use USBDRD()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_3P5DB].
+ 0x2 = No de-emphasis.
+ 0x3 = Reserved. */
+ uint32_t txmargin : 3; /**< [ 5: 3](R/W) TX margin. Refer to the PIPE3 specification, table 5-3. */
+ uint32_t txswing : 1; /**< [ 6: 6](R/W) TX swing. Refer to the PIPE3 specification. */
+ uint32_t ssicen : 1; /**< [ 7: 7](R/W) SSIC is not supported. This bit must be set to 0. */
+ uint32_t rxdet2polllfpsctrl : 1; /**< [ 8: 8](R/W) RX_DETECT to polling.
+ 0 = Enables a 400 us delay to start polling LFPS after RX_DETECT. This allows VCM offset
+ to settle to a proper level.
+ 1 = Disables the 400 us delay to start polling LFPS after RX_DETECT. */
+ uint32_t lfpsfilt : 1; /**< [ 9: 9](R/W) LFPS filter. When set to 1, filter LFPS reception with pipe3_RxValid in PHY power state
+ P0, ignore LFPS reception from the PHY unless both pipe3_Rxelecidle and pipe3_RxValid are
+ deasserted. */
+ uint32_t p3exsigp2 : 1; /**< [ 10: 10](R/W) P3 exit signal in P2. When set to 1, the core always changes the PHY power state to P2,
+ before attempting a U3 exit handshake.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t p3p2tranok : 1; /**< [ 11: 11](R/W) P3 P2 transitions OK.
+ 0 = P0 is always entered as an intermediate state during transitions between P2 and P3, as
+ defined in the PIPE3 specification.
+ 1 = the core transitions directly from PHY power state P2 to P3 or from state P3 to P2.
+
+ According to PIPE3 specification, any direct transition between P3 and P2 is illegal.
+
+ Internal:
+ This bit is used only for some non-Synopsys PHYs that cannot do LFPS in P3.
+ INTERNAL: Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t lfpsp0algn : 1; /**< [ 12: 12](R/W) LFPS P0 align. When set to 1:
+ * Core deasserts LFPS transmission on the clock edge that it requests PHY power state
+ 0 when exiting U1, U2, or U3 low power states. Otherwise, LFPS transmission is asserted
+ one clock earlier.
+ * Core requests symbol transmission two pipe3_rx_pclks periods after the PHY asserts
+ PhyStatus as a result of the PHY switching from P1 or P2 state to P0 state.
+ For USB 3.0 host, this is not required. */
+ uint32_t skiprxdet : 1; /**< [ 13: 13](R/W) Skip RX detect. When set to 1, the core skips RX detection if pipe3_RxElecIdle is low.
+ Skip is defined as waiting for the appropriate timeout, then repeating the operation. */
+ uint32_t abortrxdetinu2 : 1; /**< [ 14: 14](R/W) Abort RX Detect in U2. When set to 1, and the link state is U2, the core aborts receiver
+ detection if it receives U2 exit LFPS from the remote link partner.
+
+ This bit is for downstream port only.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t datwidth : 2; /**< [ 16: 15](RO) PIPE data width.
+ 0x0 = 32 bits.
+ 0x1 = 16 bits.
+ 0x2 = 8 bits.
+ 0x3 = reserved.
+
+ One clock cycle after reset, these bits receive the value seen on the pipe3_DataBusWidth.
+ This will always be 0x0.
+
+ Internal:
+ The simulation testbench uses the coreConsultant parameter to configure the VIP.
+ INTERNAL: These bits in the coreConsultant parameter should match your PHY data width and
+ the pipe3_DataBusWidth port. */
+ uint32_t suspend_en : 1; /**< [ 17: 17](R/W) Suspend USB3.0 SuperSpeed PHY (Suspend_en). When set to 1, and if suspend conditions are
+ valid, the USB 3.0 PHY enters suspend mode. */
+ uint32_t delaypxtransenterux : 1; /**< [ 18: 18](R/W) Delay PHY power change from P0 to P1/P2/P3 when link state changing from U0 to U1/U2/U3
+ respectively.
+ 0 = when entering U1/U2/U3, transition to P1/P2/P3 without checking for Pipe3_RxElecIlde
+ and pipe3_RxValid.
+ 1 = when entering U1/U2/U3, delay the transition to P1/P2/P3 until the pipe3 signals,
+ Pipe3_RxElecIlde is 1 and pipe3_RxValid is 0.
+
+ Internal:
+ Note: This bit should be set to 1 for Synopsys PHY. It is also used by third-
+ party SuperSpeed PHY. */
+ uint32_t delaypx : 3; /**< [ 21: 19](R/W) Delay P1P2P3. Delay P0 to P1/P2/P3 request when entering U1/U2/U3 until (DELAYPX * 8)
+ 8B10B error occurs, or Pipe3_RxValid drops to 0.
+ DELAYPXTRANSENTERUX must reset to 1 to enable this functionality.
+
+ Internal:
+ Should always be 0x1 for a Synopsys PHY. */
+ uint32_t disrxdetu3rxdet : 1; /**< [ 22: 22](R/W) Disable receiver detection in U3/Rx.Detect. When set to 1, the core does not do receiver
+ detection in U3 or Rx.Detect state. If STARTRXDETU3RXDET is set to 1 during reset,
+ receiver detection starts manually.
+ This bit is valid for downstream ports only, and this feature must not be enabled for
+ normal operation.
+
+ Internal:
+ If have to use this feature, contact Synopsys. */
+ uint32_t startrxdetu3rxdet : 1; /**< [ 23: 23](WO) Start receiver detection in U3/Rx.Detect.
+ If DISRXDETU3RXDET is set to 1 during reset, and the link is in U3 or Rx.Detect state, the
+ core starts receiver detection on rising edge of this bit.
+ This bit is valid for downstream ports only, and this feature must not be enabled for
+ normal operation.
+
+ Internal:
+ If have to use this feature, contact Synopsys. */
+ uint32_t request_p1p2p3 : 1; /**< [ 24: 24](R/W) Always request P1/P2/P3 for U1/U2/U3.
+ 0 = if immediate Ux exit (remotely initiated, or locally initiated) happens, the core does
+ not request P1/P2/P3 power state change.
+ 1 = the core always requests PHY power change from P0 to P1/P2/P3 during U0 to U1/U2/U3
+ transition.
+
+ Internal:
+ Note: This bit should be set to 1 for Synopsys PHY. For third-party SuperSpeed
+ PHY, check with your PHY vendor. */
+ uint32_t u1u2exitfail_to_recov : 1; /**< [ 25: 25](R/W) U1U2exit fail to recovery. When set to 1, and U1/U2 LFPS handshake fails, the LTSSM
+ transitions from U1/U2 to recovery instead of SS.inactive.
+ If recovery fails, then the LTSSM can enter SS.Inactive. This is an enhancement only. It
+ prevents interoperability issue if the remote link does not do the proper handshake. */
+ uint32_t ping_enchance_en : 1; /**< [ 26: 26](R/W) Ping enhancement enable. When set to 1, the downstream-port U1-ping-receive timeout
+ becomes 500 ms instead of 300 ms. Minimum Ping.LFPS receive duration is 8 ns (one mac3_clk
+ cycle). This field is valid for the downstream port only.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t ux_exit_in_px : 1; /**< [ 27: 27](R/W) UX exit in Px:
+ 0 = Core does U1/U2/U3 exit in PHY power state P0 (default behavior).
+ 1 = Core does U1/U2/U3 exit in PHY power state P1/P2/P3 respectively.
+
+ This bit is added for SuperSpeed PHY workaround where SuperSpeed PHY injects a glitch on
+ pipe3_RxElecIdle while receiving Ux exit LFPS, and pipe3_PowerDown change is in progress.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t disrxdetp3 : 1; /**< [ 28: 28](R/W) Disables receiver detection in P3. If PHY is in P3 and the core needs to perform receiver
+ detection:
+ 0 = Core performs receiver detection in P3 (default).
+ 1 = Core changes the PHY power state to P2 and then performs receiver detection. After
+ receiver detection, core changes PHY power state to P3. */
+ uint32_t u2ssinactp3ok : 1; /**< [ 29: 29](R/W) P3 OK for U2/SS.Inactive:
+ 0 = During link state U2/SS.Inactive, put PHY in P2 (default).
+ 1 = During link state U2/SS.Inactive, put PHY in P3. */
+ uint32_t hstprtcmpl : 1; /**< [ 30: 30](R/W) Host port compliance. Setting this bit to 1 enables placing the SuperSpeed port link into
+ a compliance state, which allows testing of the PIPE PHY compliance patterns without
+ having to have a test fixture on the USB 3.0 cable. By default, this bit should be set to
+ 0.
+
+ In compliance-lab testing, the SuperSpeed port link enters compliance after failing the
+ first polling sequence after power on. Set this bit to 0 when you run compliance tests.
+
+ The sequence for using this functionality is as follows:
+ * Disconnect any plugged-in devices.
+ * Set USBDRD()_UAHC_USBCMD[HCRST] = 1 or power-on-chip reset.
+ * Set USBDRD()_UAHC_PORTSC()[PP] = 0.
+ * Set HSTPRTCMPL = 1. This places the link into compliance state.
+
+ To advance the compliance pattern, follow this sequence (toggle HSTPRTCMPL):
+ * Set HSTPRTCMPL = 0.
+ * Set HSTPRTCMPL = 1. This advances the link to the next compliance pattern.
+
+ To exit from the compliance state, set USBDRD()_UAHC_USBCMD[HCRST] = 1 or power-on-chip
+ reset. */
+ uint32_t physoftrst : 1; /**< [ 31: 31](R/W) USB3 PHY soft reset (PHYSoftRst). When set to 1, initiates a PHY soft reset. After setting
+ this bit to 1, the software needs to clear this bit. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_gusb3pipectlx_s cn81xx; */
+    /* CN83XX layout variant; appears field-for-field identical to the generic
+       "s" layout above — kept as a separate struct by the BDK CSR generator. */
+    struct bdk_usbdrdx_uahc_gusb3pipectlx_cn83xx
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t physoftrst            : 1;  /**< [ 31: 31](R/W) USB3 PHY soft reset (PHYSoftRst). When set to 1, initiates a PHY soft reset. After setting
+                                                                 this bit to 1, the software needs to clear this bit. */
+        uint32_t hstprtcmpl            : 1;  /**< [ 30: 30](R/W) Host port compliance. Setting this bit to 1 enables placing the SuperSpeed port link into
+                                                                 a compliance state, which allows testing of the PIPE PHY compliance patterns without
+                                                                 having to have a test fixture on the USB 3.0 cable. By default, this bit should be set to
+                                                                 0.
+
+                                                                 In compliance-lab testing, the SuperSpeed port link enters compliance after failing the
+                                                                 first polling sequence after power on. Set this bit to 0 when you run compliance tests.
+
+                                                                 The sequence for using this functionality is as follows:
+                                                                 * Disconnect any plugged-in devices.
+                                                                 * Set USBDRD()_UAHC_USBCMD[HCRST] = 1 or power-on-chip reset.
+                                                                 * Set USBDRD()_UAHC_PORTSC()[PP] = 0.
+                                                                 * Set HSTPRTCMPL = 1. This places the link into compliance state.
+
+                                                                 To advance the compliance pattern, follow this sequence (toggle HSTPRTCMPL):
+                                                                 * Set HSTPRTCMPL = 0.
+                                                                 * Set HSTPRTCMPL = 1. This advances the link to the next compliance pattern.
+
+                                                                 To exit from the compliance state, set USBDRD()_UAHC_USBCMD[HCRST] = 1 or power-on-chip
+                                                                 reset. */
+        uint32_t u2ssinactp3ok         : 1;  /**< [ 29: 29](R/W) P3 OK for U2/SS.Inactive:
+                                                                 0 = During link state U2/SS.Inactive, put PHY in P2 (default).
+                                                                 1 = During link state U2/SS.Inactive, put PHY in P3. */
+        uint32_t disrxdetp3            : 1;  /**< [ 28: 28](R/W) Disables receiver detection in P3. If PHY is in P3 and the core needs to perform receiver
+                                                                 detection:
+                                                                 0 = Core performs receiver detection in P3 (default).
+                                                                 1 = Core changes the PHY power state to P2 and then performs receiver detection. After
+                                                                 receiver detection, core changes PHY power state to P3. */
+        uint32_t ux_exit_in_px         : 1;  /**< [ 27: 27](R/W) UX exit in Px:
+                                                                 0 = Core does U1/U2/U3 exit in PHY power state P0 (default behavior).
+                                                                 1 = Core does U1/U2/U3 exit in PHY power state P1/P2/P3 respectively.
+
+                                                                 This bit is added for SuperSpeed PHY workaround where SuperSpeed PHY injects a glitch on
+                                                                 pipe3_RxElecIdle while receiving Ux exit LFPS, and pipe3_PowerDown change is in progress.
+
+                                                                 Internal:
+                                                                 Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t ping_enchance_en      : 1;  /**< [ 26: 26](R/W) Ping enhancement enable. When set to 1, the downstream-port U1-ping-receive timeout
+                                                                 becomes 500 ms instead of 300 ms. Minimum Ping.LFPS receive duration is 8 ns (one mac3_clk
+                                                                 cycle). This field is valid for the downstream port only.
+
+                                                                 Internal:
+                                                                 Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t u1u2exitfail_to_recov : 1;  /**< [ 25: 25](R/W) U1U2exit fail to recovery. When set to 1, and U1/U2 LFPS handshake fails, the LTSSM
+                                                                 transitions from U1/U2 to recovery instead of SS.inactive.
+                                                                 If recovery fails, then the LTSSM can enter SS.Inactive. This is an enhancement only. It
+                                                                 prevents interoperability issue if the remote link does not do the proper handshake. */
+        uint32_t request_p1p2p3        : 1;  /**< [ 24: 24](R/W) Always request P1/P2/P3 for U1/U2/U3.
+                                                                 0 = if immediate Ux exit (remotely initiated, or locally initiated) happens, the core does
+                                                                 not request P1/P2/P3 power state change.
+                                                                 1 = the core always requests PHY power change from P0 to P1/P2/P3 during U0 to U1/U2/U3
+                                                                 transition.
+
+                                                                 Internal:
+                                                                 Note: This bit should be set to 1 for Synopsys PHY. For third-party SuperSpeed
+                                                                 PHY, check with your PHY vendor. */
+        uint32_t startrxdetu3rxdet     : 1;  /**< [ 23: 23](WO) Start receiver detection in U3/Rx.Detect.
+                                                                 If DISRXDETU3RXDET is set to 1 during reset, and the link is in U3 or Rx.Detect state, the
+                                                                 core starts receiver detection on rising edge of this bit.
+                                                                 This bit is valid for downstream ports only, and this feature must not be enabled for
+                                                                 normal operation.
+
+                                                                 Internal:
+                                                                 If have to use this feature, contact Synopsys. */
+        uint32_t disrxdetu3rxdet       : 1;  /**< [ 22: 22](R/W) Disable receiver detection in U3/Rx.Detect. When set to 1, the core does not do receiver
+                                                                 detection in U3 or Rx.Detect state. If STARTRXDETU3RXDET is set to 1 during reset,
+                                                                 receiver detection starts manually.
+                                                                 This bit is valid for downstream ports only, and this feature must not be enabled for
+                                                                 normal operation.
+
+                                                                 Internal:
+                                                                 If have to use this feature, contact Synopsys. */
+        uint32_t delaypx               : 3;  /**< [ 21: 19](R/W) Delay P1P2P3. Delay P0 to P1/P2/P3 request when entering U1/U2/U3 until (DELAYPX * 8)
+                                                                 8B10B error occurs, or Pipe3_RxValid drops to 0.
+                                                                 DELAYPXTRANSENTERUX must reset to 1 to enable this functionality.
+
+                                                                 Internal:
+                                                                 Should always be 0x1 for a Synopsys PHY. */
+        uint32_t delaypxtransenterux   : 1;  /**< [ 18: 18](R/W) Delay PHY power change from P0 to P1/P2/P3 when link state changing from U0 to U1/U2/U3
+                                                                 respectively.
+                                                                 0 = when entering U1/U2/U3, transition to P1/P2/P3 without checking for pipe3_RxElecIdle
+                                                                 and pipe3_RxValid.
+                                                                 1 = when entering U1/U2/U3, delay the transition to P1/P2/P3 until the pipe3 signals,
+                                                                 pipe3_RxElecIdle is 1 and pipe3_RxValid is 0.
+
+                                                                 Internal:
+                                                                 Note: This bit should be set to 1 for Synopsys PHY. It is also used by third-
+                                                                 party SuperSpeed PHY. */
+        uint32_t suspend_en            : 1;  /**< [ 17: 17](R/W) Suspend USB3.0 SuperSpeed PHY (Suspend_en). When set to 1, and if suspend conditions are
+                                                                 valid, the USB 3.0 PHY enters suspend mode. */
+        uint32_t datwidth              : 2;  /**< [ 16: 15](RO) PIPE data width.
+                                                                 0x0 = 32 bits.
+                                                                 0x1 = 16 bits.
+                                                                 0x2 = 8 bits.
+                                                                 0x3 = reserved.
+
+                                                                 One clock cycle after reset, these bits receive the value seen on the pipe3_DataBusWidth.
+                                                                 This will always be 0x0.
+
+                                                                 Internal:
+                                                                 The simulation testbench uses the coreConsultant parameter to configure the VIP.
+                                                                 INTERNAL: These bits in the coreConsultant parameter should match your PHY data width and
+                                                                 the pipe3_DataBusWidth port. */
+        uint32_t abortrxdetinu2        : 1;  /**< [ 14: 14](R/W) Abort RX Detect in U2. When set to 1, and the link state is U2, the core aborts receiver
+                                                                 detection if it receives U2 exit LFPS from the remote link partner.
+
+                                                                 This bit is for downstream port only.
+
+                                                                 Internal:
+                                                                 Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t skiprxdet             : 1;  /**< [ 13: 13](R/W) Skip RX detect. When set to 1, the core skips RX detection if pipe3_RxElecIdle is low.
+                                                                 Skip is defined as waiting for the appropriate timeout, then repeating the operation. */
+        uint32_t lfpsp0algn            : 1;  /**< [ 12: 12](R/W) LFPS P0 align. When set to 1:
+                                                                 * Core deasserts LFPS transmission on the clock edge that it requests PHY power state
+                                                                 0 when exiting U1, U2, or U3 low power states. Otherwise, LFPS transmission is asserted
+                                                                 one clock earlier.
+                                                                 * Core requests symbol transmission two pipe3_rx_pclks periods after the PHY asserts
+                                                                 PhyStatus as a result of the PHY switching from P1 or P2 state to P0 state.
+                                                                 For USB 3.0 host, this is not required. */
+        uint32_t p3p2tranok            : 1;  /**< [ 11: 11](R/W) P3 P2 transitions OK.
+                                                                 0 = P0 is always entered as an intermediate state during transitions between P2 and P3, as
+                                                                 defined in the PIPE3 specification.
+                                                                 1 = the core transitions directly from PHY power state P2 to P3 or from state P3 to P2.
+
+                                                                 According to PIPE3 specification, any direct transition between P3 and P2 is illegal.
+
+                                                                 Internal:
+                                                                 This bit is used only for some non-Synopsys PHYs that cannot do LFPS in P3.
+                                                                 INTERNAL: Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t p3exsigp2             : 1;  /**< [ 10: 10](R/W) P3 exit signal in P2. When set to 1, the core always changes the PHY power state to P2,
+                                                                 before attempting a U3 exit handshake.
+
+                                                                 Internal:
+                                                                 Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t lfpsfilt              : 1;  /**< [  9:  9](R/W) LFPS filter. When set to 1, filter LFPS reception with pipe3_RxValid in PHY power state
+                                                                 P0, ignore LFPS reception from the PHY unless both pipe3_Rxelecidle and pipe3_RxValid are
+                                                                 deasserted. */
+        uint32_t rxdet2polllfpsctrl    : 1;  /**< [  8:  8](R/W) RX_DETECT to polling.
+                                                                 0 = Enables a 400 us delay to start polling LFPS after RX_DETECT. This allows VCM offset
+                                                                 to settle to a proper level.
+                                                                 1 = Disables the 400 us delay to start polling LFPS after RX_DETECT. */
+        uint32_t ssicen                : 1;  /**< [  7:  7](R/W) SSIC is not supported. This bit must be set to 0. */
+        uint32_t txswing               : 1;  /**< [  6:  6](R/W) TX swing. Refer to the PIPE3 specification. */
+        uint32_t txmargin              : 3;  /**< [  5:  3](R/W) TX margin. Refer to the PIPE3 specification, table 5-3. */
+        uint32_t txdeemphasis          : 2;  /**< [  2:  1](R/W) TX deemphasis. The value driven to the PHY is controlled by the LTSSM during USB3
+                                                                 compliance mode. Refer to the PIPE3 specification, table 5-3.
+
+                                                                 Use the following values for the appropriate level of de-emphasis (From pipe3 spec):
+                                                                 0x0 = -6 dB de-emphasis, use USBDRD()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_6DB].
+                                                                 0x1 = -3.5 dB de-emphasis, use USBDRD()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_3P5DB].
+                                                                 0x2 = No de-emphasis.
+                                                                 0x3 = Reserved. */
+        uint32_t elasticbuffermode     : 1;  /**< [  0:  0](R/W) Elastic buffer mode. Refer to the PIPE3 specification, table 5-3. */
+#else /* Word 0 - Little Endian */
+        uint32_t elasticbuffermode     : 1;  /**< [  0:  0](R/W) Elastic buffer mode. Refer to the PIPE3 specification, table 5-3. */
+        uint32_t txdeemphasis          : 2;  /**< [  2:  1](R/W) TX deemphasis. The value driven to the PHY is controlled by the LTSSM during USB3
+                                                                 compliance mode. Refer to the PIPE3 specification, table 5-3.
+
+                                                                 Use the following values for the appropriate level of de-emphasis (From pipe3 spec):
+                                                                 0x0 = -6 dB de-emphasis, use USBDRD()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_6DB].
+                                                                 0x1 = -3.5 dB de-emphasis, use USBDRD()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_3P5DB].
+                                                                 0x2 = No de-emphasis.
+                                                                 0x3 = Reserved. */
+        uint32_t txmargin              : 3;  /**< [  5:  3](R/W) TX margin. Refer to the PIPE3 specification, table 5-3. */
+        uint32_t txswing               : 1;  /**< [  6:  6](R/W) TX swing. Refer to the PIPE3 specification. */
+        uint32_t ssicen                : 1;  /**< [  7:  7](R/W) SSIC is not supported. This bit must be set to 0. */
+        uint32_t rxdet2polllfpsctrl    : 1;  /**< [  8:  8](R/W) RX_DETECT to polling.
+                                                                 0 = Enables a 400 us delay to start polling LFPS after RX_DETECT. This allows VCM offset
+                                                                 to settle to a proper level.
+                                                                 1 = Disables the 400 us delay to start polling LFPS after RX_DETECT. */
+        uint32_t lfpsfilt              : 1;  /**< [  9:  9](R/W) LFPS filter. When set to 1, filter LFPS reception with pipe3_RxValid in PHY power state
+                                                                 P0, ignore LFPS reception from the PHY unless both pipe3_Rxelecidle and pipe3_RxValid are
+                                                                 deasserted. */
+        uint32_t p3exsigp2             : 1;  /**< [ 10: 10](R/W) P3 exit signal in P2. When set to 1, the core always changes the PHY power state to P2,
+                                                                 before attempting a U3 exit handshake.
+
+                                                                 Internal:
+                                                                 Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t p3p2tranok            : 1;  /**< [ 11: 11](R/W) P3 P2 transitions OK.
+                                                                 0 = P0 is always entered as an intermediate state during transitions between P2 and P3, as
+                                                                 defined in the PIPE3 specification.
+                                                                 1 = the core transitions directly from PHY power state P2 to P3 or from state P3 to P2.
+
+                                                                 According to PIPE3 specification, any direct transition between P3 and P2 is illegal.
+
+                                                                 Internal:
+                                                                 This bit is used only for some non-Synopsys PHYs that cannot do LFPS in P3.
+                                                                 INTERNAL: Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t lfpsp0algn            : 1;  /**< [ 12: 12](R/W) LFPS P0 align. When set to 1:
+                                                                 * Core deasserts LFPS transmission on the clock edge that it requests PHY power state
+                                                                 0 when exiting U1, U2, or U3 low power states. Otherwise, LFPS transmission is asserted
+                                                                 one clock earlier.
+                                                                 * Core requests symbol transmission two pipe3_rx_pclks periods after the PHY asserts
+                                                                 PhyStatus as a result of the PHY switching from P1 or P2 state to P0 state.
+                                                                 For USB 3.0 host, this is not required. */
+        uint32_t skiprxdet             : 1;  /**< [ 13: 13](R/W) Skip RX detect. When set to 1, the core skips RX detection if pipe3_RxElecIdle is low.
+                                                                 Skip is defined as waiting for the appropriate timeout, then repeating the operation. */
+        uint32_t abortrxdetinu2        : 1;  /**< [ 14: 14](R/W) Abort RX Detect in U2. When set to 1, and the link state is U2, the core aborts receiver
+                                                                 detection if it receives U2 exit LFPS from the remote link partner.
+
+                                                                 This bit is for downstream port only.
+
+                                                                 Internal:
+                                                                 Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t datwidth              : 2;  /**< [ 16: 15](RO) PIPE data width.
+                                                                 0x0 = 32 bits.
+                                                                 0x1 = 16 bits.
+                                                                 0x2 = 8 bits.
+                                                                 0x3 = reserved.
+
+                                                                 One clock cycle after reset, these bits receive the value seen on the pipe3_DataBusWidth.
+                                                                 This will always be 0x0.
+
+                                                                 Internal:
+                                                                 The simulation testbench uses the coreConsultant parameter to configure the VIP.
+                                                                 INTERNAL: These bits in the coreConsultant parameter should match your PHY data width and
+                                                                 the pipe3_DataBusWidth port. */
+        uint32_t suspend_en            : 1;  /**< [ 17: 17](R/W) Suspend USB3.0 SuperSpeed PHY (Suspend_en). When set to 1, and if suspend conditions are
+                                                                 valid, the USB 3.0 PHY enters suspend mode. */
+        uint32_t delaypxtransenterux   : 1;  /**< [ 18: 18](R/W) Delay PHY power change from P0 to P1/P2/P3 when link state changing from U0 to U1/U2/U3
+                                                                 respectively.
+                                                                 0 = when entering U1/U2/U3, transition to P1/P2/P3 without checking for pipe3_RxElecIdle
+                                                                 and pipe3_RxValid.
+                                                                 1 = when entering U1/U2/U3, delay the transition to P1/P2/P3 until the pipe3 signals,
+                                                                 pipe3_RxElecIdle is 1 and pipe3_RxValid is 0.
+
+                                                                 Internal:
+                                                                 Note: This bit should be set to 1 for Synopsys PHY. It is also used by third-
+                                                                 party SuperSpeed PHY. */
+        uint32_t delaypx               : 3;  /**< [ 21: 19](R/W) Delay P1P2P3. Delay P0 to P1/P2/P3 request when entering U1/U2/U3 until (DELAYPX * 8)
+                                                                 8B10B error occurs, or Pipe3_RxValid drops to 0.
+                                                                 DELAYPXTRANSENTERUX must reset to 1 to enable this functionality.
+
+                                                                 Internal:
+                                                                 Should always be 0x1 for a Synopsys PHY. */
+        uint32_t disrxdetu3rxdet       : 1;  /**< [ 22: 22](R/W) Disable receiver detection in U3/Rx.Detect. When set to 1, the core does not do receiver
+                                                                 detection in U3 or Rx.Detect state. If STARTRXDETU3RXDET is set to 1 during reset,
+                                                                 receiver detection starts manually.
+                                                                 This bit is valid for downstream ports only, and this feature must not be enabled for
+                                                                 normal operation.
+
+                                                                 Internal:
+                                                                 If have to use this feature, contact Synopsys. */
+        uint32_t startrxdetu3rxdet     : 1;  /**< [ 23: 23](WO) Start receiver detection in U3/Rx.Detect.
+                                                                 If DISRXDETU3RXDET is set to 1 during reset, and the link is in U3 or Rx.Detect state, the
+                                                                 core starts receiver detection on rising edge of this bit.
+                                                                 This bit is valid for downstream ports only, and this feature must not be enabled for
+                                                                 normal operation.
+
+                                                                 Internal:
+                                                                 If have to use this feature, contact Synopsys. */
+        uint32_t request_p1p2p3        : 1;  /**< [ 24: 24](R/W) Always request P1/P2/P3 for U1/U2/U3.
+                                                                 0 = if immediate Ux exit (remotely initiated, or locally initiated) happens, the core does
+                                                                 not request P1/P2/P3 power state change.
+                                                                 1 = the core always requests PHY power change from P0 to P1/P2/P3 during U0 to U1/U2/U3
+                                                                 transition.
+
+                                                                 Internal:
+                                                                 Note: This bit should be set to 1 for Synopsys PHY. For third-party SuperSpeed
+                                                                 PHY, check with your PHY vendor. */
+        uint32_t u1u2exitfail_to_recov : 1;  /**< [ 25: 25](R/W) U1U2exit fail to recovery. When set to 1, and U1/U2 LFPS handshake fails, the LTSSM
+                                                                 transitions from U1/U2 to recovery instead of SS.inactive.
+                                                                 If recovery fails, then the LTSSM can enter SS.Inactive. This is an enhancement only. It
+                                                                 prevents interoperability issue if the remote link does not do the proper handshake. */
+        uint32_t ping_enchance_en      : 1;  /**< [ 26: 26](R/W) Ping enhancement enable. When set to 1, the downstream-port U1-ping-receive timeout
+                                                                 becomes 500 ms instead of 300 ms. Minimum Ping.LFPS receive duration is 8 ns (one mac3_clk
+                                                                 cycle). This field is valid for the downstream port only.
+
+                                                                 Internal:
+                                                                 Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t ux_exit_in_px         : 1;  /**< [ 27: 27](R/W) UX exit in Px:
+                                                                 0 = Core does U1/U2/U3 exit in PHY power state P0 (default behavior).
+                                                                 1 = Core does U1/U2/U3 exit in PHY power state P1/P2/P3 respectively.
+
+                                                                 This bit is added for SuperSpeed PHY workaround where SuperSpeed PHY injects a glitch on
+                                                                 pipe3_RxElecIdle while receiving Ux exit LFPS, and pipe3_PowerDown change is in progress.
+
+                                                                 Internal:
+                                                                 Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+                                                                 Synopsys PHY. */
+        uint32_t disrxdetp3            : 1;  /**< [ 28: 28](R/W) Disables receiver detection in P3. If PHY is in P3 and the core needs to perform receiver
+                                                                 detection:
+                                                                 0 = Core performs receiver detection in P3 (default).
+                                                                 1 = Core changes the PHY power state to P2 and then performs receiver detection. After
+                                                                 receiver detection, core changes PHY power state to P3. */
+        uint32_t u2ssinactp3ok         : 1;  /**< [ 29: 29](R/W) P3 OK for U2/SS.Inactive:
+                                                                 0 = During link state U2/SS.Inactive, put PHY in P2 (default).
+                                                                 1 = During link state U2/SS.Inactive, put PHY in P3. */
+        uint32_t hstprtcmpl            : 1;  /**< [ 30: 30](R/W) Host port compliance. Setting this bit to 1 enables placing the SuperSpeed port link into
+                                                                 a compliance state, which allows testing of the PIPE PHY compliance patterns without
+                                                                 having to have a test fixture on the USB 3.0 cable. By default, this bit should be set to
+                                                                 0.
+
+                                                                 In compliance-lab testing, the SuperSpeed port link enters compliance after failing the
+                                                                 first polling sequence after power on. Set this bit to 0 when you run compliance tests.
+
+                                                                 The sequence for using this functionality is as follows:
+                                                                 * Disconnect any plugged-in devices.
+                                                                 * Set USBDRD()_UAHC_USBCMD[HCRST] = 1 or power-on-chip reset.
+                                                                 * Set USBDRD()_UAHC_PORTSC()[PP] = 0.
+                                                                 * Set HSTPRTCMPL = 1. This places the link into compliance state.
+
+                                                                 To advance the compliance pattern, follow this sequence (toggle HSTPRTCMPL):
+                                                                 * Set HSTPRTCMPL = 0.
+                                                                 * Set HSTPRTCMPL = 1. This advances the link to the next compliance pattern.
+
+                                                                 To exit from the compliance state, set USBDRD()_UAHC_USBCMD[HCRST] = 1 or power-on-chip
+                                                                 reset. */
+        uint32_t physoftrst            : 1;  /**< [ 31: 31](R/W) USB3 PHY soft reset (PHYSoftRst). When set to 1, initiates a PHY soft reset. After setting
+                                                                 this bit to 1, the software needs to clear this bit. */
+#endif /* Word 0 - End */
+    } cn83xx;
+ /* struct bdk_usbdrdx_uahc_gusb3pipectlx_cn83xx cn9; */
+};
+typedef union bdk_usbdrdx_uahc_gusb3pipectlx bdk_usbdrdx_uahc_gusb3pipectlx_t;
+
+/* Return the physical CSR address of USBDRD(a)_UAHC_GUSB3PIPECTL(b).
+ * Valid only for a <= 1 (USB controller index) and b == 0 (PIPE index);
+ * any other combination reports a fatal CSR error via __bdk_csr_fatal().
+ * All three supported model families (CN81XX/CN83XX/CN9XXX) map to the
+ * same address; the "4ll * ((b) & 0x0)" term is always zero since b == 0. */
+static inline uint64_t BDK_USBDRDX_UAHC_GUSB3PIPECTLX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_GUSB3PIPECTLX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+        return 0x86800000c2c0ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+        return 0x86800000c2c0ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+        return 0x86800000c2c0ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_GUSB3PIPECTLX", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* helper macros. */
+#define typedef_BDK_USBDRDX_UAHC_GUSB3PIPECTLX(a,b) bdk_usbdrdx_uahc_gusb3pipectlx_t
+#define bustype_BDK_USBDRDX_UAHC_GUSB3PIPECTLX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_GUSB3PIPECTLX(a,b) "USBDRDX_UAHC_GUSB3PIPECTLX"
+#define device_bar_BDK_USBDRDX_UAHC_GUSB3PIPECTLX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_GUSB3PIPECTLX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_GUSB3PIPECTLX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_hccparams
+ *
+ * USB XHCI Controller Capability Parameters Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.3.6.
+ */
+union bdk_usbdrdx_uahc_hccparams
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_hccparams_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t xecp                  : 16; /**< [ 31: 16](RO) xHCI extended capabilities pointer. */
+        uint32_t maxpsasize            : 4;  /**< [ 15: 12](RO) Maximum primary-stream-array size. */
+        uint32_t reserved_11           : 1;  /* Reserved in the v1.0 register; defined as CFC in the v1.1 HCCPARAMS1 register below. */
+        uint32_t sec                   : 1;  /**< [ 10: 10](RO/H) Stopped EDLTA capability. */
+        uint32_t spc                   : 1;  /**< [  9:  9](RO/H) Stopped - short packet capability. */
+        uint32_t pae                   : 1;  /**< [  8:  8](RO) Parse all event data. */
+        uint32_t nss                   : 1;  /**< [  7:  7](RO) No secondary SID support. */
+        uint32_t ltc                   : 1;  /**< [  6:  6](RO) Latency tolerance messaging capability. */
+        uint32_t lhrc                  : 1;  /**< [  5:  5](RO) Light HC reset capability. */
+        uint32_t pind                  : 1;  /**< [  4:  4](RO) Port indicators. */
+        uint32_t ppc                   : 1;  /**< [  3:  3](RO) Port power control. Value is based on USBDRD()_UCTL_HOST_CFG[PPC_EN]. */
+        uint32_t csz                   : 1;  /**< [  2:  2](RO) Context size. */
+        uint32_t bnc                   : 1;  /**< [  1:  1](RO) BW negotiation capability. */
+        uint32_t ac64                  : 1;  /**< [  0:  0](RO) 64-bit addressing capability. */
+#else /* Word 0 - Little Endian */
+        uint32_t ac64                  : 1;  /**< [  0:  0](RO) 64-bit addressing capability. */
+        uint32_t bnc                   : 1;  /**< [  1:  1](RO) BW negotiation capability. */
+        uint32_t csz                   : 1;  /**< [  2:  2](RO) Context size. */
+        uint32_t ppc                   : 1;  /**< [  3:  3](RO) Port power control. Value is based on USBDRD()_UCTL_HOST_CFG[PPC_EN]. */
+        uint32_t pind                  : 1;  /**< [  4:  4](RO) Port indicators. */
+        uint32_t lhrc                  : 1;  /**< [  5:  5](RO) Light HC reset capability. */
+        uint32_t ltc                   : 1;  /**< [  6:  6](RO) Latency tolerance messaging capability. */
+        uint32_t nss                   : 1;  /**< [  7:  7](RO) No secondary SID support. */
+        uint32_t pae                   : 1;  /**< [  8:  8](RO) Parse all event data. */
+        uint32_t spc                   : 1;  /**< [  9:  9](RO/H) Stopped - short packet capability. */
+        uint32_t sec                   : 1;  /**< [ 10: 10](RO/H) Stopped EDLTA capability. */
+        uint32_t reserved_11           : 1;  /* Reserved in the v1.0 register; defined as CFC in the v1.1 HCCPARAMS1 register below. */
+        uint32_t maxpsasize            : 4;  /**< [ 15: 12](RO) Maximum primary-stream-array size. */
+        uint32_t xecp                  : 16; /**< [ 31: 16](RO) xHCI extended capabilities pointer. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_hccparams_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_hccparams bdk_usbdrdx_uahc_hccparams_t;
+
+/* Return the physical CSR address of USBDRD(a)_UAHC_HCCPARAMS for a <= 1
+ * (USB controller index). Only CN81XX/CN83XX are handled here — note there
+ * is no CN9XXX branch, unlike BDK_USBDRDX_UAHC_GUSB3PIPECTLX; presumably
+ * CN9 parts expose the v1.1 HCCPARAMS1 register instead (TODO confirm).
+ * Unsupported models/indices report a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_HCCPARAMS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_HCCPARAMS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x868000000010ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x868000000010ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_HCCPARAMS", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* helper macros. */
+#define typedef_BDK_USBDRDX_UAHC_HCCPARAMS(a) bdk_usbdrdx_uahc_hccparams_t
+#define bustype_BDK_USBDRDX_UAHC_HCCPARAMS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_HCCPARAMS(a) "USBDRDX_UAHC_HCCPARAMS"
+#define device_bar_BDK_USBDRDX_UAHC_HCCPARAMS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_HCCPARAMS(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_HCCPARAMS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_hccparams1
+ *
+ * USB XHCI Controller Capability Parameters Register 1
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.3.6.
+ */
+union bdk_usbdrdx_uahc_hccparams1
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_hccparams1_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t xecp                  : 16; /**< [ 31: 16](RO) xHCI extended capabilities pointer. */
+        uint32_t maxpsasize            :  4; /**< [ 15: 12](RO) Maximum primary-stream-array size. */
+        uint32_t cfc                   :  1; /**< [ 11: 11](RO) Contiguous frame ID capability. */
+        uint32_t sec                   :  1; /**< [ 10: 10](RO/H) Stopped EDLTA capability. */
+        uint32_t spc                   :  1; /**< [  9:  9](RO/H) Stopped - short packet capability. */
+        uint32_t pae                   :  1; /**< [  8:  8](RO) Parse all event data. */
+        uint32_t nss                   :  1; /**< [  7:  7](RO) No secondary SID support. */
+        uint32_t ltc                   :  1; /**< [  6:  6](RO) Latency tolerance messaging capability. */
+        uint32_t lhrc                  :  1; /**< [  5:  5](RO) Light HC reset capability. */
+        uint32_t pind                  :  1; /**< [  4:  4](RO) Port indicators. */
+        uint32_t ppc                   :  1; /**< [  3:  3](RO) Port power control. Value is based on USBDRD()_UCTL_HOST_CFG[PPC_EN]. */
+        uint32_t csz                   :  1; /**< [  2:  2](RO) Context size. */
+        uint32_t bnc                   :  1; /**< [  1:  1](RO) BW negotiation capability. */
+        uint32_t ac64                  :  1; /**< [  0:  0](RO) 64-bit addressing capability. */
+#else /* Word 0 - Little Endian */
+        uint32_t ac64                  :  1; /**< [  0:  0](RO) 64-bit addressing capability. */
+        uint32_t bnc                   :  1; /**< [  1:  1](RO) BW negotiation capability. */
+        uint32_t csz                   :  1; /**< [  2:  2](RO) Context size. */
+        uint32_t ppc                   :  1; /**< [  3:  3](RO) Port power control. Value is based on USBDRD()_UCTL_HOST_CFG[PPC_EN]. */
+        uint32_t pind                  :  1; /**< [  4:  4](RO) Port indicators. */
+        uint32_t lhrc                  :  1; /**< [  5:  5](RO) Light HC reset capability. */
+        uint32_t ltc                   :  1; /**< [  6:  6](RO) Latency tolerance messaging capability. */
+        uint32_t nss                   :  1; /**< [  7:  7](RO) No secondary SID support. */
+        uint32_t pae                   :  1; /**< [  8:  8](RO) Parse all event data. */
+        uint32_t spc                   :  1; /**< [  9:  9](RO/H) Stopped - short packet capability. */
+        uint32_t sec                   :  1; /**< [ 10: 10](RO/H) Stopped EDLTA capability. */
+        uint32_t cfc                   :  1; /**< [ 11: 11](RO) Contiguous frame ID capability. */
+        uint32_t maxpsasize            :  4; /**< [ 15: 12](RO) Maximum primary-stream-array size. */
+        uint32_t xecp                  : 16; /**< [ 31: 16](RO) xHCI extended capabilities pointer. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_hccparams1_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_hccparams1 bdk_usbdrdx_uahc_hccparams1_t;
+
+/* Address of USBDRDX_UAHC_HCCPARAMS1 for controller [a]; only valid on
+   CN9XXX with a <= 1 (on CN8XXX the same offset 0x10 is named HCCPARAMS).
+   Invalid model/index ends in __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_HCCPARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_HCCPARAMS1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x868000000010ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_HCCPARAMS1", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_HCCPARAMS1(a) bdk_usbdrdx_uahc_hccparams1_t
+#define bustype_BDK_USBDRDX_UAHC_HCCPARAMS1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_HCCPARAMS1(a) "USBDRDX_UAHC_HCCPARAMS1"
+#define device_bar_BDK_USBDRDX_UAHC_HCCPARAMS1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_HCCPARAMS1(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_HCCPARAMS1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_hccparams2
+ *
+ * USB XHCI Controller Capability Parameters Register 2
+ * The default values for all fields in this register are implementation dependent. For
+ * information on this register, refer to the xHCI Specification, v1.1, section 5.3.9.
+ */
+union bdk_usbdrdx_uahc_hccparams2
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_hccparams2_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_6_31         : 26;
+        uint32_t cic                   :  1; /**< [  5:  5](RO) Configuration information capability. */
+        uint32_t lec                   :  1; /**< [  4:  4](RO) Large ESIT payload capability. */
+        uint32_t ctc                   :  1; /**< [  3:  3](RO) Compliance transition capability. */
+        uint32_t fsc                   :  1; /**< [  2:  2](RO) Force save context capability. */
+        uint32_t cmc                   :  1; /**< [  1:  1](RO) Configure endpoint command max exit latency too large capability. */
+        uint32_t u3c                   :  1; /**< [  0:  0](RO) U3 entry capability. */
+#else /* Word 0 - Little Endian */
+        uint32_t u3c                   :  1; /**< [  0:  0](RO) U3 entry capability. */
+        uint32_t cmc                   :  1; /**< [  1:  1](RO) Configure endpoint command max exit latency too large capability. */
+        uint32_t fsc                   :  1; /**< [  2:  2](RO) Force save context capability. */
+        uint32_t ctc                   :  1; /**< [  3:  3](RO) Compliance transition capability. */
+        uint32_t lec                   :  1; /**< [  4:  4](RO) Large ESIT payload capability. */
+        uint32_t cic                   :  1; /**< [  5:  5](RO) Configuration information capability. */
+        uint32_t reserved_6_31         : 26;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_hccparams2_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_hccparams2 bdk_usbdrdx_uahc_hccparams2_t;
+
+/* Address of USBDRDX_UAHC_HCCPARAMS2 (offset 0x1c) for controller [a];
+   only valid on CN9XXX with a <= 1, otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_HCCPARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_HCCPARAMS2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000001cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_HCCPARAMS2", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_HCCPARAMS2(a) bdk_usbdrdx_uahc_hccparams2_t
+#define bustype_BDK_USBDRDX_UAHC_HCCPARAMS2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_HCCPARAMS2(a) "USBDRDX_UAHC_HCCPARAMS2"
+#define device_bar_BDK_USBDRDX_UAHC_HCCPARAMS2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_HCCPARAMS2(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_HCCPARAMS2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_hcsparams1
+ *
+ * USB XHCI Controller Structural Parameters Register 1
+ * This register defines basic structural parameters supported by this xHC implementation: number
+ * of device slots support, Interrupters, root hub ports, etc. For information on this
+ * register, refer to the xHCI Specification, v1.1, section 5.3.3.
+ */
+union bdk_usbdrdx_uahc_hcsparams1
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_hcsparams1_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t maxports              :  8; /**< [ 31: 24](RO) Maximum number of ports. */
+        uint32_t reserved_19_23        :  5;
+        uint32_t maxintrs              : 11; /**< [ 18:  8](RO) Maximum number of interrupters. */
+        uint32_t maxslots              :  8; /**< [  7:  0](RO) Maximum number of device slots. */
+#else /* Word 0 - Little Endian */
+        uint32_t maxslots              :  8; /**< [  7:  0](RO) Maximum number of device slots. */
+        uint32_t maxintrs              : 11; /**< [ 18:  8](RO) Maximum number of interrupters. */
+        uint32_t reserved_19_23        :  5;
+        uint32_t maxports              :  8; /**< [ 31: 24](RO) Maximum number of ports. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_hcsparams1_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_hcsparams1 bdk_usbdrdx_uahc_hcsparams1_t;
+
+/* Address of USBDRDX_UAHC_HCSPARAMS1 (offset 0x4) for controller [a]
+   (a <= 1 on CN81XX/CN83XX/CN9XXX); otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_HCSPARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_HCSPARAMS1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x868000000004ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x868000000004ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x868000000004ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_HCSPARAMS1", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_HCSPARAMS1(a) bdk_usbdrdx_uahc_hcsparams1_t
+#define bustype_BDK_USBDRDX_UAHC_HCSPARAMS1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_HCSPARAMS1(a) "USBDRDX_UAHC_HCSPARAMS1"
+#define device_bar_BDK_USBDRDX_UAHC_HCSPARAMS1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_HCSPARAMS1(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_HCSPARAMS1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_hcsparams2
+ *
+ * USB XHCI Controller Structural Parameters Register 2
+ * This register defines additional xHC structural parameters. For information on this register,
+ * refer to the xHCI Specification, v1.1, section 5.3.4.
+ */
+union bdk_usbdrdx_uahc_hcsparams2
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_hcsparams2_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds.  Note the 10-bit
+           scratchpad-buffer count is split: [31:27] holds bits [4:0] and
+           [25:21] holds bits [9:5]. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t maxscratchpadbufs_l   :  5; /**< [ 31: 27](RO) Maximum number of scratchpad buffers[4:0]. */
+        uint32_t spr                   :  1; /**< [ 26: 26](RO) Scratchpad restore. */
+        uint32_t maxscratchpadbufs_h   :  5; /**< [ 25: 21](RO) Maximum number of scratchpad buffers[9:5]. */
+        uint32_t reserved_8_20         : 13;
+        uint32_t erst_max              :  4; /**< [  7:  4](RO) Event ring segment table maximum. */
+        uint32_t ist                   :  4; /**< [  3:  0](RO) Isochronous scheduling threshold. */
+#else /* Word 0 - Little Endian */
+        uint32_t ist                   :  4; /**< [  3:  0](RO) Isochronous scheduling threshold. */
+        uint32_t erst_max              :  4; /**< [  7:  4](RO) Event ring segment table maximum. */
+        uint32_t reserved_8_20         : 13;
+        uint32_t maxscratchpadbufs_h   :  5; /**< [ 25: 21](RO) Maximum number of scratchpad buffers[9:5]. */
+        uint32_t spr                   :  1; /**< [ 26: 26](RO) Scratchpad restore. */
+        uint32_t maxscratchpadbufs_l   :  5; /**< [ 31: 27](RO) Maximum number of scratchpad buffers[4:0]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_hcsparams2_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_hcsparams2 bdk_usbdrdx_uahc_hcsparams2_t;
+
+/* Address of USBDRDX_UAHC_HCSPARAMS2 (offset 0x8) for controller [a]
+   (a <= 1 on CN81XX/CN83XX/CN9XXX); otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_HCSPARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_HCSPARAMS2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x868000000008ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x868000000008ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x868000000008ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_HCSPARAMS2", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_HCSPARAMS2(a) bdk_usbdrdx_uahc_hcsparams2_t
+#define bustype_BDK_USBDRDX_UAHC_HCSPARAMS2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_HCSPARAMS2(a) "USBDRDX_UAHC_HCSPARAMS2"
+#define device_bar_BDK_USBDRDX_UAHC_HCSPARAMS2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_HCSPARAMS2(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_HCSPARAMS2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_hcsparams3
+ *
+ * USB XHCI Controller Structural Parameters Register 3
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.3.5.
+ */
+union bdk_usbdrdx_uahc_hcsparams3
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_hcsparams3_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t u2_device_exit_latency : 16;/**< [ 31: 16](RO) U2 device exit latency. */
+        uint32_t reserved_8_15         :  8;
+        uint32_t u1_device_exit_latency :  8; /**< [  7:  0](RO) U1 device exit latency. */
+#else /* Word 0 - Little Endian */
+        uint32_t u1_device_exit_latency :  8; /**< [  7:  0](RO) U1 device exit latency. */
+        uint32_t reserved_8_15         :  8;
+        uint32_t u2_device_exit_latency : 16;/**< [ 31: 16](RO) U2 device exit latency. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_hcsparams3_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_hcsparams3 bdk_usbdrdx_uahc_hcsparams3_t;
+
+/* Address of USBDRDX_UAHC_HCSPARAMS3 (offset 0xc) for controller [a]
+   (a <= 1 on CN81XX/CN83XX/CN9XXX); otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_HCSPARAMS3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_HCSPARAMS3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x86800000000cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x86800000000cll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x86800000000cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_HCSPARAMS3", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_HCSPARAMS3(a) bdk_usbdrdx_uahc_hcsparams3_t
+#define bustype_BDK_USBDRDX_UAHC_HCSPARAMS3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_HCSPARAMS3(a) "USBDRDX_UAHC_HCSPARAMS3"
+#define device_bar_BDK_USBDRDX_UAHC_HCSPARAMS3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_HCSPARAMS3(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_HCSPARAMS3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_iman#
+ *
+ * USB XHCI Interrupt Management Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.5.2.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_imanx
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_imanx_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_2_31         : 30;
+        uint32_t ie                    :  1; /**< [  1:  1](R/W) Interrupt enable. */
+        uint32_t ip                    :  1; /**< [  0:  0](R/W1C/H) Interrupt pending. */
+#else /* Word 0 - Little Endian */
+        uint32_t ip                    :  1; /**< [  0:  0](R/W1C/H) Interrupt pending. */
+        uint32_t ie                    :  1; /**< [  1:  1](R/W) Interrupt enable. */
+        uint32_t reserved_2_31         : 30;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_imanx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_imanx bdk_usbdrdx_uahc_imanx_t;
+
+/* Address of USBDRDX_UAHC_IMAN(b) for controller [a].  Only interrupter
+   b == 0 exists (the index term multiplies by 0), with a <= 1 on all
+   supported models; otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_IMANX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_IMANX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+        return 0x868000000460ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+        return 0x868000000460ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+        return 0x868000000460ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_IMANX", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_IMANX(a,b) bdk_usbdrdx_uahc_imanx_t
+#define bustype_BDK_USBDRDX_UAHC_IMANX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_IMANX(a,b) "USBDRDX_UAHC_IMANX"
+#define device_bar_BDK_USBDRDX_UAHC_IMANX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_IMANX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_IMANX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_imod#
+ *
+ * USB XHCI Interrupt Moderation Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.5.2.2.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_imodx
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_imodx_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t imodc                 : 16; /**< [ 31: 16](R/W) Interrupt moderation counter. */
+        uint32_t imodi                 : 16; /**< [ 15:  0](R/W) Interrupt moderation interval. */
+#else /* Word 0 - Little Endian */
+        uint32_t imodi                 : 16; /**< [ 15:  0](R/W) Interrupt moderation interval. */
+        uint32_t imodc                 : 16; /**< [ 31: 16](R/W) Interrupt moderation counter. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_imodx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_imodx bdk_usbdrdx_uahc_imodx_t;
+
+/* Address of USBDRDX_UAHC_IMOD(b) for controller [a].  Only interrupter
+   b == 0 exists (the index term multiplies by 0), with a <= 1 on all
+   supported models; otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_IMODX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_IMODX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+        return 0x868000000464ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+        return 0x868000000464ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+        return 0x868000000464ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_IMODX", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_IMODX(a,b) bdk_usbdrdx_uahc_imodx_t
+#define bustype_BDK_USBDRDX_UAHC_IMODX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_IMODX(a,b) "USBDRDX_UAHC_IMODX"
+#define device_bar_BDK_USBDRDX_UAHC_IMODX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_IMODX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_IMODX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_mfindex
+ *
+ * USB XHCI Microframe Index Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.5.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_mfindex
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_mfindex_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_14_31        : 18;
+        uint32_t mfindex               : 14; /**< [ 13:  0](RO/H) Microframe index. */
+#else /* Word 0 - Little Endian */
+        uint32_t mfindex               : 14; /**< [ 13:  0](RO/H) Microframe index. */
+        uint32_t reserved_14_31        : 18;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_mfindex_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_mfindex bdk_usbdrdx_uahc_mfindex_t;
+
+/* Address of USBDRDX_UAHC_MFINDEX (offset 0x440) for controller [a]
+   (a <= 1 on CN81XX/CN83XX/CN9XXX); otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_MFINDEX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_MFINDEX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x868000000440ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x868000000440ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x868000000440ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_MFINDEX", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_MFINDEX(a) bdk_usbdrdx_uahc_mfindex_t
+#define bustype_BDK_USBDRDX_UAHC_MFINDEX(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_MFINDEX(a) "USBDRDX_UAHC_MFINDEX"
+#define device_bar_BDK_USBDRDX_UAHC_MFINDEX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_MFINDEX(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_MFINDEX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_pagesize
+ *
+ * USB XHCI Page-Size Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.3.
+ */
+union bdk_usbdrdx_uahc_pagesize
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_pagesize_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t pagesize              : 16; /**< [ 15:  0](RO) Page size. */
+#else /* Word 0 - Little Endian */
+        uint32_t pagesize              : 16; /**< [ 15:  0](RO) Page size. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_pagesize_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_pagesize bdk_usbdrdx_uahc_pagesize_t;
+
+/* Address of USBDRDX_UAHC_PAGESIZE (offset 0x28) for controller [a]
+   (a <= 1 on CN81XX/CN83XX/CN9XXX); otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_PAGESIZE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_PAGESIZE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+        return 0x868000000028ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+        return 0x868000000028ll + 0x1000000000ll * ((a) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+        return 0x868000000028ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_PAGESIZE", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_PAGESIZE(a) bdk_usbdrdx_uahc_pagesize_t
+#define bustype_BDK_USBDRDX_UAHC_PAGESIZE(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_PAGESIZE(a) "USBDRDX_UAHC_PAGESIZE"
+#define device_bar_BDK_USBDRDX_UAHC_PAGESIZE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_PAGESIZE(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_PAGESIZE(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_porthlpmc_20#
+ *
+ * USB XHCI Port Hardware LPM Control (High-Speed) Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.11.2.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_porthlpmc_20x
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_porthlpmc_20x_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_14_31        : 18;
+        uint32_t hirdd                 :  4; /**< [ 13: 10](R/W) See section 5.4.11.2 of the XHCI Spec 1.1.
+                                                                 If USBDRD()_UAHC_SUPTPRT2_DW2[BLC] = 0, then HIRD timing is applied to this field.
+                                                                 If USBDRD()_UAHC_SUPTPRT2_DW2[BLC] = 1, then BESL timing is applied to this field. */
+        uint32_t l1_timeout            :  8; /**< [  9:  2](R/W) Timeout value for the L1 inactivity timer (LPM timer). This field is set to 0x0 by the
+                                                                 assertion of PR to 1. Refer to section 4.23.5.1.1.1 (in XHCI spec 1.1) for more
+                                                                 information on L1 Timeout operation.
+                                                                 The following are permissible values:
+                                                                 0x0 = 128 us. (default).
+                                                                 0x1 = 256 us.
+                                                                 0x2 = 512 us.
+                                                                 0x3 = 768 us.
+                                                                 _ ...
+                                                                 0xFF = 65280 us. */
+        uint32_t hirdm                 :  2; /**< [  1:  0](R/W) Host-initiated resume-duration mode. */
+#else /* Word 0 - Little Endian */
+        uint32_t hirdm                 :  2; /**< [  1:  0](R/W) Host-initiated resume-duration mode. */
+        uint32_t l1_timeout            :  8; /**< [  9:  2](R/W) Timeout value for the L1 inactivity timer (LPM timer). This field is set to 0x0 by the
+                                                                 assertion of PR to 1. Refer to section 4.23.5.1.1.1 (in XHCI spec 1.1) for more
+                                                                 information on L1 Timeout operation.
+                                                                 The following are permissible values:
+                                                                 0x0 = 128 us. (default).
+                                                                 0x1 = 256 us.
+                                                                 0x2 = 512 us.
+                                                                 0x3 = 768 us.
+                                                                 _ ...
+                                                                 0xFF = 65280 us. */
+        uint32_t hirdd                 :  4; /**< [ 13: 10](R/W) See section 5.4.11.2 of the XHCI Spec 1.1.
+                                                                 If USBDRD()_UAHC_SUPTPRT2_DW2[BLC] = 0, then HIRD timing is applied to this field.
+                                                                 If USBDRD()_UAHC_SUPTPRT2_DW2[BLC] = 1, then BESL timing is applied to this field. */
+        uint32_t reserved_14_31        : 18;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_porthlpmc_20x_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_porthlpmc_20x bdk_usbdrdx_uahc_porthlpmc_20x_t;
+
+/* Address of USBDRDX_UAHC_PORTHLPMC_20(b) for controller [a].  Only port
+   b == 0 (the high-speed port) is valid here; per-port stride is 0x10.
+   a <= 1 on all supported models; otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_PORTHLPMC_20X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_PORTHLPMC_20X(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+        return 0x86800000042cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+        return 0x86800000042cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+        return 0x86800000042cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_PORTHLPMC_20X", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_PORTHLPMC_20X(a,b) bdk_usbdrdx_uahc_porthlpmc_20x_t
+#define bustype_BDK_USBDRDX_UAHC_PORTHLPMC_20X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_PORTHLPMC_20X(a,b) "USBDRDX_UAHC_PORTHLPMC_20X"
+#define device_bar_BDK_USBDRDX_UAHC_PORTHLPMC_20X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_PORTHLPMC_20X(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_PORTHLPMC_20X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_porthlpmc_ss#
+ *
+ * USB XHCI Port Hardware LPM Control (SuperSpeed) Register
+ * The USB3 port hardware LPM control register is reserved and shall be treated as RsvdP by
+ * software. See xHCI specification v1.1 section 5.4.11.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST].
+ */
+union bdk_usbdrdx_uahc_porthlpmc_ssx
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_porthlpmc_ssx_s
+    {
+        /* Entire word is reserved in both byte orders. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31         : 32;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31         : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_porthlpmc_ssx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_porthlpmc_ssx bdk_usbdrdx_uahc_porthlpmc_ssx_t;
+
+/* Address of USBDRDX_UAHC_PORTHLPMC_SS(b) for controller [a].  Only port
+   b == 1 (the SuperSpeed port) is valid; it sits 0x10 above the
+   high-speed port's register at the same base.  a <= 1 on all supported
+   models; otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_PORTHLPMC_SSX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_PORTHLPMC_SSX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==1)))
+        return 0x86800000042cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==1)))
+        return 0x86800000042cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==1)))
+        return 0x86800000042cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_PORTHLPMC_SSX", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_PORTHLPMC_SSX(a,b) bdk_usbdrdx_uahc_porthlpmc_ssx_t
+#define bustype_BDK_USBDRDX_UAHC_PORTHLPMC_SSX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_PORTHLPMC_SSX(a,b) "USBDRDX_UAHC_PORTHLPMC_SSX"
+#define device_bar_BDK_USBDRDX_UAHC_PORTHLPMC_SSX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_PORTHLPMC_SSX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_PORTHLPMC_SSX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_portli_20#
+ *
+ * USB XHCI Port Link (High-Speed) Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.10.
+ */
+union bdk_usbdrdx_uahc_portli_20x
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_portli_20x_s
+    {
+        /* Entire word is reserved in both byte orders. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31         : 32;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31         : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_portli_20x_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_portli_20x bdk_usbdrdx_uahc_portli_20x_t;
+
+/* Address of USBDRDX_UAHC_PORTLI_20(b) for controller [a].  Only port
+   b == 0 (the high-speed port) is valid; per-port stride is 0x10.
+   a <= 1 on all supported models; otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_PORTLI_20X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_PORTLI_20X(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+        return 0x868000000428ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+        return 0x868000000428ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+        return 0x868000000428ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_PORTLI_20X", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_PORTLI_20X(a,b) bdk_usbdrdx_uahc_portli_20x_t
+#define bustype_BDK_USBDRDX_UAHC_PORTLI_20X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_PORTLI_20X(a,b) "USBDRDX_UAHC_PORTLI_20X"
+#define device_bar_BDK_USBDRDX_UAHC_PORTLI_20X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_PORTLI_20X(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_PORTLI_20X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_portli_ss#
+ *
+ * USB XHCI Port Link (SuperSpeed) Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.10.
+ */
+union bdk_usbdrdx_uahc_portli_ssx
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_portli_ssx_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t linkerrorcount        : 16; /**< [ 15:  0](RO/H) Link error count. */
+#else /* Word 0 - Little Endian */
+        uint32_t linkerrorcount        : 16; /**< [ 15:  0](RO/H) Link error count. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_portli_ssx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_portli_ssx bdk_usbdrdx_uahc_portli_ssx_t;
+
+/* Address of USBDRDX_UAHC_PORTLI_SS(b) for controller [a].  Only port
+   b == 1 (the SuperSpeed port) is valid; it sits 0x10 above the
+   high-speed port's register at the same base.  a <= 1 on all supported
+   models; otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_PORTLI_SSX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_PORTLI_SSX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==1)))
+        return 0x868000000428ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==1)))
+        return 0x868000000428ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==1)))
+        return 0x868000000428ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_PORTLI_SSX", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_PORTLI_SSX(a,b) bdk_usbdrdx_uahc_portli_ssx_t
+#define bustype_BDK_USBDRDX_UAHC_PORTLI_SSX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_PORTLI_SSX(a,b) "USBDRDX_UAHC_PORTLI_SSX"
+#define device_bar_BDK_USBDRDX_UAHC_PORTLI_SSX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_PORTLI_SSX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_PORTLI_SSX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_portpmsc_20#
+ *
+ * USB XHCI Port Power Management Status/Control (High-Speed) Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.9.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST].
+ */
+union bdk_usbdrdx_uahc_portpmsc_20x
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_portpmsc_20x_s
+    {
+        /* Same 32-bit layout declared twice: MSB-first for big-endian
+           builds, LSB-first for little-endian builds. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t port_test_control     :  4; /**< [ 31: 28](R/W) Port test control. */
+        uint32_t reserved_17_27        : 11;
+        uint32_t hle                   :  1; /**< [ 16: 16](R/W) Hardware LPM enable. */
+        uint32_t l1_device_slot        :  8; /**< [ 15:  8](R/W) L1 device slot. */
+        uint32_t hird                  :  4; /**< [  7:  4](R/W) Host-initiated resume duration. */
+        uint32_t rwe                   :  1; /**< [  3:  3](R/W) Remove wake enable. */
+        uint32_t l1s                   :  3; /**< [  2:  0](RO/H) L1 status. */
+#else /* Word 0 - Little Endian */
+        uint32_t l1s                   :  3; /**< [  2:  0](RO/H) L1 status. */
+        uint32_t rwe                   :  1; /**< [  3:  3](R/W) Remove wake enable. */
+        uint32_t hird                  :  4; /**< [  7:  4](R/W) Host-initiated resume duration. */
+        uint32_t l1_device_slot        :  8; /**< [ 15:  8](R/W) L1 device slot. */
+        uint32_t hle                   :  1; /**< [ 16: 16](R/W) Hardware LPM enable. */
+        uint32_t reserved_17_27        : 11;
+        uint32_t port_test_control     :  4; /**< [ 31: 28](R/W) Port test control. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_portpmsc_20x_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_portpmsc_20x bdk_usbdrdx_uahc_portpmsc_20x_t;
+
+/* Address of USBDRDX_UAHC_PORTPMSC_20(b) for controller [a].  Only port
+   b == 0 (the high-speed port) is valid; per-port stride is 0x10.
+   a <= 1 on all supported models; otherwise __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_PORTPMSC_20X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_PORTPMSC_20X(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+        return 0x868000000424ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+        return 0x868000000424ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+        return 0x868000000424ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBDRDX_UAHC_PORTPMSC_20X", 2, a, b, 0, 0);
+}
+
+/* Per-register attribute macros: C type, bus type, printable name, BAR and
+   bus/argument numbers for this CSR. */
+#define typedef_BDK_USBDRDX_UAHC_PORTPMSC_20X(a,b) bdk_usbdrdx_uahc_portpmsc_20x_t
+#define bustype_BDK_USBDRDX_UAHC_PORTPMSC_20X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_PORTPMSC_20X(a,b) "USBDRDX_UAHC_PORTPMSC_20X"
+#define device_bar_BDK_USBDRDX_UAHC_PORTPMSC_20X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_PORTPMSC_20X(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_PORTPMSC_20X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_portpmsc_ss#
+ *
+ * USB XHCI Port Power Management Status/Control (SuperSpeed) Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.9.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST].
+ */
+union bdk_usbdrdx_uahc_portpmsc_ssx
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_portpmsc_ssx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_17_31 : 15;
+ uint32_t fla : 1; /**< [ 16: 16](R/W/H) Force link PM accept. */
+ uint32_t u2_timeout : 8; /**< [ 15: 8](R/W/H) U2 timeout. */
+ uint32_t u1_timeout : 8; /**< [ 7: 0](R/W/H) U1 timeout. */
+#else /* Word 0 - Little Endian */
+ uint32_t u1_timeout : 8; /**< [ 7: 0](R/W/H) U1 timeout. */
+ uint32_t u2_timeout : 8; /**< [ 15: 8](R/W/H) U2 timeout. */
+ uint32_t fla : 1; /**< [ 16: 16](R/W/H) Force link PM accept. */
+ uint32_t reserved_17_31 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_portpmsc_ssx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_portpmsc_ssx bdk_usbdrdx_uahc_portpmsc_ssx_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_PORTPMSC_SS(b). Valid only for
+   a <= 1 and b == 1 (the SuperSpeed port) on CN81XX/CN83XX/CN9XXX; any
+   other index reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_PORTPMSC_SSX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_PORTPMSC_SSX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==1)))
+ return 0x868000000424ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==1)))
+ return 0x868000000424ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==1)))
+ return 0x868000000424ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_PORTPMSC_SSX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_PORTPMSC_SSX(a,b) bdk_usbdrdx_uahc_portpmsc_ssx_t
+#define bustype_BDK_USBDRDX_UAHC_PORTPMSC_SSX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_PORTPMSC_SSX(a,b) "USBDRDX_UAHC_PORTPMSC_SSX"
+#define device_bar_BDK_USBDRDX_UAHC_PORTPMSC_SSX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_PORTPMSC_SSX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_PORTPMSC_SSX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_portsc#
+ *
+ * USB XHCI Port Status and Control Registers
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.8. Port 1
+ * is USB3.0 SuperSpeed link, Port 0 is USB2.0 high-speed/full-speed/low-speed link.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST].
+ */
+union bdk_usbdrdx_uahc_portscx
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_portscx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t wpr : 1; /**< [ 31: 31](WO) Warm port reset. */
+ uint32_t dr : 1; /**< [ 30: 30](RO/H) Device removable. */
+ uint32_t reserved_28_29 : 2;
+ uint32_t woe : 1; /**< [ 27: 27](R/W) Wake on overcurrent enable. */
+ uint32_t wde : 1; /**< [ 26: 26](R/W) Wake on disconnect enable. */
+ uint32_t wce : 1; /**< [ 25: 25](R/W) Wake on connect enable. */
+ uint32_t cas : 1; /**< [ 24: 24](RO/H) Cold attach status. */
+ uint32_t cec : 1; /**< [ 23: 23](R/W1C/H) Port configuration error change. */
+ uint32_t plc : 1; /**< [ 22: 22](R/W1C/H) Port link state change. */
+ uint32_t prc : 1; /**< [ 21: 21](R/W1C/H) Port reset change. */
+ uint32_t occ : 1; /**< [ 20: 20](R/W1C/H) Overcurrent change. */
+ uint32_t wrc : 1; /**< [ 19: 19](R/W1C/H) Warm port reset change. */
+ uint32_t pec : 1; /**< [ 18: 18](R/W1C/H) Port enabled/disabled change. */
+ uint32_t csc : 1; /**< [ 17: 17](R/W1C/H) Connect status change. */
+ uint32_t lws : 1; /**< [ 16: 16](WO) Port link state write strobe. */
+ uint32_t pic : 2; /**< [ 15: 14](R/W/H) Port indicator control. */
+ uint32_t portspeed : 4; /**< [ 13: 10](RO/H) Port speed. */
+ uint32_t pp : 1; /**< [ 9: 9](R/W/H) Port power. */
+ uint32_t pls : 4; /**< [ 8: 5](R/W/H) Port link state. */
+ uint32_t pr : 1; /**< [ 4: 4](R/W1S/H) Port reset. */
+ uint32_t oca : 1; /**< [ 3: 3](RO/H) Overcurrent active. */
+ uint32_t reserved_2 : 1;
+ uint32_t ped : 1; /**< [ 1: 1](R/W1C/H) Port enabled/disabled. */
+ uint32_t ccs : 1; /**< [ 0: 0](RO/H) Current connect status. */
+#else /* Word 0 - Little Endian */
+ uint32_t ccs : 1; /**< [ 0: 0](RO/H) Current connect status. */
+ uint32_t ped : 1; /**< [ 1: 1](R/W1C/H) Port enabled/disabled. */
+ uint32_t reserved_2 : 1;
+ uint32_t oca : 1; /**< [ 3: 3](RO/H) Overcurrent active. */
+ uint32_t pr : 1; /**< [ 4: 4](R/W1S/H) Port reset. */
+ uint32_t pls : 4; /**< [ 8: 5](R/W/H) Port link state. */
+ uint32_t pp : 1; /**< [ 9: 9](R/W/H) Port power. */
+ uint32_t portspeed : 4; /**< [ 13: 10](RO/H) Port speed. */
+ uint32_t pic : 2; /**< [ 15: 14](R/W/H) Port indicator control. */
+ uint32_t lws : 1; /**< [ 16: 16](WO) Port link state write strobe. */
+ uint32_t csc : 1; /**< [ 17: 17](R/W1C/H) Connect status change. */
+ uint32_t pec : 1; /**< [ 18: 18](R/W1C/H) Port enabled/disabled change. */
+ uint32_t wrc : 1; /**< [ 19: 19](R/W1C/H) Warm port reset change. */
+ uint32_t occ : 1; /**< [ 20: 20](R/W1C/H) Overcurrent change. */
+ uint32_t prc : 1; /**< [ 21: 21](R/W1C/H) Port reset change. */
+ uint32_t plc : 1; /**< [ 22: 22](R/W1C/H) Port link state change. */
+ uint32_t cec : 1; /**< [ 23: 23](R/W1C/H) Port configuration error change. */
+ uint32_t cas : 1; /**< [ 24: 24](RO/H) Cold attach status. */
+ uint32_t wce : 1; /**< [ 25: 25](R/W) Wake on connect enable. */
+ uint32_t wde : 1; /**< [ 26: 26](R/W) Wake on disconnect enable. */
+ uint32_t woe : 1; /**< [ 27: 27](R/W) Wake on overcurrent enable. */
+ uint32_t reserved_28_29 : 2;
+ uint32_t dr : 1; /**< [ 30: 30](RO/H) Device removable. */
+ uint32_t wpr : 1; /**< [ 31: 31](WO) Warm port reset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_portscx_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_portscx bdk_usbdrdx_uahc_portscx_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_PORTSC(b). Valid for a <= 1 and
+   b <= 1 (port 0 = USB2, port 1 = USB3) on CN81XX/CN83XX/CN9XXX; ports are
+   spaced 0x10 bytes apart. Out-of-range indices call __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_PORTSCX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_PORTSCX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b<=1)))
+ return 0x868000000420ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b<=1)))
+ return 0x868000000420ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b<=1)))
+ return 0x868000000420ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_PORTSCX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_PORTSCX(a,b) bdk_usbdrdx_uahc_portscx_t
+#define bustype_BDK_USBDRDX_UAHC_PORTSCX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_PORTSCX(a,b) "USBDRDX_UAHC_PORTSCX"
+#define device_bar_BDK_USBDRDX_UAHC_PORTSCX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_PORTSCX(a,b) (a)
+#define arguments_BDK_USBDRDX_UAHC_PORTSCX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_rtsoff
+ *
+ * USB XHCI Runtime Register-Space Offset Register
+ * This register defines the offset of the xHCI runtime registers from the base. For information
+ * on this register, refer to the xHCI Specification, v1.1, section 5.3.8.
+ */
+union bdk_usbdrdx_uahc_rtsoff
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_rtsoff_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t rtsoff : 27; /**< [ 31: 5](RO) Runtime register-space offset. */
+ uint32_t reserved_0_4 : 5;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_4 : 5;
+ uint32_t rtsoff : 27; /**< [ 31: 5](RO) Runtime register-space offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_rtsoff_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_rtsoff bdk_usbdrdx_uahc_rtsoff_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_RTSOFF. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_RTSOFF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_RTSOFF(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000000018ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000000018ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000000018ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_RTSOFF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_RTSOFF(a) bdk_usbdrdx_uahc_rtsoff_t
+#define bustype_BDK_USBDRDX_UAHC_RTSOFF(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_RTSOFF(a) "USBDRDX_UAHC_RTSOFF"
+#define device_bar_BDK_USBDRDX_UAHC_RTSOFF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_RTSOFF(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_RTSOFF(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_suptprt2_dw0
+ *
+ * USB XHCI Supported-Protocol-Capability (USB 2.0) Register 0
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbdrdx_uahc_suptprt2_dw0
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_suptprt2_dw0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t majorrev : 8; /**< [ 31: 24](RO) Major revision. */
+ uint32_t minorrev : 8; /**< [ 23: 16](RO) Minor revision. */
+ uint32_t nextcapptr : 8; /**< [ 15: 8](RO) Next capability pointer. */
+ uint32_t capid : 8; /**< [ 7: 0](RO) Capability ID = supported protocol. */
+#else /* Word 0 - Little Endian */
+ uint32_t capid : 8; /**< [ 7: 0](RO) Capability ID = supported protocol. */
+ uint32_t nextcapptr : 8; /**< [ 15: 8](RO) Next capability pointer. */
+ uint32_t minorrev : 8; /**< [ 23: 16](RO) Minor revision. */
+ uint32_t majorrev : 8; /**< [ 31: 24](RO) Major revision. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_suptprt2_dw0_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_suptprt2_dw0 bdk_usbdrdx_uahc_suptprt2_dw0_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_SUPTPRT2_DW0. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT2_DW0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT2_DW0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000000890ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000000890ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000000890ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_SUPTPRT2_DW0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_SUPTPRT2_DW0(a) bdk_usbdrdx_uahc_suptprt2_dw0_t
+#define bustype_BDK_USBDRDX_UAHC_SUPTPRT2_DW0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_SUPTPRT2_DW0(a) "USBDRDX_UAHC_SUPTPRT2_DW0"
+#define device_bar_BDK_USBDRDX_UAHC_SUPTPRT2_DW0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_SUPTPRT2_DW0(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_SUPTPRT2_DW0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_suptprt2_dw1
+ *
+ * USB XHCI Supported-Protocol-Capability (USB 2.0) Register 1
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbdrdx_uahc_suptprt2_dw1
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_suptprt2_dw1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t name : 32; /**< [ 31: 0](RO) Name string: 'USB'. */
+#else /* Word 0 - Little Endian */
+ uint32_t name : 32; /**< [ 31: 0](RO) Name string: 'USB'. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_suptprt2_dw1_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_suptprt2_dw1 bdk_usbdrdx_uahc_suptprt2_dw1_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_SUPTPRT2_DW1. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT2_DW1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT2_DW1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000000894ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000000894ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000000894ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_SUPTPRT2_DW1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_SUPTPRT2_DW1(a) bdk_usbdrdx_uahc_suptprt2_dw1_t
+#define bustype_BDK_USBDRDX_UAHC_SUPTPRT2_DW1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_SUPTPRT2_DW1(a) "USBDRDX_UAHC_SUPTPRT2_DW1"
+#define device_bar_BDK_USBDRDX_UAHC_SUPTPRT2_DW1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_SUPTPRT2_DW1(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_SUPTPRT2_DW1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_suptprt2_dw2
+ *
+ * USB XHCI Supported-Protocol-Capability (USB 2.0) Register 2
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbdrdx_uahc_suptprt2_dw2
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_suptprt2_dw2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t psic : 4; /**< [ 31: 28](RO) Protocol speed ID count. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t blc : 1; /**< [ 20: 20](RO) BESL LPM capability. */
+ uint32_t hlc : 1; /**< [ 19: 19](RO) Hardware LPM capability. */
+ uint32_t ihi : 1; /**< [ 18: 18](RO) Integrated hub implemented. */
+ uint32_t hso : 1; /**< [ 17: 17](RO) High-speed only. */
+ uint32_t reserved_16 : 1;
+ uint32_t compatprtcnt : 8; /**< [ 15: 8](RO) Compatible port count. */
+ uint32_t compatprtoff : 8; /**< [ 7: 0](RO) Compatible port offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t compatprtoff : 8; /**< [ 7: 0](RO) Compatible port offset. */
+ uint32_t compatprtcnt : 8; /**< [ 15: 8](RO) Compatible port count. */
+ uint32_t reserved_16 : 1;
+ uint32_t hso : 1; /**< [ 17: 17](RO) High-speed only. */
+ uint32_t ihi : 1; /**< [ 18: 18](RO) Integrated hub implemented. */
+ uint32_t hlc : 1; /**< [ 19: 19](RO) Hardware LPM capability. */
+ uint32_t blc : 1; /**< [ 20: 20](RO) BESL LPM capability. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t psic : 4; /**< [ 31: 28](RO) Protocol speed ID count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_suptprt2_dw2_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_suptprt2_dw2 bdk_usbdrdx_uahc_suptprt2_dw2_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_SUPTPRT2_DW2. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT2_DW2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT2_DW2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000000898ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000000898ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000000898ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_SUPTPRT2_DW2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_SUPTPRT2_DW2(a) bdk_usbdrdx_uahc_suptprt2_dw2_t
+#define bustype_BDK_USBDRDX_UAHC_SUPTPRT2_DW2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_SUPTPRT2_DW2(a) "USBDRDX_UAHC_SUPTPRT2_DW2"
+#define device_bar_BDK_USBDRDX_UAHC_SUPTPRT2_DW2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_SUPTPRT2_DW2(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_SUPTPRT2_DW2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_suptprt2_dw3
+ *
+ * USB XHCI Supported-Protocol-Capability (USB 2.0) Register 3
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbdrdx_uahc_suptprt2_dw3
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_suptprt2_dw3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_5_31 : 27;
+ uint32_t protslottype : 5; /**< [ 4: 0](RO) Protocol slot type. */
+#else /* Word 0 - Little Endian */
+ uint32_t protslottype : 5; /**< [ 4: 0](RO) Protocol slot type. */
+ uint32_t reserved_5_31 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_suptprt2_dw3_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_suptprt2_dw3 bdk_usbdrdx_uahc_suptprt2_dw3_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_SUPTPRT2_DW3. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT2_DW3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT2_DW3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x86800000089cll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x86800000089cll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x86800000089cll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_SUPTPRT2_DW3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_SUPTPRT2_DW3(a) bdk_usbdrdx_uahc_suptprt2_dw3_t
+#define bustype_BDK_USBDRDX_UAHC_SUPTPRT2_DW3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_SUPTPRT2_DW3(a) "USBDRDX_UAHC_SUPTPRT2_DW3"
+#define device_bar_BDK_USBDRDX_UAHC_SUPTPRT2_DW3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_SUPTPRT2_DW3(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_SUPTPRT2_DW3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_suptprt3_dw0
+ *
+ * USB XHCI Supported-Protocol-Capability (USB 3.0) Register 0
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbdrdx_uahc_suptprt3_dw0
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_suptprt3_dw0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t majorrev : 8; /**< [ 31: 24](RO) Major revision. */
+ uint32_t minorrev : 8; /**< [ 23: 16](RO) Minor revision. */
+ uint32_t nextcapptr : 8; /**< [ 15: 8](RO/H) Next capability pointer. Value depends on USBDRD()_UAHC_GUCTL[EXTCAPSUPTEN]. If
+ EXTCAPSUPTEN
+ =
+ 0, value is 0x0. If EXTCAPSUPTEN = 1, value is 0x4. */
+ uint32_t capid : 8; /**< [ 7: 0](RO) Capability ID = supported protocol. */
+#else /* Word 0 - Little Endian */
+ uint32_t capid : 8; /**< [ 7: 0](RO) Capability ID = supported protocol. */
+ uint32_t nextcapptr : 8; /**< [ 15: 8](RO/H) Next capability pointer. Value depends on USBDRD()_UAHC_GUCTL[EXTCAPSUPTEN]. If
+ EXTCAPSUPTEN
+ =
+ 0, value is 0x0. If EXTCAPSUPTEN = 1, value is 0x4. */
+ uint32_t minorrev : 8; /**< [ 23: 16](RO) Minor revision. */
+ uint32_t majorrev : 8; /**< [ 31: 24](RO) Major revision. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_suptprt3_dw0_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_suptprt3_dw0 bdk_usbdrdx_uahc_suptprt3_dw0_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_SUPTPRT3_DW0. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT3_DW0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT3_DW0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8680000008a0ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x8680000008a0ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x8680000008a0ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_SUPTPRT3_DW0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_SUPTPRT3_DW0(a) bdk_usbdrdx_uahc_suptprt3_dw0_t
+#define bustype_BDK_USBDRDX_UAHC_SUPTPRT3_DW0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_SUPTPRT3_DW0(a) "USBDRDX_UAHC_SUPTPRT3_DW0"
+#define device_bar_BDK_USBDRDX_UAHC_SUPTPRT3_DW0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_SUPTPRT3_DW0(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_SUPTPRT3_DW0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_suptprt3_dw1
+ *
+ * USB XHCI Supported-Protocol-Capability (USB 3.0) Register 1
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbdrdx_uahc_suptprt3_dw1
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_suptprt3_dw1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t name : 32; /**< [ 31: 0](RO) Name string: 'USB'. */
+#else /* Word 0 - Little Endian */
+ uint32_t name : 32; /**< [ 31: 0](RO) Name string: 'USB'. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_suptprt3_dw1_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_suptprt3_dw1 bdk_usbdrdx_uahc_suptprt3_dw1_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_SUPTPRT3_DW1. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT3_DW1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT3_DW1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8680000008a4ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x8680000008a4ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x8680000008a4ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_SUPTPRT3_DW1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_SUPTPRT3_DW1(a) bdk_usbdrdx_uahc_suptprt3_dw1_t
+#define bustype_BDK_USBDRDX_UAHC_SUPTPRT3_DW1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_SUPTPRT3_DW1(a) "USBDRDX_UAHC_SUPTPRT3_DW1"
+#define device_bar_BDK_USBDRDX_UAHC_SUPTPRT3_DW1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_SUPTPRT3_DW1(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_SUPTPRT3_DW1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_suptprt3_dw2
+ *
+ * USB XHCI Supported-Protocol-Capability (USB 3.0) Register 2
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbdrdx_uahc_suptprt3_dw2
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_suptprt3_dw2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t psic : 4; /**< [ 31: 28](RO) Protocol speed ID count. */
+ uint32_t reserved_16_27 : 12;
+ uint32_t compatprtcnt : 8; /**< [ 15: 8](RO) Compatible port count. */
+ uint32_t compatprtoff : 8; /**< [ 7: 0](RO) Compatible port offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t compatprtoff : 8; /**< [ 7: 0](RO) Compatible port offset. */
+ uint32_t compatprtcnt : 8; /**< [ 15: 8](RO) Compatible port count. */
+ uint32_t reserved_16_27 : 12;
+ uint32_t psic : 4; /**< [ 31: 28](RO) Protocol speed ID count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_suptprt3_dw2_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_suptprt3_dw2 bdk_usbdrdx_uahc_suptprt3_dw2_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_SUPTPRT3_DW2. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT3_DW2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT3_DW2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8680000008a8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x8680000008a8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x8680000008a8ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_SUPTPRT3_DW2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_SUPTPRT3_DW2(a) bdk_usbdrdx_uahc_suptprt3_dw2_t
+#define bustype_BDK_USBDRDX_UAHC_SUPTPRT3_DW2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_SUPTPRT3_DW2(a) "USBDRDX_UAHC_SUPTPRT3_DW2"
+#define device_bar_BDK_USBDRDX_UAHC_SUPTPRT3_DW2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_SUPTPRT3_DW2(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_SUPTPRT3_DW2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_suptprt3_dw3
+ *
+ * USB XHCI Supported-Protocol-Capability (USB 3.0) Register 3
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbdrdx_uahc_suptprt3_dw3
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_suptprt3_dw3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_5_31 : 27;
+ uint32_t protslottype : 5; /**< [ 4: 0](RO) Protocol slot type. */
+#else /* Word 0 - Little Endian */
+ uint32_t protslottype : 5; /**< [ 4: 0](RO) Protocol slot type. */
+ uint32_t reserved_5_31 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_suptprt3_dw3_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_suptprt3_dw3 bdk_usbdrdx_uahc_suptprt3_dw3_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_SUPTPRT3_DW3. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT3_DW3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_SUPTPRT3_DW3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8680000008acll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x8680000008acll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x8680000008acll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_SUPTPRT3_DW3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_SUPTPRT3_DW3(a) bdk_usbdrdx_uahc_suptprt3_dw3_t
+#define bustype_BDK_USBDRDX_UAHC_SUPTPRT3_DW3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_SUPTPRT3_DW3(a) "USBDRDX_UAHC_SUPTPRT3_DW3"
+#define device_bar_BDK_USBDRDX_UAHC_SUPTPRT3_DW3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_SUPTPRT3_DW3(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_SUPTPRT3_DW3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_usbcmd
+ *
+ * USB XHCI Command Register
+ * The command register indicates the command to be executed by the serial bus host controller.
+ * Writing to
+ * the register causes a command to be executed.
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_usbcmd
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_usbcmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t eu3s : 1; /**< [ 11: 11](R/W) Enable U3 MFINDEX stop. */
+ uint32_t ewe : 1; /**< [ 10: 10](R/W) Enable wrap event. */
+ uint32_t crs : 1; /**< [ 9: 9](WO) Controller restore state. */
+ uint32_t css : 1; /**< [ 8: 8](WO) Controller save state. */
+ uint32_t lhcrst : 1; /**< [ 7: 7](R/W1S/H) Light host controller reset. */
+ uint32_t reserved_4_6 : 3;
+ uint32_t hsee : 1; /**< [ 3: 3](R/W) Host system error enable. */
+ uint32_t inte : 1; /**< [ 2: 2](R/W) Interrupter enable. */
+ uint32_t hcrst : 1; /**< [ 1: 1](R/W1S/H) Host controller reset. */
+ uint32_t r_s : 1; /**< [ 0: 0](R/W) Run/stop. */
+#else /* Word 0 - Little Endian */
+ uint32_t r_s : 1; /**< [ 0: 0](R/W) Run/stop. */
+ uint32_t hcrst : 1; /**< [ 1: 1](R/W1S/H) Host controller reset. */
+ uint32_t inte : 1; /**< [ 2: 2](R/W) Interrupter enable. */
+ uint32_t hsee : 1; /**< [ 3: 3](R/W) Host system error enable. */
+ uint32_t reserved_4_6 : 3;
+ uint32_t lhcrst : 1; /**< [ 7: 7](R/W1S/H) Light host controller reset. */
+ uint32_t css : 1; /**< [ 8: 8](WO) Controller save state. */
+ uint32_t crs : 1; /**< [ 9: 9](WO) Controller restore state. */
+ uint32_t ewe : 1; /**< [ 10: 10](R/W) Enable wrap event. */
+ uint32_t eu3s : 1; /**< [ 11: 11](R/W) Enable U3 MFINDEX stop. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_usbcmd_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_usbcmd bdk_usbdrdx_uahc_usbcmd_t;
+
+/* Physical CSR address of USBDRD(a)_UAHC_USBCMD. Valid for a <= 1 on
+   CN81XX/CN83XX/CN9XXX; otherwise calls __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBDRDX_UAHC_USBCMD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_USBCMD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000000020ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000000020ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000000020ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UAHC_USBCMD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_USBCMD(a) bdk_usbdrdx_uahc_usbcmd_t
+#define bustype_BDK_USBDRDX_UAHC_USBCMD(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_USBCMD(a) "USBDRDX_UAHC_USBCMD"
+#define device_bar_BDK_USBDRDX_UAHC_USBCMD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_USBCMD(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_USBCMD(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_usblegctlsts
+ *
+ * USB XHCI Legacy Support Control/Status Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.1.2. Note
+ * that the SMI interrupts are not connected to anything in a CNXXXX configuration.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_usblegctlsts
+{
+ uint32_t u;
+ struct bdk_usbdrdx_uahc_usblegctlsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t smi_on_bar : 1; /**< [ 31: 31](R/W1C/H) System management interrupt on BAR. Never generated. */
+ uint32_t smi_on_pci_command : 1; /**< [ 30: 30](R/W1C/H) System management interrupt on PCI command. Never generated. */
+ uint32_t smi_on_os_ownership : 1; /**< [ 29: 29](R/W1C/H) System management interrupt on OS ownership change. This bit is set to 1 whenever
+ USBDRD()_UAHC_USBLEGSUP[HC_OS_OWNED_SEMAPHORES] transitions. */
+ uint32_t reserved_21_28 : 8;
+ uint32_t smi_on_hostsystemerr : 1; /**< [ 20: 20](RO/H) System-management interrupt on host-system error. Shadow bit of USBDRD()_UAHC_USBSTS[HSE].
+ Refer to
+ xHCI Section 5.4.2 for definition and effects of the events associated with this bit being
+ set to 1.
+
+ To clear this bit to a 0, system software must write a 1 to USBDRD()_UAHC_USBSTS[HSE]. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t smi_on_event_interrupt : 1; /**< [ 16: 16](RO/H) System-management interrupt on event interrupt. Shadow bit of USBDRD()_UAHC_USBSTS[EINT].
+ Refer to
+ xHCI Section 5.4.2 for definition. This bit automatically clears when [EINT] clears and
+ sets when [EINT] sets. */
+ uint32_t smi_on_bar_en : 1; /**< [ 15: 15](R/W) System-management interrupt on BAR enable. */
+ uint32_t smi_on_pci_command_en : 1; /**< [ 14: 14](R/W) System-management interrupt on PCI command enable. */
+ uint32_t smi_on_os_ownership_en : 1; /**< [ 13: 13](R/W) System-management interrupt on OS ownership enable. */
+ uint32_t reserved_5_12 : 8;
+ uint32_t smi_on_hostsystemerr_en : 1;/**< [ 4: 4](R/W) System-management interrupt on host-system error enable */
+ uint32_t reserved_1_3 : 3;
+ uint32_t usb_smi_en : 1; /**< [ 0: 0](R/W) USB system-management interrupt enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t usb_smi_en : 1; /**< [ 0: 0](R/W) USB system-management interrupt enable. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t smi_on_hostsystemerr_en : 1;/**< [ 4: 4](R/W) System-management interrupt on host-system error enable */
+ uint32_t reserved_5_12 : 8;
+ uint32_t smi_on_os_ownership_en : 1; /**< [ 13: 13](R/W) System-management interrupt on OS ownership enable. */
+ uint32_t smi_on_pci_command_en : 1; /**< [ 14: 14](R/W) System-management interrupt on PCI command enable. */
+ uint32_t smi_on_bar_en : 1; /**< [ 15: 15](R/W) System-management interrupt on BAR enable. */
+ uint32_t smi_on_event_interrupt : 1; /**< [ 16: 16](RO/H) System-management interrupt on event interrupt. Shadow bit of USBDRD()_UAHC_USBSTS[EINT].
+ Refer to
+ xHCI Section 5.4.2 for definition. This bit automatically clears when [EINT] clears and
+ sets when [EINT] sets. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t smi_on_hostsystemerr : 1; /**< [ 20: 20](RO/H) System-management interrupt on host-system error. Shadow bit of USBDRD()_UAHC_USBSTS[HSE].
+ Refer to
+ xHCI Section 5.4.2 for definition and effects of the events associated with this bit being
+ set to 1.
+
+ To clear this bit to a 0, system software must write a 1 to USBDRD()_UAHC_USBSTS[HSE]. */
+ uint32_t reserved_21_28 : 8;
+ uint32_t smi_on_os_ownership : 1; /**< [ 29: 29](R/W1C/H) System management interrupt on OS ownership change. This bit is set to 1 whenever
+ USBDRD()_UAHC_USBLEGSUP[HC_OS_OWNED_SEMAPHORES] transitions. */
+ uint32_t smi_on_pci_command : 1; /**< [ 30: 30](R/W1C/H) System management interrupt on PCI command. Never generated. */
+ uint32_t smi_on_bar : 1; /**< [ 31: 31](R/W1C/H) System management interrupt on BAR. Never generated. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uahc_usblegctlsts_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_usblegctlsts bdk_usbdrdx_uahc_usblegctlsts_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_USBLEGCTLSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_USBLEGCTLSTS(unsigned long a)
+{
+    /* CN81XX, CN83XX and CN9XXX all place this CSR at the same base;
+       the two USBDRD instances are selected by address bit 36. */
+    int model_ok = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                   CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                   CAVIUM_IS_MODEL(CAVIUM_CN9XXX);
+    if (model_ok && (a <= 1))
+        return 0x868000000884ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_USBLEGCTLSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_USBLEGCTLSTS(a) bdk_usbdrdx_uahc_usblegctlsts_t
+#define bustype_BDK_USBDRDX_UAHC_USBLEGCTLSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_USBLEGCTLSTS(a) "USBDRDX_UAHC_USBLEGCTLSTS"
+#define device_bar_BDK_USBDRDX_UAHC_USBLEGCTLSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_USBLEGCTLSTS(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_USBLEGCTLSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_usblegsup
+ *
+ * USB XHCI Legacy Support Capability Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.1.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_usblegsup
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_usblegsup_s
+    {
+        /* The same fields are declared in both bit orders; only the branch
+           matching the host endianness is compiled. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_25_31        : 7;
+        uint32_t hc_os_owned_semaphores : 1; /**< [ 24: 24](R/W) HC OS-owned semaphore. */
+        uint32_t reserved_17_23        : 7;
+        uint32_t hc_bios_owned_semaphores : 1;/**< [ 16: 16](R/W) HC BIOS-owned semaphore. */
+        uint32_t nextcapptr            : 8; /**< [ 15:  8](RO) Next xHCI extended-capability pointer. */
+        uint32_t capid                 : 8; /**< [  7:  0](RO) Capability ID = USB legacy support. */
+#else /* Word 0 - Little Endian */
+        uint32_t capid                 : 8; /**< [  7:  0](RO) Capability ID = USB legacy support. */
+        uint32_t nextcapptr            : 8; /**< [ 15:  8](RO) Next xHCI extended-capability pointer. */
+        uint32_t hc_bios_owned_semaphores : 1;/**< [ 16: 16](R/W) HC BIOS-owned semaphore. */
+        uint32_t reserved_17_23        : 7;
+        uint32_t hc_os_owned_semaphores : 1; /**< [ 24: 24](R/W) HC OS-owned semaphore. */
+        uint32_t reserved_25_31        : 7;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_usblegsup_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_usblegsup bdk_usbdrdx_uahc_usblegsup_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_USBLEGSUP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_USBLEGSUP(unsigned long a)
+{
+    /* Identical base address on all three supported model families;
+       instance select (a) uses address bit 36. */
+    int model_ok = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                   CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                   CAVIUM_IS_MODEL(CAVIUM_CN9XXX);
+    if (model_ok && (a <= 1))
+        return 0x868000000880ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_USBLEGSUP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_USBLEGSUP(a) bdk_usbdrdx_uahc_usblegsup_t
+#define bustype_BDK_USBDRDX_UAHC_USBLEGSUP(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_USBLEGSUP(a) "USBDRDX_UAHC_USBLEGSUP"
+#define device_bar_BDK_USBDRDX_UAHC_USBLEGSUP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_USBLEGSUP(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_USBLEGSUP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbdrd#_uahc_usbsts
+ *
+ * USB XHCI Status Register
+ * This register indicates pending interrupts and various states of the host controller.
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.2.
+ *
+ * This register can be reset by NCB reset,
+ * or USBDRD()_UCTL_CTL[UAHC_RST],
+ * or USBDRD()_UAHC_GCTL[CORESOFTRESET],
+ * or USBDRD()_UAHC_USBCMD[HCRST], or USBDRD()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbdrdx_uahc_usbsts
+{
+    uint32_t u;
+    struct bdk_usbdrdx_uahc_usbsts_s
+    {
+        /* The same fields are declared in both bit orders; only the branch
+           matching the host endianness is compiled. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_13_31        : 19;
+        uint32_t hce                   : 1;  /**< [ 12: 12](RO/H) Host controller error. */
+        uint32_t cnr                   : 1;  /**< [ 11: 11](RO/H) Controller not ready. */
+        uint32_t sre                   : 1;  /**< [ 10: 10](R/W1C/H) Save/restore error. */
+        uint32_t rss                   : 1;  /**< [  9:  9](RO/H) Restore state status. */
+        uint32_t sss                   : 1;  /**< [  8:  8](RO/H) Save state status. */
+        uint32_t reserved_5_7          : 3;
+        uint32_t pcd                   : 1;  /**< [  4:  4](R/W1C/H) Port change detect. */
+        uint32_t eint                  : 1;  /**< [  3:  3](R/W1C/H) Event interrupt. */
+        uint32_t hse                   : 1;  /**< [  2:  2](R/W1C/H) Host system error. The typical software response to an HSE is to reset the core. */
+        uint32_t reserved_1            : 1;
+        uint32_t hch                   : 1;  /**< [  0:  0](RO/H) HC halted. */
+#else /* Word 0 - Little Endian */
+        uint32_t hch                   : 1;  /**< [  0:  0](RO/H) HC halted. */
+        uint32_t reserved_1            : 1;
+        uint32_t hse                   : 1;  /**< [  2:  2](R/W1C/H) Host system error. The typical software response to an HSE is to reset the core. */
+        uint32_t eint                  : 1;  /**< [  3:  3](R/W1C/H) Event interrupt. */
+        uint32_t pcd                   : 1;  /**< [  4:  4](R/W1C/H) Port change detect. */
+        uint32_t reserved_5_7          : 3;
+        uint32_t sss                   : 1;  /**< [  8:  8](RO/H) Save state status. */
+        uint32_t rss                   : 1;  /**< [  9:  9](RO/H) Restore state status. */
+        uint32_t sre                   : 1;  /**< [ 10: 10](R/W1C/H) Save/restore error. */
+        uint32_t cnr                   : 1;  /**< [ 11: 11](RO/H) Controller not ready. */
+        uint32_t hce                   : 1;  /**< [ 12: 12](RO/H) Host controller error. */
+        uint32_t reserved_13_31        : 19;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uahc_usbsts_s cn; */
+};
+typedef union bdk_usbdrdx_uahc_usbsts bdk_usbdrdx_uahc_usbsts_t;
+
+static inline uint64_t BDK_USBDRDX_UAHC_USBSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UAHC_USBSTS(unsigned long a)
+{
+    /* Identical base address on all three supported model families;
+       instance select (a) uses address bit 36. */
+    int model_ok = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                   CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                   CAVIUM_IS_MODEL(CAVIUM_CN9XXX);
+    if (model_ok && (a <= 1))
+        return 0x868000000024ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UAHC_USBSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UAHC_USBSTS(a) bdk_usbdrdx_uahc_usbsts_t
+#define bustype_BDK_USBDRDX_UAHC_USBSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBDRDX_UAHC_USBSTS(a) "USBDRDX_UAHC_USBSTS"
+#define device_bar_BDK_USBDRDX_UAHC_USBSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UAHC_USBSTS(a) (a)
+#define arguments_BDK_USBDRDX_UAHC_USBSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_bist_status
+ *
+ * USB UCTL BIST Status Register
+ * This register indicates the results from the built-in self-test (BIST) runs of USBDRD
+ * memories.
+ * A 0 indicates pass or never run, a 1 indicates fail. This register can be reset by NCB reset.
+ */
+union bdk_usbdrdx_uctl_bist_status
+{
+    uint64_t u;
+    struct bdk_usbdrdx_uctl_bist_status_s
+    {
+        /* The same fields are declared in both bit orders; only the branch
+           matching the host endianness is compiled. Bits [41:32] report
+           completion ("ndone"), bits [9:0] report pass/fail status. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_42_63        : 22;
+        uint64_t uctl_xm_r_bist_ndone  : 1;  /**< [ 41: 41](RO/H) BIST is not complete for the UCTL AxiMaster read-data FIFO. */
+        uint64_t uctl_xm_w_bist_ndone  : 1;  /**< [ 40: 40](RO/H) BIST is not complete for the UCTL AxiMaster write-data FIFO. */
+        uint64_t reserved_35_39        : 5;
+        uint64_t uahc_ram2_bist_ndone  : 1;  /**< [ 34: 34](RO/H) BIST is not complete for the UAHC RxFIFO RAM (RAM2). */
+        uint64_t uahc_ram1_bist_ndone  : 1;  /**< [ 33: 33](RO/H) BIST is not complete for the UAHC TxFIFO RAM (RAM1). */
+        uint64_t uahc_ram0_bist_ndone  : 1;  /**< [ 32: 32](RO/H) BIST is not complete for the UAHC descriptor/register cache (RAM0). */
+        uint64_t reserved_10_31        : 22;
+        uint64_t uctl_xm_r_bist_status : 1;  /**< [  9:  9](RO/H) BIST status of the UCTL AxiMaster read-data FIFO. */
+        uint64_t uctl_xm_w_bist_status : 1;  /**< [  8:  8](RO/H) BIST status of the UCTL AxiMaster write-data FIFO. */
+        uint64_t reserved_3_7          : 5;
+        uint64_t uahc_ram2_bist_status : 1;  /**< [  2:  2](RO/H) BIST status of the UAHC RxFIFO RAM (RAM2). */
+        uint64_t uahc_ram1_bist_status : 1;  /**< [  1:  1](RO/H) BIST status of the UAHC TxFIFO RAM (RAM1). */
+        uint64_t uahc_ram0_bist_status : 1;  /**< [  0:  0](RO/H) BIST status of the UAHC descriptor/register cache (RAM0). */
+#else /* Word 0 - Little Endian */
+        uint64_t uahc_ram0_bist_status : 1;  /**< [  0:  0](RO/H) BIST status of the UAHC descriptor/register cache (RAM0). */
+        uint64_t uahc_ram1_bist_status : 1;  /**< [  1:  1](RO/H) BIST status of the UAHC TxFIFO RAM (RAM1). */
+        uint64_t uahc_ram2_bist_status : 1;  /**< [  2:  2](RO/H) BIST status of the UAHC RxFIFO RAM (RAM2). */
+        uint64_t reserved_3_7          : 5;
+        uint64_t uctl_xm_w_bist_status : 1;  /**< [  8:  8](RO/H) BIST status of the UCTL AxiMaster write-data FIFO. */
+        uint64_t uctl_xm_r_bist_status : 1;  /**< [  9:  9](RO/H) BIST status of the UCTL AxiMaster read-data FIFO. */
+        uint64_t reserved_10_31        : 22;
+        uint64_t uahc_ram0_bist_ndone  : 1;  /**< [ 32: 32](RO/H) BIST is not complete for the UAHC descriptor/register cache (RAM0). */
+        uint64_t uahc_ram1_bist_ndone  : 1;  /**< [ 33: 33](RO/H) BIST is not complete for the UAHC TxFIFO RAM (RAM1). */
+        uint64_t uahc_ram2_bist_ndone  : 1;  /**< [ 34: 34](RO/H) BIST is not complete for the UAHC RxFIFO RAM (RAM2). */
+        uint64_t reserved_35_39        : 5;
+        uint64_t uctl_xm_w_bist_ndone  : 1;  /**< [ 40: 40](RO/H) BIST is not complete for the UCTL AxiMaster write-data FIFO. */
+        uint64_t uctl_xm_r_bist_ndone  : 1;  /**< [ 41: 41](RO/H) BIST is not complete for the UCTL AxiMaster read-data FIFO. */
+        uint64_t reserved_42_63        : 22;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uctl_bist_status_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_bist_status bdk_usbdrdx_uctl_bist_status_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_BIST_STATUS(unsigned long a)
+{
+    /* Only CN81XX and CN83XX provide this CSR (no CN9XXX mapping);
+       both use the same base, with instance select on address bit 36. */
+    int model_ok = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                   CAVIUM_IS_MODEL(CAVIUM_CN83XX);
+    if (model_ok && (a <= 1))
+        return 0x868000100008ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UCTL_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_BIST_STATUS(a) bdk_usbdrdx_uctl_bist_status_t
+#define bustype_BDK_USBDRDX_UCTL_BIST_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_BIST_STATUS(a) "USBDRDX_UCTL_BIST_STATUS"
+#define device_bar_BDK_USBDRDX_UCTL_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_BIST_STATUS(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_csclk_active_pc
+ *
+ * USB UCTL Conditional Sclk Clock Counter Register
+ * This register counts conditional clocks, for power analysis.
+ * Reset by NCB reset.
+ */
+union bdk_usbdrdx_uctl_csclk_active_pc
+{
+    uint64_t u;
+    struct bdk_usbdrdx_uctl_csclk_active_pc_s
+    {
+        /* Single full-width 64-bit field, so the big- and little-endian
+           layouts are identical; both branches are kept for consistency
+           with the other generated register definitions. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t count                 : 64; /**< [ 63:  0](R/W/H) Counts conditional clock active cycles since reset. */
+#else /* Word 0 - Little Endian */
+        uint64_t count                 : 64; /**< [ 63:  0](R/W/H) Counts conditional clock active cycles since reset. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbdrdx_uctl_csclk_active_pc_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_csclk_active_pc bdk_usbdrdx_uctl_csclk_active_pc_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_CSCLK_ACTIVE_PC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_CSCLK_ACTIVE_PC(unsigned long a)
+{
+    /* CN9XXX-only CSR; instance select (a) uses address bit 36.
+       Any other model or out-of-range instance is a fatal error. */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN9XXX) || (a > 1))
+        __bdk_csr_fatal("USBDRDX_UCTL_CSCLK_ACTIVE_PC", 1, a, 0, 0, 0);
+    return 0x868000100028ll + 0x1000000000ll * ((a) & 0x1);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_CSCLK_ACTIVE_PC(a) bdk_usbdrdx_uctl_csclk_active_pc_t
+#define bustype_BDK_USBDRDX_UCTL_CSCLK_ACTIVE_PC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_CSCLK_ACTIVE_PC(a) "USBDRDX_UCTL_CSCLK_ACTIVE_PC"
+#define device_bar_BDK_USBDRDX_UCTL_CSCLK_ACTIVE_PC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_CSCLK_ACTIVE_PC(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_CSCLK_ACTIVE_PC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_ctl
+ *
+ * USB UCTL Control Register
+ * This register controls clocks, resets, power, and BIST.
+ *
+ * This register can be reset by NCB reset.
+ */
+union bdk_usbdrdx_uctl_ctl
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. A BIST run with this bit set clears all entries in USBDRD
+ RAMs
+ to 0x0.
+
+ There are two major modes of BIST: full and clear. Full BIST is run by the BIST state
+ machine when [CLEAR_BIST] is deasserted during BIST. Clear BIST is run if [CLEAR_BIST] is
+ asserted during BIST.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts the
+ [CLEAR_BIST] setting into the correct state and then perform another CSR write operation
+ to
+ set the BIST trigger (keeping the [CLEAR_BIST] state constant).
+ CLEAR BIST completion is indicated by USBDRD()_UCTL_BIST_STATUS. A BIST clear operation
+ takes almost 2,000 controller-clock cycles for the largest RAM. */
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Rising edge starts BIST on the memories in USBDRD.
+ To run BIST, the controller clock must be both configured and enabled, and should be
+ configured to the maximum available frequency given the available coprocessor clock and
+ dividers.
+ Also, the UCTL, UAHC, and UPHY should be held in software- initiated reset (using
+ [UPHY_RST], [UAHC_RST], [UCTL_RST]) until BIST is complete.
+ BIST defect status can be checked after FULL BIST completion, both of which are indicated
+ in USBDRD()_UCTL_BIST_STATUS. The full BIST run takes almost 80,000 controller-clock
+ cycles
+ for
+ the largest RAM. */
+ uint64_t reserved_60_61 : 2;
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+
+ The high-speed PLL cannot support a spread-spectrum input, so [REF_CLK_SEL] =
+ 0x0, 0x1, or 0x2 must enable this feature.
+
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount.
+
+ A value of 0x0 means this feature is disabled.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6, then the legal values are:
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ MPLL_MULTIPLIER description).
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then [MPLL_MULTPLIER], [REF_CLK_DIV2], and
+ [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ * 0x108: if DLMC_REF_CLK* is 19.2MHz, 24MHz, 26MHz, 38.4MHz, 48MHz,
+ 52MHz, 76.8MHz, 96MHz, 104MHz.
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then the legal values are:
+
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then [MPLL_MULTPLIER], [REF_CLK_DIV2],
+ and [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x06, then:
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x64 = 25 MHz on DLMC_REF_CLK*.
+ 0x60 = 26 MHz on DLMC_REF_CLK*.
+ 0x41 = 38.4MHz on DLMC_REF_CLK*.
+ 0x7D = 40 MHz on DLMC_REF_CLK*.
+ 0x34 = 48 MHz on DLMC_REF_CLK*.
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x30 = 52 MHz on DLMC_REF_CLK*.
+ 0x41 = 76.8MHz on DLMC_REF_CLK*.
+ 0x1A = 96 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x30 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x1.
+ 0x18 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x0.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+ 0x19 = 200 MHz on DLMC_REF_CLK*. */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by two before feeding it into the REF_CLK_FSEL divider.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0X2 then the legal values are:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6 then the legal values are:
+
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ MPLL_MULTIPLIER description).
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x2 or 0x3, then [MPLL_MULTPLIER], [REF_CLK_DIV2], and
+ [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+ 0x1: if DLMC_REF_CLK* is 40MHz, 76.8MHz, or 200MHz.
+ 0x0, 0x1 if DLMC_REF_CLK* is 104MHz (depending on [MPLL_MULTIPLIER]).
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6, then the legal values are:
+
+ 0x07 is the only legal value.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ When [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, the [MPLL_MULTIPLIER], [REF_CLK_DIV2],
+ and [SSC_REF_CLK_SEL] settings are used to configure the SuperSpeed reference
+ clock multiplier.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+ 0x2A = 24 MHz on DLMC_REF_CLK*.
+ 0x31 = 20 MHz on DLMC_REF_CLK*.
+ 0x38 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6 then:
+ 0x07 is the only legal value. */
+ uint64_t cmd_flr_en : 1; /**< [ 31: 31](R/W) The host controller will stop accepting commands if this bit is set. This bit is
+ for host_mode only.
+
+ In normal FLR, this bit should be set to 0. If software wants the command to
+ finish before FLR, write this bit to 1 and poll USBDRD()_UAHC_USBSTS[HCH] to
+ make sure the command is finished before disabling USBDRD's PCCPF_XXX_CMD[ME]. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ The hclk frequency must be at or below 300MHz.
+ The hclk frequency must be at or above 150MHz for full-rate USB3
+ operation.
+ The hclk frequency must be at or above 125MHz for any USB3
+ functionality.
+
+ If DRD_MODE = DEVICE, the hclk frequency must be at or above 125MHz for
+ correct USB2 functionality.
+
+ If DRD_MODE = HOST, the hclk frequency must be at or above 90MHz
+ for full-rate USB2 operation.
+
+ If DRD_MODE = HOST, the hclk frequency must be at or above 62.5MHz
+ for any USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of:
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 1, col 12.
+ Synopsys DWC_usb3 Databook v2.80a, table A-17, row 7, col 9.
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 7, col 9.
+ DEVICE\>125MHz is from Synopsys DWC_usb3 Databook v2.80a, section A.12.4.
+ HOST2\>62.5MHz in HOST mode is from Synopsys DWC_usb3 Databook v2.80a,
+ section A.12.5, 3rd bullet in Note on page 894.
+ HOST2\>90MHz was arrived at from some math: 62.5MHz +
+ (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_15 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t ref_clk_sel : 3; /**< [ 11: 9](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks. Both HighSpeed and SuperSpeed reference clocks must be supplied for USB
+ operation.
+
+ \<pre\>
+ Source for Source for
+ [REF_CLK_SEL] SuperSpeed PLL HighSpeed PLL
+ ------------- -------------- ------------------------
+ 0x0 DLMC_REF_CLK0 DLMC_REF_CLK0
+ 0x1 DLMC_REF_CLK1 DLMC_REF_CLK1
+ 0x2 PAD_REF_CLK PAD_REF_CLK
+ 0x3 Reserved.
+ 0x4 DLMC_REF_CLK0 PLL_REF_CLK
+ 0x5 DLMC_REF_CLK1 PLL_REF_CLK
+ 0x6 PAD_REF_CLK PLL_REF_CLK
+ 0x7 Reserved.
+ \</pre\>
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x0, 0x1, or 0x2 then the reference clock input cannot be
+ spread-spectrum.
+
+ Internal:
+ For the 0x6 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for high-speed PLL is PLL_REF_CLK. But in CNXXXX,
+ PLL_REF_CLK cannot be routed to USB without violating jitter requirements */
+ uint64_t reserved_6_8 : 3;
+ uint64_t dma_psn_ign : 1; /**< [ 5: 5](R/W) Handling of poison indication on DMA read responses.
+ 0 = Treat poison data the same way as fault, sending an AXI error to the USB
+ controller.
+ 1 = Ignore poison and proceed with the transaction as if no problems. */
+ uint64_t reserved_4 : 1;
+ uint64_t drd_mode : 1; /**< [ 3: 3](R/W) Switches between host or device mode for USBDRD.
+ 0 = Host.
+ 1 = Device. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t drd_mode : 1; /**< [ 3: 3](R/W) Switches between host or device mode for USBDRD.
+ 0 = Host.
+ 1 = Device. */
+ uint64_t reserved_4 : 1;
+ uint64_t dma_psn_ign : 1; /**< [ 5: 5](R/W) Handling of poison indication on DMA read responses.
+ 0 = Treat poison data the same way as fault, sending an AXI error to the USB
+ controller.
+ 1 = Ignore poison and proceed with the transaction as if no problems. */
+ uint64_t reserved_6_8 : 3;
+ uint64_t ref_clk_sel : 3; /**< [ 11: 9](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks. Both HighSpeed and SuperSpeed reference clocks must be supplied for USB
+ operation.
+
+ \<pre\>
+ Source for Source for
+ [REF_CLK_SEL] SuperSpeed PLL HighSpeed PLL
+ ------------- -------------- ------------------------
+ 0x0 DLMC_REF_CLK0 DLMC_REF_CLK0
+ 0x1 DLMC_REF_CLK1 DLMC_REF_CLK1
+ 0x2 PAD_REF_CLK PAD_REF_CLK
+ 0x3 Reserved.
+ 0x4 DLMC_REF_CLK0 PLL_REF_CLK
+ 0x5 DLMC_REF_CLK1 PLL_REF_CLK
+ 0x6 PAD_REF_CLK PLL_REF_CLK
+ 0x7 Reserved.
+ \</pre\>
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x0, 0x1, or 0x2 then the reference clock input cannot be
+ spread-spectrum.
+
+ Internal:
+ For the 0x6 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for high-speed PLL is PLL_REF_CLK. But in CNXXXX,
+ PLL_REF_CLK cannot be routed to USB without violating jitter requirements */
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_15 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ The hclk frequency must be at or below 300MHz.
+ The hclk frequency must be at or above 150MHz for full-rate USB3
+ operation.
+ The hclk frequency must be at or above 125MHz for any USB3
+ functionality.
+
+ If DRD_MODE = DEVICE, the hclk frequency must be at or above 125MHz for
+ correct USB2 functionality.
+
+ If DRD_MODE = HOST, the hclk frequency must be at or above 90MHz
+ for full-rate USB2 operation.
+
+ If DRD_MODE = HOST, the hclk frequency must be at or above 62.5MHz
+ for any USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of:
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 1, col 12.
+ Synopsys DWC_usb3 Databook v2.80a, table A-17, row 7, col 9.
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 7, col 9.
+ DEVICE\>125MHz is from Synopsys DWC_usb3 Databook v2.80a, section A.12.4.
+ HOST2\>62.5MHz in HOST mode is from Synopsys DWC_usb3 Databook v2.80a,
+ section A.12.5, 3rd bullet in Note on page 894.
+ HOST2\>90MHz was arrived at from some math: 62.5MHz +
+ (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t cmd_flr_en : 1; /**< [ 31: 31](R/W) The host controller will stop accepting commands if this bit is set. This bit is
+ for host_mode only.
+
+ In normal FLR, this bit should be set to 0. If software wants the command to
+ finish before FLR, write this bit to 1 and poll USBDRD()_UAHC_USBSTS[HCH] to
+ make sure the command is finished before disabling USBDRD's PCCPF_XXX_CMD[ME]. */
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6, then the legal values are:
+
+ 0x07 is the only legal value.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ When [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, the [MPLL_MULTIPLIER], [REF_CLK_DIV2],
+ and [SSC_REF_CLK_SEL] settings are used to configure the SuperSpeed reference
+ clock multiplier.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+ 0x2A = 24 MHz on DLMC_REF_CLK*.
+ 0x31 = 20 MHz on DLMC_REF_CLK*.
+ 0x38 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6 then:
+ 0x07 is the only legal value. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by two before feeding it into the REF_CLK_FSEL divider.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0X2 then the legal values are:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6 then the legal values are:
+
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ MPLL_MULTIPLIER description).
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x2 or 0x3, then [MPLL_MULTPLIER], [REF_CLK_DIV2], and
+ [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+ 0x1: if DLMC_REF_CLK* is 40MHz, 76.8MHz, or 200MHz.
+ 0x0, 0x1 if DLMC_REF_CLK* is 104MHz (depending on [MPLL_MULTIPLIER]).
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then the legal values are:
+
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then [MPLL_MULTPLIER], [REF_CLK_DIV2],
+ and [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x06, then:
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x64 = 25 MHz on DLMC_REF_CLK*.
+ 0x60 = 26 MHz on DLMC_REF_CLK*.
+ 0x41 = 38.4MHz on DLMC_REF_CLK*.
+ 0x7D = 40 MHz on DLMC_REF_CLK*.
+ 0x34 = 48 MHz on DLMC_REF_CLK*.
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x30 = 52 MHz on DLMC_REF_CLK*.
+ 0x41 = 76.8MHz on DLMC_REF_CLK*.
+ 0x1A = 96 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x30 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x1.
+ 0x18 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x0.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+ 0x19 = 200 MHz on DLMC_REF_CLK*. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount.
+
+ A value of 0x0 means this feature is disabled.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6, then the legal values are:
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ MPLL_MULTIPLIER description).
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then [MPLL_MULTPLIER], [REF_CLK_DIV2], and
+ [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ * 0x108: if DLMC_REF_CLK* is 19.2MHz, 24MHz, 26MHz, 38.4MHz, 48MHz,
+ 52MHz, 76.8MHz, 96MHz, 104MHz.
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+
+ The high-speed PLL cannot support a spread-spectrum input, so [REF_CLK_SEL] =
+ 0x0, 0x1, or 0x2 must enable this feature.
+
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t reserved_60_61 : 2;
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Rising edge starts BIST on the memories in USBDRD.
+ To run BIST, the controller clock must be both configured and enabled, and should be
+ configured to the maximum available frequency given the available coprocessor clock and
+ dividers.
+ Also, the UCTL, UAHC, and UPHY should be held in software- initiated reset (using
+ [UPHY_RST], [UAHC_RST], [UCTL_RST]) until BIST is complete.
+ BIST defect status can be checked after FULL BIST completion, both of which are indicated
+ in USBDRD()_UCTL_BIST_STATUS. The full BIST run takes almost 80,000 controller-clock
+ cycles
+ for
+ the largest RAM. */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. A BIST run with this bit set clears all entries in USBDRD
+ RAMs
+ to 0x0.
+
+ There are two major modes of BIST: full and clear. Full BIST is run by the BIST state
+ machine when [CLEAR_BIST] is deasserted during BIST. Clear BIST is run if [CLEAR_BIST] is
+ asserted during BIST.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts the
+ [CLEAR_BIST] setting into the correct state and then perform another CSR write operation
+ to
+ set the BIST trigger (keeping the [CLEAR_BIST] state constant).
+ CLEAR BIST completion is indicated by USBDRD()_UCTL_BIST_STATUS. A BIST clear operation
+ takes almost 2,000 controller-clock cycles for the largest RAM. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbdrdx_uctl_ctl_cn81xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. A BIST run with this bit set clears all entries in USBDRD
+ RAMs
+ to 0x0.
+
+ There are two major modes of BIST: full and clear. Full BIST is run by the BIST state
+ machine when [CLEAR_BIST] is deasserted during BIST. Clear BIST is run if [CLEAR_BIST] is
+ asserted during BIST.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts the
+ [CLEAR_BIST] setting into the correct state and then perform another CSR write operation
+ to
+ set the BIST trigger (keeping the [CLEAR_BIST] state constant).
+ CLEAR BIST completion is indicated by USBDRD()_UCTL_BIST_STATUS. A BIST clear operation
+ takes almost 2,000 controller-clock cycles for the largest RAM. */
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Rising edge starts BIST on the memories in USBDRD.
+ To run BIST, the controller clock must be both configured and enabled, and should be
+ configured to the maximum available frequency given the available coprocessor clock and
+ dividers.
+ Also, the UCTL, UAHC, and UPHY should be held in software- initiated reset (using
+ [UPHY_RST], [UAHC_RST], [UCTL_RST]) until BIST is complete.
+ BIST defect status can be checked after FULL BIST completion, both of which are indicated
+ in USBDRD()_UCTL_BIST_STATUS. The full BIST run takes almost 80,000 controller-clock
+ cycles
+ for
+ the largest RAM. */
+ uint64_t reserved_60_61 : 2;
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+
+ The high-speed PLL cannot support a spread-spectrum input, so [REF_CLK_SEL] =
+ 0x0, 0x1, or 0x2 must enable this feature.
+
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount.
+
+ A value of 0x0 means this feature is disabled.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6, then the legal values are:
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ MPLL_MULTIPLIER description).
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then [MPLL_MULTPLIER], [REF_CLK_DIV2], and
+ [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ * 0x108: if DLMC_REF_CLK* is 19.2MHz, 24MHz, 26MHz, 38.4MHz, 48MHz,
+ 52MHz, 76.8MHz, 96MHz, 104MHz.
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then the legal values are:
+
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then [MPLL_MULTPLIER], [REF_CLK_DIV2],
+ and [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x06, then:
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x64 = 25 MHz on DLMC_REF_CLK*.
+ 0x60 = 26 MHz on DLMC_REF_CLK*.
+ 0x41 = 38.4MHz on DLMC_REF_CLK*.
+ 0x7D = 40 MHz on DLMC_REF_CLK*.
+ 0x34 = 48 MHz on DLMC_REF_CLK*.
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x30 = 52 MHz on DLMC_REF_CLK*.
+ 0x41 = 76.8MHz on DLMC_REF_CLK*.
+ 0x1A = 96 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x30 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x1.
+ 0x18 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x0.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+ 0x19 = 200 MHz on DLMC_REF_CLK*. */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by two before feeding it into the REF_CLK_FSEL divider.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0X2 then the legal values are:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6 then the legal values are:
+
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ MPLL_MULTIPLIER description).
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x2 or 0x3, then [MPLL_MULTPLIER], [REF_CLK_DIV2], and
+ [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+ 0x1: if DLMC_REF_CLK* is 40MHz, 76.8MHz, or 200MHz.
+ 0x0, 0x1 if DLMC_REF_CLK* is 104MHz (depending on [MPLL_MULTIPLIER]).
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6, then the legal values are:
+
+ 0x07 is the only legal value.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ When [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, the [MPLL_MULTIPLIER], [REF_CLK_DIV2],
+ and [SSC_REF_CLK_SEL] settings are used to configure the SuperSpeed reference
+ clock multiplier.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+ 0x2A = 24 MHz on DLMC_REF_CLK*.
+ 0x31 = 20 MHz on DLMC_REF_CLK*.
+ 0x38 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6 then:
+ 0x07 is the only legal value. */
+ uint64_t reserved_31 : 1;
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ The hclk frequency must be at or below 300MHz.
+ The hclk frequency must be at or above 150MHz for full-rate USB3
+ operation.
+ The hclk frequency must be at or above 125MHz for any USB3
+ functionality.
+
+ If DRD_MODE = DEVICE, the hclk frequency must be at or above 125MHz for
+ correct USB2 functionality.
+
+ If DRD_MODE = HOST, the hclk frequency must be at or above 90MHz
+ for full-rate USB2 operation.
+
+ If DRD_MODE = HOST, the hclk frequency must be at or above 62.5MHz
+ for any USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of:
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 1, col 12.
+ Synopsys DWC_usb3 Databook v2.80a, table A-17, row 7, col 9.
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 7, col 9.
+ DEVICE\>125MHz is from Synopsys DWC_usb3 Databook v2.80a, section A.12.4.
+ HOST2\>62.5MHz in HOST mode is from Synopsys DWC_usb3 Databook v2.80a,
+ section A.12.5, 3rd bullet in Note on page 894.
+ HOST2\>90MHz was arrived at from some math: 62.5MHz +
+ (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_15 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t ref_clk_sel : 3; /**< [ 11: 9](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks. Both HighSpeed and SuperSpeed reference clocks must be supplied for USB
+ operation.
+
+ \<pre\>
+ Source for Source for
+ [REF_CLK_SEL] SuperSpeed PLL HighSpeed PLL
+ ------------- -------------- ------------------------
+ 0x0 DLMC_REF_CLK0 DLMC_REF_CLK0
+ 0x1 DLMC_REF_CLK1 DLMC_REF_CLK1
+ 0x2 PAD_REF_CLK PAD_REF_CLK
+ 0x3 Reserved.
+ 0x4 DLMC_REF_CLK0 PLL_REF_CLK
+ 0x5 DLMC_REF_CLK1 PLL_REF_CLK
+ 0x6 PAD_REF_CLK PLL_REF_CLK
+ 0x7 Reserved.
+ \</pre\>
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x0, 0x1, or 0x2 then the reference clock input cannot be
+ spread-spectrum.
+
+ Internal:
+ For the 0x6 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for high-speed PLL is PLL_REF_CLK. But in CNXXXX,
+ PLL_REF_CLK cannot be routed to USB without violating jitter requirements */
+ uint64_t reserved_5_8 : 4;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the USB UCTL interface clock (coprocessor clock). This enables access to UAHC
+ and UCTL registers starting from 0x30. */
+ uint64_t drd_mode : 1; /**< [ 3: 3](R/W) Switches between host or device mode for USBDRD.
+ 0 = Host.
+ 1 = Device. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t drd_mode : 1; /**< [ 3: 3](R/W) Switches between host or device mode for USBDRD.
+ 0 = Host.
+ 1 = Device. */
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the USB UCTL interface clock (coprocessor clock). This enables access to UAHC
+ and UCTL registers starting from 0x30. */
+ uint64_t reserved_5_8 : 4;
+ uint64_t ref_clk_sel : 3; /**< [ 11: 9](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks. Both HighSpeed and SuperSpeed reference clocks must be supplied for USB
+ operation.
+
+ \<pre\>
+ Source for Source for
+ [REF_CLK_SEL] SuperSpeed PLL HighSpeed PLL
+ ------------- -------------- ------------------------
+ 0x0 DLMC_REF_CLK0 DLMC_REF_CLK0
+ 0x1 DLMC_REF_CLK1 DLMC_REF_CLK1
+ 0x2 PAD_REF_CLK PAD_REF_CLK
+ 0x3 Reserved.
+ 0x4 DLMC_REF_CLK0 PLL_REF_CLK
+ 0x5 DLMC_REF_CLK1 PLL_REF_CLK
+ 0x6 PAD_REF_CLK PLL_REF_CLK
+ 0x7 Reserved.
+ \</pre\>
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x0, 0x1, or 0x2 then the reference clock input cannot be
+ spread-spectrum.
+
+ Internal:
+ For the 0x6 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for high-speed PLL is PLL_REF_CLK. But in CNXXXX,
+ PLL_REF_CLK cannot be routed to USB without violating jitter requirements */
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_15 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ The hclk frequency must be at or below 300MHz.
+ The hclk frequency must be at or above 150MHz for full-rate USB3
+ operation.
+ The hclk frequency must be at or above 125MHz for any USB3
+ functionality.
+
+ If DRD_MODE = DEVICE, the hclk frequency must be at or above 125MHz for
+ correct USB2 functionality.
+
+ If DRD_MODE = HOST, the hclk frequency must be at or above 90MHz
+ for full-rate USB2 operation.
+
+ If DRD_MODE = HOST, the hclk frequency must be at or above 62.5MHz
+ for any USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of:
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 1, col 12.
+ Synopsys DWC_usb3 Databook v2.80a, table A-17, row 7, col 9.
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 7, col 9.
+ DEVICE\>125MHz is from Synopsys DWC_usb3 Databook v2.80a, section A.12.4.
+ HOST2\>62.5MHz in HOST mode is from Synopsys DWC_usb3 Databook v2.80a,
+ section A.12.5, 3rd bullet in Note on page 894.
+ HOST2\>90MHz was arrived at from some math: 62.5MHz +
+ (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t reserved_31 : 1;
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6, then the legal values are:
+
+ 0x07 is the only legal value.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ When [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, the [MPLL_MULTIPLIER], [REF_CLK_DIV2],
+ and [SSC_REF_CLK_SEL] settings are used to configure the SuperSpeed reference
+ clock multiplier.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+ 0x2A = 24 MHz on DLMC_REF_CLK*.
+ 0x31 = 20 MHz on DLMC_REF_CLK*.
+ 0x38 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6 then:
+ 0x07 is the only legal value. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by two before feeding it into the REF_CLK_FSEL divider.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0X2 then the legal values are:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6 then the legal values are:
+
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ MPLL_MULTIPLIER description).
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x2 or 0x3, then [MPLL_MULTPLIER], [REF_CLK_DIV2], and
+ [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+ 0x1: if DLMC_REF_CLK* is 40MHz, 76.8MHz, or 200MHz.
+ 0x0, 0x1 if DLMC_REF_CLK* is 104MHz (depending on [MPLL_MULTIPLIER]).
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then the legal values are:
+
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then [MPLL_MULTPLIER], [REF_CLK_DIV2],
+ and [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x06, then:
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x64 = 25 MHz on DLMC_REF_CLK*.
+ 0x60 = 26 MHz on DLMC_REF_CLK*.
+ 0x41 = 38.4MHz on DLMC_REF_CLK*.
+ 0x7D = 40 MHz on DLMC_REF_CLK*.
+ 0x34 = 48 MHz on DLMC_REF_CLK*.
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x30 = 52 MHz on DLMC_REF_CLK*.
+ 0x41 = 76.8MHz on DLMC_REF_CLK*.
+ 0x1A = 96 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x30 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x1.
+ 0x18 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x0.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+ 0x19 = 200 MHz on DLMC_REF_CLK*. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount.
+
+ A value of 0x0 means this feature is disabled.
+
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then the legal values are:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4, 0x5 or 0x6, then the legal values are:
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ MPLL_MULTIPLIER description).
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then [MPLL_MULTPLIER], [REF_CLK_DIV2], and
+ [SSC_REF_CLK_SEL] must all be programmed to the same frequency setting.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ * 0x108: if DLMC_REF_CLK* is 19.2MHz, 24MHz, 26MHz, 38.4MHz, 48MHz,
+ 52MHz, 76.8MHz, 96MHz, 104MHz.
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+
+ The high-speed PLL cannot support a spread-spectrum input, so [REF_CLK_SEL] =
+ 0x0, 0x1, or 0x2 must enable this feature.
+
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t reserved_60_61 : 2;
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Rising edge starts BIST on the memories in USBDRD.
+ To run BIST, the controller clock must be both configured and enabled, and should be
+ configured to the maximum available frequency given the available coprocessor clock and
+ dividers.
+ Also, the UCTL, UAHC, and UPHY should be held in software- initiated reset (using
+ [UPHY_RST], [UAHC_RST], [UCTL_RST]) until BIST is complete.
+ BIST defect status can be checked after FULL BIST completion, both of which are indicated
+ in USBDRD()_UCTL_BIST_STATUS. The full BIST run takes almost 80,000 controller-clock
+ cycles
+ for
+ the largest RAM. */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. A BIST run with this bit set clears all entries in USBDRD
+ RAMs
+ to 0x0.
+
+ There are two major modes of BIST: full and clear. Full BIST is run by the BIST state
+ machine when [CLEAR_BIST] is deasserted during BIST. Clear BIST is run if [CLEAR_BIST] is
+ asserted during BIST.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts the
+ [CLEAR_BIST] setting into the correct state and then perform another CSR write operation
+ to
+ set the BIST trigger (keeping the [CLEAR_BIST] state constant).
+ CLEAR BIST completion is indicated by USBDRD()_UCTL_BIST_STATUS. A BIST clear operation
+ takes almost 2,000 controller-clock cycles for the largest RAM. */
+#endif /* Word 0 - End */
+ } cn81xx;
+ struct bdk_usbdrdx_uctl_ctl_cn83xx
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. A BIST run with this bit set clears all entries in USBDRD
+ RAMs
+ to 0x0.
+
+ There are two major modes of BIST: full and clear. Full BIST is run by the BIST state
+ machine when [CLEAR_BIST] is deasserted during BIST. Clear BIST is run if [CLEAR_BIST] is
+ asserted during BIST.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts the
+ [CLEAR_BIST] setting into the correct state and then perform another CSR write operation
+ to
+ set the BIST trigger (keeping the [CLEAR_BIST] state constant).
+ CLEAR BIST completion is indicated by USBDRD()_UCTL_BIST_STATUS. A BIST clear operation
+ takes almost 2,000 controller-clock cycles for the largest RAM. */
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Rising edge starts BIST on the memories in USBDRD.
+ To run BIST, the controller clock must be both configured and enabled, and should be
+ configured to the maximum available frequency given the available coprocessor clock and
+ dividers.
+ Also, the UCTL, UAHC, and UPHY should be held in software- initiated reset (using
+ [UPHY_RST], [UAHC_RST], [UCTL_RST]) until BIST is complete.
+ BIST defect status can be checked after FULL BIST completion, both of which are indicated
+ in USBDRD()_UCTL_BIST_STATUS. The full BIST run takes almost 80,000 controller-clock
+ cycles
+ for
+ the largest RAM. */
+ uint64_t reserved_60_61 : 2;
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount.
+
+ A value of 0x0 means this feature is disabled.
+
+ The legal values are 0x0.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ * 0x108: if DLMC_REF_CLK* is 19.2MHz, 24MHz, 26MHz, 38.4MHz, 48MHz,
+ 52MHz, 76.8MHz, 96MHz, 104MHz.
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed.
+
+ As [REF_CLK_SEL] = 0x0, the legal values are:
+
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x06, then:
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x64 = 25 MHz on DLMC_REF_CLK*.
+ 0x60 = 26 MHz on DLMC_REF_CLK*.
+ 0x41 = 38.4MHz on DLMC_REF_CLK*.
+ 0x7D = 40 MHz on DLMC_REF_CLK*.
+ 0x34 = 48 MHz on DLMC_REF_CLK*.
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x30 = 52 MHz on DLMC_REF_CLK*.
+ 0x41 = 76.8MHz on DLMC_REF_CLK*.
+ 0x1A = 96 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x30 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x1.
+ 0x18 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x0.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+ 0x19 = 200 MHz on DLMC_REF_CLK*. */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by two before feeding it into the REF_CLK_FSEL divider.
+
+ As [REF_CLK_SEL] = 0x0, the legal value is 0x0.
+
+ This value can be changed only during UPHY_RST.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+ 0x1: if DLMC_REF_CLK* is 40MHz, 76.8MHz, or 200MHz.
+ 0x0, 0x1 if DLMC_REF_CLK* is 104MHz (depending on [MPLL_MULTIPLIER]).
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks.
+
+ As [REF_CLK_SEL] = 0x0, the legal values are:
+
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+ 0x2A = 24 MHz on DLMC_REF_CLK*.
+ 0x31 = 20 MHz on DLMC_REF_CLK*.
+ 0x38 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6 then:
+ 0x07 is the only legal value. */
+ uint64_t reserved_31 : 1;
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ The HCLK frequency must be at or below 300 MHz.
+ The HCLK frequency must be at or above 150 MHz for full-rate USB3
+ operation.
+ The HCLK frequency must be at or above 125 MHz for any USB3
+ functionality.
+
+ If [DRD_MODE] = DEVICE, the HCLK frequency must be at or above 125 MHz for
+ correct USB2 functionality.
+
+ If [DRD_MODE] = HOST, the HCLK frequency must be at or above 90 MHz
+ for full-rate USB2 operation.
+
+ If [DRD_MODE] = HOST, the HCLK frequency must be at or above 62.5 MHz
+ for any USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of:
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 1, col 12.
+ Synopsys DWC_usb3 Databook v2.80a, table A-17, row 7, col 9.
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 7, col 9.
+ DEVICE\>125MHz is from Synopsys DWC_usb3 Databook v2.80a, section A.12.4.
+ HOST2\>62.5MHz in HOST mode is from Synopsys DWC_usb3 Databook v2.80a,
+ section A.12.5, 3rd bullet in Note on page 894.
+ HOST2\>90MHz was arrived at from some math: 62.5MHz +
+ (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_15 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t ref_clk_sel : 3; /**< [ 11: 9](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks.
+ 0x0 = Reference clock sources for both PLLs come from the USB pads.
+ 0x1 = Reserved.
+ 0x2 = Reserved.
+ 0x3 = Reserved.
+ 0x4 = Reserved.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x0, 0x1, or 0x2 then the reference clock input cannot be
+ spread-spectrum.
+
+ Internal:
+ For the 0x6 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for high-speed PLL is PLL_REF_CLK. But in CNXXXX,
+ PLL_REF_CLK cannot be routed to USB without violating jitter requirements */
+ uint64_t reserved_5_8 : 4;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the USB UCTL interface clock (coprocessor clock). This enables access to UAHC
+ and UCTL registers starting from 0x30. */
+ uint64_t drd_mode : 1; /**< [ 3: 3](R/W) Switches between host or device mode for USBDRD.
+ 0 = Host.
+ 1 = Device. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t drd_mode : 1; /**< [ 3: 3](R/W) Switches between host or device mode for USBDRD.
+ 0 = Host.
+ 1 = Device. */
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the USB UCTL interface clock (coprocessor clock). This enables access to UAHC
+ and UCTL registers starting from 0x30. */
+ uint64_t reserved_5_8 : 4;
+ uint64_t ref_clk_sel : 3; /**< [ 11: 9](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks.
+ 0x0 = Reference clock sources for both PLLs come from the USB pads.
+ 0x1 = Reserved.
+ 0x2 = Reserved.
+ 0x3 = Reserved.
+ 0x4 = Reserved.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x0, 0x1, or 0x2 then the reference clock input cannot be
+ spread-spectrum.
+
+ Internal:
+ For the 0x6 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for high-speed PLL is PLL_REF_CLK. But in CNXXXX,
+ PLL_REF_CLK cannot be routed to USB without violating jitter requirements */
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_15 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ The HCLK frequency must be at or below 300 MHz.
+ The HCLK frequency must be at or above 150 MHz for full-rate USB3
+ operation.
+ The HCLK frequency must be at or above 125 MHz for any USB3
+ functionality.
+
+ If [DRD_MODE] = DEVICE, the HCLK frequency must be at or above 125 MHz for
+ correct USB2 functionality.
+
+ If [DRD_MODE] = HOST, the HCLK frequency must be at or above 90 MHz
+ for full-rate USB2 operation.
+
+ If [DRD_MODE] = HOST, the HCLK frequency must be at or above 62.5 MHz
+ for any USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of:
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 1, col 12.
+ Synopsys DWC_usb3 Databook v2.80a, table A-17, row 7, col 9.
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 7, col 9.
+ DEVICE\>125MHz is from Synopsys DWC_usb3 Databook v2.80a, section A.12.4.
+ HOST2\>62.5MHz in HOST mode is from Synopsys DWC_usb3 Databook v2.80a,
+ section A.12.5, 3rd bullet in Note on page 894.
+ HOST2\>90MHz was arrived at from some math: 62.5MHz +
+ (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t reserved_31 : 1;
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks.
+
+ As [REF_CLK_SEL] = 0x0, the legal values are:
+
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+ 0x2A = 24 MHz on DLMC_REF_CLK*.
+ 0x31 = 20 MHz on DLMC_REF_CLK*.
+ 0x38 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6 then:
+ 0x07 is the only legal value. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by two before feeding it into the REF_CLK_FSEL divider.
+
+ As [REF_CLK_SEL] = 0x0, the legal value is 0x0.
+
+ This value can be changed only during UPHY_RST.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+ 0x1: if DLMC_REF_CLK* is 40MHz, 76.8MHz, or 200MHz.
+ 0x0, 0x1 if DLMC_REF_CLK* is 104MHz (depending on [MPLL_MULTIPLIER]).
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed.
+
+ As [REF_CLK_SEL] = 0x0, the legal values are:
+
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x06, then:
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x64 = 25 MHz on DLMC_REF_CLK*.
+ 0x60 = 26 MHz on DLMC_REF_CLK*.
+ 0x41 = 38.4MHz on DLMC_REF_CLK*.
+ 0x7D = 40 MHz on DLMC_REF_CLK*.
+ 0x34 = 48 MHz on DLMC_REF_CLK*.
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x30 = 52 MHz on DLMC_REF_CLK*.
+ 0x41 = 76.8MHz on DLMC_REF_CLK*.
+ 0x1A = 96 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x30 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x1.
+ 0x18 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x0.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+ 0x19 = 200 MHz on DLMC_REF_CLK*. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount.
+
+ A value of 0x0 means this feature is disabled.
+
+ The legal values are 0x0.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ * 0x108: if DLMC_REF_CLK* is 19.2MHz, 24MHz, 26MHz, 38.4MHz, 48MHz,
+ 52MHz, 76.8MHz, 96MHz, 104MHz.
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t reserved_60_61 : 2;
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Rising edge starts BIST on the memories in USBDRD.
+ To run BIST, the controller clock must be both configured and enabled, and should be
+ configured to the maximum available frequency given the available coprocessor clock and
+ dividers.
+ Also, the UCTL, UAHC, and UPHY should be held in software- initiated reset (using
+ [UPHY_RST], [UAHC_RST], [UCTL_RST]) until BIST is complete.
+ BIST defect status can be checked after FULL BIST completion, both of which are indicated
+ in USBDRD()_UCTL_BIST_STATUS. The full BIST run takes almost 80,000 controller-clock
+ cycles
+ for
+ the largest RAM. */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. A BIST run with this bit set clears all entries in USBDRD
+ RAMs
+ to 0x0.
+
+ There are two major modes of BIST: full and clear. Full BIST is run by the BIST state
+ machine when [CLEAR_BIST] is deasserted during BIST. Clear BIST is run if [CLEAR_BIST] is
+ asserted during BIST.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts the
+ [CLEAR_BIST] setting into the correct state and then perform another CSR write operation
+ to
+ set the BIST trigger (keeping the [CLEAR_BIST] state constant).
+ CLEAR BIST completion is indicated by USBDRD()_UCTL_BIST_STATUS. A BIST clear operation
+ takes almost 2,000 controller-clock cycles for the largest RAM. */
+#endif /* Word 0 - End */
+ } cn83xx;
+ struct bdk_usbdrdx_uctl_ctl_cn9
+ {
+ /* CN9XXX-specific layout of word 0. The big-endian and little-endian
+ views below describe the same bit assignment; unlike the cn83xx
+ layout this one carries [CMD_FLR_EN], [CSCLK_FORCE] and
+ [DMA_PSN_IGN] and no BIST control bits. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount.
+
+ A value of 0x0 means this feature is disabled.
+
+ The legal values are 0x0.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ * 0x108: if DLMC_REF_CLK* is 19.2MHz, 24MHz, 26MHz, 38.4MHz, 48MHz,
+ 52MHz, 76.8MHz, 96MHz, 104MHz.
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed.
+
+ As [REF_CLK_SEL] = 0x0, the legal values are:
+
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x06, then:
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x64 = 25 MHz on DLMC_REF_CLK*.
+ 0x60 = 26 MHz on DLMC_REF_CLK*.
+ 0x41 = 38.4MHz on DLMC_REF_CLK*.
+ 0x7D = 40 MHz on DLMC_REF_CLK*.
+ 0x34 = 48 MHz on DLMC_REF_CLK*.
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x30 = 52 MHz on DLMC_REF_CLK*.
+ 0x41 = 76.8MHz on DLMC_REF_CLK*.
+ 0x1A = 96 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x30 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x1.
+ 0x18 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x0.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+ 0x19 = 200 MHz on DLMC_REF_CLK*. */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by two before feeding it into the REF_CLK_FSEL divider.
+
+ As [REF_CLK_SEL] = 0x0, the legal value is 0x0.
+
+ This value can be changed only during UPHY_RST.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+ 0x1: if DLMC_REF_CLK* is 40MHz, 76.8MHz, or 200MHz.
+ 0x0, 0x1 if DLMC_REF_CLK* is 104MHz (depending on [MPLL_MULTIPLIER]).
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks.
+
+ As [REF_CLK_SEL] = 0x0, the legal values are:
+
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+ 0x2A = 24 MHz on DLMC_REF_CLK*.
+ 0x31 = 20 MHz on DLMC_REF_CLK*.
+ 0x38 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6 then:
+ 0x07 is the only legal value. */
+ uint64_t cmd_flr_en : 1; /**< [ 31: 31](R/W) The host controller will stop accepting commands if this bit is set. This bit is
+ for host_mode only.
+
+ In normal FLR, this bit should be set to 0. If software wants the command to
+ finish before FLR, write this bit to 1 and poll USBDRD()_UAHC_USBSTS[HCH] to
+ make sure the command is finished before disabling USBDRD's PCCPF_XXX_CMD[ME]. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ The HCLK frequency must be at or below 300 MHz.
+ The HCLK frequency must be at or above 150 MHz for full-rate USB3
+ operation.
+ The HCLK frequency must be at or above 125 MHz for any USB3
+ functionality.
+
+ If [DRD_MODE] = DEVICE, the HCLK frequency must be at or above 125 MHz for
+ correct USB2 functionality.
+
+ If [DRD_MODE] = HOST, the HCLK frequency must be at or above 90 MHz
+ for full-rate USB2 operation.
+
+ If [DRD_MODE] = HOST, the HCLK frequency must be at or above 62.5 MHz
+ for any USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of:
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 1, col 12.
+ Synopsys DWC_usb3 Databook v2.80a, table A-17, row 7, col 9.
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 7, col 9.
+ DEVICE\>125MHz is from Synopsys DWC_usb3 Databook v2.80a, section A.12.4.
+ HOST2\>62.5MHz in HOST mode is from Synopsys DWC_usb3 Databook v2.80a,
+ section A.12.5, 3rd bullet in Note on page 894.
+ HOST2\>90MHz was arrived at from some math: 62.5MHz +
+ (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_15 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t ref_clk_sel : 3; /**< [ 11: 9](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks.
+ 0x0 = Reference clock sources for both PLLs come from the USB pads.
+ 0x1 = Reserved.
+ 0x2 = Reserved.
+ 0x3 = Reserved.
+ 0x4 = Reserved.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x0, 0x1, or 0x2 then the reference clock input cannot be
+ spread-spectrum.
+
+ Internal:
+ For the 0x6 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for high-speed PLL is PLL_REF_CLK. But in CNXXXX,
+ PLL_REF_CLK cannot be routed to USB without violating jitter requirements */
+ uint64_t reserved_6_8 : 3;
+ uint64_t dma_psn_ign : 1; /**< [ 5: 5](R/W) Handling of poison indication on DMA read responses.
+ 0 = Treat poison data the same way as fault, sending an AXI error to the USB
+ controller.
+ 1 = Ignore poison and proceed with the transaction as if no problems. */
+ uint64_t csclk_force : 1; /**< [ 4: 4](R/W) Force conditional clock to be running. For diagnostic use only.
+ 0 = No override.
+ 1 = Override the enable of conditional clock to force it running. */
+ uint64_t drd_mode : 1; /**< [ 3: 3](R/W) Switches between host or device mode for USBDRD.
+ 0 = Host.
+ 1 = Device. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t drd_mode : 1; /**< [ 3: 3](R/W) Switches between host or device mode for USBDRD.
+ 0 = Host.
+ 1 = Device. */
+ uint64_t csclk_force : 1; /**< [ 4: 4](R/W) Force conditional clock to be running. For diagnostic use only.
+ 0 = No override.
+ 1 = Override the enable of conditional clock to force it running. */
+ uint64_t dma_psn_ign : 1; /**< [ 5: 5](R/W) Handling of poison indication on DMA read responses.
+ 0 = Treat poison data the same way as fault, sending an AXI error to the USB
+ controller.
+ 1 = Ignore poison and proceed with the transaction as if no problems. */
+ uint64_t reserved_6_8 : 3;
+ uint64_t ref_clk_sel : 3; /**< [ 11: 9](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks.
+ 0x0 = Reference clock sources for both PLLs come from the USB pads.
+ 0x1 = Reserved.
+ 0x2 = Reserved.
+ 0x3 = Reserved.
+ 0x4 = Reserved.
+ 0x5 = Reserved.
+ 0x6 = Reserved.
+ 0x7 = Reserved.
+
+ This value can be changed only during UPHY_RST.
+
+ If [REF_CLK_SEL] = 0x0, 0x1, or 0x2 then the reference clock input cannot be
+ spread-spectrum.
+
+ Internal:
+ For the 0x6 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for high-speed PLL is PLL_REF_CLK. But in CNXXXX,
+ PLL_REF_CLK cannot be routed to USB without violating jitter requirements */
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_15 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBDRD()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ The HCLK frequency must be at or below 300 MHz.
+ The HCLK frequency must be at or above 150 MHz for full-rate USB3
+ operation.
+ The HCLK frequency must be at or above 125 MHz for any USB3
+ functionality.
+
+ If [DRD_MODE] = DEVICE, the HCLK frequency must be at or above 125 MHz for
+ correct USB2 functionality.
+
+ If [DRD_MODE] = HOST, the HCLK frequency must be at or above 90 MHz
+ for full-rate USB2 operation.
+
+ If [DRD_MODE] = HOST, the HCLK frequency must be at or above 62.5 MHz
+ for any USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of:
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 1, col 12.
+ Synopsys DWC_usb3 Databook v2.80a, table A-17, row 7, col 9.
+ Synopsys DWC_usb3 Databook v2.80a, table A-16, row 7, col 9.
+ DEVICE\>125MHz is from Synopsys DWC_usb3 Databook v2.80a, section A.12.4.
+ HOST2\>62.5MHz in HOST mode is from Synopsys DWC_usb3 Databook v2.80a,
+ section A.12.5, 3rd bullet in Note on page 894.
+ HOST2\>90MHz was arrived at from some math: 62.5MHz +
+ (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t cmd_flr_en : 1; /**< [ 31: 31](R/W) The host controller will stop accepting commands if this bit is set. This bit is
+ for host_mode only.
+
+ In normal FLR, this bit should be set to 0. If software wants the command to
+ finish before FLR, write this bit to 1 and poll USBDRD()_UAHC_USBSTS[HCH] to
+ make sure the command is finished before disabling USBDRD's PCCPF_XXX_CMD[ME]. */
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks.
+
+ As [REF_CLK_SEL] = 0x0, the legal values are:
+
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ 0x27 = 100 MHz on DLMC_REF_CLK*.
+ 0x2A = 24 MHz on DLMC_REF_CLK*.
+ 0x31 = 20 MHz on DLMC_REF_CLK*.
+ 0x38 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6 then:
+ 0x07 is the only legal value. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by two before feeding it into the REF_CLK_FSEL divider.
+
+ As [REF_CLK_SEL] = 0x0, the legal value is 0x0.
+
+ This value can be changed only during UPHY_RST.
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2 then:
+ all DLMC_REF_CLK* frequencies: 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ 0x1: if DLMC_REF_CLK* is 125MHz.
+ 0x1: if DLMC_REF_CLK* is 40MHz, 76.8MHz, or 200MHz.
+ 0x0, 0x1 if DLMC_REF_CLK* is 104MHz (depending on [MPLL_MULTIPLIER]).
+ 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed.
+
+ As [REF_CLK_SEL] = 0x0, the legal values are:
+
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x06, then:
+ 0x02 = 19.2MHz on DLMC_REF_CLK*.
+ 0x7D = 20 MHz on DLMC_REF_CLK*.
+ 0x68 = 24 MHz on DLMC_REF_CLK*.
+ 0x64 = 25 MHz on DLMC_REF_CLK*.
+ 0x60 = 26 MHz on DLMC_REF_CLK*.
+ 0x41 = 38.4MHz on DLMC_REF_CLK*.
+ 0x7D = 40 MHz on DLMC_REF_CLK*.
+ 0x34 = 48 MHz on DLMC_REF_CLK*.
+ 0x32 = 50 MHz on DLMC_REF_CLK*.
+ 0x30 = 52 MHz on DLMC_REF_CLK*.
+ 0x41 = 76.8MHz on DLMC_REF_CLK*.
+ 0x1A = 96 MHz on DLMC_REF_CLK*.
+ 0x19 = 100 MHz on DLMC_REF_CLK*.
+ 0x30 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x1.
+ 0x18 = 104 MHz on DLMC_REF_CLK* if [REF_CLK_DIV2] = 0x0.
+ 0x28 = 125 MHz on DLMC_REF_CLK*.
+ 0x19 = 200 MHz on DLMC_REF_CLK*. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount.
+
+ A value of 0x0 means this feature is disabled.
+
+ The legal values are 0x0.
+
+ All other values are reserved.
+
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ If [REF_CLK_SEL] = 0x0, 0x1 or 0x2, then:
+ * 0x0 is the only legal value.
+
+ If [REF_CLK_SEL] = 0x4 or 0x5 or 0x6, then:
+ * 0x108: if DLMC_REF_CLK* is 19.2MHz, 24MHz, 26MHz, 38.4MHz, 48MHz,
+ 52MHz, 76.8MHz, 96MHz, 104MHz.
+ * 0x0: if DLMC_REF_CLK* is another supported frequency (see list in
+ [MPLL_MULTIPLIER] description). */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_usbdrdx_uctl_ctl bdk_usbdrdx_uctl_ctl_t; /* Canonical _t alias used by the BDK_CSR_* accessor macros. */
+
+static inline uint64_t BDK_USBDRDX_UCTL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_CTL(unsigned long a)
+{
+    /* Return the NCB base address of USBDRD(a)_UCTL_CTL. All supported
+       models (CN81XX, CN83XX, CN9XXX) place the two controller instances
+       at the same base, 0x1000000000 apart. Out-of-range indices or
+       unknown models are fatal. */
+    int valid = CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                CAVIUM_IS_MODEL(CAVIUM_CN9XXX);
+    if (valid && (a <= 1))
+        return 0x868000100000ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBDRDX_UCTL_CTL", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor hooks consumed by the generic BDK_CSR_READ/WRITE
+   framework: C type, bus type, printable name, BAR, bus number and the
+   argument tuple used for CSR-name formatting. */
+#define typedef_BDK_USBDRDX_UCTL_CTL(a) bdk_usbdrdx_uctl_ctl_t
+#define bustype_BDK_USBDRDX_UCTL_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_CTL(a) "USBDRDX_UCTL_CTL"
+#define device_bar_BDK_USBDRDX_UCTL_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_CTL(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_ecc
+ *
+ * USB UCTL ECC Control Register
+ * This register can be used to disable ECC correction, insert ECC errors, and debug ECC
+ * failures.
+ * * The ECC_ERR* fields are captured when there are no outstanding ECC errors indicated in
+ * INTSTAT and a new ECC error arrives. Prioritization for multiple events occurring on the same
+ * cycle is indicated by the ECC_ERR_SOURCE enumeration: highest encoded value has highest
+ * priority.
+ * * The *ECC_*_DIS fields disable ECC correction; SBE and DBE errors are still reported. If
+ * *ECC_*_DIS = 0x1, then no data-correction occurs.
+ * * The *ECC_FLIP_SYND fields flip the syndrome\<1:0\> bits to generate single-bit/double-bit
+ * error for testing.
+ *
+ * 0x0 = Normal operation.
+ * 0x1 = SBE on bit[0].
+ * 0x2 = SBE on bit[1].
+ * 0x3 = DBE on bit[1:0].
+ *
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbdrdx_uctl_ecc
+{
+ uint64_t u; /**< Whole-register (64-bit) view. */
+ struct bdk_usbdrdx_uctl_ecc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ecc_err_source : 4; /**< [ 59: 56](RO/H) Source of ECC error, see UCTL_ECC_ERR_SOURCE_E. */
+ uint64_t ecc_err_syndrome : 8; /**< [ 55: 48](RO/H) Syndrome bits of the ECC error. */
+ uint64_t ecc_err_address : 16; /**< [ 47: 32](RO/H) RAM address of the ECC error. */
+ uint64_t reserved_21_31 : 11;
+ uint64_t uctl_xm_r_ecc_flip_synd : 2;/**< [ 20: 19](R/W) Insert ECC error for testing purposes. */
+ uint64_t uctl_xm_r_ecc_cor_dis : 1; /**< [ 18: 18](R/W) When set, disables ECC correction on UCTL AxiMaster read-data FIFO
+ (errors are still reported, see register description). */
+ uint64_t uctl_xm_w_ecc_flip_synd : 2;/**< [ 17: 16](R/W) Insert ECC error for testing purposes. */
+ uint64_t uctl_xm_w_ecc_cor_dis : 1; /**< [ 15: 15](R/W) When set, disables ECC correction on UCTL AxiMaster write-data FIFO
+ (errors are still reported). */
+ uint64_t reserved_9_14 : 6;
+ uint64_t uahc_ram2_ecc_flip_synd : 2;/**< [ 8: 7](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram2_ecc_cor_dis : 1; /**< [ 6: 6](R/W) When set, disables ECC correction on UAHC RxFIFO RAMs (RAM2)
+ (errors are still reported). */
+ uint64_t uahc_ram1_ecc_flip_synd : 2;/**< [ 5: 4](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram1_ecc_cor_dis : 1; /**< [ 3: 3](R/W) When set, disables ECC correction on UAHC TxFIFO RAMs (RAM1)
+ (errors are still reported). */
+ uint64_t uahc_ram0_ecc_flip_synd : 2;/**< [ 2: 1](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram0_ecc_cor_dis : 1; /**< [ 0: 0](R/W) When set, disables ECC correction on UAHC Desc/Reg cache (RAM0)
+ (errors are still reported). */
+#else /* Word 0 - Little Endian */
+ uint64_t uahc_ram0_ecc_cor_dis : 1; /**< [ 0: 0](R/W) When set, disables ECC correction on UAHC Desc/Reg cache (RAM0)
+ (errors are still reported). */
+ uint64_t uahc_ram0_ecc_flip_synd : 2;/**< [ 2: 1](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram1_ecc_cor_dis : 1; /**< [ 3: 3](R/W) When set, disables ECC correction on UAHC TxFIFO RAMs (RAM1)
+ (errors are still reported). */
+ uint64_t uahc_ram1_ecc_flip_synd : 2;/**< [ 5: 4](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram2_ecc_cor_dis : 1; /**< [ 6: 6](R/W) When set, disables ECC correction on UAHC RxFIFO RAMs (RAM2)
+ (errors are still reported). */
+ uint64_t uahc_ram2_ecc_flip_synd : 2;/**< [ 8: 7](R/W) Insert ECC error for testing purposes. */
+ uint64_t reserved_9_14 : 6;
+ uint64_t uctl_xm_w_ecc_cor_dis : 1; /**< [ 15: 15](R/W) When set, disables ECC correction on UCTL AxiMaster write-data FIFO
+ (errors are still reported). */
+ uint64_t uctl_xm_w_ecc_flip_synd : 2;/**< [ 17: 16](R/W) Insert ECC error for testing purposes. */
+ uint64_t uctl_xm_r_ecc_cor_dis : 1; /**< [ 18: 18](R/W) When set, disables ECC correction on UCTL AxiMaster read-data FIFO
+ (errors are still reported). */
+ uint64_t uctl_xm_r_ecc_flip_synd : 2;/**< [ 20: 19](R/W) Insert ECC error for testing purposes. */
+ uint64_t reserved_21_31 : 11;
+ uint64_t ecc_err_address : 16; /**< [ 47: 32](RO/H) RAM address of the ECC error. */
+ uint64_t ecc_err_syndrome : 8; /**< [ 55: 48](RO/H) Syndrome bits of the ECC error. */
+ uint64_t ecc_err_source : 4; /**< [ 59: 56](RO/H) Source of ECC error, see UCTL_ECC_ERR_SOURCE_E. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_ecc_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_ecc bdk_usbdrdx_uctl_ecc_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_ECC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_ECC(unsigned long a)
+{
+    /* Compute the CSR address for USBDRD(a)_UCTL_ECC. Instances are spaced
+       0x1000000000 apart; index a must be 0 or 1, and the register exists
+       only on CN81XX and CN83XX parts. Anything else is a fatal error. */
+    if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) || CAVIUM_IS_MODEL(CAVIUM_CN83XX)))
+        return 0x8680001000f0ll + 0x1000000000ll * (a & 0x1);
+    __bdk_csr_fatal("USBDRDX_UCTL_ECC", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata for USBDRD(a)_UCTL_ECC: C type, bus type, printable
+   name, device BAR, and the argument list used when reporting accesses. */
+#define typedef_BDK_USBDRDX_UCTL_ECC(a) bdk_usbdrdx_uctl_ecc_t
+#define bustype_BDK_USBDRDX_UCTL_ECC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_ECC(a) "USBDRDX_UCTL_ECC"
+#define device_bar_BDK_USBDRDX_UCTL_ECC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_ECC(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_ECC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_host_cfg
+ *
+ * USB UCTL Host Controller Configuration Register
+ * This register allows configuration of various host controller (UAHC) features. Most of these
+ * are strap signals and should be modified only while the controller is not running.
+ *
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbdrdx_uctl_host_cfg
+{
+ uint64_t u; /**< Whole register as a single 64-bit value. */
+ struct bdk_usbdrdx_uctl_host_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t host_current_belt : 12; /**< [ 59: 48](RO) This signal indicates the minimum value of all received BELT values and the BELT that is
+ set by the Set LTV command. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t fla : 6; /**< [ 37: 32](R/W) High-speed jitter adjustment. Indicates the correction required to accommodate mac3 clock
+ and utmi clock jitter to measure 125 us duration. With FLA tied to 0x0, the high-speed
+ 125 us micro-frame is counted for 123933 ns. The value needs to be programmed in terms of
+ high-speed bit times in a 30 MHz cycle. Default value that needs to be driven is 0x20
+ (assuming 30 MHz perfect clock).
+
+ FLA connects to the FLADJ register defined in the xHCI spec in the PCI configuration
+ space. Each count is equal to 16 high-speed bit times. By default when this register is
+ set to 0x20, it gives 125 us interval. Now, based on the clock accuracy, you can decrement
+ the count or increment the count to get the 125 us uSOF window.
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t reserved_29_31 : 3;
+ uint64_t bme : 1; /**< [ 28: 28](R/W) Bus-master enable. This signal is used to disable the bus-mastering capability of the
+ host. Disabling this capability stalls DMA accesses. */
+ uint64_t oci_en : 1; /**< [ 27: 27](R/W) Overcurrent-indication enable. When enabled, OCI input to UAHC is taken from the GPIO
+ signals and sense-converted based on [OCI_ACTIVE_HIGH_EN]. The MIO GPIO multiplexer must be
+ programmed accordingly.
+
+ When disabled, OCI input to UAHC is forced to the correct inactive state based on
+ [OCI_ACTIVE_HIGH_EN].
+
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t oci_active_high_en : 1; /**< [ 26: 26](R/W) Overcurrent sense selection. The off-chip sense (high/low) is converted to match the
+ controller's active-high sense.
+ 0 = Overcurrent indication from off-chip source is active-low.
+ 1 = Overcurrent indication from off-chip source is active-high.
+
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t ppc_en : 1; /**< [ 25: 25](R/W) Port-power-control enable.
+ 0 = USBDRD()_UAHC_HCCPARAMS[PPC] report port-power-control feature is unavailable.
+ 1 = USBDRD()_UAHC_HCCPARAMS[PPC] reports port-power-control feature is available. PPC
+ output
+ from UAHC is taken to the GPIO signals and sense-converted based on [PPC_ACTIVE_HIGH_EN].
+
+ The MIO GPIO multiplexer must be programmed accordingly.
+
+ This is a strap signal; it should only be modified when either the UCTL_CTL[UAHC] or
+ UAHC_GCTL[CoreSoftReset] is asserted. */
+ uint64_t ppc_active_high_en : 1; /**< [ 24: 24](R/W) Port power control sense selection. The active-high port-power-control output to off-chip
+ source is converted to match the off-chip sense.
+ 0 = Port-power control to off-chip source is active-low.
+ 1 = Port-power control to off-chip source is active-high.
+
+ This is a strap signal; it should only be modified when either the UCTL_CTL[UAHC] or
+ UAHC_GCTL[CoreSoftReset] is asserted. */
+ uint64_t reserved_0_23 : 24;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_23 : 24;
+ uint64_t ppc_active_high_en : 1; /**< [ 24: 24](R/W) Port power control sense selection. The active-high port-power-control output to off-chip
+ source is converted to match the off-chip sense.
+ 0 = Port-power control to off-chip source is active-low.
+ 1 = Port-power control to off-chip source is active-high.
+
+ This is a strap signal; it should only be modified when either the UCTL_CTL[UAHC] or
+ UAHC_GCTL[CoreSoftReset] is asserted. */
+ uint64_t ppc_en : 1; /**< [ 25: 25](R/W) Port-power-control enable.
+ 0 = USBDRD()_UAHC_HCCPARAMS[PPC] report port-power-control feature is unavailable.
+ 1 = USBDRD()_UAHC_HCCPARAMS[PPC] reports port-power-control feature is available. PPC
+ output
+ from UAHC is taken to the GPIO signals and sense-converted based on [PPC_ACTIVE_HIGH_EN].
+
+ The MIO GPIO multiplexer must be programmed accordingly.
+
+ This is a strap signal; it should only be modified when either the UCTL_CTL[UAHC] or
+ UAHC_GCTL[CoreSoftReset] is asserted. */
+ uint64_t oci_active_high_en : 1; /**< [ 26: 26](R/W) Overcurrent sense selection. The off-chip sense (high/low) is converted to match the
+ controller's active-high sense.
+ 0 = Overcurrent indication from off-chip source is active-low.
+ 1 = Overcurrent indication from off-chip source is active-high.
+
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t oci_en : 1; /**< [ 27: 27](R/W) Overcurrent-indication enable. When enabled, OCI input to UAHC is taken from the GPIO
+ signals and sense-converted based on [OCI_ACTIVE_HIGH_EN]. The MIO GPIO multiplexer must be
+ programmed accordingly.
+
+ When disabled, OCI input to UAHC is forced to the correct inactive state based on
+ [OCI_ACTIVE_HIGH_EN].
+
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t bme : 1; /**< [ 28: 28](R/W) Bus-master enable. This signal is used to disable the bus-mastering capability of the
+ host. Disabling this capability stalls DMA accesses. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t fla : 6; /**< [ 37: 32](R/W) High-speed jitter adjustment. Indicates the correction required to accommodate mac3 clock
+ and utmi clock jitter to measure 125 us duration. With FLA tied to 0x0, the high-speed
+ 125 us micro-frame is counted for 123933 ns. The value needs to be programmed in terms of
+ high-speed bit times in a 30 MHz cycle. Default value that needs to be driven is 0x20
+ (assuming 30 MHz perfect clock).
+
+ FLA connects to the FLADJ register defined in the xHCI spec in the PCI configuration
+ space. Each count is equal to 16 high-speed bit times. By default when this register is
+ set to 0x20, it gives 125 us interval. Now, based on the clock accuracy, you can decrement
+ the count or increment the count to get the 125 us uSOF window.
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t reserved_38_47 : 10;
+ uint64_t host_current_belt : 12; /**< [ 59: 48](RO) This signal indicates the minimum value of all received BELT values and the BELT that is
+ set by the Set LTV command. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_host_cfg_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_host_cfg bdk_usbdrdx_uctl_host_cfg_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_HOST_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_HOST_CFG(unsigned long a)
+{
+    /* Compute the CSR address for USBDRD(a)_UCTL_HOST_CFG. Instances are
+       spaced 0x1000000000 apart; index a must be 0 or 1. All three supported
+       families (CN81XX, CN83XX, CN9XXX) use the same base address. */
+    if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                     CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                     CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x8680001000e0ll + 0x1000000000ll * (a & 0x1);
+    __bdk_csr_fatal("USBDRDX_UCTL_HOST_CFG", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata for USBDRD(a)_UCTL_HOST_CFG: C type, bus type, printable
+   name, device BAR, and the argument list used when reporting accesses. */
+#define typedef_BDK_USBDRDX_UCTL_HOST_CFG(a) bdk_usbdrdx_uctl_host_cfg_t
+#define bustype_BDK_USBDRDX_UCTL_HOST_CFG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_HOST_CFG(a) "USBDRDX_UCTL_HOST_CFG"
+#define device_bar_BDK_USBDRDX_UCTL_HOST_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_HOST_CFG(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_HOST_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_intena_w1c
+ *
+ * USB UCTL Interrupt Enable Clear Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_usbdrdx_uctl_intena_w1c
+{
+ uint64_t u; /**< Whole register as a single 64-bit value. */
+ struct bdk_usbdrdx_uctl_intena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* CN8XXX variant: [DMA_PSN] does not exist; bits <5:3> are reserved. */
+ struct bdk_usbdrdx_uctl_intena_w1c_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } cn8;
+ /* CN9XXX variant: only bits <7:0> are implemented. */
+ struct bdk_usbdrdx_uctl_intena_w1c_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_usbdrdx_uctl_intena_w1c bdk_usbdrdx_uctl_intena_w1c_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_INTENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_INTENA_W1C(unsigned long a)
+{
+    /* Compute the CSR address for USBDRD(a)_UCTL_INTENA_W1C. Instances are
+       spaced 0x1000000000 apart; index a must be 0 or 1. All three supported
+       families (CN81XX, CN83XX, CN9XXX) use the same base address. */
+    if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                     CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                     CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x868000100040ll + 0x1000000000ll * (a & 0x1);
+    __bdk_csr_fatal("USBDRDX_UCTL_INTENA_W1C", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata for USBDRD(a)_UCTL_INTENA_W1C: C type, bus type,
+   printable name, device BAR, and the argument list used when reporting accesses. */
+#define typedef_BDK_USBDRDX_UCTL_INTENA_W1C(a) bdk_usbdrdx_uctl_intena_w1c_t
+#define bustype_BDK_USBDRDX_UCTL_INTENA_W1C(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_INTENA_W1C(a) "USBDRDX_UCTL_INTENA_W1C"
+#define device_bar_BDK_USBDRDX_UCTL_INTENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_INTENA_W1C(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_INTENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_intena_w1s
+ *
+ * USB UCTL Interrupt Enable Set Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_usbdrdx_uctl_intena_w1s
+{
+ uint64_t u; /**< Whole register as a single 64-bit value. */
+ struct bdk_usbdrdx_uctl_intena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* CN8XXX variant: [DMA_PSN] does not exist; bits <5:3> are reserved. */
+ struct bdk_usbdrdx_uctl_intena_w1s_cn8
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } cn8;
+ /* CN9XXX variant: only bits <7:0> are implemented. */
+ struct bdk_usbdrdx_uctl_intena_w1s_cn9
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_8_63 : 56;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_4 : 2;
+ uint64_t dma_psn : 1; /**< [ 5: 5](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_63 : 56;
+#endif /* Word 0 - End */
+ } cn9;
+};
+typedef union bdk_usbdrdx_uctl_intena_w1s bdk_usbdrdx_uctl_intena_w1s_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_INTENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_INTENA_W1S(unsigned long a)
+{
+    /* Compute the CSR address for USBDRD(a)_UCTL_INTENA_W1S. Instances are
+       spaced 0x1000000000 apart; index a must be 0 or 1. All three supported
+       families (CN81XX, CN83XX, CN9XXX) use the same base address. */
+    if ((a <= 1) && (CAVIUM_IS_MODEL(CAVIUM_CN81XX) ||
+                     CAVIUM_IS_MODEL(CAVIUM_CN83XX) ||
+                     CAVIUM_IS_MODEL(CAVIUM_CN9XXX)))
+        return 0x868000100048ll + 0x1000000000ll * (a & 0x1);
+    __bdk_csr_fatal("USBDRDX_UCTL_INTENA_W1S", 1, a, 0, 0, 0);
+}
+
+/* Accessor metadata for USBDRD(a)_UCTL_INTENA_W1S: C type, bus type,
+   printable name, device BAR, and the argument list used when reporting accesses. */
+#define typedef_BDK_USBDRDX_UCTL_INTENA_W1S(a) bdk_usbdrdx_uctl_intena_w1s_t
+#define bustype_BDK_USBDRDX_UCTL_INTENA_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_INTENA_W1S(a) "USBDRDX_UCTL_INTENA_W1S"
+#define device_bar_BDK_USBDRDX_UCTL_INTENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_INTENA_W1S(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_INTENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_intstat
+ *
+ * USB UCTL Interrupt Status Register
+ * This register provides a summary of interrupts. DBEs are detected and
+ * SBE are corrected. For debugging output for ECC DBEs/SBEs, see USBDRD()_UCTL_ECC. This
+ * register can be reset by NCB reset.
+ */
union bdk_usbdrdx_uctl_intstat
{
    uint64_t u; /* Raw 64-bit register value. */
    /* Common superset layout; chip-specific variants (cn8/cn9) follow. */
    struct bdk_usbdrdx_uctl_intstat_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_30_63 : 34;
        uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Detected double-bit error on the UCTL AxiMaster read-data FIFO. */
        uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
        uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
        uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
        uint64_t reserved_22_25 : 4;
        uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Detected double-bit error on the UAHC RxFIFO RAMs (RAM2). */
        uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Detected single-bit error on the UAHC RxFIFO RAMs (RAM2). */
        uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Detected double-bit error on the UAHC TxFIFO RAMs (RAM1). */
        uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Detected single-bit error on the UAHC TxFIFO RAMs (RAM1). */
        uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Detected double-bit error on the UAHC Desc/Reg Cache (RAM0). */
        uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Detected single-bit error on the UAHC Desc/Reg Cache (RAM0). */
        uint64_t reserved_8_15 : 8;
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
        uint64_t dma_psn : 1; /**< [  5:  5](R/W1C/H) Received DMA read response with poisoned data from NCBO. Hardware also sets
                                                                 USBDRD()_UCTL_RAS[DMA_PSN]. */
        uint64_t reserved_3_4 : 2;
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1C/H) Detected bad DMA access from UAHC to NCB. Error information is logged in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates the
                                                                 assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
                                                                 combinations and address out-of-bounds.

                                                                 For more information on exact failures, see the description in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE]. The hardware does not translate the request
                                                                 correctly
                                                                 and results may violate NCB protocols. */
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1 MB of register
                                                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
                                                                 to be set to 1. Error information is logged in USBDRD()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
        uint64_t reserved_0 : 1;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0 : 1;
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1 MB of register
                                                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
                                                                 to be set to 1. Error information is logged in USBDRD()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1C/H) Detected bad DMA access from UAHC to NCB. Error information is logged in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates the
                                                                 assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
                                                                 combinations and address out-of-bounds.

                                                                 For more information on exact failures, see the description in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE]. The hardware does not translate the request
                                                                 correctly
                                                                 and results may violate NCB protocols. */
        uint64_t reserved_3_4 : 2;
        uint64_t dma_psn : 1; /**< [  5:  5](R/W1C/H) Received DMA read response with poisoned data from NCBO. Hardware also sets
                                                                 USBDRD()_UCTL_RAS[DMA_PSN]. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
        uint64_t reserved_8_15 : 8;
        uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Detected single-bit error on the UAHC Desc/Reg Cache (RAM0). */
        uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Detected double-bit error on the UAHC Desc/Reg Cache (RAM0). */
        uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Detected single-bit error on the UAHC TxFIFO RAMs (RAM1). */
        uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Detected double-bit error on the UAHC TxFIFO RAMs (RAM1). */
        uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Detected single-bit error on the UAHC RxFIFO RAMs (RAM2). */
        uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Detected double-bit error on the UAHC RxFIFO RAMs (RAM2). */
        uint64_t reserved_22_25 : 4;
        uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
        uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
        uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
        uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Detected double-bit error on the UCTL AxiMaster read-data FIFO. */
        uint64_t reserved_30_63 : 34;
#endif /* Word 0 - End */
    } s;
    /* CN8XXX layout: identical to the common layout except there is no
       [DMA_PSN] bit -- bits <5:3> are reserved. */
    struct bdk_usbdrdx_uctl_intstat_cn8
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_30_63 : 34;
        uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Detected double-bit error on the UCTL AxiMaster read-data FIFO. */
        uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
        uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
        uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
        uint64_t reserved_22_25 : 4;
        uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Detected double-bit error on the UAHC RxFIFO RAMs (RAM2). */
        uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Detected single-bit error on the UAHC RxFIFO RAMs (RAM2). */
        uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Detected double-bit error on the UAHC TxFIFO RAMs (RAM1). */
        uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Detected single-bit error on the UAHC TxFIFO RAMs (RAM1). */
        uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Detected double-bit error on the UAHC Desc/Reg Cache (RAM0). */
        uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Detected single-bit error on the UAHC Desc/Reg Cache (RAM0). */
        uint64_t reserved_8_15 : 8;
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
        uint64_t reserved_3_5 : 3;
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1C/H) Detected bad DMA access from UAHC to NCB. Error information is logged in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates the
                                                                 assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
                                                                 combinations and address out-of-bounds.

                                                                 For more information on exact failures, see the description in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE]. The hardware does not translate the request
                                                                 correctly
                                                                 and results may violate NCB protocols. */
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1 MB of register
                                                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
                                                                 to be set to 1. Error information is logged in USBDRD()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
        uint64_t reserved_0 : 1;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0 : 1;
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1 MB of register
                                                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
                                                                 to be set to 1. Error information is logged in USBDRD()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1C/H) Detected bad DMA access from UAHC to NCB. Error information is logged in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates the
                                                                 assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
                                                                 combinations and address out-of-bounds.

                                                                 For more information on exact failures, see the description in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE]. The hardware does not translate the request
                                                                 correctly
                                                                 and results may violate NCB protocols. */
        uint64_t reserved_3_5 : 3;
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
        uint64_t reserved_8_15 : 8;
        uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Detected single-bit error on the UAHC Desc/Reg Cache (RAM0). */
        uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Detected double-bit error on the UAHC Desc/Reg Cache (RAM0). */
        uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Detected single-bit error on the UAHC TxFIFO RAMs (RAM1). */
        uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Detected double-bit error on the UAHC TxFIFO RAMs (RAM1). */
        uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Detected single-bit error on the UAHC RxFIFO RAMs (RAM2). */
        uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Detected double-bit error on the UAHC RxFIFO RAMs (RAM2). */
        uint64_t reserved_22_25 : 4;
        uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
        uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
        uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
        uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Detected double-bit error on the UCTL AxiMaster read-data FIFO. */
        uint64_t reserved_30_63 : 34;
#endif /* Word 0 - End */
    } cn8;
    /* CN9XXX layout: only the DMA/shim error bits <7:1> exist; the ECC
       RAM/FIFO status bits of the CN8XXX layout are absent. */
    struct bdk_usbdrdx_uctl_intstat_cn9
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_8_63 : 56;
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
        uint64_t dma_psn : 1; /**< [  5:  5](R/W1C/H) Received DMA read response with poisoned data from NCBO. Hardware also sets
                                                                 USBDRD()_UCTL_RAS[DMA_PSN]. */
        uint64_t reserved_3_4 : 2;
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1C/H) Detected bad DMA access from UAHC to NCB. Error information is logged in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates the
                                                                 assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
                                                                 combinations and address out-of-bounds.

                                                                 For more information on exact failures, see the description in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE]. The hardware does not translate the request
                                                                 correctly
                                                                 and results may violate NCB protocols. */
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1 MB of register
                                                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
                                                                 to be set to 1. Error information is logged in USBDRD()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
        uint64_t reserved_0 : 1;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0 : 1;
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1 MB of register
                                                                 space, starting at offset 0x0. Any accesses outside of this register space cause this bit
                                                                 to be set to 1. Error information is logged in USBDRD()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1C/H) Detected bad DMA access from UAHC to NCB. Error information is logged in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates the
                                                                 assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
                                                                 combinations and address out-of-bounds.

                                                                 For more information on exact failures, see the description in
                                                                 USBDRD()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE]. The hardware does not translate the request
                                                                 correctly
                                                                 and results may violate NCB protocols. */
        uint64_t reserved_3_4 : 2;
        uint64_t dma_psn : 1; /**< [  5:  5](R/W1C/H) Received DMA read response with poisoned data from NCBO. Hardware also sets
                                                                 USBDRD()_UCTL_RAS[DMA_PSN]. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1C/H) Received DMA write response fault error from NCBO. */
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1C/H) Received DMA read response fault error from NCBO. */
        uint64_t reserved_8_63 : 56;
#endif /* Word 0 - End */
    } cn9;
};
typedef union bdk_usbdrdx_uctl_intstat bdk_usbdrdx_uctl_intstat_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_INTSTAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_INTSTAT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000100030ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000100030ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100030ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_INTSTAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_INTSTAT(a) bdk_usbdrdx_uctl_intstat_t
+#define bustype_BDK_USBDRDX_UCTL_INTSTAT(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_INTSTAT(a) "USBDRDX_UCTL_INTSTAT"
+#define device_bar_BDK_USBDRDX_UCTL_INTSTAT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_INTSTAT(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_INTSTAT(a) (a),-1,-1,-1
+
/**
 * Register (NCB) usbdrd#_uctl_intstat_w1s
 *
 * USB UCTL Interrupt Status Register
 * This register sets interrupt bits.
 */
union bdk_usbdrdx_uctl_intstat_w1s
{
    uint64_t u; /* Raw 64-bit register value. */
    /* Common superset layout; chip-specific variants (cn8/cn9) follow. */
    struct bdk_usbdrdx_uctl_intstat_w1s_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_30_63 : 34;
        uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
        uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
        uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
        uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
        uint64_t reserved_22_25 : 4;
        uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
        uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
        uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
        uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
        uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
        uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
        uint64_t reserved_8_15 : 8;
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
        uint64_t dma_psn : 1; /**< [  5:  5](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
        uint64_t reserved_3_4 : 2;
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
        uint64_t reserved_0 : 1;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0 : 1;
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
        uint64_t reserved_3_4 : 2;
        uint64_t dma_psn : 1; /**< [  5:  5](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
        uint64_t reserved_8_15 : 8;
        uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
        uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
        uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
        uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
        uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
        uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
        uint64_t reserved_22_25 : 4;
        uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
        uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
        uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
        uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
        uint64_t reserved_30_63 : 34;
#endif /* Word 0 - End */
    } s;
    /* CN8XXX layout: identical to the common layout except there is no
       [DMA_PSN] bit -- bits <5:3> are reserved. */
    struct bdk_usbdrdx_uctl_intstat_w1s_cn8
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_30_63 : 34;
        uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
        uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
        uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
        uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
        uint64_t reserved_22_25 : 4;
        uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
        uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
        uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
        uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
        uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
        uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
        uint64_t reserved_8_15 : 8;
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
        uint64_t reserved_3_5 : 3;
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
        uint64_t reserved_0 : 1;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0 : 1;
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
        uint64_t reserved_3_5 : 3;
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
        uint64_t reserved_8_15 : 8;
        uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
        uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
        uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
        uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
        uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
        uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
        uint64_t reserved_22_25 : 4;
        uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
        uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
        uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
        uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
        uint64_t reserved_30_63 : 34;
#endif /* Word 0 - End */
    } cn8;
    /* CN9XXX layout: only the DMA/shim error bits <7:1> exist; the ECC
       RAM/FIFO status bits of the CN8XXX layout are absent. */
    struct bdk_usbdrdx_uctl_intstat_w1s_cn9
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t reserved_8_63 : 56;
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
        uint64_t dma_psn : 1; /**< [  5:  5](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
        uint64_t reserved_3_4 : 2;
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
        uint64_t reserved_0 : 1;
#else /* Word 0 - Little Endian */
        uint64_t reserved_0 : 1;
        uint64_t xs_ncb_oob : 1; /**< [  1:  1](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
        uint64_t xm_bad_dma : 1; /**< [  2:  2](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
        uint64_t reserved_3_4 : 2;
        uint64_t dma_psn : 1; /**< [  5:  5](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_PSN]. */
        uint64_t dma_wr_err : 1; /**< [  6:  6](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
        uint64_t dma_rd_err : 1; /**< [  7:  7](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
        uint64_t reserved_8_63 : 56;
#endif /* Word 0 - End */
    } cn9;
};
typedef union bdk_usbdrdx_uctl_intstat_w1s bdk_usbdrdx_uctl_intstat_w1s_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_INTSTAT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_INTSTAT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000100038ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000100038ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100038ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_INTSTAT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_INTSTAT_W1S(a) bdk_usbdrdx_uctl_intstat_w1s_t
+#define bustype_BDK_USBDRDX_UCTL_INTSTAT_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_INTSTAT_W1S(a) "USBDRDX_UCTL_INTSTAT_W1S"
+#define device_bar_BDK_USBDRDX_UCTL_INTSTAT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_INTSTAT_W1S(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_INTSTAT_W1S(a) (a),-1,-1,-1
+
/**
 * Register (NCB) usbdrd#_uctl_pipeclk_counter
 *
 * USB 3 Clock Counter Register
 * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
 *
 * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
 */
union bdk_usbdrdx_uctl_pipeclk_counter
{
    uint64_t u; /* Raw 64-bit register value. */
    struct bdk_usbdrdx_uctl_pipeclk_counter_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint64_t counter : 64; /**< [ 63:  0](R/W) Internal:
                                                                 USB 3.0 free running clock counter. Increments each edge of the USB 3.0 reference clock. */
#else /* Word 0 - Little Endian */
        uint64_t counter : 64; /**< [ 63:  0](R/W) Internal:
                                                                 USB 3.0 free running clock counter. Increments each edge of the USB 3.0 reference clock. */
#endif /* Word 0 - End */
    } s;
    /* No chip-specific variants: the common layout applies everywhere. */
    /* struct bdk_usbdrdx_uctl_pipeclk_counter_s cn; */
};
typedef union bdk_usbdrdx_uctl_pipeclk_counter bdk_usbdrdx_uctl_pipeclk_counter_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_PIPECLK_COUNTER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_PIPECLK_COUNTER(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100020ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_PIPECLK_COUNTER", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_PIPECLK_COUNTER(a) bdk_usbdrdx_uctl_pipeclk_counter_t
+#define bustype_BDK_USBDRDX_UCTL_PIPECLK_COUNTER(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_PIPECLK_COUNTER(a) "USBDRDX_UCTL_PIPECLK_COUNTER"
+#define device_bar_BDK_USBDRDX_UCTL_PIPECLK_COUNTER(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_PIPECLK_COUNTER(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_PIPECLK_COUNTER(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_port#_cfg_hs
+ *
+ * USB UCTL Port Configuration High-Speed Register
+ * This register controls configuration and test controls for the HS port 0 PHY.
+ *
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset.
+ *
+ * Internal:
+ * INTERNAL: All these settings are for HS functionality, connect on DVDD power domain.
+ */
+union bdk_usbdrdx_uctl_portx_cfg_hs
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_portx_cfg_hs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t comp_dis_tune : 3; /**< [ 57: 55](R/W) Disconnect threshold voltage. Adjusts the voltage level for the threshold used to detect a
+ disconnect event at the host.
+ A positive binary bit setting change results in a +1.5% incremental change in the
+ threshold voltage level, while a negative binary bit setting change results in a -1.5%
+ incremental change in the threshold voltage level. */
+ uint64_t sq_rx_tune : 3; /**< [ 54: 52](R/W) Squelch threshold adjustment. Adjusts the voltage level for the threshold used to detect
+ valid high-speed data.
+ A positive binary bit setting change results in a -5% incremental change in threshold
+ voltage level, while a negative binary bit setting change results in a +5% incremental
+ change in threshold voltage level. */
+ uint64_t tx_fsls_tune : 4; /**< [ 51: 48](R/W) Low-speed/full-speed source impedance adjustment. Adjusts the low- and full-speed single-
+ ended source impedance while driving high. This parameter control is encoded in
+ thermometer code.
+ A positive thermometer code change results in a -2.5% incremental change in source
+ impedance. A negative thermometer code change results in +2.5% incremental change in
+ source impedance. Any non-thermometer code setting (that is, 0x9) is not supported and
+ reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t tx_hs_xv_tune : 2; /**< [ 45: 44](R/W) Transmitter high-speed crossover adjustment. This bus adjusts the voltage at which the DP0
+ and DM0 signals cross while transmitting in high-speed mode.
+ 0x3 = default setting.
+ 0x2 = +15 mV.
+ 0x1 = -15 mV.
+ 0x0 = reserved. */
+ uint64_t tx_preemp_amp_tune : 2; /**< [ 43: 42](R/W) High-speed transmitter preemphasis current control. Controls the amount of current
+ sourced to DP0 and DM0 after a J-to-K or K-to-J transition. The high-speed transmitter
+ preemphasis current is defined in terms of unit amounts. One unit amount is approximately
+ 600 A and is defined as 1* preemphasis current.
+ 0x0 = High-speed TX preemphasis is disabled.
+ 0x1 = High-speed TX preemphasis circuit sources 1* preemphasis current.
+ 0x2 = High-speed TX preemphasis circuit sources 2* preemphasis current.
+ 0x3 = High-speed TX preemphasis circuit sources 3* preemphasis current.
+
+ If these signals are not used, set them to 0x0. */
+ uint64_t reserved_41 : 1;
+ uint64_t tx_preemp_pulse_tune : 1; /**< [ 40: 40](R/W) High-speed transmitter preemphasis duration control. Controls the duration for which the
+ high-speed preemphasis current is sourced onto DP0 or DM0. The high-speed transmitter
+ preemphasis duration is defined in terms of unit amounts. One unit of preemphasis duration
+ is approximately 580 ps and is defined as 1* preemphasis duration. This signal is valid
+ only if either TX_PREEMP_AMP_TUNE0[1] or TX_PREEMP_AMP_TUNE0[0] is set to 1.
+ 0 = 2*, long preemphasis current duration (design default).
+ 1 = 1*, short preemphasis current duration.
+
+ If this signal is not used, set it to 0. */
+ uint64_t tx_res_tune : 2; /**< [ 39: 38](R/W) USB source-impedance adjustment. Some applications require additional devices to be added
+ on the USB, such as a series switch, which can add significant series resistance. This bus
+ adjusts the driver source impedance to compensate for added series resistance on the USB.
+ 0x0 = source impedance is decreased by approximately 1.5 ohm.
+ 0x1 = design default.
+ 0x2 = source impedance is decreased by approximately 2 ohm.
+ 0x3 = source impedance is decreased by approximately 4 ohm.
+
+ Any setting other than the default can result in source-impedance variation across
+ process, voltage, and temperature conditions that does not meet USB 2.0 specification
+ limits. If this bus is not used, leave it at the default setting. */
+ uint64_t tx_rise_tune : 2; /**< [ 37: 36](R/W) High-speed transmitter rise-/fall-time adjustment. Adjusts the rise/fall times of the
+ high-speed waveform. A positive binary bit setting change results in a -4% incremental
+ change in the high-speed rise/fall time. A negative binary bit setting change results in a
+ +4% incremental change in the high-speed rise/fall time. */
+ uint64_t tx_vref_tune : 4; /**< [ 35: 32](R/W) High-speed DC voltage-level adjustment. Adjusts the high-speed DC level voltage.
+ A positive binary-bit-setting change results in a +1.25% incremental change in high-speed
+ DC voltage level, while a negative binary-bit-setting change results in a -1.25%
+ incremental change in high-speed DC voltage level.
+
+ The default bit setting is intended to create a high-speed transmit
+ DC level of approximately 400mV. */
+ uint64_t reserved_7_31 : 25;
+ uint64_t otgtune : 3; /**< [ 6: 4](R/W) "VBUS valid threshold adjustment.
+ This bus adjusts the voltage level for the VBUS\<#\>
+ valid threshold. To enable tuning at the board level, connect this
+ bus to a register.
+ Note: A positive binary bit setting change results in a +3%
+ incremental change in threshold voltage level, while a negative
+ binary bit setting change results in a -3% incremental change
+ in threshold voltage level. " */
+ uint64_t reserved_2_3 : 2;
+ uint64_t loopback_enable : 1; /**< [ 1: 1](R/W) Places the high-speed PHY in loopback mode, which concurrently enables high-speed receive
+ and transmit logic. */
+ uint64_t atereset : 1; /**< [ 0: 0](R/W) Per-PHY ATE reset. When the USB core is powered up (not in suspend mode), an automatic
+ tester can use this to disable PHYCLOCK and FREECLK, then re-enable them with an aligned
+ phase.
+ 0 = PHYCLOCK and FREECLK are available within a specific period after ATERESET is
+ deasserted.
+ 1 = PHYCLOCK and FREECLK outputs are disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t atereset : 1; /**< [ 0: 0](R/W) Per-PHY ATE reset. When the USB core is powered up (not in suspend mode), an automatic
+ tester can use this to disable PHYCLOCK and FREECLK, then re-enable them with an aligned
+ phase.
+ 0 = PHYCLOCK and FREECLK are available within a specific period after ATERESET is
+ deasserted.
+ 1 = PHYCLOCK and FREECLK outputs are disabled. */
+ uint64_t loopback_enable : 1; /**< [ 1: 1](R/W) Places the high-speed PHY in loopback mode, which concurrently enables high-speed receive
+ and transmit logic. */
+ uint64_t reserved_2_3 : 2;
+ uint64_t otgtune : 3; /**< [ 6: 4](R/W) "VBUS valid threshold adjustment.
+ This bus adjusts the voltage level for the VBUS\<#\>
+ valid threshold. To enable tuning at the board level, connect this
+ bus to a register.
+ Note: A positive binary bit setting change results in a +3%
+ incremental change in threshold voltage level, while a negative
+ binary bit setting change results in a -3% incremental change
+ in threshold voltage level. " */
+ uint64_t reserved_7_31 : 25;
+ uint64_t tx_vref_tune : 4; /**< [ 35: 32](R/W) High-speed DC voltage-level adjustment. Adjusts the high-speed DC level voltage.
+ A positive binary-bit-setting change results in a +1.25% incremental change in high-speed
+ DC voltage level, while a negative binary-bit-setting change results in a -1.25%
+ incremental change in high-speed DC voltage level.
+
+ The default bit setting is intended to create a high-speed transmit
+ DC level of approximately 400mV. */
+ uint64_t tx_rise_tune : 2; /**< [ 37: 36](R/W) High-speed transmitter rise-/fall-time adjustment. Adjusts the rise/fall times of the
+ high-speed waveform. A positive binary bit setting change results in a -4% incremental
+ change in the high-speed rise/fall time. A negative binary bit setting change results in a
+ +4% incremental change in the high-speed rise/fall time. */
+ uint64_t tx_res_tune : 2; /**< [ 39: 38](R/W) USB source-impedance adjustment. Some applications require additional devices to be added
+ on the USB, such as a series switch, which can add significant series resistance. This bus
+ adjusts the driver source impedance to compensate for added series resistance on the USB.
+ 0x0 = source impedance is decreased by approximately 1.5 ohm.
+ 0x1 = design default.
+ 0x2 = source impedance is decreased by approximately 2 ohm.
+ 0x3 = source impedance is decreased by approximately 4 ohm.
+
+ Any setting other than the default can result in source-impedance variation across
+ process, voltage, and temperature conditions that does not meet USB 2.0 specification
+ limits. If this bus is not used, leave it at the default setting. */
+ uint64_t tx_preemp_pulse_tune : 1; /**< [ 40: 40](R/W) High-speed transmitter preemphasis duration control. Controls the duration for which the
+ high-speed preemphasis current is sourced onto DP0 or DM0. The high-speed transmitter
+ preemphasis duration is defined in terms of unit amounts. One unit of preemphasis duration
+ is approximately 580 ps and is defined as 1* preemphasis duration. This signal is valid
+ only if either TX_PREEMP_AMP_TUNE0[1] or TX_PREEMP_AMP_TUNE0[0] is set to 1.
+ 0 = 2*, long preemphasis current duration (design default).
+ 1 = 1*, short preemphasis current duration.
+
+ If this signal is not used, set it to 0. */
+ uint64_t reserved_41 : 1;
+ uint64_t tx_preemp_amp_tune : 2; /**< [ 43: 42](R/W) High-speed transmitter preemphasis current control. Controls the amount of current
+ sourced to DP0 and DM0 after a J-to-K or K-to-J transition. The high-speed transmitter
+ preemphasis current is defined in terms of unit amounts. One unit amount is approximately
+ 600 A and is defined as 1* preemphasis current.
+ 0x0 = High-speed TX preemphasis is disabled.
+ 0x1 = High-speed TX preemphasis circuit sources 1* preemphasis current.
+ 0x2 = High-speed TX preemphasis circuit sources 2* preemphasis current.
+ 0x3 = High-speed TX preemphasis circuit sources 3* preemphasis current.
+
+ If these signals are not used, set them to 0x0. */
+ uint64_t tx_hs_xv_tune : 2; /**< [ 45: 44](R/W) Transmitter high-speed crossover adjustment. This bus adjusts the voltage at which the DP0
+ and DM0 signals cross while transmitting in high-speed mode.
+ 0x3 = default setting.
+ 0x2 = +15 mV.
+ 0x1 = -15 mV.
+ 0x0 = reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t tx_fsls_tune : 4; /**< [ 51: 48](R/W) Low-speed/full-speed source impedance adjustment. Adjusts the low- and full-speed single-
+ ended source impedance while driving high. This parameter control is encoded in
+ thermometer code.
+ A positive thermometer code change results in a -2.5% incremental change in source
+ impedance. A negative thermometer code change results in +2.5% incremental change in
+ source impedance. Any non-thermometer code setting (that is, 0x9) is not supported and
+ reserved. */
+ uint64_t sq_rx_tune : 3; /**< [ 54: 52](R/W) Squelch threshold adjustment. Adjusts the voltage level for the threshold used to detect
+ valid high-speed data.
+ A positive binary bit setting change results in a -5% incremental change in threshold
+ voltage level, while a negative binary bit setting change results in a +5% incremental
+ change in threshold voltage level. */
+ uint64_t comp_dis_tune : 3; /**< [ 57: 55](R/W) Disconnect threshold voltage. Adjusts the voltage level for the threshold used to detect a
+ disconnect event at the host.
+ A positive binary bit setting change results in a +1.5% incremental change in the
+ threshold voltage level, while a negative binary bit setting change results in a -1.5%
+ incremental change in the threshold voltage level. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbdrdx_uctl_portx_cfg_hs_cn8
+ {
+ /* CN8XXX-specific layout. Identical to the common layout above except that
+ bits [3:2] implement the 2-bit [VATEST_ENABLE] field (the common struct
+ keeps them reserved); on CN9XXX this field shrinks to one bit at [3:3]. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t comp_dis_tune : 3; /**< [ 57: 55](R/W) Disconnect threshold voltage. Adjusts the voltage level for the threshold used to detect a
+ disconnect event at the host.
+ A positive binary bit setting change results in a +1.5% incremental change in the
+ threshold voltage level, while a negative binary bit setting change results in a -1.5%
+ incremental change in the threshold voltage level. */
+ uint64_t sq_rx_tune : 3; /**< [ 54: 52](R/W) Squelch threshold adjustment. Adjusts the voltage level for the threshold used to detect
+ valid high-speed data.
+ A positive binary bit setting change results in a -5% incremental change in threshold
+ voltage level, while a negative binary bit setting change results in a +5% incremental
+ change in threshold voltage level. */
+ uint64_t tx_fsls_tune : 4; /**< [ 51: 48](R/W) Low-speed/full-speed source impedance adjustment. Adjusts the low- and full-speed single-
+ ended source impedance while driving high. This parameter control is encoded in
+ thermometer code.
+ A positive thermometer code change results in a -2.5% incremental change in source
+ impedance. A negative thermometer code change results in +2.5% incremental change in
+ source impedance. Any non-thermometer code setting (that is, 0x9) is not supported and
+ reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t tx_hs_xv_tune : 2; /**< [ 45: 44](R/W) Transmitter high-speed crossover adjustment. This bus adjusts the voltage at which the DP0
+ and DM0 signals cross while transmitting in high-speed mode.
+ 0x3 = default setting.
+ 0x2 = +15 mV.
+ 0x1 = -15 mV.
+ 0x0 = reserved. */
+ uint64_t tx_preemp_amp_tune : 2; /**< [ 43: 42](R/W) High-speed transmitter preemphasis current control. Controls the amount of current
+ sourced to DP0 and DM0 after a J-to-K or K-to-J transition. The high-speed transmitter
+ preemphasis current is defined in terms of unit amounts. One unit amount is approximately
+ 600 A and is defined as 1* preemphasis current.
+ 0x0 = High-speed TX preemphasis is disabled.
+ 0x1 = High-speed TX preemphasis circuit sources 1* preemphasis current.
+ 0x2 = High-speed TX preemphasis circuit sources 2* preemphasis current.
+ 0x3 = High-speed TX preemphasis circuit sources 3* preemphasis current.
+
+ If these signals are not used, set them to 0x0. */
+ uint64_t reserved_41 : 1;
+ uint64_t tx_preemp_pulse_tune : 1; /**< [ 40: 40](R/W) High-speed transmitter preemphasis duration control. Controls the duration for which the
+ high-speed preemphasis current is sourced onto DP0 or DM0. The high-speed transmitter
+ preemphasis duration is defined in terms of unit amounts. One unit of preemphasis duration
+ is approximately 580 ps and is defined as 1* preemphasis duration. This signal is valid
+ only if either TX_PREEMP_AMP_TUNE0[1] or TX_PREEMP_AMP_TUNE0[0] is set to 1.
+ 0 = 2*, long preemphasis current duration (design default).
+ 1 = 1*, short preemphasis current duration.
+
+ If this signal is not used, set it to 0. */
+ uint64_t tx_res_tune : 2; /**< [ 39: 38](R/W) USB source-impedance adjustment. Some applications require additional devices to be added
+ on the USB, such as a series switch, which can add significant series resistance. This bus
+ adjusts the driver source impedance to compensate for added series resistance on the USB.
+ 0x0 = source impedance is decreased by approximately 1.5 ohm.
+ 0x1 = design default.
+ 0x2 = source impedance is decreased by approximately 2 ohm.
+ 0x3 = source impedance is decreased by approximately 4 ohm.
+
+ Any setting other than the default can result in source-impedance variation across
+ process, voltage, and temperature conditions that does not meet USB 2.0 specification
+ limits. If this bus is not used, leave it at the default setting. */
+ uint64_t tx_rise_tune : 2; /**< [ 37: 36](R/W) High-speed transmitter rise-/fall-time adjustment. Adjusts the rise/fall times of the
+ high-speed waveform. A positive binary bit setting change results in a -4% incremental
+ change in the high-speed rise/fall time. A negative binary bit setting change results in a
+ +4% incremental change in the high-speed rise/fall time. */
+ uint64_t tx_vref_tune : 4; /**< [ 35: 32](R/W) High-speed DC voltage-level adjustment. Adjusts the high-speed DC level voltage.
+ A positive binary-bit-setting change results in a +1.25% incremental change in high-speed
+ DC voltage level, while a negative binary-bit-setting change results in a -1.25%
+ incremental change in high-speed DC voltage level.
+
+ The default bit setting is intended to create a high-speed transmit
+ DC level of approximately 400mV. */
+ uint64_t reserved_7_31 : 25;
+ uint64_t otgtune : 3; /**< [ 6: 4](R/W) "VBUS valid threshold adjustment.
+ This bus adjusts the voltage level for the VBUS\<#\>
+ valid threshold. To enable tuning at the board level, connect this
+ bus to a register.
+ Note: A positive binary bit setting change results in a +3%
+ incremental change in threshold voltage level, while a negative
+ binary bit setting change results in a -3% incremental change
+ in threshold voltage level. " */
+ uint64_t vatest_enable : 2; /**< [ 3: 2](R/W) Analog test-pin select. Enables analog test voltages to be placed on the ID0 pin.
+ 0x0 = Test functionality disabled.
+ 0x1 = Test functionality enabled.
+ 0x2, 0x3 = Reserved, invalid settings.
+
+ See also the PHY databook for details on how to select which analog test voltage. */
+ uint64_t loopback_enable : 1; /**< [ 1: 1](R/W) Places the high-speed PHY in loopback mode, which concurrently enables high-speed receive
+ and transmit logic. */
+ uint64_t atereset : 1; /**< [ 0: 0](R/W) Per-PHY ATE reset. When the USB core is powered up (not in suspend mode), an automatic
+ tester can use this to disable PHYCLOCK and FREECLK, then re-enable them with an aligned
+ phase.
+ 0 = PHYCLOCK and FREECLK are available within a specific period after ATERESET is
+ deasserted.
+ 1 = PHYCLOCK and FREECLK outputs are disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t atereset : 1; /**< [ 0: 0](R/W) Per-PHY ATE reset. When the USB core is powered up (not in suspend mode), an automatic
+ tester can use this to disable PHYCLOCK and FREECLK, then re-enable them with an aligned
+ phase.
+ 0 = PHYCLOCK and FREECLK are available within a specific period after ATERESET is
+ deasserted.
+ 1 = PHYCLOCK and FREECLK outputs are disabled. */
+ uint64_t loopback_enable : 1; /**< [ 1: 1](R/W) Places the high-speed PHY in loopback mode, which concurrently enables high-speed receive
+ and transmit logic. */
+ uint64_t vatest_enable : 2; /**< [ 3: 2](R/W) Analog test-pin select. Enables analog test voltages to be placed on the ID0 pin.
+ 0x0 = Test functionality disabled.
+ 0x1 = Test functionality enabled.
+ 0x2, 0x3 = Reserved, invalid settings.
+
+ See also the PHY databook for details on how to select which analog test voltage. */
+ uint64_t otgtune : 3; /**< [ 6: 4](R/W) "VBUS valid threshold adjustment.
+ This bus adjusts the voltage level for the VBUS\<#\>
+ valid threshold. To enable tuning at the board level, connect this
+ bus to a register.
+ Note: A positive binary bit setting change results in a +3%
+ incremental change in threshold voltage level, while a negative
+ binary bit setting change results in a -3% incremental change
+ in threshold voltage level. " */
+ uint64_t reserved_7_31 : 25;
+ uint64_t tx_vref_tune : 4; /**< [ 35: 32](R/W) High-speed DC voltage-level adjustment. Adjusts the high-speed DC level voltage.
+ A positive binary-bit-setting change results in a +1.25% incremental change in high-speed
+ DC voltage level, while a negative binary-bit-setting change results in a -1.25%
+ incremental change in high-speed DC voltage level.
+
+ The default bit setting is intended to create a high-speed transmit
+ DC level of approximately 400mV. */
+ uint64_t tx_rise_tune : 2; /**< [ 37: 36](R/W) High-speed transmitter rise-/fall-time adjustment. Adjusts the rise/fall times of the
+ high-speed waveform. A positive binary bit setting change results in a -4% incremental
+ change in the high-speed rise/fall time. A negative binary bit setting change results in a
+ +4% incremental change in the high-speed rise/fall time. */
+ uint64_t tx_res_tune : 2; /**< [ 39: 38](R/W) USB source-impedance adjustment. Some applications require additional devices to be added
+ on the USB, such as a series switch, which can add significant series resistance. This bus
+ adjusts the driver source impedance to compensate for added series resistance on the USB.
+ 0x0 = source impedance is decreased by approximately 1.5 ohm.
+ 0x1 = design default.
+ 0x2 = source impedance is decreased by approximately 2 ohm.
+ 0x3 = source impedance is decreased by approximately 4 ohm.
+
+ Any setting other than the default can result in source-impedance variation across
+ process, voltage, and temperature conditions that does not meet USB 2.0 specification
+ limits. If this bus is not used, leave it at the default setting. */
+ uint64_t tx_preemp_pulse_tune : 1; /**< [ 40: 40](R/W) High-speed transmitter preemphasis duration control. Controls the duration for which the
+ high-speed preemphasis current is sourced onto DP0 or DM0. The high-speed transmitter
+ preemphasis duration is defined in terms of unit amounts. One unit of preemphasis duration
+ is approximately 580 ps and is defined as 1* preemphasis duration. This signal is valid
+ only if either TX_PREEMP_AMP_TUNE0[1] or TX_PREEMP_AMP_TUNE0[0] is set to 1.
+ 0 = 2*, long preemphasis current duration (design default).
+ 1 = 1*, short preemphasis current duration.
+
+ If this signal is not used, set it to 0. */
+ uint64_t reserved_41 : 1;
+ uint64_t tx_preemp_amp_tune : 2; /**< [ 43: 42](R/W) High-speed transmitter preemphasis current control. Controls the amount of current
+ sourced to DP0 and DM0 after a J-to-K or K-to-J transition. The high-speed transmitter
+ preemphasis current is defined in terms of unit amounts. One unit amount is approximately
+ 600 A and is defined as 1* preemphasis current.
+ 0x0 = High-speed TX preemphasis is disabled.
+ 0x1 = High-speed TX preemphasis circuit sources 1* preemphasis current.
+ 0x2 = High-speed TX preemphasis circuit sources 2* preemphasis current.
+ 0x3 = High-speed TX preemphasis circuit sources 3* preemphasis current.
+
+ If these signals are not used, set them to 0x0. */
+ uint64_t tx_hs_xv_tune : 2; /**< [ 45: 44](R/W) Transmitter high-speed crossover adjustment. This bus adjusts the voltage at which the DP0
+ and DM0 signals cross while transmitting in high-speed mode.
+ 0x3 = default setting.
+ 0x2 = +15 mV.
+ 0x1 = -15 mV.
+ 0x0 = reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t tx_fsls_tune : 4; /**< [ 51: 48](R/W) Low-speed/full-speed source impedance adjustment. Adjusts the low- and full-speed single-
+ ended source impedance while driving high. This parameter control is encoded in
+ thermometer code.
+ A positive thermometer code change results in a -2.5% incremental change in source
+ impedance. A negative thermometer code change results in +2.5% incremental change in
+ source impedance. Any non-thermometer code setting (that is, 0x9) is not supported and
+ reserved. */
+ uint64_t sq_rx_tune : 3; /**< [ 54: 52](R/W) Squelch threshold adjustment. Adjusts the voltage level for the threshold used to detect
+ valid high-speed data.
+ A positive binary bit setting change results in a -5% incremental change in threshold
+ voltage level, while a negative binary bit setting change results in a +5% incremental
+ change in threshold voltage level. */
+ uint64_t comp_dis_tune : 3; /**< [ 57: 55](R/W) Disconnect threshold voltage. Adjusts the voltage level for the threshold used to detect a
+ disconnect event at the host.
+ A positive binary bit setting change results in a +1.5% incremental change in the
+ threshold voltage level, while a negative binary bit setting change results in a -1.5%
+ incremental change in the threshold voltage level. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn8;
+ struct bdk_usbdrdx_uctl_portx_cfg_hs_cn9
+ {
+ /* CN9XXX-specific layout. Differs from the CN8 variant only in that
+ [VATEST_ENABLE] is narrowed to a single bit at [3:3] and bit [2] becomes
+ reserved; all other fields occupy the same bit positions. */
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t comp_dis_tune : 3; /**< [ 57: 55](R/W) Disconnect threshold voltage. Adjusts the voltage level for the threshold used to detect a
+ disconnect event at the host.
+ A positive binary bit setting change results in a +1.5% incremental change in the
+ threshold voltage level, while a negative binary bit setting change results in a -1.5%
+ incremental change in the threshold voltage level. */
+ uint64_t sq_rx_tune : 3; /**< [ 54: 52](R/W) Squelch threshold adjustment. Adjusts the voltage level for the threshold used to detect
+ valid high-speed data.
+ A positive binary bit setting change results in a -5% incremental change in threshold
+ voltage level, while a negative binary bit setting change results in a +5% incremental
+ change in threshold voltage level. */
+ uint64_t tx_fsls_tune : 4; /**< [ 51: 48](R/W) Low-speed/full-speed source impedance adjustment. Adjusts the low- and full-speed single-
+ ended source impedance while driving high. This parameter control is encoded in
+ thermometer code.
+ A positive thermometer code change results in a -2.5% incremental change in source
+ impedance. A negative thermometer code change results in +2.5% incremental change in
+ source impedance. Any non-thermometer code setting (that is, 0x9) is not supported and
+ reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t tx_hs_xv_tune : 2; /**< [ 45: 44](R/W) Transmitter high-speed crossover adjustment. This bus adjusts the voltage at which the DP0
+ and DM0 signals cross while transmitting in high-speed mode.
+ 0x3 = default setting.
+ 0x2 = +15 mV.
+ 0x1 = -15 mV.
+ 0x0 = reserved. */
+ uint64_t tx_preemp_amp_tune : 2; /**< [ 43: 42](R/W) High-speed transmitter preemphasis current control. Controls the amount of current
+ sourced to DP0 and DM0 after a J-to-K or K-to-J transition. The high-speed transmitter
+ preemphasis current is defined in terms of unit amounts. One unit amount is approximately
+ 600 A and is defined as 1* preemphasis current.
+ 0x0 = High-speed TX preemphasis is disabled.
+ 0x1 = High-speed TX preemphasis circuit sources 1* preemphasis current.
+ 0x2 = High-speed TX preemphasis circuit sources 2* preemphasis current.
+ 0x3 = High-speed TX preemphasis circuit sources 3* preemphasis current.
+
+ If these signals are not used, set them to 0x0. */
+ uint64_t reserved_41 : 1;
+ uint64_t tx_preemp_pulse_tune : 1; /**< [ 40: 40](R/W) High-speed transmitter preemphasis duration control. Controls the duration for which the
+ high-speed preemphasis current is sourced onto DP0 or DM0. The high-speed transmitter
+ preemphasis duration is defined in terms of unit amounts. One unit of preemphasis duration
+ is approximately 580 ps and is defined as 1* preemphasis duration. This signal is valid
+ only if either TX_PREEMP_AMP_TUNE0[1] or TX_PREEMP_AMP_TUNE0[0] is set to 1.
+ 0 = 2*, long preemphasis current duration (design default).
+ 1 = 1*, short preemphasis current duration.
+
+ If this signal is not used, set it to 0. */
+ uint64_t tx_res_tune : 2; /**< [ 39: 38](R/W) USB source-impedance adjustment. Some applications require additional devices to be added
+ on the USB, such as a series switch, which can add significant series resistance. This bus
+ adjusts the driver source impedance to compensate for added series resistance on the USB.
+ 0x0 = source impedance is decreased by approximately 1.5 ohm.
+ 0x1 = design default.
+ 0x2 = source impedance is decreased by approximately 2 ohm.
+ 0x3 = source impedance is decreased by approximately 4 ohm.
+
+ Any setting other than the default can result in source-impedance variation across
+ process, voltage, and temperature conditions that does not meet USB 2.0 specification
+ limits. If this bus is not used, leave it at the default setting. */
+ uint64_t tx_rise_tune : 2; /**< [ 37: 36](R/W) High-speed transmitter rise-/fall-time adjustment. Adjusts the rise/fall times of the
+ high-speed waveform. A positive binary bit setting change results in a -4% incremental
+ change in the high-speed rise/fall time. A negative binary bit setting change results in a
+ +4% incremental change in the high-speed rise/fall time. */
+ uint64_t tx_vref_tune : 4; /**< [ 35: 32](R/W) High-speed DC voltage-level adjustment. Adjusts the high-speed DC level voltage.
+ A positive binary-bit-setting change results in a +1.25% incremental change in high-speed
+ DC voltage level, while a negative binary-bit-setting change results in a -1.25%
+ incremental change in high-speed DC voltage level.
+
+ The default bit setting is intended to create a high-speed transmit
+ DC level of approximately 400mV. */
+ uint64_t reserved_7_31 : 25;
+ uint64_t otgtune : 3; /**< [ 6: 4](R/W) "VBUS valid threshold adjustment.
+ This bus adjusts the voltage level for the VBUS\<#\>
+ valid threshold. To enable tuning at the board level, connect this
+ bus to a register.
+ Note: A positive binary bit setting change results in a +3%
+ incremental change in threshold voltage level, while a negative
+ binary bit setting change results in a -3% incremental change
+ in threshold voltage level. " */
+ uint64_t vatest_enable : 1; /**< [ 3: 3](R/W) Analog test-pin select. Enables analog test voltages to be placed on the ID0 pin.
+ 0x0 = Test functionality disabled.
+ 0x1 = Test functionality enabled.
+ 0x2, 0x3 = Reserved, invalid settings.
+
+ See also the PHY databook for details on how to select which analog test voltage. */
+ uint64_t reserved_2 : 1;
+ uint64_t loopback_enable : 1; /**< [ 1: 1](R/W) Places the high-speed PHY in loopback mode, which concurrently enables high-speed receive
+ and transmit logic. */
+ uint64_t atereset : 1; /**< [ 0: 0](R/W) Per-PHY ATE reset. When the USB core is powered up (not in suspend mode), an automatic
+ tester can use this to disable PHYCLOCK and FREECLK, then re-enable them with an aligned
+ phase.
+ 0 = PHYCLOCK and FREECLK are available within a specific period after ATERESET is
+ deasserted.
+ 1 = PHYCLOCK and FREECLK outputs are disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t atereset : 1; /**< [ 0: 0](R/W) Per-PHY ATE reset. When the USB core is powered up (not in suspend mode), an automatic
+ tester can use this to disable PHYCLOCK and FREECLK, then re-enable them with an aligned
+ phase.
+ 0 = PHYCLOCK and FREECLK are available within a specific period after ATERESET is
+ deasserted.
+ 1 = PHYCLOCK and FREECLK outputs are disabled. */
+ uint64_t loopback_enable : 1; /**< [ 1: 1](R/W) Places the high-speed PHY in loopback mode, which concurrently enables high-speed receive
+ and transmit logic. */
+ uint64_t reserved_2 : 1;
+ uint64_t vatest_enable : 1; /**< [ 3: 3](R/W) Analog test-pin select. Enables analog test voltages to be placed on the ID0 pin.
+ 0x0 = Test functionality disabled.
+ 0x1 = Test functionality enabled.
+ 0x2, 0x3 = Reserved, invalid settings.
+
+ See also the PHY databook for details on how to select which analog test voltage. */
+ uint64_t otgtune : 3; /**< [ 6: 4](R/W) "VBUS valid threshold adjustment.
+ This bus adjusts the voltage level for the VBUS\<#\>
+ valid threshold. To enable tuning at the board level, connect this
+ bus to a register.
+ Note: A positive binary bit setting change results in a +3%
+ incremental change in threshold voltage level, while a negative
+ binary bit setting change results in a -3% incremental change
+ in threshold voltage level. " */
+ uint64_t reserved_7_31 : 25;
+ uint64_t tx_vref_tune : 4; /**< [ 35: 32](R/W) High-speed DC voltage-level adjustment. Adjusts the high-speed DC level voltage.
+ A positive binary-bit-setting change results in a +1.25% incremental change in high-speed
+ DC voltage level, while a negative binary-bit-setting change results in a -1.25%
+ incremental change in high-speed DC voltage level.
+
+ The default bit setting is intended to create a high-speed transmit
+ DC level of approximately 400mV. */
+ uint64_t tx_rise_tune : 2; /**< [ 37: 36](R/W) High-speed transmitter rise-/fall-time adjustment. Adjusts the rise/fall times of the
+ high-speed waveform. A positive binary bit setting change results in a -4% incremental
+ change in the high-speed rise/fall time. A negative binary bit setting change results in a
+ +4% incremental change in the high-speed rise/fall time. */
+ uint64_t tx_res_tune : 2; /**< [ 39: 38](R/W) USB source-impedance adjustment. Some applications require additional devices to be added
+ on the USB, such as a series switch, which can add significant series resistance. This bus
+ adjusts the driver source impedance to compensate for added series resistance on the USB.
+ 0x0 = source impedance is decreased by approximately 1.5 ohm.
+ 0x1 = design default.
+ 0x2 = source impedance is decreased by approximately 2 ohm.
+ 0x3 = source impedance is decreased by approximately 4 ohm.
+
+ Any setting other than the default can result in source-impedance variation across
+ process, voltage, and temperature conditions that does not meet USB 2.0 specification
+ limits. If this bus is not used, leave it at the default setting. */
+ uint64_t tx_preemp_pulse_tune : 1; /**< [ 40: 40](R/W) High-speed transmitter preemphasis duration control. Controls the duration for which the
+ high-speed preemphasis current is sourced onto DP0 or DM0. The high-speed transmitter
+ preemphasis duration is defined in terms of unit amounts. One unit of preemphasis duration
+ is approximately 580 ps and is defined as 1* preemphasis duration. This signal is valid
+ only if either TX_PREEMP_AMP_TUNE0[1] or TX_PREEMP_AMP_TUNE0[0] is set to 1.
+ 0 = 2*, long preemphasis current duration (design default).
+ 1 = 1*, short preemphasis current duration.
+
+ If this signal is not used, set it to 0. */
+ uint64_t reserved_41 : 1;
+ uint64_t tx_preemp_amp_tune : 2; /**< [ 43: 42](R/W) High-speed transmitter preemphasis current control. Controls the amount of current
+ sourced to DP0 and DM0 after a J-to-K or K-to-J transition. The high-speed transmitter
+ preemphasis current is defined in terms of unit amounts. One unit amount is approximately
+ 600 A and is defined as 1* preemphasis current.
+ 0x0 = High-speed TX preemphasis is disabled.
+ 0x1 = High-speed TX preemphasis circuit sources 1* preemphasis current.
+ 0x2 = High-speed TX preemphasis circuit sources 2* preemphasis current.
+ 0x3 = High-speed TX preemphasis circuit sources 3* preemphasis current.
+
+ If these signals are not used, set them to 0x0. */
+ uint64_t tx_hs_xv_tune : 2; /**< [ 45: 44](R/W) Transmitter high-speed crossover adjustment. This bus adjusts the voltage at which the DP0
+ and DM0 signals cross while transmitting in high-speed mode.
+ 0x3 = default setting.
+ 0x2 = +15 mV.
+ 0x1 = -15 mV.
+ 0x0 = reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t tx_fsls_tune : 4; /**< [ 51: 48](R/W) Low-speed/full-speed source impedance adjustment. Adjusts the low- and full-speed single-
+ ended source impedance while driving high. This parameter control is encoded in
+ thermometer code.
+ A positive thermometer code change results in a -2.5% incremental change in source
+ impedance. A negative thermometer code change results in +2.5% incremental change in
+ source impedance. Any non-thermometer code setting (that is, 0x9) is not supported and
+ reserved. */
+ uint64_t sq_rx_tune : 3; /**< [ 54: 52](R/W) Squelch threshold adjustment. Adjusts the voltage level for the threshold used to detect
+ valid high-speed data.
+ A positive binary bit setting change results in a -5% incremental change in threshold
+ voltage level, while a negative binary bit setting change results in a +5% incremental
+ change in threshold voltage level. */
+ uint64_t comp_dis_tune : 3; /**< [ 57: 55](R/W) Disconnect threshold voltage. Adjusts the voltage level for the threshold used to detect a
+ disconnect event at the host.
+ A positive binary bit setting change results in a +1.5% incremental change in the
+ threshold voltage level, while a negative binary bit setting change results in a -1.5%
+ incremental change in the threshold voltage level. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } cn9;
+};
+/* Value type handed to the generic BDK_CSR_READ/WRITE helpers for this register. */
+typedef union bdk_usbdrdx_uctl_portx_cfg_hs bdk_usbdrdx_uctl_portx_cfg_hs_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_PORTX_CFG_HS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_PORTX_CFG_HS(unsigned long a, unsigned long b)
+{
+    /* Physical address of USBDRD(a)_UCTL_PORT(b)_CFG_HS. The register exists
+       on CN81XX, CN83XX and CN9XXX, with USB controller a in {0,1} and a
+       single port (b == 0); any other combination is a fatal CSR lookup. */
+    const uint64_t addr = 0x868000100050ll + 0x1000000000ll * ((a) & 0x1) + 0x20ll * ((b) & 0x0);
+    const int in_range = (a<=1) && (b==0);
+    if (in_range && CAVIUM_IS_MODEL(CAVIUM_CN81XX))
+        return addr;
+    if (in_range && CAVIUM_IS_MODEL(CAVIUM_CN83XX))
+        return addr;
+    if (in_range && CAVIUM_IS_MODEL(CAVIUM_CN9XXX))
+        return addr;
+    __bdk_csr_fatal("USBDRDX_UCTL_PORTX_CFG_HS", 2, a, b, 0, 0);
+}
+
+/* Accessor metadata consumed by the generic BDK_CSR_* macros: value type,
+ bus/access method, printable name, device BAR, bus number and index list
+ for USBDRD(a)_UCTL_PORT(b)_CFG_HS. */
+#define typedef_BDK_USBDRDX_UCTL_PORTX_CFG_HS(a,b) bdk_usbdrdx_uctl_portx_cfg_hs_t
+#define bustype_BDK_USBDRDX_UCTL_PORTX_CFG_HS(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_PORTX_CFG_HS(a,b) "USBDRDX_UCTL_PORTX_CFG_HS"
+#define device_bar_BDK_USBDRDX_UCTL_PORTX_CFG_HS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_PORTX_CFG_HS(a,b) (a)
+#define arguments_BDK_USBDRDX_UCTL_PORTX_CFG_HS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_port#_cfg_ss
+ *
+ * USB UCTL Port Configuration SuperSpeed Register
+ * This register controls configuration and test controls for the SS port 0 PHY.
+ *
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
+ *
+ * Internal:
+ * All these settings are for high-speed functionality, connect on DVDD power domain.
+ */
+union bdk_usbdrdx_uctl_portx_cfg_ss
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_portx_cfg_ss_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tx_vboost_lvl : 3; /**< [ 63: 61](R/W) TX voltage-boost level. Sets the boosted transmit launch amplitude (mVppd). The default
+ bit setting is intended to set the launch amplitude to approximately 1,008 mVppd. A
+ single, positive binary bit setting change results in a +156 mVppd change in the TX launch
+ amplitude.
+ A single, negative binary bit setting change results in a -156 mVppd change in the TX
+ launch amplitude. All settings more than one binary bit change should not be used.
+ 0x3 = 0.844 V launch amplitude.
+ 0x4 = 1.008 V launch amplitude.
+ 0x5 = 1.156 V launch amplitude.
+ All others values are invalid. */
+ uint64_t los_bias : 3; /**< [ 60: 58](R/W) Loss-of-signal detector threshold-level control. A positive, binary bit setting change
+ results in a +15 mVp incremental change in the LOS threshold.
+ A negative binary bit setting change results in a -15 mVp incremental change in the LOS
+ threshold. The 0x0 setting is reserved and must not be used. The default 0x5 setting
+ corresponds to approximately 105 mVp.
+ 0x0 = invalid.
+ 0x1 = 45 mV.
+ 0x2 = 60 mV.
+ 0x3 = 75 mV.
+ 0x4 = 90 mV.
+ 0x5 = 105 mV (default).
+ 0x6 = 120 mV.
+ 0x7 = 135 mV. */
+ uint64_t lane0_ext_pclk_req : 1; /**< [ 57: 57](R/W) When asserted, this signal enables the pipe0_pclk output regardless of power state (along
+ with the associated increase in power consumption). You can use this input to enable
+ pipe0_pclk in the P3 state without going through a complete boot sequence. */
+ uint64_t lane0_tx2rx_loopbk : 1; /**< [ 56: 56](R/W) When asserted, data from TX predriver is looped back to RX slicers. LOS is bypassed and
+ based on the tx0_en input so that rx0_los = !tx_data_en. */
+ uint64_t reserved_42_55 : 14;
+ uint64_t pcs_rx_los_mask_val : 10; /**< [ 41: 32](R/W) Configurable loss-of-signal mask width. Sets the number of reference clock cycles to mask
+ the incoming LFPS in U3 and U2 states. Masks the incoming LFPS for the number of reference
+ clock cycles equal to the value of pcs_rx_los_mask_val\<9:0\>. This control filters out
+ short, non-compliant LFPS glitches sent by a noncompliant host.
+
+ For normal operation, set to a targeted mask interval of 10us (value = 10us / Tref_clk).
+ If the USBDRD()_UCTL_CTL[REF_CLK_DIV2] is used, then
+ (value = 10us / (2 * Tref_clk)). These equations are based on the SuperSpeed reference
+ clock frequency. The value of [PCS_RX_LOS_MASK_VAL] should be as follows:
+
+ \<pre\>
+ Frequency DIV2 LOS_MASK
+ --------- --- --------
+ 200 MHz 1 0x3E8
+ 125 MHz 0 0x4E2
+ 104 MHz 0 0x410
+ 100 MHz 0 0x3E8
+ 96 MHz 0 0x3C0
+ 76.8 MHz 1 0x180
+ 52 MHz 0 0x208
+ 50 MHz 0 0x1F4
+ 48 MHz 0 0x1E0
+ 40 MHz 1 0x0C8
+ 38.4 MHz 0 0x180
+ 26 MHz 0 0x104
+ 25 MHz 0 0x0FA
+ 24 MHz 0 0x0F0
+ 20 MHz 0 0x0C8
+ 19.2 MHz 0 0x0C0
+ \</pre\>
+
+ Setting this bus to 0x0 disables masking. The value should be defined when the PHY is in
+ reset. Changing this value during operation might disrupt normal operation of the link. */
+ uint64_t pcs_tx_deemph_3p5db : 6; /**< [ 31: 26](R/W) Fine-tune transmitter driver deemphasis when set to 3.5db.
+ This static value sets the TX driver deemphasis value when
+ USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] is set to
+ 0x1 (according to the PIPE3 specification). The values for transmit deemphasis are derived
+ from the following equation:
+
+ _ TX deemphasis (db) = 20 * log_base_10((128 - 2 * pcs_tx_deemph)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependent. */
+ uint64_t pcs_tx_deemph_6db : 6; /**< [ 25: 20](R/W) Fine-tune transmitter driver deemphasis when set to 6 db.
+ This static value sets the TX driver deemphasis value when
+ USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] is set to
+ 0x0 (according to the PIPE3 specification). This bus is provided for completeness and as a
+ second potential launch amplitude. The values for transmit deemphasis are derived from the
+ following equation:
+
+ _ TX deemphasis (db) = 20 * log_base_10((128 - 2 * pcs_tx_deemph)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependent. */
+ uint64_t pcs_tx_swing_full : 7; /**< [ 19: 13](R/W) Launch amplitude of the transmitter. Sets the launch amplitude of the transmitter. The
+ values for transmit amplitude are derived from the following equation:
+ TX amplitude (V) = vptx * ((pcs_tx_swing_full + 1)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependent. */
+ uint64_t lane0_tx_term_offset : 5; /**< [ 12: 8](R/W) Transmitter termination offset. Reserved, set to 0x0. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t res_tune_ack : 1; /**< [ 5: 5](RO/H) Resistor tune acknowledge. While asserted, indicates a resistor tune is in progress. */
+ uint64_t res_tune_req : 1; /**< [ 4: 4](R/W) Resistor tune request. The rising edge triggers a resistor tune request (if one is not
+ already in progress). When asserted, [RES_TUNE_ACK] is asserted high until calibration of
+ the termination impedance is complete.
+ Tuning disrupts the normal flow of data; therefore, assert [RES_TUNE_REQ] only when the
+ PHY
+ is inactive. The PHY automatically performs a tune when coming out of PRST. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t res_tune_req : 1; /**< [ 4: 4](R/W) Resistor tune request. The rising edge triggers a resistor tune request (if one is not
+ already in progress). When asserted, [RES_TUNE_ACK] is asserted high until calibration of
+ the termination impedance is complete.
+ Tuning disrupts the normal flow of data; therefore, assert [RES_TUNE_REQ] only when the
+ PHY
+ is inactive. The PHY automatically performs a tune when coming out of PRST. */
+ uint64_t res_tune_ack : 1; /**< [ 5: 5](RO/H) Resistor tune acknowledge. While asserted, indicates a resistor tune is in progress. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t lane0_tx_term_offset : 5; /**< [ 12: 8](R/W) Transmitter termination offset. Reserved, set to 0x0. */
+ uint64_t pcs_tx_swing_full : 7; /**< [ 19: 13](R/W) Launch amplitude of the transmitter. Sets the launch amplitude of the transmitter. The
+ values for transmit amplitude are derived from the following equation:
+ TX amplitude (V) = vptx * ((pcs_tx_swing_full + 1)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependent. */
+ uint64_t pcs_tx_deemph_6db : 6; /**< [ 25: 20](R/W) Fine-tune transmitter driver deemphasis when set to 6 db.
+ This static value sets the TX driver deemphasis value when
+ USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] is set to
+ 0x0 (according to the PIPE3 specification). This bus is provided for completeness and as a
+ second potential launch amplitude. The values for transmit deemphasis are derived from the
+ following equation:
+
+ _ TX deemphasis (db) = 20 * log_base_10((128 - 2 * pcs_tx_deemph)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependent. */
+ uint64_t pcs_tx_deemph_3p5db : 6; /**< [ 31: 26](R/W) Fine-tune transmitter driver deemphasis when set to 3.5db.
+ This static value sets the TX driver deemphasis value when
+ USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] is set to
+ 0x1 (according to the PIPE3 specification). The values for transmit deemphasis are derived
+ from the following equation:
+
+ _ TX deemphasis (db) = 20 * log_base_10((128 - 2 * pcs_tx_deemph)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBDRD()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependent. */
+ uint64_t pcs_rx_los_mask_val : 10; /**< [ 41: 32](R/W) Configurable loss-of-signal mask width. Sets the number of reference clock cycles to mask
+ the incoming LFPS in U3 and U2 states. Masks the incoming LFPS for the number of reference
+ clock cycles equal to the value of pcs_rx_los_mask_val\<9:0\>. This control filters out
+ short, non-compliant LFPS glitches sent by a noncompliant host.
+
+ For normal operation, set to a targeted mask interval of 10us (value = 10us / Tref_clk).
+ If the USBDRD()_UCTL_CTL[REF_CLK_DIV2] is used, then
+ (value = 10us / (2 * Tref_clk)). These equations are based on the SuperSpeed reference
+ clock frequency. The value of [PCS_RX_LOS_MASK_VAL] should be as follows:
+
+ \<pre\>
+ Frequency DIV2 LOS_MASK
+ --------- --- --------
+ 200 MHz 1 0x3E8
+ 125 MHz 0 0x4E2
+ 104 MHz 0 0x410
+ 100 MHz 0 0x3E8
+ 96 MHz 0 0x3C0
+ 76.8 MHz 1 0x180
+ 52 MHz 0 0x208
+ 50 MHz 0 0x1F4
+ 48 MHz 0 0x1E0
+ 40 MHz 1 0x0C8
+ 38.4 MHz 0 0x180
+ 26 MHz 0 0x104
+ 25 MHz 0 0x0FA
+ 24 MHz 0 0x0F0
+ 20 MHz 0 0x0C8
+ 19.2 MHz 0 0x0C0
+ \</pre\>
+
+ Setting this bus to 0x0 disables masking. The value should be defined when the PHY is in
+ reset. Changing this value during operation might disrupt normal operation of the link. */
+ uint64_t reserved_42_55 : 14;
+ uint64_t lane0_tx2rx_loopbk : 1; /**< [ 56: 56](R/W) When asserted, data from TX predriver is looped back to RX slicers. LOS is bypassed and
+ based on the tx0_en input so that rx0_los = !tx_data_en. */
+ uint64_t lane0_ext_pclk_req : 1; /**< [ 57: 57](R/W) When asserted, this signal enables the pipe0_pclk output regardless of power state (along
+ with the associated increase in power consumption). You can use this input to enable
+ pipe0_pclk in the P3 state without going through a complete boot sequence. */
+ uint64_t los_bias : 3; /**< [ 60: 58](R/W) Loss-of-signal detector threshold-level control. A positive, binary bit setting change
+ results in a +15 mVp incremental change in the LOS threshold.
+ A negative binary bit setting change results in a -15 mVp incremental change in the LOS
+ threshold. The 0x0 setting is reserved and must not be used. The default 0x5 setting
+ corresponds to approximately 105 mVp.
+ 0x0 = invalid.
+ 0x1 = 45 mV.
+ 0x2 = 60 mV.
+ 0x3 = 75 mV.
+ 0x4 = 90 mV.
+ 0x5 = 105 mV (default).
+ 0x6 = 120 mV.
+ 0x7 = 135 mV. */
+ uint64_t tx_vboost_lvl : 3; /**< [ 63: 61](R/W) TX voltage-boost level. Sets the boosted transmit launch amplitude (mVppd). The default
+ bit setting is intended to set the launch amplitude to approximately 1,008 mVppd. A
+ single, positive binary bit setting change results in a +156 mVppd change in the TX launch
+ amplitude.
+ A single, negative binary bit setting change results in a -156 mVppd change in the TX
+ launch amplitude. All settings more than one binary bit change should not be used.
+ 0x3 = 0.844 V launch amplitude.
+ 0x4 = 1.008 V launch amplitude.
+ 0x5 = 1.156 V launch amplitude.
+ All other values are invalid. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_portx_cfg_ss_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_portx_cfg_ss bdk_usbdrdx_uctl_portx_cfg_ss_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_PORTX_CFG_SS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_PORTX_CFG_SS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x868000100058ll + 0x1000000000ll * ((a) & 0x1) + 0x20ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x868000100058ll + 0x1000000000ll * ((a) & 0x1) + 0x20ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+ return 0x868000100058ll + 0x1000000000ll * ((a) & 0x1) + 0x20ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBDRDX_UCTL_PORTX_CFG_SS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_PORTX_CFG_SS(a,b) bdk_usbdrdx_uctl_portx_cfg_ss_t
+#define bustype_BDK_USBDRDX_UCTL_PORTX_CFG_SS(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_PORTX_CFG_SS(a,b) "USBDRDX_UCTL_PORTX_CFG_SS"
+#define device_bar_BDK_USBDRDX_UCTL_PORTX_CFG_SS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_PORTX_CFG_SS(a,b) (a)
+#define arguments_BDK_USBDRDX_UCTL_PORTX_CFG_SS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_port#_cr_dbg_cfg
+ *
+ * USB UCTL Port Debug Configuration Register
+ * This register allows indirect access to the configuration and test controls for the port 0
+ * PHY.
+ *
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
+ *
+ * Internal:
+ * (In body of HRM)
+ * To access the PHY registers indirectly through the CR interface, the HCLK must be running,
+ * UCTL_RST must be deasserted, and UPHY_RST must be deasserted. Software is responsible for
+ * ensuring that only one indirect access is ongoing at a time.
+ *
+ * To read a PHY register via indirect CR interface:
+ * 1. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<address\>\> of the register,
+ * * [CAP_ADDR], [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 2. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<address\>\> of the register,
+ * * [CAP_ADDR] field 0x1,
+ * * [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 3. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 4. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 5. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ * 6. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [READ] field 0x1,
+ * * [DATA_IN], [CAP_ADDR], [CAP_DATA], and [WRITE] fields 0x0.
+ * 7. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 8. Read UCTL_PORTn_CR_DBG_STATUS[DATA_OUT]. This is the \<\<read data\>\>.
+ * 9. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 10. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ *
+ * To write a PHY register via indirect CR interface:
+ * 1. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<address\>\> of the register,
+ * * [CAP_ADDR], [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 2. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<address\>\> of the register,
+ * * [CAP_ADDR] field 0x1,
+ * * [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 3. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 4. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 5. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ * 6. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<write data\>\>,
+ * * [CAP_ADDR], [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 7. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the write data,
+ * * [CAP_DATA] field 0x1,
+ * * [CAP_ADDR], [READ], and [WRITE] fields 0x0.
+ * 8. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 9. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 10. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ * 11. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [WRITE] field 0x1,
+ * * [DATA_IN], [CAP_ADDR], and [READ] fields 0x0.
+ * 12. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 13. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 14. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ *
+ * For partial writes, a read-modify write is required. Note that the CAP_ADDR steps (1-5)
+ * do not have to be repeated until the address needs to be changed.
+ */
+union bdk_usbdrdx_uctl_portx_cr_dbg_cfg
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_portx_cr_dbg_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t data_in : 16; /**< [ 47: 32](R/W) Address or data to be written to the CR interface. */
+ uint64_t reserved_4_31 : 28;
+ uint64_t cap_addr : 1; /**< [ 3: 3](R/W) Rising edge triggers the [DATA_IN] field to be captured as the address. */
+ uint64_t cap_data : 1; /**< [ 2: 2](R/W) Rising edge triggers the [DATA_IN] field to be captured as the write data. */
+ uint64_t read : 1; /**< [ 1: 1](R/W) Rising edge triggers a register read operation of the captured address. */
+ uint64_t write : 1; /**< [ 0: 0](R/W) Rising edge triggers a register write operation of the captured address with the captured data. */
+#else /* Word 0 - Little Endian */
+ uint64_t write : 1; /**< [ 0: 0](R/W) Rising edge triggers a register write operation of the captured address with the captured data. */
+ uint64_t read : 1; /**< [ 1: 1](R/W) Rising edge triggers a register read operation of the captured address. */
+ uint64_t cap_data : 1; /**< [ 2: 2](R/W) Rising edge triggers the [DATA_IN] field to be captured as the write data. */
+ uint64_t cap_addr : 1; /**< [ 3: 3](R/W) Rising edge triggers the [DATA_IN] field to be captured as the address. */
+ uint64_t reserved_4_31 : 28;
+ uint64_t data_in : 16; /**< [ 47: 32](R/W) Address or data to be written to the CR interface. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_portx_cr_dbg_cfg_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_portx_cr_dbg_cfg bdk_usbdrdx_uctl_portx_cr_dbg_cfg_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x868000100060ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x868000100060ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+ return 0x868000100060ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBDRDX_UCTL_PORTX_CR_DBG_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(a,b) bdk_usbdrdx_uctl_portx_cr_dbg_cfg_t
+#define bustype_BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(a,b) "USBDRDX_UCTL_PORTX_CR_DBG_CFG"
+#define device_bar_BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(a,b) (a)
+#define arguments_BDK_USBDRDX_UCTL_PORTX_CR_DBG_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_port#_cr_dbg_status
+ *
+ * USB UCTL Port Debug Status Register
+ * This register allows indirect access to the configuration and test controls for the port 0
+ * PHY.
+ *
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbdrdx_uctl_portx_cr_dbg_status
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_portx_cr_dbg_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t data_out : 16; /**< [ 47: 32](RO/H) Last data read from the CR interface. */
+ uint64_t reserved_1_31 : 31;
+ uint64_t ack : 1; /**< [ 0: 0](RO/H) Acknowledge that the CAP_ADDR, CAP_DATA, READ, WRITE commands have completed. */
+#else /* Word 0 - Little Endian */
+ uint64_t ack : 1; /**< [ 0: 0](RO/H) Acknowledge that the CAP_ADDR, CAP_DATA, READ, WRITE commands have completed. */
+ uint64_t reserved_1_31 : 31;
+ uint64_t data_out : 16; /**< [ 47: 32](RO/H) Last data read from the CR interface. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_portx_cr_dbg_status_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_portx_cr_dbg_status bdk_usbdrdx_uctl_portx_cr_dbg_status_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && ((a<=1) && (b==0)))
+ return 0x868000100068ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && ((a<=1) && (b==0)))
+ return 0x868000100068ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && ((a<=1) && (b==0)))
+ return 0x868000100068ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBDRDX_UCTL_PORTX_CR_DBG_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(a,b) bdk_usbdrdx_uctl_portx_cr_dbg_status_t
+#define bustype_BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(a,b) "USBDRDX_UCTL_PORTX_CR_DBG_STATUS"
+#define device_bar_BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(a,b) (a)
+#define arguments_BDK_USBDRDX_UCTL_PORTX_CR_DBG_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_ras
+ *
+ * USB UCTL RAS Register
+ * This register is intended for delivery of RAS events to the SCP, so should be
+ * ignored by OS drivers.
+ */
+union bdk_usbdrdx_uctl_ras
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_ras_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1C/H) Received DMA read response with poisoned data from NCBO. Hardware also sets
+ USBDRD()_UCTL_INTSTAT[DMA_PSN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1C/H) Received DMA read response with poisoned data from NCBO. Hardware also sets
+ USBDRD()_UCTL_INTSTAT[DMA_PSN]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_ras_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_ras bdk_usbdrdx_uctl_ras_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_RAS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_RAS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100080ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_RAS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_RAS(a) bdk_usbdrdx_uctl_ras_t
+#define bustype_BDK_USBDRDX_UCTL_RAS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_RAS(a) "USBDRDX_UCTL_RAS"
+#define device_bar_BDK_USBDRDX_UCTL_RAS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_RAS(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_RAS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_ras_w1s
+ *
+ * USB UCTL RAS Register
+ * This register sets interrupt bits.
+ */
+union bdk_usbdrdx_uctl_ras_w1s
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_ras_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_RAS[DMA_PSN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1S/H) Reads or sets USBDRD(0..1)_UCTL_RAS[DMA_PSN]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_ras_w1s_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_ras_w1s bdk_usbdrdx_uctl_ras_w1s_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_RAS_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_RAS_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100088ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_RAS_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_RAS_W1S(a) bdk_usbdrdx_uctl_ras_w1s_t
+#define bustype_BDK_USBDRDX_UCTL_RAS_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_RAS_W1S(a) "USBDRDX_UCTL_RAS_W1S"
+#define device_bar_BDK_USBDRDX_UCTL_RAS_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_RAS_W1S(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_RAS_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_rasena_w1c
+ *
+ * USB UCTL RAS Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_usbdrdx_uctl_rasena_w1c
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_rasena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_RAS[DMA_PSN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1C/H) Reads or clears enable for USBDRD(0..1)_UCTL_RAS[DMA_PSN]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_rasena_w1c_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_rasena_w1c bdk_usbdrdx_uctl_rasena_w1c_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_RASENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_RASENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100090ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_RASENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_RASENA_W1C(a) bdk_usbdrdx_uctl_rasena_w1c_t
+#define bustype_BDK_USBDRDX_UCTL_RASENA_W1C(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_RASENA_W1C(a) "USBDRDX_UCTL_RASENA_W1C"
+#define device_bar_BDK_USBDRDX_UCTL_RASENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_RASENA_W1C(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_RASENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_rasena_w1s
+ *
+ * USB UCTL RAS Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_usbdrdx_uctl_rasena_w1s
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_rasena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_1_63 : 63;
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_RAS[DMA_PSN]. */
+#else /* Word 0 - Little Endian */
+ uint64_t dma_psn : 1; /**< [ 0: 0](R/W1S/H) Reads or sets enable for USBDRD(0..1)_UCTL_RAS[DMA_PSN]. */
+ uint64_t reserved_1_63 : 63;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_rasena_w1s_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_rasena_w1s bdk_usbdrdx_uctl_rasena_w1s_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_RASENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_RASENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100098ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_RASENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_RASENA_W1S(a) bdk_usbdrdx_uctl_rasena_w1s_t
+#define bustype_BDK_USBDRDX_UCTL_RASENA_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_RASENA_W1S(a) "USBDRDX_UCTL_RASENA_W1S"
+#define device_bar_BDK_USBDRDX_UCTL_RASENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_RASENA_W1S(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_RASENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_shim_cfg
+ *
+ * USB UCTL Shim Configuration Register
+ * This register allows configuration of various shim (UCTL) features. The fields XS_NCB_OOB_*
+ * are captured when there are no outstanding OOB errors indicated in INTSTAT and a new OOB error
+ * arrives. The fields XS_BAD_DMA_* are captured when there are no outstanding DMA errors
+ * indicated in INTSTAT and a new DMA error arrives.
+ *
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbdrdx_uctl_shim_cfg
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_shim_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t xs_ncb_oob_wrn : 1; /**< [ 63: 63](RO/H) Read/write error log for out-of-bound UAHC register access.
+ 0 = read, 1 = write. */
+ uint64_t reserved_60_62 : 3;
+ uint64_t xs_ncb_oob_osrc : 12; /**< [ 59: 48](RO/H) SRCID error log for out-of-bound UAHC register access. The NCB outbound SRCID for the OOB
+ error.
+ \<59:58\> = chipID.
+ \<57\> = Request source: 0 = core, 1 = NCB-device.
+ \<56:51\> = Core/NCB-device number. Note that for NCB devices, \<56\> is always 0.
+ \<50:48\> = SubID. */
+ uint64_t xm_bad_dma_wrn : 1; /**< [ 47: 47](RO/H) Read/write error log for bad DMA access from UAHC.
+ 0 = Read error log.
+ 1 = Write error log. */
+ uint64_t reserved_44_46 : 3;
+ uint64_t xm_bad_dma_type : 4; /**< [ 43: 40](RO/H) ErrType error log for bad DMA access from UAHC. Encodes the type of error encountered
+ (error largest encoded value has priority). See UCTL_XM_BAD_DMA_TYPE_E. */
+ uint64_t reserved_14_39 : 26;
+ uint64_t dma_read_cmd : 2; /**< [ 13: 12](R/W) Selects the NCB read command used by DMA accesses. See UCTL_DMA_READ_CMD_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t dma_write_cmd : 1; /**< [ 10: 10](R/W) Selects the NCB write command used by DMA accesses. See UCTL_DMA_WRITE_CMD_E. */
+ uint64_t reserved_0_9 : 10;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_9 : 10;
+ uint64_t dma_write_cmd : 1; /**< [ 10: 10](R/W) Selects the NCB write command used by DMA accesses. See UCTL_DMA_WRITE_CMD_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t dma_read_cmd : 2; /**< [ 13: 12](R/W) Selects the NCB read command used by DMA accesses. See UCTL_DMA_READ_CMD_E. */
+ uint64_t reserved_14_39 : 26;
+ uint64_t xm_bad_dma_type : 4; /**< [ 43: 40](RO/H) ErrType error log for bad DMA access from UAHC. Encodes the type of error encountered
+ (error largest encoded value has priority). See UCTL_XM_BAD_DMA_TYPE_E. */
+ uint64_t reserved_44_46 : 3;
+ uint64_t xm_bad_dma_wrn : 1; /**< [ 47: 47](RO/H) Read/write error log for bad DMA access from UAHC.
+ 0 = Read error log.
+ 1 = Write error log. */
+ uint64_t xs_ncb_oob_osrc : 12; /**< [ 59: 48](RO/H) SRCID error log for out-of-bound UAHC register access. The NCB outbound SRCID for the OOB
+ error.
+ \<59:58\> = chipID.
+ \<57\> = Request source: 0 = core, 1 = NCB-device.
+ \<56:51\> = Core/NCB-device number. Note that for NCB devices, \<56\> is always 0.
+ \<50:48\> = SubID. */
+ uint64_t reserved_60_62 : 3;
+ uint64_t xs_ncb_oob_wrn : 1; /**< [ 63: 63](RO/H) Read/write error log for out-of-bound UAHC register access.
+ 0 = read, 1 = write. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_shim_cfg_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_shim_cfg bdk_usbdrdx_uctl_shim_cfg_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_SHIM_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_SHIM_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8680001000e8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x8680001000e8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x8680001000e8ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_SHIM_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_SHIM_CFG(a) bdk_usbdrdx_uctl_shim_cfg_t
+#define bustype_BDK_USBDRDX_UCTL_SHIM_CFG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_SHIM_CFG(a) "USBDRDX_UCTL_SHIM_CFG"
+#define device_bar_BDK_USBDRDX_UCTL_SHIM_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_SHIM_CFG(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_SHIM_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_spare0
+ *
+ * INTERNAL: USB UCTL Spare Register 0
+ *
+ * This register is a spare register. This register can be reset by NCB reset.
+ */
+union bdk_usbdrdx_uctl_spare0
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_spare0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Internal:
+ Reserved for ECO usage. */
+#else /* Word 0 - Little Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Internal:
+ Reserved for ECO usage. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_spare0_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_spare0 bdk_usbdrdx_uctl_spare0_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_SPARE0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_SPARE0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x868000100010ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x868000100010ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100010ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_SPARE0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_SPARE0(a) bdk_usbdrdx_uctl_spare0_t
+#define bustype_BDK_USBDRDX_UCTL_SPARE0(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_SPARE0(a) "USBDRDX_UCTL_SPARE0"
+#define device_bar_BDK_USBDRDX_UCTL_SPARE0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_SPARE0(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_SPARE0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_spare1
+ *
+ * INTERNAL: USB UCTL Spare Register 1
+ *
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbdrdx_uctl_spare1
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_spare1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Internal:
+ Reserved for ECO usage. */
+#else /* Word 0 - Little Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Internal:
+ Reserved for ECO usage. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_spare1_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_spare1 bdk_usbdrdx_uctl_spare1_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_SPARE1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_SPARE1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN81XX) && (a<=1))
+ return 0x8680001000f8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN83XX) && (a<=1))
+ return 0x8680001000f8ll + 0x1000000000ll * ((a) & 0x1);
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x8680001000f8ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_SPARE1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_SPARE1(a) bdk_usbdrdx_uctl_spare1_t
+#define bustype_BDK_USBDRDX_UCTL_SPARE1(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_SPARE1(a) "USBDRDX_UCTL_SPARE1"
+#define device_bar_BDK_USBDRDX_UCTL_SPARE1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_SPARE1(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_SPARE1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbdrd#_uctl_utmiclk_counter
+ *
+ * USB 2 Clock Counter Register
+ * This register is accessible only when USBDRD()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBDRD()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbdrdx_uctl_utmiclk_counter
+{
+ uint64_t u;
+ struct bdk_usbdrdx_uctl_utmiclk_counter_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t counter : 64; /**< [ 63: 0](R/W) Internal:
+ USB 2.0 free running clock counter. Increments each edge of the USB 2.0 reference clock. */
+#else /* Word 0 - Little Endian */
+ uint64_t counter : 64; /**< [ 63: 0](R/W) Internal:
+ USB 2.0 free running clock counter. Increments each edge of the USB 2.0 reference clock. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbdrdx_uctl_utmiclk_counter_s cn; */
+};
+typedef union bdk_usbdrdx_uctl_utmiclk_counter bdk_usbdrdx_uctl_utmiclk_counter_t;
+
+static inline uint64_t BDK_USBDRDX_UCTL_UTMICLK_COUNTER(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBDRDX_UCTL_UTMICLK_COUNTER(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN9XXX) && (a<=1))
+ return 0x868000100018ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBDRDX_UCTL_UTMICLK_COUNTER", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBDRDX_UCTL_UTMICLK_COUNTER(a) bdk_usbdrdx_uctl_utmiclk_counter_t
+#define bustype_BDK_USBDRDX_UCTL_UTMICLK_COUNTER(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBDRDX_UCTL_UTMICLK_COUNTER(a) "USBDRDX_UCTL_UTMICLK_COUNTER"
+#define device_bar_BDK_USBDRDX_UCTL_UTMICLK_COUNTER(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBDRDX_UCTL_UTMICLK_COUNTER(a) (a)
+#define arguments_BDK_USBDRDX_UCTL_UTMICLK_COUNTER(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_USBDRD_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-usbh.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-usbh.h
new file mode 100644
index 0000000000..6754b437e1
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-csrs-usbh.h
@@ -0,0 +1,7606 @@
+#ifndef __BDK_CSRS_USBH_H__
+#define __BDK_CSRS_USBH_H__
+/* This file is auto-generated. Do not edit */
+
+/***********************license start***************
+ * Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+ * reserved.
+ *
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials provided
+ * with the distribution.
+
+ * * Neither the name of Cavium Inc. nor the names of
+ * its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written
+ * permission.
+
+ * This Software, including technical data, may be subject to U.S. export control
+ * laws, including the U.S. Export Administration Act and its associated
+ * regulations, and may be subject to export or import regulations in other
+ * countries.
+
+ * TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+ * AND WITH ALL FAULTS AND CAVIUM NETWORKS MAKES NO PROMISES, REPRESENTATIONS OR
+ * WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT TO
+ * THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY REPRESENTATION OR
+ * DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT DEFECTS, AND CAVIUM
+ * SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES OF TITLE,
+ * MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR PURPOSE, LACK OF
+ * VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT, QUIET POSSESSION OR
+ * CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK ARISING OUT OF USE OR
+ * PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+ ***********************license end**************************************/
+
+
+/**
+ * @file
+ *
+ * Configuration and status register (CSR) address and type definitions for
+ * Cavium USBH.
+ *
+ * This file is auto generated. Do not edit.
+ *
+ */
+
+/**
+ * Enumeration uctl_ecc_err_source_e
+ *
+ * UCTL ECC Error Source Enumeration
+ * Enumerate sources of ECC error log information.
+ */
+#define BDK_UCTL_ECC_ERR_SOURCE_E_NONE (0)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_RAM0_DBE (0xf)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_RAM0_SBE (7)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_RAM1_DBE (0xe)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_RAM1_SBE (6)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_RAM2_DBE (0xd)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_RAM2_SBE (5)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_XM_R_DBE (0xa)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_XM_R_SBE (2)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_XM_W_DBE (9)
+#define BDK_UCTL_ECC_ERR_SOURCE_E_XM_W_SBE (1)
+
+/**
+ * Enumeration usbh_bar_e
+ *
+ * USBH Base Address Register Enumeration
+ * Enumerates the base address registers.
+ */
+#define BDK_USBH_BAR_E_USBHX_PF_BAR0(a) (0x868000000000ll + 0x1000000000ll * (a))
+#define BDK_USBH_BAR_E_USBHX_PF_BAR0_SIZE 0x200000ull
+#define BDK_USBH_BAR_E_USBHX_PF_BAR4(a) (0x868000200000ll + 0x1000000000ll * (a))
+#define BDK_USBH_BAR_E_USBHX_PF_BAR4_SIZE 0x100000ull
+
+/**
+ * Enumeration usbh_int_vec_e
+ *
+ * USBH MSI-X Vector Enumeration
+ * Enumerates the MSI-X interrupt vectors.
+ */
+#define BDK_USBH_INT_VEC_E_UAHC_IMAN_IP_CN88XXP1_1 (0)
+#define BDK_USBH_INT_VEC_E_UAHC_IMAN_IP_CN88XXP1_0 (2)
+#define BDK_USBH_INT_VEC_E_UAHC_IMAN_IP_CN88XXP2 (0)
+#define BDK_USBH_INT_VEC_E_UAHC_USBSTS_HSE_CN88XXP1_1 (2)
+#define BDK_USBH_INT_VEC_E_UAHC_USBSTS_HSE_CN88XXP1_0 (0)
+#define BDK_USBH_INT_VEC_E_UAHC_USBSTS_HSE_CN88XXP2 (2)
+#define BDK_USBH_INT_VEC_E_UAHC_USBSTS_HSE_CLEAR_CN88XXP1_1 (3)
+#define BDK_USBH_INT_VEC_E_UAHC_USBSTS_HSE_CLEAR_CN88XXP1_0 (1)
+#define BDK_USBH_INT_VEC_E_UAHC_USBSTS_HSE_CLEAR_CN88XXP2 (3)
+#define BDK_USBH_INT_VEC_E_UCTL_INTSTAT_CN88XXP1_1 (1)
+#define BDK_USBH_INT_VEC_E_UCTL_INTSTAT_CN88XXP1_0 (3)
+#define BDK_USBH_INT_VEC_E_UCTL_INTSTAT_CN88XXP2 (1)
+
+/**
+ * Register (NCB) usbh#_msix_pba#
+ *
+ * USBH MSI-X Pending Bit Array Registers
+ * This register is the MSI-X PBA table, the bit number is indexed by the USBH_INT_VEC_E enumeration.
+ */
+union bdk_usbhx_msix_pbax
+{
+ uint64_t u;
+ struct bdk_usbhx_msix_pbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated USBH_MSIX_VEC()_CTL, enumerated by USBH_INT_VEC_E.
+ Bits that have no associated USBH_INT_VEC_E are zero. */
+#else /* Word 0 - Little Endian */
+ uint64_t pend : 64; /**< [ 63: 0](RO/H) Pending message for the associated USBH_MSIX_VEC()_CTL, enumerated by USBH_INT_VEC_E.
+ Bits that have no associated USBH_INT_VEC_E are zero. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_msix_pbax_s cn; */
+};
+typedef union bdk_usbhx_msix_pbax bdk_usbhx_msix_pbax_t;
+
+static inline uint64_t BDK_USBHX_MSIX_PBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_MSIX_PBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x8680002f0000ll + 0x1000000000ll * ((a) & 0x1) + 8ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_MSIX_PBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_MSIX_PBAX(a,b) bdk_usbhx_msix_pbax_t
+#define bustype_BDK_USBHX_MSIX_PBAX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_MSIX_PBAX(a,b) "USBHX_MSIX_PBAX"
+#define device_bar_BDK_USBHX_MSIX_PBAX(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_USBHX_MSIX_PBAX(a,b) (a)
+#define arguments_BDK_USBHX_MSIX_PBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbh#_msix_vec#_addr
+ *
+ * USBH MSI-X Vector Table Address Registers
+ * This register is the MSI-X vector table, indexed by the USBH_INT_VEC_E enumeration.
+ */
+union bdk_usbhx_msix_vecx_addr
+{
+ uint64_t u;
+ struct bdk_usbhx_msix_vecx_addr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_49_63 : 15;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_1 : 1;
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's USBH()_MSIX_VEC()_ADDR, USBH()_MSIX_VEC()_CTL, and
+ corresponding
+ bit of USBH()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_USBH()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+#else /* Word 0 - Little Endian */
+ uint64_t secvec : 1; /**< [ 0: 0](SR/W) Secure vector.
+ 0 = This vector may be read or written by either secure or nonsecure states.
+ 1 = This vector's USBH()_MSIX_VEC()_ADDR, USBH()_MSIX_VEC()_CTL, and
+ corresponding
+ bit of USBH()_MSIX_PBA() are RAZ/WI and does not cause a fault when accessed
+ by the nonsecure world.
+
+ If PCCPF_USBH()_VSEC_SCTL[MSIX_SEC] (for documentation, see
+ PCCPF_XXX_VSEC_SCTL[MSIX_SEC]) is
+ set, all vectors are secure and function as if [SECVEC] was set. */
+ uint64_t reserved_1 : 1;
+ uint64_t addr : 47; /**< [ 48: 2](R/W) IOVA to use for MSI-X delivery of this vector. */
+ uint64_t reserved_49_63 : 15;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_msix_vecx_addr_s cn; */
+};
+typedef union bdk_usbhx_msix_vecx_addr bdk_usbhx_msix_vecx_addr_t;
+
+static inline uint64_t BDK_USBHX_MSIX_VECX_ADDR(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_MSIX_VECX_ADDR(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x868000200000ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("USBHX_MSIX_VECX_ADDR", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_MSIX_VECX_ADDR(a,b) bdk_usbhx_msix_vecx_addr_t
+#define bustype_BDK_USBHX_MSIX_VECX_ADDR(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_MSIX_VECX_ADDR(a,b) "USBHX_MSIX_VECX_ADDR"
+#define device_bar_BDK_USBHX_MSIX_VECX_ADDR(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_USBHX_MSIX_VECX_ADDR(a,b) (a)
+#define arguments_BDK_USBHX_MSIX_VECX_ADDR(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbh#_msix_vec#_ctl
+ *
+ * USBH MSI-X Vector Table Control and Data Registers
+ * This register is the MSI-X vector table, indexed by the USBH_INT_VEC_E enumeration.
+ */
+union bdk_usbhx_msix_vecx_ctl
+{
+ uint64_t u;
+ struct bdk_usbhx_msix_vecx_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_33_63 : 31;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+#else /* Word 0 - Little Endian */
+ uint64_t data : 20; /**< [ 19: 0](R/W) Data to use for MSI-X delivery of this vector. */
+ uint64_t reserved_20_31 : 12;
+ uint64_t mask : 1; /**< [ 32: 32](R/W) When set, no MSI-X interrupts will be sent to this vector. */
+ uint64_t reserved_33_63 : 31;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_msix_vecx_ctl_s cn; */
+};
+typedef union bdk_usbhx_msix_vecx_ctl bdk_usbhx_msix_vecx_ctl_t;
+
+static inline uint64_t BDK_USBHX_MSIX_VECX_CTL(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_MSIX_VECX_CTL(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=3)))
+ return 0x868000200008ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x3);
+ __bdk_csr_fatal("USBHX_MSIX_VECX_CTL", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_MSIX_VECX_CTL(a,b) bdk_usbhx_msix_vecx_ctl_t
+#define bustype_BDK_USBHX_MSIX_VECX_CTL(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_MSIX_VECX_CTL(a,b) "USBHX_MSIX_VECX_CTL"
+#define device_bar_BDK_USBHX_MSIX_VECX_CTL(a,b) 0x4 /* PF_BAR4 */
+#define busnum_BDK_USBHX_MSIX_VECX_CTL(a,b) (a)
+#define arguments_BDK_USBHX_MSIX_VECX_CTL(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_caplength
+ *
+ * XHCI Capability Length Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.3.1.
+ */
+union bdk_usbhx_uahc_caplength
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_caplength_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t hciversion : 16; /**< [ 31: 16](RO) Host controller interface version number. */
+ uint32_t reserved_8_15 : 8;
+ uint32_t caplength : 8; /**< [ 7: 0](RO) Capability registers length. */
+#else /* Word 0 - Little Endian */
+ uint32_t caplength : 8; /**< [ 7: 0](RO) Capability registers length. */
+ uint32_t reserved_8_15 : 8;
+ uint32_t hciversion : 16; /**< [ 31: 16](RO) Host controller interface version number. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_caplength_s cn; */
+};
+typedef union bdk_usbhx_uahc_caplength bdk_usbhx_uahc_caplength_t;
+
+static inline uint64_t BDK_USBHX_UAHC_CAPLENGTH(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_CAPLENGTH(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000000ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_CAPLENGTH", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_CAPLENGTH(a) bdk_usbhx_uahc_caplength_t
+#define bustype_BDK_USBHX_UAHC_CAPLENGTH(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_CAPLENGTH(a) "USBHX_UAHC_CAPLENGTH"
+#define device_bar_BDK_USBHX_UAHC_CAPLENGTH(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_CAPLENGTH(a) (a)
+#define arguments_BDK_USBHX_UAHC_CAPLENGTH(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_config
+ *
+ * XHCI Configuration Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.7.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_config
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_config_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_8_31 : 24;
+ uint32_t maxslotsen : 8; /**< [ 7: 0](R/W) Maximum device slots enabled. */
+#else /* Word 0 - Little Endian */
+ uint32_t maxslotsen : 8; /**< [ 7: 0](R/W) Maximum device slots enabled. */
+ uint32_t reserved_8_31 : 24;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_config_s cn; */
+};
+typedef union bdk_usbhx_uahc_config bdk_usbhx_uahc_config_t;
+
+static inline uint64_t BDK_USBHX_UAHC_CONFIG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_CONFIG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000058ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_CONFIG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_CONFIG(a) bdk_usbhx_uahc_config_t
+#define bustype_BDK_USBHX_UAHC_CONFIG(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_CONFIG(a) "USBHX_UAHC_CONFIG"
+#define device_bar_BDK_USBHX_UAHC_CONFIG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_CONFIG(a) (a)
+#define arguments_BDK_USBHX_UAHC_CONFIG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_crcr
+ *
+ * XHCI Command Ring Control Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.5.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_crcr
+{
+ uint64_t u;
+ struct bdk_usbhx_uahc_crcr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t cmd_ring_ptr : 58; /**< [ 63: 6](WO) Command ring pointer. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t crr : 1; /**< [ 3: 3](RO/H) Command ring running. */
+ uint64_t ca : 1; /**< [ 2: 2](WO) Command abort. */
+ uint64_t cs : 1; /**< [ 1: 1](WO) Command stop. */
+ uint64_t rcs : 1; /**< [ 0: 0](WO) Ring cycle state. */
+#else /* Word 0 - Little Endian */
+ uint64_t rcs : 1; /**< [ 0: 0](WO) Ring cycle state. */
+ uint64_t cs : 1; /**< [ 1: 1](WO) Command stop. */
+ uint64_t ca : 1; /**< [ 2: 2](WO) Command abort. */
+ uint64_t crr : 1; /**< [ 3: 3](RO/H) Command ring running. */
+ uint64_t reserved_4_5 : 2;
+ uint64_t cmd_ring_ptr : 58; /**< [ 63: 6](WO) Command ring pointer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_crcr_s cn; */
+};
+typedef union bdk_usbhx_uahc_crcr bdk_usbhx_uahc_crcr_t;
+
+static inline uint64_t BDK_USBHX_UAHC_CRCR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_CRCR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000038ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_CRCR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_CRCR(a) bdk_usbhx_uahc_crcr_t
+#define bustype_BDK_USBHX_UAHC_CRCR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_CRCR(a) "USBHX_UAHC_CRCR"
+#define device_bar_BDK_USBHX_UAHC_CRCR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_CRCR(a) (a)
+#define arguments_BDK_USBHX_UAHC_CRCR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_db#
+ *
+ * XHCI Doorbell Registers
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.6.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ *
+ * Internal:
+ * xHCI spec, page 32: there are USBH()_UAHC_HCSPARAMS1[MAXSLOTS]+1 doorbell registers.
+ */
+union bdk_usbhx_uahc_dbx
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_dbx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dbstreamid : 16; /**< [ 31: 16](WO) Doorbell stream ID. */
+ uint32_t reserved_8_15 : 8;
+ uint32_t dbtarget : 8; /**< [ 7: 0](WO) Doorbell target. */
+#else /* Word 0 - Little Endian */
+ uint32_t dbtarget : 8; /**< [ 7: 0](WO) Doorbell target. */
+ uint32_t reserved_8_15 : 8;
+ uint32_t dbstreamid : 16; /**< [ 31: 16](WO) Doorbell stream ID. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_dbx_s cn; */
+};
+typedef union bdk_usbhx_uahc_dbx bdk_usbhx_uahc_dbx_t;
+
+static inline uint64_t BDK_USBHX_UAHC_DBX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_DBX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=64)))
+ return 0x868000000480ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x7f);
+ __bdk_csr_fatal("USBHX_UAHC_DBX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_DBX(a,b) bdk_usbhx_uahc_dbx_t
+#define bustype_BDK_USBHX_UAHC_DBX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_DBX(a,b) "USBHX_UAHC_DBX"
+#define device_bar_BDK_USBHX_UAHC_DBX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_DBX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_DBX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_dboff
+ *
+ * XHCI Doorbell Array Offset Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.3.7.
+ */
+union bdk_usbhx_uahc_dboff
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_dboff_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t dboff : 30; /**< [ 31: 2](RO) Doorbell array offset. */
+ uint32_t reserved_0_1 : 2;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_1 : 2;
+ uint32_t dboff : 30; /**< [ 31: 2](RO) Doorbell array offset. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_dboff_s cn; */
+};
+typedef union bdk_usbhx_uahc_dboff bdk_usbhx_uahc_dboff_t;
+
+static inline uint64_t BDK_USBHX_UAHC_DBOFF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_DBOFF(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000014ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_DBOFF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_DBOFF(a) bdk_usbhx_uahc_dboff_t
+#define bustype_BDK_USBHX_UAHC_DBOFF(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_DBOFF(a) "USBHX_UAHC_DBOFF"
+#define device_bar_BDK_USBHX_UAHC_DBOFF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_DBOFF(a) (a)
+#define arguments_BDK_USBHX_UAHC_DBOFF(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_dcbaap
+ *
+ * XHCI Device Context Base-Address-Array Pointer Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.6.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_dcbaap
+{
+ uint64_t u;
+ struct bdk_usbhx_uahc_dcbaap_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t dcbaap : 58; /**< [ 63: 6](R/W) Device context base address array pointer. */
+ uint64_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_5 : 6;
+ uint64_t dcbaap : 58; /**< [ 63: 6](R/W) Device context base address array pointer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_dcbaap_s cn; */
+};
+typedef union bdk_usbhx_uahc_dcbaap bdk_usbhx_uahc_dcbaap_t;
+
+static inline uint64_t BDK_USBHX_UAHC_DCBAAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_DCBAAP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000050ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_DCBAAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_DCBAAP(a) bdk_usbhx_uahc_dcbaap_t
+#define bustype_BDK_USBHX_UAHC_DCBAAP(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_DCBAAP(a) "USBHX_UAHC_DCBAAP"
+#define device_bar_BDK_USBHX_UAHC_DCBAAP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_DCBAAP(a) (a)
+#define arguments_BDK_USBHX_UAHC_DCBAAP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_dnctrl
+ *
+ * XHCI Device Notification Control Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.4.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_dnctrl
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_dnctrl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t n : 16; /**< [ 15: 0](R/W) Notification enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t n : 16; /**< [ 15: 0](R/W) Notification enable. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_dnctrl_s cn; */
+};
+typedef union bdk_usbhx_uahc_dnctrl bdk_usbhx_uahc_dnctrl_t;
+
+static inline uint64_t BDK_USBHX_UAHC_DNCTRL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_DNCTRL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000034ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_DNCTRL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_DNCTRL(a) bdk_usbhx_uahc_dnctrl_t
+#define bustype_BDK_USBHX_UAHC_DNCTRL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_DNCTRL(a) "USBHX_UAHC_DNCTRL"
+#define device_bar_BDK_USBHX_UAHC_DNCTRL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_DNCTRL(a) (a)
+#define arguments_BDK_USBHX_UAHC_DNCTRL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_erdp#
+ *
+ * XHCI Event Ring Dequeue Pointer Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.5.2.3.3.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_erdpx
+{
+ uint64_t u;
+ struct bdk_usbhx_uahc_erdpx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t erdp : 60; /**< [ 63: 4](R/W) Event ring dequeue pointer bits \<63:4\>. */
+ uint64_t ehb : 1; /**< [ 3: 3](R/W1C/H) Event handler busy */
+ uint64_t desi : 3; /**< [ 2: 0](R/W) Dequeue ERST segment index. */
+#else /* Word 0 - Little Endian */
+ uint64_t desi : 3; /**< [ 2: 0](R/W) Dequeue ERST segment index. */
+ uint64_t ehb : 1; /**< [ 3: 3](R/W1C/H) Event handler busy */
+ uint64_t erdp : 60; /**< [ 63: 4](R/W) Event ring dequeue pointer bits \<63:4\>. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_erdpx_s cn; */
+};
+typedef union bdk_usbhx_uahc_erdpx bdk_usbhx_uahc_erdpx_t;
+
+static inline uint64_t BDK_USBHX_UAHC_ERDPX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_ERDPX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x868000000478ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_UAHC_ERDPX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_ERDPX(a,b) bdk_usbhx_uahc_erdpx_t
+#define bustype_BDK_USBHX_UAHC_ERDPX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_ERDPX(a,b) "USBHX_UAHC_ERDPX"
+#define device_bar_BDK_USBHX_UAHC_ERDPX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_ERDPX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_ERDPX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_erstba#
+ *
+ * XHCI Event-Ring Segment-Table Base-Address Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.5.2.3.2.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_erstbax
+{
+ uint64_t u;
+ struct bdk_usbhx_uahc_erstbax_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t erstba : 58; /**< [ 63: 6](R/W) Event-ring segment-table base-address bits\<63:6\>. */
+ uint64_t reserved_0_5 : 6;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_5 : 6;
+ uint64_t erstba : 58; /**< [ 63: 6](R/W) Event-ring segment-table base-address bits\<63:6\>. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_erstbax_s cn; */
+};
+typedef union bdk_usbhx_uahc_erstbax bdk_usbhx_uahc_erstbax_t;
+
+static inline uint64_t BDK_USBHX_UAHC_ERSTBAX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_ERSTBAX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x868000000470ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_UAHC_ERSTBAX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_ERSTBAX(a,b) bdk_usbhx_uahc_erstbax_t
+#define bustype_BDK_USBHX_UAHC_ERSTBAX(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_ERSTBAX(a,b) "USBHX_UAHC_ERSTBAX"
+#define device_bar_BDK_USBHX_UAHC_ERSTBAX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_ERSTBAX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_ERSTBAX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_erstsz#
+ *
+ * XHCI Event-Ring Segment-Table Size Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.5.2.3.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_erstszx
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_erstszx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_16_31 : 16;
+ uint32_t erstsz : 16; /**< [ 15: 0](R/W) Event-ring segment-table size. */
+#else /* Word 0 - Little Endian */
+ uint32_t erstsz : 16; /**< [ 15: 0](R/W) Event-ring segment-table size. */
+ uint32_t reserved_16_31 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_erstszx_s cn; */
+};
+typedef union bdk_usbhx_uahc_erstszx bdk_usbhx_uahc_erstszx_t;
+
+static inline uint64_t BDK_USBHX_UAHC_ERSTSZX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_ERSTSZX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x868000000468ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_UAHC_ERSTSZX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_ERSTSZX(a,b) bdk_usbhx_uahc_erstszx_t
+#define bustype_BDK_USBHX_UAHC_ERSTSZX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_ERSTSZX(a,b) "USBHX_UAHC_ERSTSZX"
+#define device_bar_BDK_USBHX_UAHC_ERSTSZX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_ERSTSZX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_ERSTSZX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_gbuserraddr
+ *
+ * UAHC Bus-Error-Address Register
+ * When the AXI master bus returns error response, the SoC bus error is generated. In the host
+ * mode, the host_system_err port indicates this condition. In addition, it is also indicated in
+ * USBH()_UAHC_USBSTS[HSE]. Due to the nature of AXI, it is possible that multiple AXI
+ * transactions
+ * are active at a time. The host controller does not keep track of the start address of all
+ * outstanding transactions. Instead, it keeps track of the start address of the DMA transfer
+ * associated with all active transactions. It is this address that is reported in
+ * USBH()_UAHC_GBUSERRADDR when a bus error occurs. For example, if the host controller initiates
+ * a DMA
+ * transfer to write 1 k of packet data starting at buffer address 0xABCD0000, and this DMA is
+ * broken up into multiple 256 B bursts on the AXI, then if a bus error occurs on any of these
+ * associated AXI transfers, USBH()_UAHC_GBUSERRADDR reflects the DMA start address of 0xABCD0000
+ * regardless of which AXI transaction received the error.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.12.
+ */
+union bdk_usbhx_uahc_gbuserraddr
+{
+ uint64_t u;
+ struct bdk_usbhx_uahc_gbuserraddr_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t busaddr : 64; /**< [ 63: 0](RO/H) Bus address. Contains the first bus address that encountered an SoC bus error. It is valid
+ when the USBH()_UAHC_GSTS[BUSERRADDRVLD] = 1. It can only be cleared by resetting the
+ core. */
+#else /* Word 0 - Little Endian */
+ uint64_t busaddr : 64; /**< [ 63: 0](RO/H) Bus address. Contains the first bus address that encountered an SoC bus error. It is valid
+ when the USBH()_UAHC_GSTS[BUSERRADDRVLD] = 1. It can only be cleared by resetting the
+ core. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_gbuserraddr_s cn; */
+};
+typedef union bdk_usbhx_uahc_gbuserraddr bdk_usbhx_uahc_gbuserraddr_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GBUSERRADDR(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GBUSERRADDR(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c130ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GBUSERRADDR", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GBUSERRADDR(a) bdk_usbhx_uahc_gbuserraddr_t
+#define bustype_BDK_USBHX_UAHC_GBUSERRADDR(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_GBUSERRADDR(a) "USBHX_UAHC_GBUSERRADDR"
+#define device_bar_BDK_USBHX_UAHC_GBUSERRADDR(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GBUSERRADDR(a) (a)
+#define arguments_BDK_USBHX_UAHC_GBUSERRADDR(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gctl
+ *
+ * UAHC Control Register
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.5.
+ */
+/* NOTE(review): machine-generated CSR description. The big-endian and
+ * little-endian bitfield lists below are exact mirrors of each other and
+ * must stay in sync -- do not edit by hand. */
+union bdk_usbhx_uahc_gctl
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_gctl_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t pwrdnscale : 13; /**< [ 31: 19](R/W) Power down scale. The USB3 suspend-clock input replaces pipe3_rx_pclk as a clock source to
+                                                                 a small part of the USB3 core that operates when the SuperSpeed PHY is in its lowest power
+                                                                 (P3) state, and therefore does not provide a clock. This field specifies how many suspend-
+                                                                 clock periods fit into a 16 kHz clock period. When performing the division, round up the
+                                                                 remainder.
+
+                                                                 For example, when using an 32-bit PHY and 25-MHz suspend clock, PWRDNSCALE = 25000 kHz/16
+                                                                 kHz = 1563 (rounded up).
+
+                                                                 The minimum suspend-clock frequency is 32 KHz, and maximum suspend-clock frequency is 125
+                                                                 MHz.
+
+                                                                 The LTSSM uses suspend clock for 12-ms and 100-ms timers during suspend mode. According to
+                                                                 the USB 3.0 specification, the accuracy on these timers is 0% to +50%. 12 ms + 0~+50%
+                                                                 accuracy = 18 ms (Range is 12 ms - 18 ms)
+                                                                 100 ms + 0~+50% accuracy = 150 ms (Range is 100 ms - 150 ms).
+
+                                                                 The suspend clock accuracy requirement is:
+                                                                 _ (12,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 12,000 and
+                                                                 18,000
+                                                                 _ (100,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 100,000 and
+                                                                 150,000
+
+                                                                 For example, if your suspend_clk frequency varies from 7.5 MHz to 10.5 MHz, then the value
+                                                                 needs to programmed is: power down scale = 10500/16 = 657 (rounded up; and fastest
+                                                                 frequency used). */
+        uint32_t masterfiltbypass : 1; /**< [ 18: 18](R/W) Master filter bypass. Not relevant for Cavium's configuration. */
+        uint32_t reserved_16_17 : 2;
+        uint32_t frmscldwn : 2; /**< [ 15: 14](R/W) Frame scale down. Scales down device view of a SOF/USOF/ITP duration.
+                                                                 For SuperSpeed/high-speed mode:
+                                                                 0x0 = Interval is 125 us.
+                                                                 0x1 = Interval is 62.5 us.
+                                                                 0x2 = Interval is 31.25 us.
+                                                                 0x3 = Interval is 15.625 us.
+
+                                                                 For full speed mode, the scale down value is multiplied by 8. */
+        uint32_t prtcapdir : 2; /**< [ 13: 12](R/W) Port capability direction. Always keep set to 0x1. */
+        uint32_t coresoftreset : 1; /**< [ 11: 11](R/W) Core soft reset: 1 = soft reset to core, 0 = no soft reset.
+                                                                 Clears the interrupts and all the USBH()_UAHC_* CSRs except the
+                                                                 following registers: USBH()_UAHC_GCTL, USBH()_UAHC_GUCTL, USBH()_UAHC_GSTS,
+                                                                 USBH()_UAHC_GRLSID, USBH()_UAHC_GGPIO, USBH()_UAHC_GUID, USBH()_UAHC_GUSB2PHYCFG(),
+                                                                 USBH()_UAHC_GUSB3PIPECTL().
+
+                                                                 When you reset PHYs (using USBH()_UAHC_GUSB2PHYCFG() or USBH()_UAHC_GUSB3PIPECTL()), you
+                                                                 must keep the
+                                                                 core in reset state until PHY clocks are stable. This controls the bus, RAM, and MAC
+                                                                 domain resets.
+
+                                                                 Internal:
+                                                                 Refer to Reset Generation on Synopsys Databook page 250.
+                                                                 Under soft reset, accesses to USBH()_UAHC_* CSRs other than USBH()_UAHC_GCTL may fail
+                                                                 (timeout).
+                                                                 This bit is for debug purposes only. Use USBH()_UAHC_USBCMD[HCRST] for soft reset. */
+        uint32_t sofitpsync : 1; /**< [ 10: 10](R/W) Synchronize ITP to reference clock. In host mode, if this bit is set to:
+                                                                 0 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever
+                                                                 there is a SuperSpeed port that is not in Rx.Detect, SS.Disable, and U3 state.
+                                                                 1 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever the
+                                                                 other non-SuperSpeed ports are not in suspended state.
+
+                                                                 This feature is useful because it saves power by suspending UTMI/ULPI when SuperSpeed only
+                                                                 is active and it helps resolve when the PHY does not transmit a host resume unless it is
+                                                                 placed in suspend state.
+                                                                 USBH()_UAHC_GUSB2PHYCFG()[SUSPHY] eventually decides to put the UTMI/ULPI PHY in to
+                                                                 suspend
+                                                                 state. In addition, when this bit is set to 1, the core generates ITP off of the REF_CLK-
+                                                                 based counter. Otherwise, ITP and SOF are generated off of UTMI/ULPI_CLK[0] based counter.
+
+                                                                 To program the reference clock period inside the core, refer to
+                                                                 USBH()_UAHC_GUCTL[REFCLKPER].
+
+                                                                 If you do not plan to ever use this feature or the
+                                                                 USBH()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL]
+                                                                 feature, the minimum frequency for the ref_clk can be as low as 32 KHz. You can connect
+                                                                 the
+                                                                 SUSPEND_CLK (as low as 32 KHz) to REF_CLK.
+
+                                                                 If you plan to enable hardware-based LPM (PORTPMSC[HLE] = 1), this feature cannot be used.
+                                                                 Turn off this feature by setting this bit to zero and use the
+                                                                 USBH()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL] feature.
+
+                                                                 If you set this bit to 1, the USBH()_UAHC_GUSB2PHYCFG() [U2_FREECLK_EXISTS] bit
+                                                                 must be set to 0. */
+        uint32_t u1u2timerscale : 1; /**< [  9:  9](R/W) Disable U1/U2 timer scaledown. If set to 1, along with SCALEDOWN = 0x1, disables the scale
+                                                                 down of U1/U2 inactive timer values.
+                                                                 This is for simulation mode only. */
+        uint32_t debugattach : 1; /**< [  8:  8](R/W) Debug attach. When this bit is set:
+                                                                 * SuperSpeed link proceeds directly to the polling-link state (USBH()_UAHC_DCTL[RS] = 1)
+                                                                 without checking remote termination.
+                                                                 * Link LFPS polling timeout is infinite.
+                                                                 * Polling timeout during TS1 is infinite (in case link is waiting for TXEQ to finish). */
+        uint32_t ramclksel : 2; /**< [  7:  6](R/W) RAM clock select. Always keep set to 0x0. */
+        uint32_t scaledown : 2; /**< [  5:  4](R/W) Scale-down mode. When scale-down mode is enabled for simulation, the core uses scaled-down
+                                                                 timing values, resulting in faster simulations. When scale-down mode is disabled, actual
+                                                                 timing values are used. This is required for hardware operation.
+
+                                                                 High-speed/full-speed/low-speed modes:
+                                                                 0x0 = Disables all scale-downs. Actual timing values are used.
+                                                                 0x1 = Enables scale-down of all timing values. These include:
+                                                                 * Speed enumeration.
+                                                                 * HNP/SRP.
+                                                                 * Suspend and resume.
+
+                                                                 0x2 = N/A.
+                                                                 0x3 = Enables bits \<0\> and \<1\> scale-down timing values.
+
+                                                                 SuperSpeed mode:
+                                                                 0x0 = Disables all scale-downs. Actual timing values are used.
+                                                                 0x1 = Enables scaled down SuperSpeed timing and repeat values including:
+                                                                 * Number of TxEq training sequences reduce to eight.
+                                                                 * LFPS polling burst time reduce to 100 ns.
+                                                                 * LFPS warm reset receive reduce to 30 us.
+
+                                                                 Internal:
+                                                                 Refer to the rtl_vip_scaledown_mapping.xls file under \<workspace\>/sim/SoC_sim
+                                                                 directory for the complete list.
+                                                                 0x2 = No TxEq training sequences are sent. Overrides bit\<4\>.
+                                                                 0x3 = Enables bits\<0\> and \<1\> scale-down timing values. */
+        uint32_t disscramble : 1; /**< [  3:  3](R/W) Disable scrambling. Transmit request to link partner on next transition to recovery or polling. */
+        uint32_t u2exit_lfps : 1; /**< [  2:  2](R/W) LFPS U2 exit.
+                                                                 0 = The link treats 248 ns LFPS as a valid U2 exit.
+                                                                 1 = The link waits for 8 us of LFPS before it detects a valid U2 exit.
+
+                                                                 This bit is added to improve interoperability with a third party host controller. This
+                                                                 host controller in U2 state while performing receiver detection generates an LFPS glitch
+                                                                 of about 4s duration. This causes the device to exit from U2 state because the LFPS filter
+                                                                 value is 248 ns. With the new functionality enabled, the device can stay in U2 while
+                                                                 ignoring this glitch from the host controller. */
+        uint32_t reserved_1 : 1;
+        uint32_t dsblclkgtng : 1; /**< [  0:  0](R/W) Disable clock gating. When set to 1 and the core is in low power mode, internal clock
+                                                                 gating is disabled, which means the clocks are always running. This bit can be set to 1
+                                                                 after power-up reset. */
+#else /* Word 0 - Little Endian */
+        uint32_t dsblclkgtng : 1; /**< [  0:  0](R/W) Disable clock gating. When set to 1 and the core is in low power mode, internal clock
+                                                                 gating is disabled, which means the clocks are always running. This bit can be set to 1
+                                                                 after power-up reset. */
+        uint32_t reserved_1 : 1;
+        uint32_t u2exit_lfps : 1; /**< [  2:  2](R/W) LFPS U2 exit.
+                                                                 0 = The link treats 248 ns LFPS as a valid U2 exit.
+                                                                 1 = The link waits for 8 us of LFPS before it detects a valid U2 exit.
+
+                                                                 This bit is added to improve interoperability with a third party host controller. This
+                                                                 host controller in U2 state while performing receiver detection generates an LFPS glitch
+                                                                 of about 4s duration. This causes the device to exit from U2 state because the LFPS filter
+                                                                 value is 248 ns. With the new functionality enabled, the device can stay in U2 while
+                                                                 ignoring this glitch from the host controller. */
+        uint32_t disscramble : 1; /**< [  3:  3](R/W) Disable scrambling. Transmit request to link partner on next transition to recovery or polling. */
+        uint32_t scaledown : 2; /**< [  5:  4](R/W) Scale-down mode. When scale-down mode is enabled for simulation, the core uses scaled-down
+                                                                 timing values, resulting in faster simulations. When scale-down mode is disabled, actual
+                                                                 timing values are used. This is required for hardware operation.
+
+                                                                 High-speed/full-speed/low-speed modes:
+                                                                 0x0 = Disables all scale-downs. Actual timing values are used.
+                                                                 0x1 = Enables scale-down of all timing values. These include:
+                                                                 * Speed enumeration.
+                                                                 * HNP/SRP.
+                                                                 * Suspend and resume.
+
+                                                                 0x2 = N/A.
+                                                                 0x3 = Enables bits \<0\> and \<1\> scale-down timing values.
+
+                                                                 SuperSpeed mode:
+                                                                 0x0 = Disables all scale-downs. Actual timing values are used.
+                                                                 0x1 = Enables scaled down SuperSpeed timing and repeat values including:
+                                                                 * Number of TxEq training sequences reduce to eight.
+                                                                 * LFPS polling burst time reduce to 100 ns.
+                                                                 * LFPS warm reset receive reduce to 30 us.
+
+                                                                 Internal:
+                                                                 Refer to the rtl_vip_scaledown_mapping.xls file under \<workspace\>/sim/SoC_sim
+                                                                 directory for the complete list.
+                                                                 0x2 = No TxEq training sequences are sent. Overrides bit\<4\>.
+                                                                 0x3 = Enables bits\<0\> and \<1\> scale-down timing values. */
+        uint32_t ramclksel : 2; /**< [  7:  6](R/W) RAM clock select. Always keep set to 0x0. */
+        uint32_t debugattach : 1; /**< [  8:  8](R/W) Debug attach. When this bit is set:
+                                                                 * SuperSpeed link proceeds directly to the polling-link state (USBH()_UAHC_DCTL[RS] = 1)
+                                                                 without checking remote termination.
+                                                                 * Link LFPS polling timeout is infinite.
+                                                                 * Polling timeout during TS1 is infinite (in case link is waiting for TXEQ to finish). */
+        uint32_t u1u2timerscale : 1; /**< [  9:  9](R/W) Disable U1/U2 timer scaledown. If set to 1, along with SCALEDOWN = 0x1, disables the scale
+                                                                 down of U1/U2 inactive timer values.
+                                                                 This is for simulation mode only. */
+        uint32_t sofitpsync : 1; /**< [ 10: 10](R/W) Synchronize ITP to reference clock. In host mode, if this bit is set to:
+                                                                 0 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever
+                                                                 there is a SuperSpeed port that is not in Rx.Detect, SS.Disable, and U3 state.
+                                                                 1 = The core keeps the UTMI/ULPI PHY on the first port in non-suspended state whenever the
+                                                                 other non-SuperSpeed ports are not in suspended state.
+
+                                                                 This feature is useful because it saves power by suspending UTMI/ULPI when SuperSpeed only
+                                                                 is active and it helps resolve when the PHY does not transmit a host resume unless it is
+                                                                 placed in suspend state.
+                                                                 USBH()_UAHC_GUSB2PHYCFG()[SUSPHY] eventually decides to put the UTMI/ULPI PHY in to
+                                                                 suspend
+                                                                 state. In addition, when this bit is set to 1, the core generates ITP off of the REF_CLK-
+                                                                 based counter. Otherwise, ITP and SOF are generated off of UTMI/ULPI_CLK[0] based counter.
+
+                                                                 To program the reference clock period inside the core, refer to
+                                                                 USBH()_UAHC_GUCTL[REFCLKPER].
+
+                                                                 If you do not plan to ever use this feature or the
+                                                                 USBH()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL]
+                                                                 feature, the minimum frequency for the ref_clk can be as low as 32 KHz. You can connect
+                                                                 the
+                                                                 SUSPEND_CLK (as low as 32 KHz) to REF_CLK.
+
+                                                                 If you plan to enable hardware-based LPM (PORTPMSC[HLE] = 1), this feature cannot be used.
+                                                                 Turn off this feature by setting this bit to zero and use the
+                                                                 USBH()_UAHC_GFLADJ[GFLADJ_REFCLK_LPM_SEL] feature.
+
+                                                                 If you set this bit to 1, the USBH()_UAHC_GUSB2PHYCFG() [U2_FREECLK_EXISTS] bit
+                                                                 must be set to 0. */
+        uint32_t coresoftreset : 1; /**< [ 11: 11](R/W) Core soft reset: 1 = soft reset to core, 0 = no soft reset.
+                                                                 Clears the interrupts and all the USBH()_UAHC_* CSRs except the
+                                                                 following registers: USBH()_UAHC_GCTL, USBH()_UAHC_GUCTL, USBH()_UAHC_GSTS,
+                                                                 USBH()_UAHC_GRLSID, USBH()_UAHC_GGPIO, USBH()_UAHC_GUID, USBH()_UAHC_GUSB2PHYCFG(),
+                                                                 USBH()_UAHC_GUSB3PIPECTL().
+
+                                                                 When you reset PHYs (using USBH()_UAHC_GUSB2PHYCFG() or USBH()_UAHC_GUSB3PIPECTL()), you
+                                                                 must keep the
+                                                                 core in reset state until PHY clocks are stable. This controls the bus, RAM, and MAC
+                                                                 domain resets.
+
+                                                                 Internal:
+                                                                 Refer to Reset Generation on Synopsys Databook page 250.
+                                                                 Under soft reset, accesses to USBH()_UAHC_* CSRs other than USBH()_UAHC_GCTL may fail
+                                                                 (timeout).
+                                                                 This bit is for debug purposes only. Use USBH()_UAHC_USBCMD[HCRST] for soft reset. */
+        uint32_t prtcapdir : 2; /**< [ 13: 12](R/W) Port capability direction. Always keep set to 0x1. */
+        uint32_t frmscldwn : 2; /**< [ 15: 14](R/W) Frame scale down. Scales down device view of a SOF/USOF/ITP duration.
+                                                                 For SuperSpeed/high-speed mode:
+                                                                 0x0 = Interval is 125 us.
+                                                                 0x1 = Interval is 62.5 us.
+                                                                 0x2 = Interval is 31.25 us.
+                                                                 0x3 = Interval is 15.625 us.
+
+                                                                 For full speed mode, the scale down value is multiplied by 8. */
+        uint32_t reserved_16_17 : 2;
+        uint32_t masterfiltbypass : 1; /**< [ 18: 18](R/W) Master filter bypass. Not relevant for Cavium's configuration. */
+        uint32_t pwrdnscale : 13; /**< [ 31: 19](R/W) Power down scale. The USB3 suspend-clock input replaces pipe3_rx_pclk as a clock source to
+                                                                 a small part of the USB3 core that operates when the SuperSpeed PHY is in its lowest power
+                                                                 (P3) state, and therefore does not provide a clock. This field specifies how many suspend-
+                                                                 clock periods fit into a 16 kHz clock period. When performing the division, round up the
+                                                                 remainder.
+
+                                                                 For example, when using an 32-bit PHY and 25-MHz suspend clock, PWRDNSCALE = 25000 kHz/16
+                                                                 kHz = 1563 (rounded up).
+
+                                                                 The minimum suspend-clock frequency is 32 KHz, and maximum suspend-clock frequency is 125
+                                                                 MHz.
+
+                                                                 The LTSSM uses suspend clock for 12-ms and 100-ms timers during suspend mode. According to
+                                                                 the USB 3.0 specification, the accuracy on these timers is 0% to +50%. 12 ms + 0~+50%
+                                                                 accuracy = 18 ms (Range is 12 ms - 18 ms)
+                                                                 100 ms + 0~+50% accuracy = 150 ms (Range is 100 ms - 150 ms).
+
+                                                                 The suspend clock accuracy requirement is:
+                                                                 _ (12,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 12,000 and
+                                                                 18,000
+                                                                 _ (100,000/62.5) * (GCTL[31:19]) * actual suspend_clk_period should be between 100,000 and
+                                                                 150,000
+
+                                                                 For example, if your suspend_clk frequency varies from 7.5 MHz to 10.5 MHz, then the value
+                                                                 needs to programmed is: power down scale = 10500/16 = 657 (rounded up; and fastest
+                                                                 frequency used). */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gctl_s cn; */
+};
+typedef union bdk_usbhx_uahc_gctl bdk_usbhx_uahc_gctl_t;
+
+/* Address accessor: yields the physical address of USBH(a)_UAHC_GCTL.
+ * Valid only for CN88XX with controller index a <= 1; any other combination
+ * reports a fatal unknown-CSR error (assumes __bdk_csr_fatal does not
+ * return -- TODO confirm it is declared noreturn). The 0x1000000000 stride
+ * selects the USB controller instance (a & 0x1). */
+static inline uint64_t BDK_USBHX_UAHC_GCTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GCTL(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c110ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GCTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GCTL(a) bdk_usbhx_uahc_gctl_t
+#define bustype_BDK_USBHX_UAHC_GCTL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GCTL(a) "USBHX_UAHC_GCTL"
+#define device_bar_BDK_USBHX_UAHC_GCTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GCTL(a) (a)
+#define arguments_BDK_USBHX_UAHC_GCTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gdbgbmu
+ *
+ * UAHC BMU Debug Register
+ * See description in USBH()_UAHC_GDBGFIFOSPACE.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.4.5.
+ */
+/* NOTE(review): machine-generated CSR description; BE/LE field lists are
+ * mirrors and must stay in sync -- do not edit by hand. */
+union bdk_usbhx_uahc_gdbgbmu
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_gdbgbmu_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t bmu_bcu_dbg : 24; /**< [ 31:  8](RO/H) BMU_BCU debug information. */
+        uint32_t bmu_dcu_dbg : 4; /**< [  7:  4](RO/H) BMU_DCU debug information. */
+        uint32_t bmu_ccu_dbg : 4; /**< [  3:  0](RO/H) BMU_CCU debug information. */
+#else /* Word 0 - Little Endian */
+        uint32_t bmu_ccu_dbg : 4; /**< [  3:  0](RO/H) BMU_CCU debug information. */
+        uint32_t bmu_dcu_dbg : 4; /**< [  7:  4](RO/H) BMU_DCU debug information. */
+        uint32_t bmu_bcu_dbg : 24; /**< [ 31:  8](RO/H) BMU_BCU debug information. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gdbgbmu_s cn; */
+};
+typedef union bdk_usbhx_uahc_gdbgbmu bdk_usbhx_uahc_gdbgbmu_t;
+
+/* Physical address of USBH(a)_UAHC_GDBGBMU; CN88XX with a <= 1 only,
+ * otherwise a fatal unknown-CSR error is reported. */
+static inline uint64_t BDK_USBHX_UAHC_GDBGBMU(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GDBGBMU(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c16cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GDBGBMU", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GDBGBMU(a) bdk_usbhx_uahc_gdbgbmu_t
+#define bustype_BDK_USBHX_UAHC_GDBGBMU(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GDBGBMU(a) "USBHX_UAHC_GDBGBMU"
+#define device_bar_BDK_USBHX_UAHC_GDBGBMU(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GDBGBMU(a) (a)
+#define arguments_BDK_USBHX_UAHC_GDBGBMU(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_gdbgepinfo
+ *
+ * UAHC Endpoint Information Debug Register
+ * See description in USBH()_UAHC_GDBGFIFOSPACE.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ */
+/* NOTE(review): machine-generated CSR description; the single 64-bit field
+ * is identical in both endian branches -- do not edit by hand. */
+union bdk_usbhx_uahc_gdbgepinfo
+{
+    uint64_t u;
+    struct bdk_usbhx_uahc_gdbgepinfo_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t endpt_dbg : 64; /**< [ 63:  0](RO/H) Endpoint debug information. */
+#else /* Word 0 - Little Endian */
+        uint64_t endpt_dbg : 64; /**< [ 63:  0](RO/H) Endpoint debug information. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gdbgepinfo_s cn; */
+};
+typedef union bdk_usbhx_uahc_gdbgepinfo bdk_usbhx_uahc_gdbgepinfo_t;
+
+/* Physical address of USBH(a)_UAHC_GDBGEPINFO; CN88XX with a <= 1 only,
+ * otherwise a fatal unknown-CSR error is reported. */
+static inline uint64_t BDK_USBHX_UAHC_GDBGEPINFO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GDBGEPINFO(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c178ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GDBGEPINFO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GDBGEPINFO(a) bdk_usbhx_uahc_gdbgepinfo_t
+#define bustype_BDK_USBHX_UAHC_GDBGEPINFO(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_GDBGEPINFO(a) "USBHX_UAHC_GDBGEPINFO"
+#define device_bar_BDK_USBHX_UAHC_GDBGEPINFO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GDBGEPINFO(a) (a)
+#define arguments_BDK_USBHX_UAHC_GDBGEPINFO(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gdbgfifospace
+ *
+ * UAHC Debug FIFO Space Available Register
+ * This register is for debug purposes. It provides debug information on the internal status and
+ * state machines. Global debug registers have design-specific information, and are used by state
+ * machines. Global debug registers have design-specific information, and are used for debugging
+ * purposes. These registers are not intended to be used by the customer. If any debug assistance
+ * is needed for the silicon, contact customer support with a dump of these registers.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.4.2.
+ * INTERNAL: Contact Synopsys directly.
+ */
+/* NOTE(review): machine-generated CSR description; BE/LE field lists are
+ * mirrors and must stay in sync -- do not edit by hand. */
+union bdk_usbhx_uahc_gdbgfifospace
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_gdbgfifospace_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t spaceavailable : 16; /**< [ 31: 16](RO/H) Space available in the selected FIFO. */
+        uint32_t reserved_9_15 : 7;
+        uint32_t select : 9; /**< [  8:  0](R/W) FIFO/queue select/port-select.
+                                                                 FIFO/queue select: \<7:5\> indicates the FIFO/queue type; \<4:0\> indicates the FIFO/queue
+                                                                 number.
+                                                                 For example, 0x21 refers to RxFIFO_1, and 0x5E refers to TxReqQ_30.
+                                                                 0x1F-0x0: TxFIFO_31 to TxFIFO_0.
+                                                                 0x3F-0x20: RxFIFO_31 to RxFIFO_0.
+                                                                 0x5F-0x40: TxReqQ_31 to TxReqQ_0.
+                                                                 0x7F-0x60: RxReqQ_31 to RxReqQ_0.
+                                                                 0x9F-0x80: RxInfoQ_31 to RxInfoQ_0.
+                                                                 0xA0: DescFetchQ.
+                                                                 0xA1: EventQ.
+                                                                 0xA2: ProtocolStatusQ.
+
+                                                                 Port-select: \<3:0\> selects the port-number when accessing USBH()_UAHC_GDBGLTSSM. */
+#else /* Word 0 - Little Endian */
+        uint32_t select : 9; /**< [  8:  0](R/W) FIFO/queue select/port-select.
+                                                                 FIFO/queue select: \<7:5\> indicates the FIFO/queue type; \<4:0\> indicates the FIFO/queue
+                                                                 number.
+                                                                 For example, 0x21 refers to RxFIFO_1, and 0x5E refers to TxReqQ_30.
+                                                                 0x1F-0x0: TxFIFO_31 to TxFIFO_0.
+                                                                 0x3F-0x20: RxFIFO_31 to RxFIFO_0.
+                                                                 0x5F-0x40: TxReqQ_31 to TxReqQ_0.
+                                                                 0x7F-0x60: RxReqQ_31 to RxReqQ_0.
+                                                                 0x9F-0x80: RxInfoQ_31 to RxInfoQ_0.
+                                                                 0xA0: DescFetchQ.
+                                                                 0xA1: EventQ.
+                                                                 0xA2: ProtocolStatusQ.
+
+                                                                 Port-select: \<3:0\> selects the port-number when accessing USBH()_UAHC_GDBGLTSSM. */
+        uint32_t reserved_9_15 : 7;
+        uint32_t spaceavailable : 16; /**< [ 31: 16](RO/H) Space available in the selected FIFO. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gdbgfifospace_s cn; */
+};
+typedef union bdk_usbhx_uahc_gdbgfifospace bdk_usbhx_uahc_gdbgfifospace_t;
+
+/* Physical address of USBH(a)_UAHC_GDBGFIFOSPACE; CN88XX with a <= 1 only,
+ * otherwise a fatal unknown-CSR error is reported. */
+static inline uint64_t BDK_USBHX_UAHC_GDBGFIFOSPACE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GDBGFIFOSPACE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c160ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GDBGFIFOSPACE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GDBGFIFOSPACE(a) bdk_usbhx_uahc_gdbgfifospace_t
+#define bustype_BDK_USBHX_UAHC_GDBGFIFOSPACE(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GDBGFIFOSPACE(a) "USBHX_UAHC_GDBGFIFOSPACE"
+#define device_bar_BDK_USBHX_UAHC_GDBGFIFOSPACE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GDBGFIFOSPACE(a) (a)
+#define arguments_BDK_USBHX_UAHC_GDBGFIFOSPACE(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gdbglnmcc
+ *
+ * UAHC LNMCC Debug Register
+ * See description in USBH()_UAHC_GDBGFIFOSPACE.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.4.4.
+ */
+/* NOTE(review): machine-generated CSR description; BE/LE field lists are
+ * mirrors and must stay in sync -- do not edit by hand. */
+union bdk_usbhx_uahc_gdbglnmcc
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_gdbglnmcc_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_9_31 : 23;
+        uint32_t lnmcc_berc : 9; /**< [  8:  0](RO/H) This field indicates the bit-error-rate information for the port selected in
+                                                                 USBH()_UAHC_GDBGFIFOSPACE[SELECT] (port-select).
+                                                                 This field is for debug purposes only. */
+#else /* Word 0 - Little Endian */
+        uint32_t lnmcc_berc : 9; /**< [  8:  0](RO/H) This field indicates the bit-error-rate information for the port selected in
+                                                                 USBH()_UAHC_GDBGFIFOSPACE[SELECT] (port-select).
+                                                                 This field is for debug purposes only. */
+        uint32_t reserved_9_31 : 23;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gdbglnmcc_s cn; */
+};
+typedef union bdk_usbhx_uahc_gdbglnmcc bdk_usbhx_uahc_gdbglnmcc_t;
+
+/* Physical address of USBH(a)_UAHC_GDBGLNMCC; CN88XX with a <= 1 only,
+ * otherwise a fatal unknown-CSR error is reported. */
+static inline uint64_t BDK_USBHX_UAHC_GDBGLNMCC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GDBGLNMCC(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c168ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GDBGLNMCC", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GDBGLNMCC(a) bdk_usbhx_uahc_gdbglnmcc_t
+#define bustype_BDK_USBHX_UAHC_GDBGLNMCC(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GDBGLNMCC(a) "USBHX_UAHC_GDBGLNMCC"
+#define device_bar_BDK_USBHX_UAHC_GDBGLNMCC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GDBGLNMCC(a) (a)
+#define arguments_BDK_USBHX_UAHC_GDBGLNMCC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gdbglsp
+ *
+ * UAHC LSP Debug Register
+ * See description in USBH()_UAHC_GDBGFIFOSPACE.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ */
+/* NOTE(review): machine-generated CSR description; the single 32-bit field
+ * is identical in both endian branches -- do not edit by hand. */
+union bdk_usbhx_uahc_gdbglsp
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_gdbglsp_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t lsp_dbg : 32; /**< [ 31:  0](RO/H) LSP debug information. */
+#else /* Word 0 - Little Endian */
+        uint32_t lsp_dbg : 32; /**< [ 31:  0](RO/H) LSP debug information. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gdbglsp_s cn; */
+};
+typedef union bdk_usbhx_uahc_gdbglsp bdk_usbhx_uahc_gdbglsp_t;
+
+/* Physical address of USBH(a)_UAHC_GDBGLSP; CN88XX with a <= 1 only,
+ * otherwise a fatal unknown-CSR error is reported. */
+static inline uint64_t BDK_USBHX_UAHC_GDBGLSP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GDBGLSP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c174ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GDBGLSP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GDBGLSP(a) bdk_usbhx_uahc_gdbglsp_t
+#define bustype_BDK_USBHX_UAHC_GDBGLSP(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GDBGLSP(a) "USBHX_UAHC_GDBGLSP"
+#define device_bar_BDK_USBHX_UAHC_GDBGLSP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GDBGLSP(a) (a)
+#define arguments_BDK_USBHX_UAHC_GDBGLSP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gdbglspmux
+ *
+ * UAHC LSP Multiplexer Debug Register
+ * See description in USBH()_UAHC_GDBGFIFOSPACE.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.4.6.
+ * INTERNAL: This register is for Synopsys internal use only.
+ */
+/* NOTE(review): machine-generated CSR description; BE/LE field lists are
+ * mirrors and must stay in sync -- do not edit by hand. */
+union bdk_usbhx_uahc_gdbglspmux
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_gdbglspmux_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_24_31 : 8;
+        uint32_t latraceportmuxselect : 8; /**< [ 23: 16](R/W) logic_analyzer_trace port multiplexer select. Only bits\<21:16\> are used. For details on
+                                                                 how the mux controls the debug traces, refer to the Verilog file.
+                                                                 A value of 0x3F drives 0s on the logic_analyzer_trace signal. If you plan to OR (instead
+                                                                 using a mux) this signal with other trace signals in your system to generate a common
+                                                                 trace signal, you can use this feature. */
+        uint32_t endbc : 1; /**< [ 15: 15](R/W) Enable debugging of the debug capability LSP. Use HOSTSELECT to select the DbC LSP debug
+                                                                 information presented in the GDBGLSP register.
+
+                                                                 Internal:
+                                                                 Note this can only be used if DebugCapabaility was enabled at compile. */
+        uint32_t reserved_14 : 1;
+        uint32_t hostselect : 14; /**< [ 13:  0](R/W) Host select. Selects the LSP debug information presented in USBH()_UAHC_GDBGLSP. */
+#else /* Word 0 - Little Endian */
+        uint32_t hostselect : 14; /**< [ 13:  0](R/W) Host select. Selects the LSP debug information presented in USBH()_UAHC_GDBGLSP. */
+        uint32_t reserved_14 : 1;
+        uint32_t endbc : 1; /**< [ 15: 15](R/W) Enable debugging of the debug capability LSP. Use HOSTSELECT to select the DbC LSP debug
+                                                                 information presented in the GDBGLSP register.
+
+                                                                 Internal:
+                                                                 Note this can only be used if DebugCapabaility was enabled at compile. */
+        uint32_t latraceportmuxselect : 8; /**< [ 23: 16](R/W) logic_analyzer_trace port multiplexer select. Only bits\<21:16\> are used. For details on
+                                                                 how the mux controls the debug traces, refer to the Verilog file.
+                                                                 A value of 0x3F drives 0s on the logic_analyzer_trace signal. If you plan to OR (instead
+                                                                 using a mux) this signal with other trace signals in your system to generate a common
+                                                                 trace signal, you can use this feature. */
+        uint32_t reserved_24_31 : 8;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gdbglspmux_s cn; */
+};
+typedef union bdk_usbhx_uahc_gdbglspmux bdk_usbhx_uahc_gdbglspmux_t;
+
+/* Physical address of USBH(a)_UAHC_GDBGLSPMUX; CN88XX with a <= 1 only,
+ * otherwise a fatal unknown-CSR error is reported. */
+static inline uint64_t BDK_USBHX_UAHC_GDBGLSPMUX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GDBGLSPMUX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c170ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GDBGLSPMUX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GDBGLSPMUX(a) bdk_usbhx_uahc_gdbglspmux_t
+#define bustype_BDK_USBHX_UAHC_GDBGLSPMUX(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GDBGLSPMUX(a) "USBHX_UAHC_GDBGLSPMUX"
+#define device_bar_BDK_USBHX_UAHC_GDBGLSPMUX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GDBGLSPMUX(a) (a)
+#define arguments_BDK_USBHX_UAHC_GDBGLSPMUX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gdbgltssm
+ *
+ * UAHC LTSSM Debug Register
+ * In multiport host configuration, the port number is defined by
+ * USBH()_UAHC_GDBGFIFOSPACE[SELECT]\<3:0\>. Value of this register may change immediately after
+ * reset.
+ * See description in USBH()_UAHC_GDBGFIFOSPACE.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.4.3.
+ */
+/* NOTE(review): machine-generated CSR description; BE/LE field lists are
+ * mirrors and must stay in sync -- do not edit by hand. */
+union bdk_usbhx_uahc_gdbgltssm
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_gdbgltssm_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_27_31 : 5;
+        uint32_t ltdbtimeout : 1; /**< [ 26: 26](RO/H) LTDB timeout. */
+        uint32_t ltdblinkstate : 4; /**< [ 25: 22](RO/H) LTDB link state. */
+        uint32_t ltdbsubstate : 4; /**< [ 21: 18](RO/H) LTDB substate. */
+        uint32_t debugpipestatus : 18; /**< [ 17:  0](RO/H) Debug PIPE status.
+                                                                 _ \<17\> Elastic buffer mode.
+                                                                 _ \<16\> TX elec idle.
+                                                                 _ \<15\> RX polarity.
+                                                                 _ \<14\> TX Detect RX/loopback.
+                                                                 _ \<13:11\> LTSSM PHY command state.
+                                                                 _ 0x0 = PHY_IDLE (PHY command state is in IDLE. No PHY request is pending).
+                                                                 _ 0x1 = PHY_DET (Request to start receiver detection).
+                                                                 _ 0x2 = PHY_DET_3 (Wait for Phy_Status (receiver detection)).
+                                                                 _ 0x3 = PHY_PWR_DLY (delay Pipe3_PowerDown P0 -\> P1/P2/P3 request).
+                                                                 _ 0x4 = PHY_PWR_A (delay for internal logic).
+                                                                 _ 0x5 = PHY_PWR_B (wait for Phy_Status(Power-state change request)).
+
+                                                                 _ \<10:9\> Power down.
+                                                                 _ \<8\> RxEq train.
+                                                                 _ \<7:6\> TX de-emphasis.
+                                                                 _ \<5:3\> LTSSM clock state.
+                                                                 _ 0x0 = CLK_NORM (PHY is in non-P3 state and PCLK is running).
+                                                                 _ 0x1 = CLK_TO_P3 (P3 entry request to PHY).
+                                                                 _ 0x2 = CLK_WAIT1 (wait for Phy_Status (P3 request)).
+                                                                 _ 0x3 = CLK_P3 (PHY is in P3 and PCLK is not running).
+                                                                 _ 0x4 = CLK_TO_P0 (P3 exit request to PHY).
+                                                                 _ 0x5 = CLK_WAIT2 (Wait for Phy_Status (P3 exit request)).
+
+                                                                 _ \<2\> TX swing.
+                                                                 _ \<1\> RX termination.
+                                                                 _ \<0\> TX 1s/0s. */
+#else /* Word 0 - Little Endian */
+        uint32_t debugpipestatus : 18; /**< [ 17:  0](RO/H) Debug PIPE status.
+                                                                 _ \<17\> Elastic buffer mode.
+                                                                 _ \<16\> TX elec idle.
+                                                                 _ \<15\> RX polarity.
+                                                                 _ \<14\> TX Detect RX/loopback.
+                                                                 _ \<13:11\> LTSSM PHY command state.
+                                                                 _ 0x0 = PHY_IDLE (PHY command state is in IDLE. No PHY request is pending).
+                                                                 _ 0x1 = PHY_DET (Request to start receiver detection).
+                                                                 _ 0x2 = PHY_DET_3 (Wait for Phy_Status (receiver detection)).
+                                                                 _ 0x3 = PHY_PWR_DLY (delay Pipe3_PowerDown P0 -\> P1/P2/P3 request).
+                                                                 _ 0x4 = PHY_PWR_A (delay for internal logic).
+                                                                 _ 0x5 = PHY_PWR_B (wait for Phy_Status(Power-state change request)).
+
+                                                                 _ \<10:9\> Power down.
+                                                                 _ \<8\> RxEq train.
+                                                                 _ \<7:6\> TX de-emphasis.
+                                                                 _ \<5:3\> LTSSM clock state.
+                                                                 _ 0x0 = CLK_NORM (PHY is in non-P3 state and PCLK is running).
+                                                                 _ 0x1 = CLK_TO_P3 (P3 entry request to PHY).
+                                                                 _ 0x2 = CLK_WAIT1 (wait for Phy_Status (P3 request)).
+                                                                 _ 0x3 = CLK_P3 (PHY is in P3 and PCLK is not running).
+                                                                 _ 0x4 = CLK_TO_P0 (P3 exit request to PHY).
+                                                                 _ 0x5 = CLK_WAIT2 (Wait for Phy_Status (P3 exit request)).
+
+                                                                 _ \<2\> TX swing.
+                                                                 _ \<1\> RX termination.
+                                                                 _ \<0\> TX 1s/0s. */
+        uint32_t ltdbsubstate : 4; /**< [ 21: 18](RO/H) LTDB substate. */
+        uint32_t ltdblinkstate : 4; /**< [ 25: 22](RO/H) LTDB link state. */
+        uint32_t ltdbtimeout : 1; /**< [ 26: 26](RO/H) LTDB timeout. */
+        uint32_t reserved_27_31 : 5;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gdbgltssm_s cn; */
+};
+typedef union bdk_usbhx_uahc_gdbgltssm bdk_usbhx_uahc_gdbgltssm_t;
+
+/* Physical address of USBH(a)_UAHC_GDBGLTSSM; CN88XX with a <= 1 only,
+ * otherwise a fatal unknown-CSR error is reported. */
+static inline uint64_t BDK_USBHX_UAHC_GDBGLTSSM(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GDBGLTSSM(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c164ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GDBGLTSSM", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GDBGLTSSM(a) bdk_usbhx_uahc_gdbgltssm_t
+#define bustype_BDK_USBHX_UAHC_GDBGLTSSM(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GDBGLTSSM(a) "USBHX_UAHC_GDBGLTSSM"
+#define device_bar_BDK_USBHX_UAHC_GDBGLTSSM(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GDBGLTSSM(a) (a)
+#define arguments_BDK_USBHX_UAHC_GDBGLTSSM(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gdmahlratio
+ *
+ * UAHC DMA High/Low Ratio Register
+ * This register specifies the relative priority of the SuperSpeed FIFOs with respect to the
+ * high-speed/full-speed/low-speed FIFOs. The DMA arbiter prioritizes the high-speed/full-speed
+ * /low-speed round-robin arbiter group every DMA high-low priority ratio grants as indicated in
+ * the register separately for TX and RX.
+ *
+ * To illustrate, consider that all FIFOs are requesting access simultaneously, and the ratio is
+ * 4. SuperSpeed gets priority for four packets, high-speed/full-speed/low-speed gets priority
+ * for one packet, SuperSpeed gets priority for four packets, high-speed/full-speed/low-speed
+ * gets priority for one packet, and so on.
+ *
+ * If FIFOs from both speed groups are not requesting access simultaneously then:
+ * * If SuperSpeed got grants four out of the last four times, then high-speed/full-speed/
+ * low-speed get the priority on any future request.
+ * * If high-speed/full-speed/low-speed got the grant last time, SuperSpeed gets the priority on
+ * the next request.
+ *
+ * If there is a valid request on either SuperSpeed or high-speed/full-speed/low-speed, a grant
+ * is always awarded; there is no idle.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.9.5.
+ */
+union bdk_usbhx_uahc_gdmahlratio
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gdmahlratio_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_13_31 : 19;
+ uint32_t rx_ratio : 5; /**< [ 12: 8](R/W) Speed ratio for RX arbitration. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t tx_ratio : 5; /**< [ 4: 0](R/W) Speed ratio for TX arbitration. */
+#else /* Word 0 - Little Endian */
+ uint32_t tx_ratio : 5; /**< [ 4: 0](R/W) Speed ratio for TX arbitration. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t rx_ratio : 5; /**< [ 12: 8](R/W) Speed ratio for RX arbitration. */
+ uint32_t reserved_13_31 : 19;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_gdmahlratio_s cn; */
+};
+typedef union bdk_usbhx_uahc_gdmahlratio bdk_usbhx_uahc_gdmahlratio_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GDMAHLRATIO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GDMAHLRATIO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c624ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GDMAHLRATIO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GDMAHLRATIO(a) bdk_usbhx_uahc_gdmahlratio_t
+#define bustype_BDK_USBHX_UAHC_GDMAHLRATIO(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GDMAHLRATIO(a) "USBHX_UAHC_GDMAHLRATIO"
+#define device_bar_BDK_USBHX_UAHC_GDMAHLRATIO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GDMAHLRATIO(a) (a)
+#define arguments_BDK_USBHX_UAHC_GDMAHLRATIO(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gfladj
+ *
+ * UAHC Global Frame Length Adjustment Register
+ * This register provides options for the software to control the core behavior with respect to
+ * SOF (start of frame) and ITP (isochronous timestamp packet) timers and frame timer
+ * functionality. It provides the option to override the sideband signal fladj_30mhz_reg. In
+ * addition, it enables running SOF or ITP frame timer counters completely off of the REF_CLK.
+ * This facilitates hardware LPM in host mode with the SOF or ITP counters being run off of the
+ * REF_CLK signal.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.9.6.
+ */
+union bdk_usbhx_uahc_gfladj
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gfladj_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t gfladj_refclk_240mhzdecr_pls1 : 1;/**< [ 31: 31](R/W) This field indicates that the decrement value that the controller applies for each REF_CLK
+ must be GFLADJ_REFCLK_240MHZ_DECR and GFLADJ_REFCLK_240MHZ_DECR +1 alternatively on each
+ REF_CLK. Set this bit to 1 only if [GFLADJ_REFCLK_LPM_SEL] is set to 1 and the fractional
+ component of 240/ref_frequency is greater than or equal to 0.5.
+
+ Example:
+
+ If the REF_CLK is 19.2 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 52.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = (240/19.2) = 12.5.
+ * [GFLADJ_REFCLK_240MHZDECR_PLS1] = 1.
+
+ If the REF_CLK is 24 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 41.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = (240/24) = 10.
+ * [GFLADJ_REFCLK_240MHZDECR_PLS1] = 0. */
+ uint32_t gfladj_refclk_240mhz_decr : 7;/**< [ 30: 24](R/W) This field indicates the decrement value that the controller applies for each REF_CLK in
+ order to derive a frame timer in terms of a 240-MHz clock. This field must be programmed
+ to a nonzero value only if [GFLADJ_REFCLK_LPM_SEL] is set to 1.
+
+ The value is derived as follows:
+ _ [GFLADJ_REFCLK_240MHZ_DECR] = 240/ref_clk_frequency
+
+ Examples:
+
+ If the REF_CLK is 24 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 41.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/24 = 10.
+
+ If the REF_CLK is 48 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 20.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/48 = 5.
+
+ If the REF_CLK is 17 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 58.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/17 = 14. */
+ uint32_t gfladj_refclk_lpm_sel : 1; /**< [ 23: 23](R/W) This bit enables the functionality of running SOF/ITP counters on the REF_CLK.
+ This bit must not be set to 1 if USBH()_UAHC_GCTL[SOFITPSYNC] = 1. Similarly, if
+ [GFLADJ_REFCLK_LPM_SEL] = 1, USBH()_UAHC_GCTL[SOFITPSYNC] must not be set to 1.
+ When [GFLADJ_REFCLK_LPM_SEL] = 1 the overloading of the suspend control of the USB 2.0
+ first
+ port PHY (UTMI) with USB 3.0 port states is removed. Note that the REF_CLK frequencies
+ supported in this mode are 16/17/19.2/20/24/39.7/40 MHz.
+
+ Internal:
+ The utmi_clk[0] signal of the core must be connected to the FREECLK of the PHY.
+ If you set this bit to 1, USBH()_UAHC_GUSB2PHYCFG()[U2_FREECLK_EXISTS] must be set to 0. */
+ uint32_t reserved_22 : 1;
+ uint32_t gfladj_refclk_fladj : 14; /**< [ 21: 8](R/W) This field indicates the frame length adjustment to be applied when SOF/ITP counter is
+ running off of the REF_CLK. This register value is used to adjust:.
+ * ITP interval when USBH()_UAHC_GCTL[SOFITPSYNC] = 1
+ * both SOF and ITP interval when [GFLADJ_REFCLK_LPM_SEL] = 1.
+
+ This field must be programmed to a nonzero value only if [GFLADJ_REFCLK_LPM_SEL] = 1 or
+ USBH()_UAHC_GCTL[SOFITPSYNC] = 1.
+
+ The value is derived as below:
+
+ _ FLADJ_REF_CLK_FLADJ = ((125000/ref_clk_period_integer) - (125000/ref_clk_period)) *
+ ref_clk_period
+
+ where,
+ * the ref_clk_period_integer is the integer value of the REF_CLK period got by truncating
+ the decimal (fractional) value that is programmed in USBH()_UAHC_GUCTL[REFCLKPER].
+ * the ref_clk_period is the REF_CLK period including the fractional value.
+
+ Examples:
+
+ If the REF_CLK is 24 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 41.
+ * GLADJ_REFCLK_FLADJ = ((125000/41) -
+ (125000/41.6666)) * 41.6666 = 2032 (ignoring the fractional value).
+
+ If the REF_CLK is 48 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 20.
+ * GLADJ_REFCLK_FLADJ = ((125000/20) -
+ (125000/20.8333)) * 20.8333 = 5208 (ignoring the fractional value). */
+    uint32_t gfladj_30mhz_reg_sel : 1; /**< [  7:  7](R/W) This field selects whether to use the input signal fladj_30mhz_reg or the [GFLADJ_30MHZ]
+                                                                 to
+                                                                 adjust the frame length for the SOF/ITP. When this bit is set to 1, the controller uses
+                                                                 [GFLADJ_30MHZ]; when set to 0x0, the controller uses the input signal fladj_30mhz_reg value. */
+ uint32_t reserved_6 : 1;
+ uint32_t gfladj_30mhz : 6; /**< [ 5: 0](R/W) This field indicates the value that is used for frame length adjustment instead of
+ considering from the sideband input signal fladj_30mhz_reg. This enables post-silicon
+ frame length adjustment in case the input signal fladj_30mhz_reg is connected to a wrong
+ value or is not valid. The controller uses this value if [GFLADJ_30MHZ_REG_SEL] = 1 and
+ the
+ SOF/ITP counters are running off of UTMI(ULPI) clock ([GFLADJ_REFCLK_LPM_SEL] = 0 and
+ USBH()_UAHC_GCTL[SOFITPSYNC] is 1 or 0). For details on how to set this value, refer to
+ section 5.2.4 Frame Length Adjustment Register (FLADJ) of the xHCI Specification. */
+#else /* Word 0 - Little Endian */
+ uint32_t gfladj_30mhz : 6; /**< [ 5: 0](R/W) This field indicates the value that is used for frame length adjustment instead of
+ considering from the sideband input signal fladj_30mhz_reg. This enables post-silicon
+ frame length adjustment in case the input signal fladj_30mhz_reg is connected to a wrong
+ value or is not valid. The controller uses this value if [GFLADJ_30MHZ_REG_SEL] = 1 and
+ the
+ SOF/ITP counters are running off of UTMI(ULPI) clock ([GFLADJ_REFCLK_LPM_SEL] = 0 and
+ USBH()_UAHC_GCTL[SOFITPSYNC] is 1 or 0). For details on how to set this value, refer to
+ section 5.2.4 Frame Length Adjustment Register (FLADJ) of the xHCI Specification. */
+ uint32_t reserved_6 : 1;
+    uint32_t gfladj_30mhz_reg_sel : 1; /**< [  7:  7](R/W) This field selects whether to use the input signal fladj_30mhz_reg or the [GFLADJ_30MHZ]
+                                                                 to
+                                                                 adjust the frame length for the SOF/ITP. When this bit is set to 1, the controller uses
+                                                                 [GFLADJ_30MHZ]; when set to 0x0, the controller uses the input signal fladj_30mhz_reg value. */
+ uint32_t gfladj_refclk_fladj : 14; /**< [ 21: 8](R/W) This field indicates the frame length adjustment to be applied when SOF/ITP counter is
+ running off of the REF_CLK. This register value is used to adjust:.
+ * ITP interval when USBH()_UAHC_GCTL[SOFITPSYNC] = 1
+ * both SOF and ITP interval when [GFLADJ_REFCLK_LPM_SEL] = 1.
+
+ This field must be programmed to a nonzero value only if [GFLADJ_REFCLK_LPM_SEL] = 1 or
+ USBH()_UAHC_GCTL[SOFITPSYNC] = 1.
+
+ The value is derived as below:
+
+ _ FLADJ_REF_CLK_FLADJ = ((125000/ref_clk_period_integer) - (125000/ref_clk_period)) *
+ ref_clk_period
+
+ where,
+ * the ref_clk_period_integer is the integer value of the REF_CLK period got by truncating
+ the decimal (fractional) value that is programmed in USBH()_UAHC_GUCTL[REFCLKPER].
+ * the ref_clk_period is the REF_CLK period including the fractional value.
+
+ Examples:
+
+ If the REF_CLK is 24 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 41.
+ * GLADJ_REFCLK_FLADJ = ((125000/41) -
+ (125000/41.6666)) * 41.6666 = 2032 (ignoring the fractional value).
+
+ If the REF_CLK is 48 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 20.
+ * GLADJ_REFCLK_FLADJ = ((125000/20) -
+ (125000/20.8333)) * 20.8333 = 5208 (ignoring the fractional value). */
+ uint32_t reserved_22 : 1;
+ uint32_t gfladj_refclk_lpm_sel : 1; /**< [ 23: 23](R/W) This bit enables the functionality of running SOF/ITP counters on the REF_CLK.
+ This bit must not be set to 1 if USBH()_UAHC_GCTL[SOFITPSYNC] = 1. Similarly, if
+ [GFLADJ_REFCLK_LPM_SEL] = 1, USBH()_UAHC_GCTL[SOFITPSYNC] must not be set to 1.
+ When [GFLADJ_REFCLK_LPM_SEL] = 1 the overloading of the suspend control of the USB 2.0
+ first
+ port PHY (UTMI) with USB 3.0 port states is removed. Note that the REF_CLK frequencies
+ supported in this mode are 16/17/19.2/20/24/39.7/40 MHz.
+
+ Internal:
+ The utmi_clk[0] signal of the core must be connected to the FREECLK of the PHY.
+ If you set this bit to 1, USBH()_UAHC_GUSB2PHYCFG()[U2_FREECLK_EXISTS] must be set to 0. */
+ uint32_t gfladj_refclk_240mhz_decr : 7;/**< [ 30: 24](R/W) This field indicates the decrement value that the controller applies for each REF_CLK in
+ order to derive a frame timer in terms of a 240-MHz clock. This field must be programmed
+ to a nonzero value only if [GFLADJ_REFCLK_LPM_SEL] is set to 1.
+
+ The value is derived as follows:
+ _ [GFLADJ_REFCLK_240MHZ_DECR] = 240/ref_clk_frequency
+
+ Examples:
+
+ If the REF_CLK is 24 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 41.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/24 = 10.
+
+ If the REF_CLK is 48 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 20.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/48 = 5.
+
+ If the REF_CLK is 17 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 58.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = 240/17 = 14. */
+ uint32_t gfladj_refclk_240mhzdecr_pls1 : 1;/**< [ 31: 31](R/W) This field indicates that the decrement value that the controller applies for each REF_CLK
+ must be GFLADJ_REFCLK_240MHZ_DECR and GFLADJ_REFCLK_240MHZ_DECR +1 alternatively on each
+ REF_CLK. Set this bit to 1 only if [GFLADJ_REFCLK_LPM_SEL] is set to 1 and the fractional
+ component of 240/ref_frequency is greater than or equal to 0.5.
+
+ Example:
+
+ If the REF_CLK is 19.2 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 52.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = (240/19.2) = 12.5.
+ * [GFLADJ_REFCLK_240MHZDECR_PLS1] = 1.
+
+ If the REF_CLK is 24 MHz then:
+ * USBH()_UAHC_GUCTL[REFCLKPER] = 41.
+ * [GFLADJ_REFCLK_240MHZ_DECR] = (240/24) = 10.
+ * [GFLADJ_REFCLK_240MHZDECR_PLS1] = 0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_gfladj_s cn; */
+};
+typedef union bdk_usbhx_uahc_gfladj bdk_usbhx_uahc_gfladj_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GFLADJ(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GFLADJ(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c630ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GFLADJ", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GFLADJ(a) bdk_usbhx_uahc_gfladj_t
+#define bustype_BDK_USBHX_UAHC_GFLADJ(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GFLADJ(a) "USBHX_UAHC_GFLADJ"
+#define device_bar_BDK_USBHX_UAHC_GFLADJ(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GFLADJ(a) (a)
+#define arguments_BDK_USBHX_UAHC_GFLADJ(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ggpio
+ *
+ * UAHC Core General-Purpose I/O Register
+ * The application can use this register for general purpose input and output ports or for
+ * debugging.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.9.
+ */
+union bdk_usbhx_uahc_ggpio
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_ggpio_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t gpo : 16; /**< [ 31: 16](R/W) General purpose output. These outputs are not connected to anything. Can be used as scratch. */
+ uint32_t gpi : 16; /**< [ 15: 0](RO) General purpose input. These inputs are tied 0x0. */
+#else /* Word 0 - Little Endian */
+ uint32_t gpi : 16; /**< [ 15: 0](RO) General purpose input. These inputs are tied 0x0. */
+ uint32_t gpo : 16; /**< [ 31: 16](R/W) General purpose output. These outputs are not connected to anything. Can be used as scratch. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_ggpio_s cn; */
+};
+typedef union bdk_usbhx_uahc_ggpio bdk_usbhx_uahc_ggpio_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GGPIO(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GGPIO(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c124ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GGPIO", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GGPIO(a) bdk_usbhx_uahc_ggpio_t
+#define bustype_BDK_USBHX_UAHC_GGPIO(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GGPIO(a) "USBHX_UAHC_GGPIO"
+#define device_bar_BDK_USBHX_UAHC_GGPIO(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GGPIO(a) (a)
+#define arguments_BDK_USBHX_UAHC_GGPIO(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams0
+ *
+ * UAHC Hardware Parameters Register 0
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.3.1.
+ */
+union bdk_usbhx_uahc_ghwparams0
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_ghwparams0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t awidth : 8; /**< [ 31: 24](RO) USB core bus-address width. */
+ uint32_t sdwidth : 8; /**< [ 23: 16](RO) USB core bus slave-data width. */
+ uint32_t mdwidth : 8; /**< [ 15: 8](RO) USB core bus master-data width. */
+ uint32_t sbus_type : 2; /**< [ 7: 6](RO) USB core bus slave type: AXI. */
+ uint32_t mbus_type : 3; /**< [ 5: 3](RO) USB core bus master type: AXI. */
+ uint32_t mode : 3; /**< [ 2: 0](RO) Operation mode: 0x1: host-only. */
+#else /* Word 0 - Little Endian */
+ uint32_t mode : 3; /**< [ 2: 0](RO) Operation mode: 0x1: host-only. */
+ uint32_t mbus_type : 3; /**< [ 5: 3](RO) USB core bus master type: AXI. */
+ uint32_t sbus_type : 2; /**< [ 7: 6](RO) USB core bus slave type: AXI. */
+ uint32_t mdwidth : 8; /**< [ 15: 8](RO) USB core bus master-data width. */
+ uint32_t sdwidth : 8; /**< [ 23: 16](RO) USB core bus slave-data width. */
+ uint32_t awidth : 8; /**< [ 31: 24](RO) USB core bus-address width. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_ghwparams0_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams0 bdk_usbhx_uahc_ghwparams0_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c140ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS0(a) bdk_usbhx_uahc_ghwparams0_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS0(a) "USBHX_UAHC_GHWPARAMS0"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS0(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams1
+ *
+ * UAHC Hardware Parameters Register 1
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.3.2.
+ */
+union bdk_usbhx_uahc_ghwparams1
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_ghwparams1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t en_dbc : 1; /**< [ 31: 31](RO) Enable debug capability. */
+ uint32_t rm_opt_features : 1; /**< [ 30: 30](RO) Remove optional features. */
+ uint32_t sync_rst : 1; /**< [ 29: 29](RO) Synchronous reset coding. */
+ uint32_t ram_bus_clks_sync : 1; /**< [ 28: 28](RO) RAM_CLK and BUS_CLK are synchronous.
+ Internal:
+ (appears to be orthogonal from the
+ RAM_CLK_TO_BUS_CLK parameter) */
+ uint32_t mac_ram_clks_sync : 1; /**< [ 27: 27](RO) MAC3_CLK and RAM_CLK are synchronous. */
+ uint32_t mac_phy_clks_sync : 1; /**< [ 26: 26](RO) MAC3_CLK and PHY_CLK are synchronous. */
+ uint32_t en_pwropt : 2; /**< [ 25: 24](RO) Power optimization mode:
+ bit\<0\> = Clock-gating feature available.
+ bit\<1\> = Hibernation feature available. */
+ uint32_t spram_typ : 1; /**< [ 23: 23](RO) SRAM type: one-port RAMs. */
+ uint32_t num_rams : 2; /**< [ 22: 21](RO) Number of RAMs. */
+ uint32_t device_num_int : 6; /**< [ 20: 15](RO) Number of event buffers (and interrupts) in device-mode (unsupported). */
+ uint32_t aspacewidth : 3; /**< [ 14: 12](RO) Native interface address-space port width. */
+ uint32_t reqinfowidth : 3; /**< [ 11: 9](RO) Native interface request/response-info port width. */
+ uint32_t datainfowidth : 3; /**< [ 8: 6](RO) Native interface data-info port width. */
+ uint32_t burstwidth_m1 : 3; /**< [ 5: 3](RO) Width minus one of AXI length field. */
+ uint32_t idwidth_m1 : 3; /**< [ 2: 0](RO) Width minus one of AXI ID field. */
+#else /* Word 0 - Little Endian */
+ uint32_t idwidth_m1 : 3; /**< [ 2: 0](RO) Width minus one of AXI ID field. */
+ uint32_t burstwidth_m1 : 3; /**< [ 5: 3](RO) Width minus one of AXI length field. */
+ uint32_t datainfowidth : 3; /**< [ 8: 6](RO) Native interface data-info port width. */
+ uint32_t reqinfowidth : 3; /**< [ 11: 9](RO) Native interface request/response-info port width. */
+ uint32_t aspacewidth : 3; /**< [ 14: 12](RO) Native interface address-space port width. */
+ uint32_t device_num_int : 6; /**< [ 20: 15](RO) Number of event buffers (and interrupts) in device-mode (unsupported). */
+ uint32_t num_rams : 2; /**< [ 22: 21](RO) Number of RAMs. */
+ uint32_t spram_typ : 1; /**< [ 23: 23](RO) SRAM type: one-port RAMs. */
+ uint32_t en_pwropt : 2; /**< [ 25: 24](RO) Power optimization mode:
+ bit\<0\> = Clock-gating feature available.
+ bit\<1\> = Hibernation feature available. */
+ uint32_t mac_phy_clks_sync : 1; /**< [ 26: 26](RO) MAC3_CLK and PHY_CLK are synchronous. */
+ uint32_t mac_ram_clks_sync : 1; /**< [ 27: 27](RO) MAC3_CLK and RAM_CLK are synchronous. */
+ uint32_t ram_bus_clks_sync : 1; /**< [ 28: 28](RO) RAM_CLK and BUS_CLK are synchronous.
+ Internal:
+ (appears to be orthogonal from the
+ RAM_CLK_TO_BUS_CLK parameter) */
+ uint32_t sync_rst : 1; /**< [ 29: 29](RO) Synchronous reset coding. */
+ uint32_t rm_opt_features : 1; /**< [ 30: 30](RO) Remove optional features. */
+ uint32_t en_dbc : 1; /**< [ 31: 31](RO) Enable debug capability. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_ghwparams1_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams1 bdk_usbhx_uahc_ghwparams1_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c144ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS1(a) bdk_usbhx_uahc_ghwparams1_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS1(a) "USBHX_UAHC_GHWPARAMS1"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS1(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams2
+ *
+ * UAHC Core GHW Parameters Register 2
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.3.3.
+ */
+union bdk_usbhx_uahc_ghwparams2
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_ghwparams2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t userid : 32; /**< [ 31: 0](RO) User ID. */
+#else /* Word 0 - Little Endian */
+ uint32_t userid : 32; /**< [ 31: 0](RO) User ID. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_ghwparams2_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams2 bdk_usbhx_uahc_ghwparams2_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c148ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS2(a) bdk_usbhx_uahc_ghwparams2_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS2(a) "USBHX_UAHC_GHWPARAMS2"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS2(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams3
+ *
+ * UAHC GHW Parameters Register 3
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.3.4.
+ */
+union bdk_usbhx_uahc_ghwparams3
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_ghwparams3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_31 : 1;
+ uint32_t cache_total_xfer_resources : 8;/**< [ 30: 23](RO) Maximum number of transfer resources in the core. */
+ uint32_t num_in_eps : 5; /**< [ 22: 18](RO) Maximum number of device-mode (unsupported) IN endpoints active. */
+ uint32_t num_eps : 6; /**< [ 17: 12](RO) Number of device-mode (unsupported) single-directional endpoints. */
+ uint32_t ulpi_carkit : 1; /**< [ 11: 11](RO) ULPI carkit is not supported. */
+ uint32_t vendor_ctl_interface : 1; /**< [ 10: 10](RO) UTMI+ PHY vendor control interface enabled. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t hsphy_dwidth : 2; /**< [ 7: 6](RO) Data width of the UTMI+ PHY interface: 0x2 = 8-or-16 bits. */
+ uint32_t fsphy_interface : 2; /**< [ 5: 4](RO) USB 1.1 full-speed serial transceiver interface. */
+ uint32_t hsphy_interface : 2; /**< [ 3: 2](RO) High-speed PHY interface: 0x1 = UTMI+. */
+ uint32_t ssphy_interface : 2; /**< [ 1: 0](RO) SuperSpeed PHY interface: 0x1 = PIPE3. */
+#else /* Word 0 - Little Endian */
+ uint32_t ssphy_interface : 2; /**< [ 1: 0](RO) SuperSpeed PHY interface: 0x1 = PIPE3. */
+ uint32_t hsphy_interface : 2; /**< [ 3: 2](RO) High-speed PHY interface: 0x1 = UTMI+. */
+ uint32_t fsphy_interface : 2; /**< [ 5: 4](RO) USB 1.1 full-speed serial transceiver interface. */
+ uint32_t hsphy_dwidth : 2; /**< [ 7: 6](RO) Data width of the UTMI+ PHY interface: 0x2 = 8-or-16 bits. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t vendor_ctl_interface : 1; /**< [ 10: 10](RO) UTMI+ PHY vendor control interface enabled. */
+ uint32_t ulpi_carkit : 1; /**< [ 11: 11](RO) ULPI carkit is not supported. */
+ uint32_t num_eps : 6; /**< [ 17: 12](RO) Number of device-mode (unsupported) single-directional endpoints. */
+ uint32_t num_in_eps : 5; /**< [ 22: 18](RO) Maximum number of device-mode (unsupported) IN endpoints active. */
+ uint32_t cache_total_xfer_resources : 8;/**< [ 30: 23](RO) Maximum number of transfer resources in the core. */
+ uint32_t reserved_31 : 1;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_ghwparams3_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams3 bdk_usbhx_uahc_ghwparams3_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c14cll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS3(a) bdk_usbhx_uahc_ghwparams3_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS3(a) "USBHX_UAHC_GHWPARAMS3"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS3(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams4
+ *
+ * UAHC GHW Parameters Register 4
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.3.5.
+ */
+union bdk_usbhx_uahc_ghwparams4
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_ghwparams4_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t bmu_lsp_depth : 4; /**< [ 31: 28](RO) Depth of the BMU-LSP status buffer. */
+ uint32_t bmu_ptl_depth_m1 : 4; /**< [ 27: 24](RO) Depth of the BMU-PTL source/sink buffers minus 1. */
+ uint32_t en_isoc_supt : 1; /**< [ 23: 23](RO) Isochronous support enabled. */
+ uint32_t reserved_22 : 1;
+ uint32_t ext_buff_control : 1; /**< [ 21: 21](RO) Enables device external buffer control sideband controls. */
+ uint32_t num_ss_usb_instances : 4; /**< [ 20: 17](RO) Number of SuperSpeed bus instances. */
+ uint32_t hiber_scratchbufs : 4; /**< [ 16: 13](RO) Number of hibernation scratchpad buffers. */
+ uint32_t reserved_6_12 : 7;
+ uint32_t cache_trbs_per_transfer : 6;/**< [ 5: 0](RO) Number of TRBs per transfer that can be cached. */
+#else /* Word 0 - Little Endian */
+ uint32_t cache_trbs_per_transfer : 6;/**< [ 5: 0](RO) Number of TRBs per transfer that can be cached. */
+ uint32_t reserved_6_12 : 7;
+ uint32_t hiber_scratchbufs : 4; /**< [ 16: 13](RO) Number of hibernation scratchpad buffers. */
+ uint32_t num_ss_usb_instances : 4; /**< [ 20: 17](RO) Number of SuperSpeed bus instances. */
+ uint32_t ext_buff_control : 1; /**< [ 21: 21](RO) Enables device external buffer control sideband controls. */
+ uint32_t reserved_22 : 1;
+ uint32_t en_isoc_supt : 1; /**< [ 23: 23](RO) Isochronous support enabled. */
+ uint32_t bmu_ptl_depth_m1 : 4; /**< [ 27: 24](RO) Depth of the BMU-PTL source/sink buffers minus 1. */
+ uint32_t bmu_lsp_depth : 4; /**< [ 31: 28](RO) Depth of the BMU-LSP status buffer. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_ghwparams4_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams4 bdk_usbhx_uahc_ghwparams4_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS4(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS4(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c150ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS4", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS4(a) bdk_usbhx_uahc_ghwparams4_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS4(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS4(a) "USBHX_UAHC_GHWPARAMS4"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS4(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS4(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS4(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams5
+ *
+ * UAHC GHW Parameters Register 5
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.3.6.
+ */
+union bdk_usbhx_uahc_ghwparams5
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_ghwparams5_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_28_31 : 4;
+ uint32_t dfq_fifo_depth : 6; /**< [ 27: 22](RO) Size of the BMU descriptor fetch-request queue. */
+ uint32_t dwq_fifo_depth : 6; /**< [ 21: 16](RO) Size of the BMU descriptor write queue. */
+ uint32_t txq_fifo_depth : 6; /**< [ 15: 10](RO) Size of the BMU TX request queue. */
+ uint32_t rxq_fifo_depth : 6; /**< [ 9: 4](RO) Size of the BMU RX request queue. */
+ uint32_t bmu_busgm_depth : 4; /**< [ 3: 0](RO) Depth of the BMU-BUSGM source/sink buffers. */
+#else /* Word 0 - Little Endian */
+ uint32_t bmu_busgm_depth : 4; /**< [ 3: 0](RO) Depth of the BMU-BUSGM source/sink buffers. */
+ uint32_t rxq_fifo_depth : 6; /**< [ 9: 4](RO) Size of the BMU RX request queue. */
+ uint32_t txq_fifo_depth : 6; /**< [ 15: 10](RO) Size of the BMU TX request queue. */
+ uint32_t dwq_fifo_depth : 6; /**< [ 21: 16](RO) Size of the BMU descriptor write queue. */
+ uint32_t dfq_fifo_depth : 6; /**< [ 27: 22](RO) Size of the BMU descriptor fetch-request queue. */
+ uint32_t reserved_28_31 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_ghwparams5_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams5 bdk_usbhx_uahc_ghwparams5_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS5(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS5(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c154ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS5", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS5(a) bdk_usbhx_uahc_ghwparams5_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS5(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS5(a) "USBHX_UAHC_GHWPARAMS5"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS5(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS5(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS5(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams6
+ *
+ * UAHC GHW Parameters Register 6
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.3.7.
+ */
+union bdk_usbhx_uahc_ghwparams6
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_ghwparams6_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ram0_depth : 16; /**< [ 31: 16](RO) RAM0 depth. */
+        uint32_t en_bus_filters : 1; /**< [ 15: 15](RO) Enable VBus filters support. */
+        uint32_t en_bc : 1; /**< [ 14: 14](RO) Enable battery charging support. */
+        uint32_t en_otg_ss : 1; /**< [ 13: 13](RO) Enable OTG SuperSpeed support. */
+        uint32_t en_adp : 1; /**< [ 12: 12](RO) Enable ADP support. */
+        uint32_t hnp_support : 1; /**< [ 11: 11](RO) HNP support. */
+        uint32_t srp_support : 1; /**< [ 10: 10](RO) SRP support. */
+        uint32_t reserved_8_9 : 2;
+        uint32_t en_fpga : 1; /**< [ 7: 7](RO) Enable FPGA implementation. */
+        uint32_t en_dbg_ports : 1; /**< [ 6: 6](RO) Enable debug ports for FPGA. */
+        uint32_t psq_fifo_depth : 6; /**< [ 5: 0](RO) Size of the BMU protocol status queue. */
+#else /* Word 0 - Little Endian */
+        uint32_t psq_fifo_depth : 6; /**< [ 5: 0](RO) Size of the BMU protocol status queue. */
+        uint32_t en_dbg_ports : 1; /**< [ 6: 6](RO) Enable debug ports for FPGA. */
+        uint32_t en_fpga : 1; /**< [ 7: 7](RO) Enable FPGA implementation. */
+        uint32_t reserved_8_9 : 2;
+        uint32_t srp_support : 1; /**< [ 10: 10](RO) SRP support. */
+        uint32_t hnp_support : 1; /**< [ 11: 11](RO) HNP support. */
+        uint32_t en_adp : 1; /**< [ 12: 12](RO) Enable ADP support. */
+        uint32_t en_otg_ss : 1; /**< [ 13: 13](RO) Enable OTG SuperSpeed support. */
+        uint32_t en_bc : 1; /**< [ 14: 14](RO) Enable battery charging support. */
+        uint32_t en_bus_filters : 1; /**< [ 15: 15](RO) Enable VBus filters support. */
+        uint32_t ram0_depth : 16; /**< [ 31: 16](RO) RAM0 depth. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_ghwparams6_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams6 bdk_usbhx_uahc_ghwparams6_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS6(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS6(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c158ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS6", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS6(a) bdk_usbhx_uahc_ghwparams6_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS6(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS6(a) "USBHX_UAHC_GHWPARAMS6"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS6(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS6(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS6(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams7
+ *
+ * UAHC GHW Parameters Register 7
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.3.8.
+ */
+union bdk_usbhx_uahc_ghwparams7
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_ghwparams7_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t ram2_depth : 16; /**< [ 31: 16](RO) RAM2 depth. */
+        uint32_t ram1_depth : 16; /**< [ 15: 0](RO) RAM1 depth. */
+#else /* Word 0 - Little Endian */
+        uint32_t ram1_depth : 16; /**< [ 15: 0](RO) RAM1 depth. */
+        uint32_t ram2_depth : 16; /**< [ 31: 16](RO) RAM2 depth. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_ghwparams7_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams7 bdk_usbhx_uahc_ghwparams7_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS7(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS7(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c15cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS7", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS7(a) bdk_usbhx_uahc_ghwparams7_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS7(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS7(a) "USBHX_UAHC_GHWPARAMS7"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS7(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS7(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS7(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_ghwparams8
+ *
+ * UAHC GHW Parameters Register 8
+ * This register contains the hardware configuration options selected at compile-time.
+ * Internal:
+ * Register field names refer to Synopsys DWC_USB3_* parameters of the same suffix.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.20a, section 6.2.3.9.
+ */
+union bdk_usbhx_uahc_ghwparams8
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_ghwparams8_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t dcache_depth_info : 32; /**< [ 31: 0](RO) Dcache depth. */
+#else /* Word 0 - Little Endian */
+        uint32_t dcache_depth_info : 32; /**< [ 31: 0](RO) Dcache depth. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_ghwparams8_s cn; */
+};
+typedef union bdk_usbhx_uahc_ghwparams8 bdk_usbhx_uahc_ghwparams8_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS8(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GHWPARAMS8(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c600ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GHWPARAMS8", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GHWPARAMS8(a) bdk_usbhx_uahc_ghwparams8_t
+#define bustype_BDK_USBHX_UAHC_GHWPARAMS8(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GHWPARAMS8(a) "USBHX_UAHC_GHWPARAMS8"
+#define device_bar_BDK_USBHX_UAHC_GHWPARAMS8(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GHWPARAMS8(a) (a)
+#define arguments_BDK_USBHX_UAHC_GHWPARAMS8(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gpmsts
+ *
+ * UAHC Global Power Management Status Register
+ * This debug register gives information on which event caused the hibernation exit. These
+ * registers are for debug purposes. They provide debug information on the internal status and
+ * state machines. Global debug registers have design-specific information, and are used for
+ * debugging purposes. These registers are not intended to be used by the customer. If any debug
+ * assistance is needed for the silicon, contact customer support with a dump of these registers.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.4.1.
+ * INTERNAL: Contact Synopsys directly.
+ */
+union bdk_usbhx_uahc_gpmsts
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_gpmsts_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t portsel : 4; /**< [ 31: 28](WO) This field selects the port number. Always 0x0. */
+        uint32_t reserved_17_27 : 11;
+        uint32_t u3wakeup : 5; /**< [ 16: 12](RO/H) This field gives the USB 3.0 port wakeup conditions.
+                                 bit\<12\> = Overcurrent detected.
+                                 bit\<13\> = Resume detected.
+                                 bit\<14\> = Connect detected.
+                                 bit\<15\> = Disconnect detected.
+                                 bit\<16\> = Last connection state. */
+        uint32_t reserved_10_11 : 2;
+        uint32_t u2wakeup : 10; /**< [ 9: 0](RO/H) This field indicates the USB 2.0 port wakeup conditions.
+                                 bit\<0\> = Overcurrent detected.
+                                 bit\<1\> = Resume detected.
+                                 bit\<2\> = Connect detected.
+                                 bit\<3\> = Disconnect detected.
+                                 bit\<4\> = Last connection state.
+                                 bit\<5\> = ID change detected.
+                                 bit\<6\> = SRP request detected.
+                                 bit\<7\> = ULPI interrupt detected.
+                                 bit\<8\> = USB reset detected.
+                                 bit\<9\> = Resume detected changed. */
+#else /* Word 0 - Little Endian */
+        uint32_t u2wakeup : 10; /**< [ 9: 0](RO/H) This field indicates the USB 2.0 port wakeup conditions.
+                                 bit\<0\> = Overcurrent detected.
+                                 bit\<1\> = Resume detected.
+                                 bit\<2\> = Connect detected.
+                                 bit\<3\> = Disconnect detected.
+                                 bit\<4\> = Last connection state.
+                                 bit\<5\> = ID change detected.
+                                 bit\<6\> = SRP request detected.
+                                 bit\<7\> = ULPI interrupt detected.
+                                 bit\<8\> = USB reset detected.
+                                 bit\<9\> = Resume detected changed. */
+        uint32_t reserved_10_11 : 2;
+        uint32_t u3wakeup : 5; /**< [ 16: 12](RO/H) This field gives the USB 3.0 port wakeup conditions.
+                                 bit\<12\> = Overcurrent detected.
+                                 bit\<13\> = Resume detected.
+                                 bit\<14\> = Connect detected.
+                                 bit\<15\> = Disconnect detected.
+                                 bit\<16\> = Last connection state. */
+        uint32_t reserved_17_27 : 11;
+        uint32_t portsel : 4; /**< [ 31: 28](WO) This field selects the port number. Always 0x0. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gpmsts_s cn; */
+};
+typedef union bdk_usbhx_uahc_gpmsts bdk_usbhx_uahc_gpmsts_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GPMSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GPMSTS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c114ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GPMSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GPMSTS(a) bdk_usbhx_uahc_gpmsts_t
+#define bustype_BDK_USBHX_UAHC_GPMSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GPMSTS(a) "USBHX_UAHC_GPMSTS"
+#define device_bar_BDK_USBHX_UAHC_GPMSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GPMSTS(a) (a)
+#define arguments_BDK_USBHX_UAHC_GPMSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_gprtbimap
+ *
+ * UAHC SuperSpeed Port-to-Bus Instance Mapping Register
+ * This register specifies the SuperSpeed USB instance number to which each USB 3.0 port is
+ * connected. By default, USB 3.0 ports are evenly distributed among all SuperSpeed USB
+ * instances. Software can program this register to specify how USB 3.0 ports are connected to
+ * SuperSpeed USB instances. The UAHC only implements one SuperSpeed bus-instance, so this
+ * register should always be 0.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.2.1.
+ */
+union bdk_usbhx_uahc_gprtbimap
+{
+    uint64_t u;
+    struct bdk_usbhx_uahc_gprtbimap_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63 : 60;
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) SuperSpeed USB instance number for port 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) SuperSpeed USB instance number for port 1. */
+        uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gprtbimap_s cn; */
+};
+typedef union bdk_usbhx_uahc_gprtbimap bdk_usbhx_uahc_gprtbimap_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GPRTBIMAP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GPRTBIMAP(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c138ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GPRTBIMAP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GPRTBIMAP(a) bdk_usbhx_uahc_gprtbimap_t
+#define bustype_BDK_USBHX_UAHC_GPRTBIMAP(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_GPRTBIMAP(a) "USBHX_UAHC_GPRTBIMAP"
+#define device_bar_BDK_USBHX_UAHC_GPRTBIMAP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GPRTBIMAP(a) (a)
+#define arguments_BDK_USBHX_UAHC_GPRTBIMAP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_gprtbimap_fs
+ *
+ * UAHC Full/LowSpeed Port-to-Bus Instance Mapping Register
+ * This register specifies the full-speed/low-speed USB instance number to which each USB 1.1
+ * port is connected. By default, USB 1.1 ports are evenly distributed among all full-speed/
+ * low-speed USB instances. Software can program this register to specify how USB 1.1 ports are
+ * connected to full-speed/low-speed USB instances. The UAHC only implements one full-speed/
+ * low-speed bus-instance, so this register should always be 0x0.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.2.3.
+ */
+union bdk_usbhx_uahc_gprtbimap_fs
+{
+    uint64_t u;
+    struct bdk_usbhx_uahc_gprtbimap_fs_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63 : 60;
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) Full-speed USB instance number for port 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) Full-speed USB instance number for port 1. */
+        uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gprtbimap_fs_s cn; */
+};
+typedef union bdk_usbhx_uahc_gprtbimap_fs bdk_usbhx_uahc_gprtbimap_fs_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GPRTBIMAP_FS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GPRTBIMAP_FS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c188ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GPRTBIMAP_FS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GPRTBIMAP_FS(a) bdk_usbhx_uahc_gprtbimap_fs_t
+#define bustype_BDK_USBHX_UAHC_GPRTBIMAP_FS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_GPRTBIMAP_FS(a) "USBHX_UAHC_GPRTBIMAP_FS"
+#define device_bar_BDK_USBHX_UAHC_GPRTBIMAP_FS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GPRTBIMAP_FS(a) (a)
+#define arguments_BDK_USBHX_UAHC_GPRTBIMAP_FS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uahc_gprtbimap_hs
+ *
+ * UAHC High-Speed Port-to-Bus Instance Mapping Register
+ * This register specifies the high-speed USB instance number to which each USB 2.0 port is
+ * connected. By default, USB 2.0 ports are evenly distributed among all high-speed USB
+ * instances. Software can program this register to specify how USB 2.0 ports are connected to
+ * high-speed USB instances. The UAHC only implements one high-speed bus-instance, so this
+ * register should always be 0.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.2.2.
+ */
+union bdk_usbhx_uahc_gprtbimap_hs
+{
+    uint64_t u;
+    struct bdk_usbhx_uahc_gprtbimap_hs_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint64_t reserved_4_63 : 60;
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) High-speed USB instance number for port 1. */
+#else /* Word 0 - Little Endian */
+        uint64_t binum1 : 4; /**< [ 3: 0](R/W) High-speed USB instance number for port 1. */
+        uint64_t reserved_4_63 : 60;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gprtbimap_hs_s cn; */
+};
+typedef union bdk_usbhx_uahc_gprtbimap_hs bdk_usbhx_uahc_gprtbimap_hs_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GPRTBIMAP_HS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GPRTBIMAP_HS(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c180ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GPRTBIMAP_HS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GPRTBIMAP_HS(a) bdk_usbhx_uahc_gprtbimap_hs_t
+#define bustype_BDK_USBHX_UAHC_GPRTBIMAP_HS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UAHC_GPRTBIMAP_HS(a) "USBHX_UAHC_GPRTBIMAP_HS"
+#define device_bar_BDK_USBHX_UAHC_GPRTBIMAP_HS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GPRTBIMAP_HS(a) (a)
+#define arguments_BDK_USBHX_UAHC_GPRTBIMAP_HS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_grlsid
+ *
+ * UAHC Release ID Register
+ * This is a read-only register that contains the release number of the core.
+ * Internal:
+ * Original name: GSNPSID = Synopsys ID.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.8.
+ */
+union bdk_usbhx_uahc_grlsid
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_grlsid_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t releaseid : 32; /**< [ 31: 0](RO) Software can use this register to configure release-specific features in the driver.
+                                 Internal:
+                                 Synopsys ID
+                                 * SynopsysID[31:16] indicates Core Identification Number. 0x5533 is ASCII for
+                                 U3 (DWC_usb3).
+                                 * SynopsysID[15:0] indicates the release number. Current Release is 2.50a. */
+#else /* Word 0 - Little Endian */
+        uint32_t releaseid : 32; /**< [ 31: 0](RO) Software can use this register to configure release-specific features in the driver.
+                                 Internal:
+                                 Synopsys ID
+                                 * SynopsysID[31:16] indicates Core Identification Number. 0x5533 is ASCII for
+                                 U3 (DWC_usb3).
+                                 * SynopsysID[15:0] indicates the release number. Current Release is 2.50a. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_grlsid_s cn; */
+};
+typedef union bdk_usbhx_uahc_grlsid bdk_usbhx_uahc_grlsid_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GRLSID(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GRLSID(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c120ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GRLSID", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GRLSID(a) bdk_usbhx_uahc_grlsid_t
+#define bustype_BDK_USBHX_UAHC_GRLSID(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GRLSID(a) "USBHX_UAHC_GRLSID"
+#define device_bar_BDK_USBHX_UAHC_GRLSID(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GRLSID(a) (a)
+#define arguments_BDK_USBHX_UAHC_GRLSID(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_grxfifoprihst
+ *
+ * UAHC RX FIFOs DMA Priority Register
+ * This register specifies the relative DMA priority level among the host RXFIFOs (one per USB
+ * bus instance) within the associated speed group (SuperSpeed or high-speed/full-speed/
+ * low-speed). When multiple RXFIFOs compete for DMA service at a given time, the RXDMA arbiter
+ * grants access on a packet-basis in the following manner:
+ *
+ * Among the FIFOs in the same speed group (SuperSpeed or high-speed/full-speed/low-speed):
+ * * High-priority RXFIFOs are granted access using round-robin arbitration.
+ * * Low-priority RXFIFOs are granted access using round-robin arbitration only after high-
+ * priority
+ * RXFIFOs have no further processing to do (i.e., either the RXQs are empty or the corresponding
+ * RXFIFOs do not have the required data).
+ *
+ * The RX DMA arbiter prioritizes the SuperSpeed group or high-speed/full-speed/low-speed group
+ * according to the ratio programmed in
+ * USBH()_UAHC_GDMAHLRATIO.
+ *
+ * For scatter-gather packets, the arbiter grants successive DMA requests to the same FIFO until
+ * the entire packet is completed. The register size corresponds to the number of configured USB
+ * bus instances; for example, in the default configuration, there are 3 USB bus instances (1
+ * SuperSpeed, 1 high-speed, and 1 full-speed/low-speed).
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.9.3.
+ */
+union bdk_usbhx_uahc_grxfifoprihst
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_grxfifoprihst_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_3_31 : 29;
+        uint32_t rx_priority : 3; /**< [ 2: 0](R/W) Each register bit[n] controls the priority (1 = high, 0 = low) of RXFIFO[n] within a speed group. */
+#else /* Word 0 - Little Endian */
+        uint32_t rx_priority : 3; /**< [ 2: 0](R/W) Each register bit[n] controls the priority (1 = high, 0 = low) of RXFIFO[n] within a speed group. */
+        uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_grxfifoprihst_s cn; */
+};
+typedef union bdk_usbhx_uahc_grxfifoprihst bdk_usbhx_uahc_grxfifoprihst_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GRXFIFOPRIHST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GRXFIFOPRIHST(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c61cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GRXFIFOPRIHST", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GRXFIFOPRIHST(a) bdk_usbhx_uahc_grxfifoprihst_t
+#define bustype_BDK_USBHX_UAHC_GRXFIFOPRIHST(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GRXFIFOPRIHST(a) "USBHX_UAHC_GRXFIFOPRIHST"
+#define device_bar_BDK_USBHX_UAHC_GRXFIFOPRIHST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GRXFIFOPRIHST(a) (a)
+#define arguments_BDK_USBHX_UAHC_GRXFIFOPRIHST(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_grxfifosiz#
+ *
+ * UAHC RX FIFO Size Register
+ * The application can program the internal RAM start address/depth of each RxFIFO as shown
+ * below. It is recommended that software use the default value. In Host mode, per-port registers
+ * are implemented. One register per FIFO.
+ *
+ * Reset values = 0:{0x0000_0084} 1:{0x0084_0104} 2:{0x0188_0180}.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.6.2.
+ * INTERNAL: For more information, see the BMU section in Block Descriptions on Synopsys Databook
+ * page 238.
+ */
+union bdk_usbhx_uahc_grxfifosizx
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_grxfifosizx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t rxfstaddr : 16; /**< [ 31: 16](R/W) RxFIFOn RAM start address. This field contains the memory start address for RxFIFOn. The
+                                 reset value is derived from configuration parameters. */
+        uint32_t rxfdep : 16; /**< [ 15: 0](R/W) RxFIFOn depth. This value is in terms of RX RAM Data width.
+                                 minimum value = 0x20, maximum value = 0x4000.
+
+                                 Internal:
+                                 For more information, see the Hardware Integration chapter of the Synopsys
+                                 Databook.
+                                 The reset value is derived from configuration parameters. */
+#else /* Word 0 - Little Endian */
+        uint32_t rxfdep : 16; /**< [ 15: 0](R/W) RxFIFOn depth. This value is in terms of RX RAM Data width.
+                                 minimum value = 0x20, maximum value = 0x4000.
+
+                                 Internal:
+                                 For more information, see the Hardware Integration chapter of the Synopsys
+                                 Databook.
+                                 The reset value is derived from configuration parameters. */
+        uint32_t rxfstaddr : 16; /**< [ 31: 16](R/W) RxFIFOn RAM start address. This field contains the memory start address for RxFIFOn. The
+                                 reset value is derived from configuration parameters. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_grxfifosizx_s cn; */
+};
+typedef union bdk_usbhx_uahc_grxfifosizx bdk_usbhx_uahc_grxfifosizx_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GRXFIFOSIZX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GRXFIFOSIZX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=2)))
+        return 0x86800000c380ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x3);
+    __bdk_csr_fatal("USBHX_UAHC_GRXFIFOSIZX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GRXFIFOSIZX(a,b) bdk_usbhx_uahc_grxfifosizx_t
+#define bustype_BDK_USBHX_UAHC_GRXFIFOSIZX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GRXFIFOSIZX(a,b) "USBHX_UAHC_GRXFIFOSIZX"
+#define device_bar_BDK_USBHX_UAHC_GRXFIFOSIZX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GRXFIFOSIZX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_GRXFIFOSIZX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_grxthrcfg
+ *
+ * UAHC RX Threshold Control Register
+ * In a normal case, an RX burst starts as soon as 1-packet space is available. This works well
+ * as long as the system bus is faster than the USB3.0 bus (a 1024-bytes packet takes ~2.2 us on
+ * the USB bus in SuperSpeed mode). If the system bus latency is larger than 2.2 us to access a
+ * 1024-byte packet, then starting a burst on 1-packet condition leads to an early abort of the
+ * burst causing unnecessary performance reduction. This register allows the configuration of
+ * threshold and burst size control. This feature is enabled by USBRXPKTCNTSEL.
+ *
+ * Receive Path:
+ * * The RX threshold is controlled by USBRXPKTCNT and the RX burst size is controlled by
+ * USBMAXRXBURSTSIZE.
+ * * Selecting optimal RX FIFO size, RX threshold, and RX burst size avoids RX burst aborts due
+ * to overrun if the system bus is slower than USB. Once in a while overrun is OK, and there is
+ * no functional issue.
+ * * Some devices do not support terminating ACK retry. With these devices, host cannot set ACK=0
+ * and Retry=0 and do retry later and you have to retry immediately. For such devices, minimize
+ * retry due to underrun. Setting threshold and burst size guarantees this.
+ * A larger RX threshold affects the performance since the scheduler is idle during this time.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.4.
+ */
+union bdk_usbhx_uahc_grxthrcfg
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_grxthrcfg_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_30_31 : 2;
+        uint32_t usbrxpktcntsel : 1; /**< [ 29: 29](R/W) USB receive-packet-count enable. Enables/disables the USB reception multipacket
+                                 thresholding:
+                                 0 = the core can only start reception on the USB when the RX FIFO has space for at least
+                                 one packet.
+                                 1 = the core can only start reception on the USB when the RX FIFO has space for at least
+                                 USBRXPKTCNT amount of packets.
+                                 This mode is only used for SuperSpeed. */
+        uint32_t reserved_28 : 1;
+        uint32_t usbrxpktcnt : 4; /**< [ 27: 24](R/W) USB receive-packet count. In host-mode, specifies space (in number of packets) that must
+                                 be available in the RX FIFO before the core can start the corresponding USB RX transaction
+                                 (burst).
+
+                                 This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0xF.
+                                 This field must be \<= [USBMAXRXBURSTSIZE]. */
+        uint32_t usbmaxrxburstsize : 5; /**< [ 23: 19](R/W) USB maximum receive-burst size. Specifies the maximum bulk IN burst the core
+                                 should do. When the system bus is slower than the USB, RX FIFO can overrun during a long
+                                 burst.
+
+                                 Program a smaller value to this field to limit the RX burst size that the core can do. It
+                                 only applies to SuperSpeed Bulk, Isochronous, and Interrupt IN endpoints.
+                                 This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0x10. */
+        uint32_t reserved_0_18 : 19;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_18 : 19;
+        uint32_t usbmaxrxburstsize : 5; /**< [ 23: 19](R/W) USB maximum receive-burst size. Specifies the maximum bulk IN burst the core
+                                 should do. When the system bus is slower than the USB, RX FIFO can overrun during a long
+                                 burst.
+
+                                 Program a smaller value to this field to limit the RX burst size that the core can do. It
+                                 only applies to SuperSpeed Bulk, Isochronous, and Interrupt IN endpoints.
+                                 This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0x10. */
+        uint32_t usbrxpktcnt : 4; /**< [ 27: 24](R/W) USB receive-packet count. In host-mode, specifies space (in number of packets) that must
+                                 be available in the RX FIFO before the core can start the corresponding USB RX transaction
+                                 (burst).
+
+                                 This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0xF.
+                                 This field must be \<= [USBMAXRXBURSTSIZE]. */
+        uint32_t reserved_28 : 1;
+        uint32_t usbrxpktcntsel : 1; /**< [ 29: 29](R/W) USB receive-packet-count enable. Enables/disables the USB reception multipacket
+                                 thresholding:
+                                 0 = the core can only start reception on the USB when the RX FIFO has space for at least
+                                 one packet.
+                                 1 = the core can only start reception on the USB when the RX FIFO has space for at least
+                                 USBRXPKTCNT amount of packets.
+                                 This mode is only used for SuperSpeed. */
+        uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+    } s;
+    struct bdk_usbhx_uahc_grxthrcfg_cn
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_30_31 : 2;
+        uint32_t usbrxpktcntsel : 1; /**< [ 29: 29](R/W) USB receive-packet-count enable. Enables/disables the USB reception multipacket
+                                 thresholding:
+                                 0 = the core can only start reception on the USB when the RX FIFO has space for at least
+                                 one packet.
+                                 1 = the core can only start reception on the USB when the RX FIFO has space for at least
+                                 USBRXPKTCNT amount of packets.
+                                 This mode is only used for SuperSpeed. */
+        uint32_t reserved_28 : 1;
+        uint32_t usbrxpktcnt : 4; /**< [ 27: 24](R/W) USB receive-packet count. In host-mode, specifies space (in number of packets) that must
+                                 be available in the RX FIFO before the core can start the corresponding USB RX transaction
+                                 (burst).
+
+                                 This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0xF.
+                                 This field must be \<= [USBMAXRXBURSTSIZE]. */
+        uint32_t usbmaxrxburstsize : 5; /**< [ 23: 19](R/W) USB maximum receive-burst size. Specifies the maximum bulk IN burst the core
+                                 should do. When the system bus is slower than the USB, RX FIFO can overrun during a long
+                                 burst.
+
+                                 Program a smaller value to this field to limit the RX burst size that the core can do. It
+                                 only applies to SuperSpeed Bulk, Isochronous, and Interrupt IN endpoints.
+                                 This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0x10. */
+        uint32_t reserved_16_18 : 3;
+        uint32_t reserved_15 : 1;
+        uint32_t reserved_11_14 : 4;
+        uint32_t reserved_0_10 : 11;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_10 : 11;
+        uint32_t reserved_11_14 : 4;
+        uint32_t reserved_15 : 1;
+        uint32_t reserved_16_18 : 3;
+        uint32_t usbmaxrxburstsize : 5; /**< [ 23: 19](R/W) USB maximum receive-burst size. Specifies the maximum bulk IN burst the core
+                                 should do. When the system bus is slower than the USB, RX FIFO can overrun during a long
+                                 burst.
+
+                                 Program a smaller value to this field to limit the RX burst size that the core can do. It
+                                 only applies to SuperSpeed Bulk, Isochronous, and Interrupt IN endpoints.
+                                 This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0x10. */
+        uint32_t usbrxpktcnt : 4; /**< [ 27: 24](R/W) USB receive-packet count. In host-mode, specifies space (in number of packets) that must
+                                 be available in the RX FIFO before the core can start the corresponding USB RX transaction
+                                 (burst).
+
+                                 This field is only valid when [USBRXPKTCNTSEL] = 1. The valid values are from 0x1 to 0xF.
+                                 This field must be \<= [USBMAXRXBURSTSIZE]. */
+        uint32_t reserved_28 : 1;
+        uint32_t usbrxpktcntsel : 1; /**< [ 29: 29](R/W) USB receive-packet-count enable. Enables/disables the USB reception multipacket
+                                 thresholding:
+                                 0 = the core can only start reception on the USB when the RX FIFO has space for at least
+                                 one packet.
+                                 1 = the core can only start reception on the USB when the RX FIFO has space for at least
+                                 USBRXPKTCNT amount of packets.
+                                 This mode is only used for SuperSpeed. */
+        uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+    } cn;
+};
+typedef union bdk_usbhx_uahc_grxthrcfg bdk_usbhx_uahc_grxthrcfg_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GRXTHRCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GRXTHRCFG(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000c10cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_GRXTHRCFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GRXTHRCFG(a) bdk_usbhx_uahc_grxthrcfg_t
+#define bustype_BDK_USBHX_UAHC_GRXTHRCFG(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GRXTHRCFG(a) "USBHX_UAHC_GRXTHRCFG"
+#define device_bar_BDK_USBHX_UAHC_GRXTHRCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GRXTHRCFG(a) (a)
+#define arguments_BDK_USBHX_UAHC_GRXTHRCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gsbuscfg0
+ *
+ * UAHC Bus Configuration Register 0
+ * This register can be used to configure the core after power-on or a change in mode of
+ * operation. This register mainly contains AXI system-related configuration parameters. Do not
+ * change this register after the initial programming. The application must program this register
+ * before starting any transactions on AXI. When [INCRBRSTENA] is enabled, it has the highest
+ * priority over other burst lengths. The core always performs the largest burst when enabled.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * The AXI cache signals are not connected in Cavium's hookup, so the *REQINFO fields
+ * can be ignored.
+ * INTERNAL: See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.1.
+ */
+/* NOTE(review): generated CSR layout -- the bit-field block is emitted
+ twice (big- and little-endian member order) so the same field names map
+ onto the hardware layout under either __BYTE_ORDER. Do not hand-edit
+ the field order. */
+union bdk_usbhx_uahc_gsbuscfg0
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gsbuscfg0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t datrdreqinfo : 4; /**< [ 31: 28](R/W) AXI-cache for data-read operations. Always set to 0x0. */
+ uint32_t desrdreqinfo : 4; /**< [ 27: 24](R/W) AXI-cache for descriptor-read operations. Always set to 0x0. */
+ uint32_t datwrreqinfo : 4; /**< [ 23: 20](R/W) AXI-cache for data-write operations. Always set to 0x0. */
+ uint32_t deswrreqinfo : 4; /**< [ 19: 16](R/W) AXI-cache for descriptor-write operations. Always set to 0x0. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t datbigend : 1; /**< [ 11: 11](R/W) Data access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBH()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t descbigend : 1; /**< [ 10: 10](R/W) Descriptor access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBH()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t incr256brstena : 1; /**< [ 7: 7](R/W) INCR256 burst-type enable. Always set to 0. */
+ uint32_t incr128brstena : 1; /**< [ 6: 6](R/W) INCR128 burst-type enable. Always set to 0. */
+ uint32_t incr64brstena : 1; /**< [ 5: 5](R/W) INCR64 burst-type enable. Always set to 0. */
+ uint32_t incr32brstena : 1; /**< [ 4: 4](R/W) INCR32 burst-type enable. Always set to 0. */
+ uint32_t incr16brstena : 1; /**< [ 3: 3](R/W) INCR16 burst-type enable. Allows the AXI master to generate INCR 16-beat bursts. */
+ uint32_t incr8brstena : 1; /**< [ 2: 2](R/W) INCR8 burst-type enable. Allows the AXI master to generate INCR eight-beat bursts. */
+ uint32_t incr4brstena : 1; /**< [ 1: 1](R/W) INCR4 burst-type enable. Allows the AXI master to generate INCR four-beat bursts. */
+ uint32_t incrbrstena : 1; /**< [ 0: 0](R/W) Undefined-length INCR burst-type enable.
+ This bit determines the set of burst lengths to be utilized by the master interface. It
+ works in conjunction with the GSBUSCFG0[7:1] enables (INCR*BRSTENA).
+
+ If disabled, the AXI master will use only the burst lengths
+ 1, 4, 8, 16 (assuming the INCR*BRSTENA are set to their reset values).
+
+ If enabled, the AXI master uses any length less than or equal to the largest-enabled burst
+ length based on the INCR*BRSTENA fields. */
+#else /* Word 0 - Little Endian */
+ uint32_t incrbrstena : 1; /**< [ 0: 0](R/W) Undefined-length INCR burst-type enable.
+ This bit determines the set of burst lengths to be utilized by the master interface. It
+ works in conjunction with the GSBUSCFG0[7:1] enables (INCR*BRSTENA).
+
+ If disabled, the AXI master will use only the burst lengths
+ 1, 4, 8, 16 (assuming the INCR*BRSTENA are set to their reset values).
+
+ If enabled, the AXI master uses any length less than or equal to the largest-enabled burst
+ length based on the INCR*BRSTENA fields. */
+ uint32_t incr4brstena : 1; /**< [ 1: 1](R/W) INCR4 burst-type enable. Allows the AXI master to generate INCR four-beat bursts. */
+ uint32_t incr8brstena : 1; /**< [ 2: 2](R/W) INCR8 burst-type enable. Allows the AXI master to generate INCR eight-beat bursts. */
+ uint32_t incr16brstena : 1; /**< [ 3: 3](R/W) INCR16 burst-type enable. Allows the AXI master to generate INCR 16-beat bursts. */
+ uint32_t incr32brstena : 1; /**< [ 4: 4](R/W) INCR32 burst-type enable. Always set to 0. */
+ uint32_t incr64brstena : 1; /**< [ 5: 5](R/W) INCR64 burst-type enable. Always set to 0. */
+ uint32_t incr128brstena : 1; /**< [ 6: 6](R/W) INCR128 burst-type enable. Always set to 0. */
+ uint32_t incr256brstena : 1; /**< [ 7: 7](R/W) INCR256 burst-type enable. Always set to 0. */
+ uint32_t reserved_8_9 : 2;
+ uint32_t descbigend : 1; /**< [ 10: 10](R/W) Descriptor access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBH()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t datbigend : 1; /**< [ 11: 11](R/W) Data access is big-endian. Keep this set to 0 (little-endian) and use the
+ USBH()_UCTL_SHIM_CFG[DMA_ENDIAN_MODE] setting instead.
+
+ For diagnostic use only, drivers should be written assuming little-endian. */
+ uint32_t reserved_12_15 : 4;
+ uint32_t deswrreqinfo : 4; /**< [ 19: 16](R/W) AXI-cache for descriptor-write operations. Always set to 0x0. */
+ uint32_t datwrreqinfo : 4; /**< [ 23: 20](R/W) AXI-cache for data-write operations. Always set to 0x0. */
+ uint32_t desrdreqinfo : 4; /**< [ 27: 24](R/W) AXI-cache for descriptor-read operations. Always set to 0x0. */
+ uint32_t datrdreqinfo : 4; /**< [ 31: 28](R/W) AXI-cache for data-read operations. Always set to 0x0. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_gsbuscfg0_s cn; */
+};
+typedef union bdk_usbhx_uahc_gsbuscfg0 bdk_usbhx_uahc_gsbuscfg0_t;
+
+/* Return the NCB physical address of USBH(a)_UAHC_GSBUSCFG0.
+ Only CN88XX with a <= 1 is valid; any other combination reports a
+ fatal CSR error via __bdk_csr_fatal() (presumably noreturn -- the
+ function has no fall-through return). */
+static inline uint64_t BDK_USBHX_UAHC_GSBUSCFG0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GSBUSCFG0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c100ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GSBUSCFG0", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata; NOTE(review): presumably consumed by
+ the generic BDK_CSR_* accessor macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_USBHX_UAHC_GSBUSCFG0(a) bdk_usbhx_uahc_gsbuscfg0_t
+#define bustype_BDK_USBHX_UAHC_GSBUSCFG0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GSBUSCFG0(a) "USBHX_UAHC_GSBUSCFG0"
+#define device_bar_BDK_USBHX_UAHC_GSBUSCFG0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GSBUSCFG0(a) (a)
+#define arguments_BDK_USBHX_UAHC_GSBUSCFG0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gsbuscfg1
+ *
+ * UAHC Bus Configuration Register 1
+ * This register can be used to configure the core after power-on or a change in mode of
+ * operation. This register mainly contains AXI system-related configuration parameters. Do not
+ * change this register after the initial programming. The application must program this register
+ * before starting any transactions on AXI.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.2.
+ */
+/* NOTE(review): generated CSR layout -- the bit-field block is emitted
+ twice (big- and little-endian member order) so the same field names map
+ onto the hardware layout under either __BYTE_ORDER. Do not hand-edit
+ the field order. */
+union bdk_usbhx_uahc_gsbuscfg1
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gsbuscfg1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_13_31 : 19;
+ uint32_t en1kpage : 1; /**< [ 12: 12](R/W) 1K page-boundary enable.
+ 0 = Break transfers at the 4K page boundary (default).
+ 1 = Break transfers at the 1K page boundary. */
+ uint32_t pipetranslimit : 4; /**< [ 11: 8](R/W) AXI pipelined transfers burst-request limit. Controls the number of outstanding pipelined
+ transfers requests the AXI master will push to the AXI slave. Once the AXI master reaches
+ this limit, it does not make more requests on the AXI ARADDR and AWADDR buses until the
+ associated data phases complete. This field is encoded as follows:
+ 0x0 = 1 request. 0x8 = 9 requests.
+ 0x1 = 2 requests. 0x9 = 10 requests.
+ 0x2 = 3 requests. 0xA = 11 requests.
+ 0x3 = 4 requests. 0xB = 12 requests.
+ 0x4 = 5 requests. 0xC = 13 requests.
+ 0x5 = 6 requests. 0xD = 14 requests.
+ 0x6 = 7 requests. 0xE = 15 requests.
+ 0x7 = 8 requests. 0xF = 16 requests. */
+ uint32_t reserved_0_7 : 8;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_7 : 8;
+ uint32_t pipetranslimit : 4; /**< [ 11: 8](R/W) AXI pipelined transfers burst-request limit. Controls the number of outstanding pipelined
+ transfers requests the AXI master will push to the AXI slave. Once the AXI master reaches
+ this limit, it does not make more requests on the AXI ARADDR and AWADDR buses until the
+ associated data phases complete. This field is encoded as follows:
+ 0x0 = 1 request. 0x8 = 9 requests.
+ 0x1 = 2 requests. 0x9 = 10 requests.
+ 0x2 = 3 requests. 0xA = 11 requests.
+ 0x3 = 4 requests. 0xB = 12 requests.
+ 0x4 = 5 requests. 0xC = 13 requests.
+ 0x5 = 6 requests. 0xD = 14 requests.
+ 0x6 = 7 requests. 0xE = 15 requests.
+ 0x7 = 8 requests. 0xF = 16 requests. */
+ uint32_t en1kpage : 1; /**< [ 12: 12](R/W) 1K page-boundary enable.
+ 0 = Break transfers at the 4K page boundary (default).
+ 1 = Break transfers at the 1K page boundary. */
+ uint32_t reserved_13_31 : 19;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_gsbuscfg1_s cn; */
+};
+typedef union bdk_usbhx_uahc_gsbuscfg1 bdk_usbhx_uahc_gsbuscfg1_t;
+
+/* Return the NCB physical address of USBH(a)_UAHC_GSBUSCFG1.
+ Only CN88XX with a <= 1 is valid; any other combination reports a
+ fatal CSR error via __bdk_csr_fatal() (presumably noreturn -- the
+ function has no fall-through return). */
+static inline uint64_t BDK_USBHX_UAHC_GSBUSCFG1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GSBUSCFG1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c104ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GSBUSCFG1", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata; NOTE(review): presumably consumed by
+ the generic BDK_CSR_* accessor macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_USBHX_UAHC_GSBUSCFG1(a) bdk_usbhx_uahc_gsbuscfg1_t
+#define bustype_BDK_USBHX_UAHC_GSBUSCFG1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GSBUSCFG1(a) "USBHX_UAHC_GSBUSCFG1"
+#define device_bar_BDK_USBHX_UAHC_GSBUSCFG1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GSBUSCFG1(a) (a)
+#define arguments_BDK_USBHX_UAHC_GSBUSCFG1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gsts
+ *
+ * UAHC Core Status Register
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.6.
+ */
+/* NOTE(review): generated CSR layout -- the bit-field block is emitted
+ twice (big- and little-endian member order) so the same field names map
+ onto the hardware layout under either __BYTE_ORDER. Do not hand-edit
+ the field order. */
+union bdk_usbhx_uahc_gsts
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t cbelt : 12; /**< [ 31: 20](RO/H) Current BELT value. In host mode, indicates the minimum value of all received device BELT
+ values and the BELT value that is set by the set latency tolerance value command. */
+ uint32_t reserved_8_19 : 12;
+ uint32_t host_ip : 1; /**< [ 7: 7](RO/H) Host interrupt pending. Indicates that there is a pending interrupt pertaining to xHC in
+ the host-event queue. */
+ uint32_t reserved_6 : 1;
+ uint32_t csrtimeout : 1; /**< [ 5: 5](R/W1C/H) CSR timeout. When set to 1, indicates that software performed a write or read operation to
+ a core register that could not be completed within 0xFFFF controller-clock cycles. */
+ uint32_t buserraddrvld : 1; /**< [ 4: 4](R/W1C/H) Bus-error address valid. Indicates that USBH()_UAHC_GBUSERRADDR is valid and reports the
+ first bus address that encounters a bus error. */
+ uint32_t reserved_2_3 : 2;
+ uint32_t curmod : 2; /**< [ 1: 0](RO) Current mode of operation. Always 0x1.
+ Internal:
+ May vary from 0x1 if you write
+ USBH()_UAHC_GCTL[PRTCAPDIR]!=0x1. */
+#else /* Word 0 - Little Endian */
+ uint32_t curmod : 2; /**< [ 1: 0](RO) Current mode of operation. Always 0x1.
+ Internal:
+ May vary from 0x1 if you write
+ USBH()_UAHC_GCTL[PRTCAPDIR]!=0x1. */
+ uint32_t reserved_2_3 : 2;
+ uint32_t buserraddrvld : 1; /**< [ 4: 4](R/W1C/H) Bus-error address valid. Indicates that USBH()_UAHC_GBUSERRADDR is valid and reports the
+ first bus address that encounters a bus error. */
+ uint32_t csrtimeout : 1; /**< [ 5: 5](R/W1C/H) CSR timeout. When set to 1, indicates that software performed a write or read operation to
+ a core register that could not be completed within 0xFFFF controller-clock cycles. */
+ uint32_t reserved_6 : 1;
+ uint32_t host_ip : 1; /**< [ 7: 7](RO/H) Host interrupt pending. Indicates that there is a pending interrupt pertaining to xHC in
+ the host-event queue. */
+ uint32_t reserved_8_19 : 12;
+ uint32_t cbelt : 12; /**< [ 31: 20](RO/H) Current BELT value. In host mode, indicates the minimum value of all received device BELT
+ values and the BELT value that is set by the set latency tolerance value command. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_gsts_s cn; */
+};
+typedef union bdk_usbhx_uahc_gsts bdk_usbhx_uahc_gsts_t;
+
+/* Return the NCB physical address of USBH(a)_UAHC_GSTS.
+ Only CN88XX with a <= 1 is valid; any other combination reports a
+ fatal CSR error via __bdk_csr_fatal() (presumably noreturn -- the
+ function has no fall-through return). */
+static inline uint64_t BDK_USBHX_UAHC_GSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GSTS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c118ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GSTS", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata; NOTE(review): presumably consumed by
+ the generic BDK_CSR_* accessor macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_USBHX_UAHC_GSTS(a) bdk_usbhx_uahc_gsts_t
+#define bustype_BDK_USBHX_UAHC_GSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GSTS(a) "USBHX_UAHC_GSTS"
+#define device_bar_BDK_USBHX_UAHC_GSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GSTS(a) (a)
+#define arguments_BDK_USBHX_UAHC_GSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gtxfifoprihst
+ *
+ * UAHC TX FIFOs DMA Priority Register
+ * This register specifies the relative DMA priority level among the host TXFIFOs (one per USB
+ * bus instance) within the associated speed group (SuperSpeed or high-speed/full-speed/
+ * low-speed). When multiple TXFIFOs compete for DMA service at a given time, the TXDMA arbiter
+ * grants access on a packet-basis in the following manner:
+ *
+ * Among the FIFOs in the same speed group (SuperSpeed or high-speed/full-speed/low-speed):
+ *
+ * * High-priority TXFIFOs are granted access using round-robin arbitration.
+ * * Low-priority TXFIFOs are granted access using round-robin arbitration only after high-
+ * priority
+ * TXFIFOs have no further processing to do (i.e., either the TXQs are empty or the corresponding
+ * TXFIFOs do not have the required data).
+ *
+ * The TX DMA arbiter prioritizes the SuperSpeed group or high-speed/full-speed/low-speed group
+ * according to the ratio programmed in
+ * USBH()_UAHC_GDMAHLRATIO.
+ *
+ * For scatter-gather packets, the arbiter grants successive DMA requests to the same FIFO until
+ * the entire packet is completed. The register size corresponds to the number of configured USB
+ * bus instances; for example, in the default configuration, there are 3 USB bus instances (1
+ * SuperSpeed, 1 high-speed, and 1 full-speed/low-speed).
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.9.2.
+ */
+/* NOTE(review): generated CSR layout -- the bit-field block is emitted
+ twice (big- and little-endian member order) so the same field names map
+ onto the hardware layout under either __BYTE_ORDER. Do not hand-edit
+ the field order. */
+union bdk_usbhx_uahc_gtxfifoprihst
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gtxfifoprihst_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_3_31 : 29;
+ uint32_t tx_priority : 3; /**< [ 2: 0](R/W) Each register bit n controls the priority (1: high, 0: low) of TX FIFO\<n\> within a speed
+ group. */
+#else /* Word 0 - Little Endian */
+ uint32_t tx_priority : 3; /**< [ 2: 0](R/W) Each register bit n controls the priority (1: high, 0: low) of TX FIFO\<n\> within a speed
+ group. */
+ uint32_t reserved_3_31 : 29;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_gtxfifoprihst_s cn; */
+};
+typedef union bdk_usbhx_uahc_gtxfifoprihst bdk_usbhx_uahc_gtxfifoprihst_t;
+
+/* Return the NCB physical address of USBH(a)_UAHC_GTXFIFOPRIHST.
+ Only CN88XX with a <= 1 is valid; any other combination reports a
+ fatal CSR error via __bdk_csr_fatal() (presumably noreturn -- the
+ function has no fall-through return). */
+static inline uint64_t BDK_USBHX_UAHC_GTXFIFOPRIHST(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GTXFIFOPRIHST(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c618ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GTXFIFOPRIHST", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata; NOTE(review): presumably consumed by
+ the generic BDK_CSR_* accessor macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_USBHX_UAHC_GTXFIFOPRIHST(a) bdk_usbhx_uahc_gtxfifoprihst_t
+#define bustype_BDK_USBHX_UAHC_GTXFIFOPRIHST(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GTXFIFOPRIHST(a) "USBHX_UAHC_GTXFIFOPRIHST"
+#define device_bar_BDK_USBHX_UAHC_GTXFIFOPRIHST(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GTXFIFOPRIHST(a) (a)
+#define arguments_BDK_USBHX_UAHC_GTXFIFOPRIHST(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gtxfifosiz#
+ *
+ * UAHC TX FIFO Size Registers
+ * This register holds the internal RAM start address/depth of each TxFIFO implemented. Unless
+ * packet size/buffer size for each endpoint is different and application-specific, it is
+ * recommended that the software use the default value. One register per FIFO. One register per
+ * FIFO.
+ *
+ * Reset values = 0:{0x0000_0082} 1:{0x0082_0103} 2:{0x0185_0205}.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.6.1.
+ * INTERNAL: For more information, refer to the BMU section in Block Descriptions on Synopsys
+ * Databook page 238.
+ */
+/* NOTE(review): generated CSR layout -- the bit-field block is emitted
+ twice (big- and little-endian member order) so the same field names map
+ onto the hardware layout under either __BYTE_ORDER. Do not hand-edit
+ the field order. */
+union bdk_usbhx_uahc_gtxfifosizx
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gtxfifosizx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t txfstaddr : 16; /**< [ 31: 16](R/W) Transmit FIFOn RAM start address. Contains the memory start address for TxFIFOn. The reset
+ is value derived from configuration parameters. */
+ uint32_t txfdep : 16; /**< [ 15: 0](R/W) TxFIFOn depth. This value is in terms of TX RAM data width.
+ minimum value = 0x20, maximum value = 0x8000.
+
+ Internal:
+ For more information, see the Hardware Integration chapter of the Synopsys
+ Databook.
+ The reset value derived from configuration parameters. */
+#else /* Word 0 - Little Endian */
+ uint32_t txfdep : 16; /**< [ 15: 0](R/W) TxFIFOn depth. This value is in terms of TX RAM data width.
+ minimum value = 0x20, maximum value = 0x8000.
+
+ Internal:
+ For more information, see the Hardware Integration chapter of the Synopsys
+ Databook.
+ The reset value derived from configuration parameters. */
+ uint32_t txfstaddr : 16; /**< [ 31: 16](R/W) Transmit FIFOn RAM start address. Contains the memory start address for TxFIFOn. The reset
+ is value derived from configuration parameters. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_gtxfifosizx_s cn; */
+};
+typedef union bdk_usbhx_uahc_gtxfifosizx bdk_usbhx_uahc_gtxfifosizx_t;
+
+/* Return the NCB physical address of USBH(a)_UAHC_GTXFIFOSIZ(b):
+ 'a' selects the USB controller (a <= 1 on CN88XX), 'b' the TX FIFO
+ instance (b <= 2; registers are 4 bytes apart). Any other combination
+ reports a fatal CSR error via __bdk_csr_fatal() (presumably noreturn --
+ the function has no fall-through return). */
+static inline uint64_t BDK_USBHX_UAHC_GTXFIFOSIZX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GTXFIFOSIZX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=2)))
+ return 0x86800000c300ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x3);
+ __bdk_csr_fatal("USBHX_UAHC_GTXFIFOSIZX", 2, a, b, 0, 0);
+}
+
+/* Per-register accessor metadata; NOTE(review): presumably consumed by
+ the generic BDK_CSR_* accessor macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_USBHX_UAHC_GTXFIFOSIZX(a,b) bdk_usbhx_uahc_gtxfifosizx_t
+#define bustype_BDK_USBHX_UAHC_GTXFIFOSIZX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GTXFIFOSIZX(a,b) "USBHX_UAHC_GTXFIFOSIZX"
+#define device_bar_BDK_USBHX_UAHC_GTXFIFOSIZX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GTXFIFOSIZX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_GTXFIFOSIZX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gtxthrcfg
+ *
+ * UAHC TX Threshold Control Register
+ * In a normal case, a TX burst starts as soon as one packet is prefetched. This works well as
+ * long as the system bus is faster than the USB3.0 bus (a 1024-bytes packet takes ~2.2 us on the
+ * USB bus in SuperSpeed mode). If the system bus latency is larger than 2.2 us to access a
+ * 1024-byte packet, then starting a burst on 1-packet condition leads to an early abort of the
+ * burst causing unnecessary performance reduction. This register allows the configuration of
+ * threshold and burst size control. This feature is enabled by [USBTXPKTCNTSEL].
+ *
+ * Transmit path:
+ * * The TX threshold is controlled by [USBTXPKTCNT], and the TX burst size is controlled by
+ * [USBMAXTXBURSTSIZE].
+ * * Selecting optimal TX FIFO size, TX threshold, and TX burst size avoids TX burst aborts due
+ * to an underrun if the system bus is slower than USB. Once in a while an underrun is OK, and
+ * there is no functional issue.
+ * * A larger threshold affects the performance, since the scheduler is idle during this time.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.3.
+ */
+/* NOTE(review): generated CSR layout -- the bit-field block is emitted
+ twice (big- and little-endian member order) so the same field names map
+ onto the hardware layout under either __BYTE_ORDER. The 'cn' variant
+ below differs from 's' only in how the low reserved bits are subdivided
+ (reserved_0_15 vs. reserved_0_9/10_13/14_15). Do not hand-edit. */
+union bdk_usbhx_uahc_gtxthrcfg
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gtxthrcfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t usbtxpktcntsel : 1; /**< [ 29: 29](R/W) USB transmit packet-count enable. Enables/disables the USB transmission multipacket
+ thresholding:
+ 0 = USB transmission multipacket thresholding is disabled, the core can only start
+ transmission on the USB after the entire packet has been fetched into the corresponding
+ TXFIFO.
+ 1 = USB transmission multipacket thresholding is enabled. The core can only start
+ transmission on the USB after USBTXPKTCNT amount of packets for the USB transaction
+ (burst) are already in the corresponding TXFIFO.
+ This mode is only used for SuperSpeed. */
+ uint32_t reserved_28 : 1;
+ uint32_t usbtxpktcnt : 4; /**< [ 27: 24](R/W) USB transmit-packet count. Specifies the number of packets that must be in the TXFIFO
+ before the core can start transmission for the corresponding USB transaction (burst). This
+ field is only valid when [USBTXPKTCNTSEL] = 1. Valid values are from 0x1 to 0xF.
+ This field must be \<= [USBMAXTXBURSTSIZE]. */
+ uint32_t usbmaxtxburstsize : 8; /**< [ 23: 16](R/W) USB maximum TX burst size. When [USBTXPKTCNTSEL] = 1, this field specifies the
+ maximum bulk OUT burst the core should do. When the system bus is slower than
+ the USB, TX FIFO can underrun during a long burst. Program a smaller value to
+ this field to limit the TX burst size that the core can do. It only applies to
+ SuperSpeed bulk, isochronous, and interrupt OUT endpoints in the host
+ mode. Valid values are from 0x1 to 0x10. */
+ uint32_t reserved_0_15 : 16;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_15 : 16;
+ uint32_t usbmaxtxburstsize : 8; /**< [ 23: 16](R/W) USB maximum TX burst size. When [USBTXPKTCNTSEL] = 1, this field specifies the
+ maximum bulk OUT burst the core should do. When the system bus is slower than
+ the USB, TX FIFO can underrun during a long burst. Program a smaller value to
+ this field to limit the TX burst size that the core can do. It only applies to
+ SuperSpeed bulk, isochronous, and interrupt OUT endpoints in the host
+ mode. Valid values are from 0x1 to 0x10. */
+ uint32_t usbtxpktcnt : 4; /**< [ 27: 24](R/W) USB transmit-packet count. Specifies the number of packets that must be in the TXFIFO
+ before the core can start transmission for the corresponding USB transaction (burst). This
+ field is only valid when [USBTXPKTCNTSEL] = 1. Valid values are from 0x1 to 0xF.
+ This field must be \<= [USBMAXTXBURSTSIZE]. */
+ uint32_t reserved_28 : 1;
+ uint32_t usbtxpktcntsel : 1; /**< [ 29: 29](R/W) USB transmit packet-count enable. Enables/disables the USB transmission multipacket
+ thresholding:
+ 0 = USB transmission multipacket thresholding is disabled, the core can only start
+ transmission on the USB after the entire packet has been fetched into the corresponding
+ TXFIFO.
+ 1 = USB transmission multipacket thresholding is enabled. The core can only start
+ transmission on the USB after USBTXPKTCNT amount of packets for the USB transaction
+ (burst) are already in the corresponding TXFIFO.
+ This mode is only used for SuperSpeed. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbhx_uahc_gtxthrcfg_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_30_31 : 2;
+ uint32_t usbtxpktcntsel : 1; /**< [ 29: 29](R/W) USB transmit packet-count enable. Enables/disables the USB transmission multipacket
+ thresholding:
+ 0 = USB transmission multipacket thresholding is disabled, the core can only start
+ transmission on the USB after the entire packet has been fetched into the corresponding
+ TXFIFO.
+ 1 = USB transmission multipacket thresholding is enabled. The core can only start
+ transmission on the USB after USBTXPKTCNT amount of packets for the USB transaction
+ (burst) are already in the corresponding TXFIFO.
+ This mode is only used for SuperSpeed. */
+ uint32_t reserved_28 : 1;
+ uint32_t usbtxpktcnt : 4; /**< [ 27: 24](R/W) USB transmit-packet count. Specifies the number of packets that must be in the TXFIFO
+ before the core can start transmission for the corresponding USB transaction (burst). This
+ field is only valid when [USBTXPKTCNTSEL] = 1. Valid values are from 0x1 to 0xF.
+ This field must be \<= [USBMAXTXBURSTSIZE]. */
+ uint32_t usbmaxtxburstsize : 8; /**< [ 23: 16](R/W) USB maximum TX burst size. When [USBTXPKTCNTSEL] = 1, this field specifies the
+ maximum bulk OUT burst the core should do. When the system bus is slower than
+ the USB, TX FIFO can underrun during a long burst. Program a smaller value to
+ this field to limit the TX burst size that the core can do. It only applies to
+ SuperSpeed bulk, isochronous, and interrupt OUT endpoints in the host
+ mode. Valid values are from 0x1 to 0x10. */
+ uint32_t reserved_14_15 : 2;
+ uint32_t reserved_10_13 : 4;
+ uint32_t reserved_0_9 : 10;
+#else /* Word 0 - Little Endian */
+ uint32_t reserved_0_9 : 10;
+ uint32_t reserved_10_13 : 4;
+ uint32_t reserved_14_15 : 2;
+ uint32_t usbmaxtxburstsize : 8; /**< [ 23: 16](R/W) USB maximum TX burst size. When [USBTXPKTCNTSEL] = 1, this field specifies the
+ maximum bulk OUT burst the core should do. When the system bus is slower than
+ the USB, TX FIFO can underrun during a long burst. Program a smaller value to
+ this field to limit the TX burst size that the core can do. It only applies to
+ SuperSpeed bulk, isochronous, and interrupt OUT endpoints in the host
+ mode. Valid values are from 0x1 to 0x10. */
+ uint32_t usbtxpktcnt : 4; /**< [ 27: 24](R/W) USB transmit-packet count. Specifies the number of packets that must be in the TXFIFO
+ before the core can start transmission for the corresponding USB transaction (burst). This
+ field is only valid when [USBTXPKTCNTSEL] = 1. Valid values are from 0x1 to 0xF.
+ This field must be \<= [USBMAXTXBURSTSIZE]. */
+ uint32_t reserved_28 : 1;
+ uint32_t usbtxpktcntsel : 1; /**< [ 29: 29](R/W) USB transmit packet-count enable. Enables/disables the USB transmission multipacket
+ thresholding:
+ 0 = USB transmission multipacket thresholding is disabled, the core can only start
+ transmission on the USB after the entire packet has been fetched into the corresponding
+ TXFIFO.
+ 1 = USB transmission multipacket thresholding is enabled. The core can only start
+ transmission on the USB after USBTXPKTCNT amount of packets for the USB transaction
+ (burst) are already in the corresponding TXFIFO.
+ This mode is only used for SuperSpeed. */
+ uint32_t reserved_30_31 : 2;
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_usbhx_uahc_gtxthrcfg bdk_usbhx_uahc_gtxthrcfg_t;
+
+/* Return the NCB physical address of USBH(a)_UAHC_GTXTHRCFG.
+ Only CN88XX with a <= 1 is valid; any other combination reports a
+ fatal CSR error via __bdk_csr_fatal() (presumably noreturn -- the
+ function has no fall-through return). */
+static inline uint64_t BDK_USBHX_UAHC_GTXTHRCFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GTXTHRCFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c108ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GTXTHRCFG", 1, a, 0, 0, 0);
+}
+
+/* Per-register accessor metadata; NOTE(review): presumably consumed by
+ the generic BDK_CSR_* accessor macros -- confirm against bdk-csr.h. */
+#define typedef_BDK_USBHX_UAHC_GTXTHRCFG(a) bdk_usbhx_uahc_gtxthrcfg_t
+#define bustype_BDK_USBHX_UAHC_GTXTHRCFG(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GTXTHRCFG(a) "USBHX_UAHC_GTXTHRCFG"
+#define device_bar_BDK_USBHX_UAHC_GTXTHRCFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GTXTHRCFG(a) (a)
+#define arguments_BDK_USBHX_UAHC_GTXTHRCFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_guctl
+ *
+ * UAHC Core User-Control Register
+ * This register provides a few options for the software to control the core behavior in the host
+ * mode. Most of the options are used to improve host inter-operability with different devices.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.11.
+ */
+union bdk_usbhx_uahc_guctl
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_guctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t refclkper : 10; /**< [ 31: 22](R/W) Reference-clock period. Indicates (in terms of ns) the period of REF_CLK. The default
+ value is set to 0x8
+ (8 ns/125 MHz). This field must be updated during power on initialization if
+ USBH()_UAHC_GCTL[SOFITPSYNC] = 1 or USBH()_UAHC_GFLADJ [GFLADJ_REFCLK_LPM_SEL] = 1. The
+ programmable maximum value 62 ns, and the minimum value is 8 ns. You use a reference clock
+ with a period that is a integer multiple, so that ITP can meet the jitter margin of 32 ns.
+ The allowable REF_CLK frequencies whose period is not integer multiples are
+ 16/17/19.2/24/39.7 MHz.
+
+ This field should not be set to 0x0 at any time. If you do not plan to use this feature,
+ then you need to set this field to 0x8, the default value. */
+ uint32_t noextrdl : 1; /**< [ 21: 21](R/W) No extra delay between SOF and the first packet.
+ Some high-speed devices misbehave when the host sends a packet immediately after an SOF.
+ However, adding an extra delay between an SOF and the first packet can reduce the USB data
+ rate and performance.
+
+ This bit is used to control whether the host should wait for 2 us before it sends the
+ first packet after a SOF, or not. You can set this bit to 1 to improve the performance if
+ those problematic devices are not a concern in your host environment.
+ 0 = host waits for 2 us after an SOF before it sends the first USB packet.
+ 1 = host does not wait after an SOF before it sends the first USB packet. */
+ uint32_t psqextrressp : 3; /**< [ 20: 18](R/W) PSQ extra reserved space. This is a debug feature, and is not intended for normal usage.
+ This parameter specifies how much additional space in the PSQ (protocol-status queue) must
+ be reserved before the U3PTL initiates a new USB transaction and burst beats. */
+ uint32_t sprsctrltransen : 1; /**< [ 17: 17](R/W) Sparse control transaction enable. Some devices are slow in responding to control
+ transfers. Scheduling multiple transactions in one microframe/frame can cause these
+ devices to misbehave. If this bit is set to 1, the host controller schedules transactions
+ for a control transfer in different microframes/frames. */
+ uint32_t resbwhseps : 1; /**< [ 16: 16](R/W) Reserving 85% bandwidth for high-speed periodic EPs. By default, host controller reserves
+ 80% of the bandwidth for periodic EPs. If this bit is set, the bandwidth is relaxed to 85%
+ to accommodate two high-speed, high-bandwidth ISOC EPs.
+
+ USB 2.0 required 80% bandwidth allocated for ISOC traffic. If two high bandwidth ISOC
+ devices (HD webcams) are connected, and if each requires 1024-bytes * 3 packets per
+ microframe, then the bandwidth required is around 82%. If this bit is set to 1, it is
+ possible to connect two webcams of 1024 bytes * 3 payload per microframe each. Otherwise,
+ you may have to reduce the resolution of the webcams. */
+ uint32_t cmdevaddr : 1; /**< [ 15: 15](R/W) Compliance mode for device address. When set to 1, slot ID can have different value than
+ device address if max_slot_enabled \< 128.
+ 0 = Device address is equal to slot ID.
+ 1 = Increment device address on each address device command.
+
+ The xHCI compliance requires this bit to be set to 1. The 0 mode is for debug purpose
+ only. This allows you to easily identify a device connected to a port in the Lecroy or
+ Eliisys trace during hardware debug. */
+ uint32_t usbhstinautoretryen : 1; /**< [ 14: 14](R/W) Host IN auto-retry enable. When set, this field enables the auto-retry feature. For IN
+ transfers (non-isochronous) that encounter data packets with CRC errors or internal
+ overrun scenarios, the auto-retry feature causes the host core to reply to the device with
+ a non-terminating retry ACK (i.e. an ACK transaction packet with Retry = 1 and NumP != 0).
+ If the auto-retry feature is disabled (default), the core responds with a terminating
+ retry ACK (i.e. an ACK transaction packet with Retry = 1 and NumP = 0). */
+ uint32_t enoverlapchk : 1; /**< [ 13: 13](R/W) Enable check for LFPS overlap during remote Ux Exit. If this bit is set to:
+ 0 = When the link exits U1/U2/U3 because of a remote exit, it does not look for an LFPS
+ overlap.
+ 1 = The SuperSpeed link, when exiting U1/U2/U3, waits for either the remote link LFPS or
+ TS1/TS2 training symbols before it confirms that the LFPS handshake is complete. This is
+ done to handle the case where the LFPS glitch causes the link to start exiting from the
+ low power state. Looking for the LFPS overlap makes sure that the link partner also sees
+ the LFPS. */
+ uint32_t extcapsupten : 1; /**< [ 12: 12](R/W) External extended capability support enable. If disabled, a read USBH()_UAHC_SUPTPRT3_DW0
+ [NEXTCAPPTR] returns 0 in the next capability pointer field. This indicates there are no
+ more capabilities. If enabled, a read to USBH()_UAHC_SUPTPRT3_DW0[NEXTCAPPTR] returns 4 in
+ the
+ next capability pointer field.
+ Always set to 0x0. */
+ uint32_t insrtextrfsbodi : 1; /**< [ 11: 11](R/W) Insert extra delay between full-speed bulk OUT transactions. Some full-speed devices are
+ slow to receive bulk OUT data and can get stuck when there are consecutive bulk OUT
+ transactions with short inter-transaction delays. This bit is used to control whether the
+ host inserts extra delay between consecutive bulk OUT transactions to a full-speed
+ endpoint.
+ 0 = Host does not insert extra delay.
+ Setting this bit to 1 reduces the bulk OUT transfer performance for most of the full-speed
+ devices.
+ 1 = Host inserts about 12 us extra delay between consecutive bulk OUT transactions to an
+ full-speed endpoint to work around the device issue. */
+ uint32_t dtct : 2; /**< [ 10: 9](R/W) Device timeout coarse tuning. This field determines how long the host waits for a response
+ from device before considering a timeout.
+ The core first checks the [DTCT] value. If it is 0, then the timeout value is defined by
+ the
+ [DTFT]. If it is nonzero, then it uses the following timeout values:
+ 0x0 = 0 us; use [DTFT] value instead.
+ 0x1 = 500 us.
+ 0x2 = 1.5 ms.
+ 0x3 = 6.5 ms. */
+ uint32_t dtft : 9; /**< [ 8: 0](R/W) Device timeout fine tuning. This field determines how long the host waits for a response
+ from a device before considering a timeout. For [DTFT] to take effect, [DTCT] must be set
+ to
+ 0x0.
+ The [DTFT] value specifies the number of 125 MHz clock cycles * 256 to count before
+ considering a device timeout. For the 125 MHz clock cycles (8 ns period), this is
+ calculated as follows:
+ _ [DTFT value] * 256 * 8 (ns)
+ 0x2 = 2 * 256 * 8 -\> 4 us.
+ 0x5 = 5 * 256 * 8 -\> 10 us.
+ 0xA = 10 * 256 * 8 -\> 20 us.
+ 0x10 = 16 * 256 * 8 -\> 32 us.
+ 0x19 = 25 * 256 * 8 -\> 51 us.
+ 0x31 = 49 * 256 * 8 -\> 100 us.
+ 0x62 = 98 * 256 * 8 -\> 200 us. */
+#else /* Word 0 - Little Endian */
+ uint32_t dtft : 9; /**< [ 8: 0](R/W) Device timeout fine tuning. This field determines how long the host waits for a response
+ from a device before considering a timeout. For [DTFT] to take effect, [DTCT] must be set
+ to
+ 0x0.
+ The [DTFT] value specifies the number of 125 MHz clock cycles * 256 to count before
+ considering a device timeout. For the 125 MHz clock cycles (8 ns period), this is
+ calculated as follows:
+ _ [DTFT value] * 256 * 8 (ns)
+ 0x2 = 2 * 256 * 8 -\> 4 us.
+ 0x5 = 5 * 256 * 8 -\> 10 us.
+ 0xA = 10 * 256 * 8 -\> 20 us.
+ 0x10 = 16 * 256 * 8 -\> 32 us.
+ 0x19 = 25 * 256 * 8 -\> 51 us.
+ 0x31 = 49 * 256 * 8 -\> 100 us.
+ 0x62 = 98 * 256 * 8 -\> 200 us. */
+ uint32_t dtct : 2; /**< [ 10: 9](R/W) Device timeout coarse tuning. This field determines how long the host waits for a response
+ from device before considering a timeout.
+ The core first checks the [DTCT] value. If it is 0, then the timeout value is defined by
+ the
+ [DTFT]. If it is nonzero, then it uses the following timeout values:
+ 0x0 = 0 us; use [DTFT] value instead.
+ 0x1 = 500 us.
+ 0x2 = 1.5 ms.
+ 0x3 = 6.5 ms. */
+ uint32_t insrtextrfsbodi : 1; /**< [ 11: 11](R/W) Insert extra delay between full-speed bulk OUT transactions. Some full-speed devices are
+ slow to receive bulk OUT data and can get stuck when there are consecutive bulk OUT
+ transactions with short inter-transaction delays. This bit is used to control whether the
+ host inserts extra delay between consecutive bulk OUT transactions to a full-speed
+ endpoint.
+ 0 = Host does not insert extra delay.
+ Setting this bit to 1 reduces the bulk OUT transfer performance for most of the full-speed
+ devices.
+ 1 = Host inserts about 12 us extra delay between consecutive bulk OUT transactions to an
+ full-speed endpoint to work around the device issue. */
+ uint32_t extcapsupten : 1; /**< [ 12: 12](R/W) External extended capability support enable. If disabled, a read USBH()_UAHC_SUPTPRT3_DW0
+ [NEXTCAPPTR] returns 0 in the next capability pointer field. This indicates there are no
+ more capabilities. If enabled, a read to USBH()_UAHC_SUPTPRT3_DW0[NEXTCAPPTR] returns 4 in
+ the
+ next capability pointer field.
+ Always set to 0x0. */
+ uint32_t enoverlapchk : 1; /**< [ 13: 13](R/W) Enable check for LFPS overlap during remote Ux Exit. If this bit is set to:
+ 0 = When the link exits U1/U2/U3 because of a remote exit, it does not look for an LFPS
+ overlap.
+ 1 = The SuperSpeed link, when exiting U1/U2/U3, waits for either the remote link LFPS or
+ TS1/TS2 training symbols before it confirms that the LFPS handshake is complete. This is
+ done to handle the case where the LFPS glitch causes the link to start exiting from the
+ low power state. Looking for the LFPS overlap makes sure that the link partner also sees
+ the LFPS. */
+ uint32_t usbhstinautoretryen : 1; /**< [ 14: 14](R/W) Host IN auto-retry enable. When set, this field enables the auto-retry feature. For IN
+ transfers (non-isochronous) that encounter data packets with CRC errors or internal
+ overrun scenarios, the auto-retry feature causes the host core to reply to the device with
+ a non-terminating retry ACK (i.e. an ACK transaction packet with Retry = 1 and NumP != 0).
+ If the auto-retry feature is disabled (default), the core responds with a terminating
+ retry ACK (i.e. an ACK transaction packet with Retry = 1 and NumP = 0). */
+ uint32_t cmdevaddr : 1; /**< [ 15: 15](R/W) Compliance mode for device address. When set to 1, slot ID can have different value than
+ device address if max_slot_enabled \< 128.
+ 0 = Device address is equal to slot ID.
+ 1 = Increment device address on each address device command.
+
+ The xHCI compliance requires this bit to be set to 1. The 0 mode is for debug purpose
+ only. This allows you to easily identify a device connected to a port in the Lecroy or
+ Eliisys trace during hardware debug. */
+ uint32_t resbwhseps : 1; /**< [ 16: 16](R/W) Reserving 85% bandwidth for high-speed periodic EPs. By default, host controller reserves
+ 80% of the bandwidth for periodic EPs. If this bit is set, the bandwidth is relaxed to 85%
+ to accommodate two high-speed, high-bandwidth ISOC EPs.
+
+ USB 2.0 required 80% bandwidth allocated for ISOC traffic. If two high bandwidth ISOC
+ devices (HD webcams) are connected, and if each requires 1024-bytes * 3 packets per
+ microframe, then the bandwidth required is around 82%. If this bit is set to 1, it is
+ possible to connect two webcams of 1024 bytes * 3 payload per microframe each. Otherwise,
+ you may have to reduce the resolution of the webcams. */
+ uint32_t sprsctrltransen : 1; /**< [ 17: 17](R/W) Sparse control transaction enable. Some devices are slow in responding to control
+ transfers. Scheduling multiple transactions in one microframe/frame can cause these
+ devices to misbehave. If this bit is set to 1, the host controller schedules transactions
+ for a control transfer in different microframes/frames. */
+ uint32_t psqextrressp : 3; /**< [ 20: 18](R/W) PSQ extra reserved space. This is a debug feature, and is not intended for normal usage.
+ This parameter specifies how much additional space in the PSQ (protocol-status queue) must
+ be reserved before the U3PTL initiates a new USB transaction and burst beats. */
+ uint32_t noextrdl : 1; /**< [ 21: 21](R/W) No extra delay between SOF and the first packet.
+ Some high-speed devices misbehave when the host sends a packet immediately after an SOF.
+ However, adding an extra delay between an SOF and the first packet can reduce the USB data
+ rate and performance.
+
+ This bit is used to control whether the host should wait for 2 us before it sends the
+ first packet after a SOF, or not. You can set this bit to 1 to improve the performance if
+ those problematic devices are not a concern in your host environment.
+ 0 = host waits for 2 us after an SOF before it sends the first USB packet.
+ 1 = host does not wait after an SOF before it sends the first USB packet. */
+ uint32_t refclkper : 10; /**< [ 31: 22](R/W) Reference-clock period. Indicates (in terms of ns) the period of REF_CLK. The default
+ value is set to 0x8
+ (8 ns/125 MHz). This field must be updated during power on initialization if
+ USBH()_UAHC_GCTL[SOFITPSYNC] = 1 or USBH()_UAHC_GFLADJ [GFLADJ_REFCLK_LPM_SEL] = 1. The
+ programmable maximum value 62 ns, and the minimum value is 8 ns. You use a reference clock
+ with a period that is a integer multiple, so that ITP can meet the jitter margin of 32 ns.
+ The allowable REF_CLK frequencies whose period is not integer multiples are
+ 16/17/19.2/24/39.7 MHz.
+
+ This field should not be set to 0x0 at any time. If you do not plan to use this feature,
+ then you need to set this field to 0x8, the default value. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_guctl_s cn; */
+};
+typedef union bdk_usbhx_uahc_guctl bdk_usbhx_uahc_guctl_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GUCTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GUCTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c12cll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GUCTL", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GUCTL(a) bdk_usbhx_uahc_guctl_t
+#define bustype_BDK_USBHX_UAHC_GUCTL(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GUCTL(a) "USBHX_UAHC_GUCTL"
+#define device_bar_BDK_USBHX_UAHC_GUCTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GUCTL(a) (a)
+#define arguments_BDK_USBHX_UAHC_GUCTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_guctl1
+ *
+ * UAHC Global User Control Register 1
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.7.
+ */
/* UAHC Global User Control register 1.  The two preprocessor branches
   below declare the identical 32-bit layout in opposite field order so
   that either byte order sees the same register bits. */
union bdk_usbhx_uahc_guctl1
{
    uint32_t u; /* Whole-register access. */
    struct bdk_usbhx_uahc_guctl1_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_18_31 : 14;
        uint32_t parkmode_disable_ss : 1; /**< [ 17: 17](R/W) This bit is for debug purpose only.
                                               When this bit is set to 1 all SS bus instances in park mode are
                                               disabled. */
        uint32_t parkmode_disable_hs : 1; /**< [ 16: 16](R/W) When this bit is set to 1 all HS bus instances park mode are
                                               disabled. */
        uint32_t parkmode_disable_fsls : 1; /**< [ 15: 15](R/W) When this bit is set to 1 all FS/LS bus instances park mode are
                                               disabled. */
        uint32_t reserved_9_14 : 6;
        uint32_t l1_susp_thrld_en_for_host : 1;/**< [  8:  8](R/W) The controller puts the PHY into deep low-power mode in L1 when both of the
                                               following are true:

                                               * The HIRD/BESL value used is greater than or equal to the
                                               value in [L1_SUSP_THRLD_FOR_HOST].
                                               * The [L1_SUSP_THRLD_EN_FOR_HOST] bit is set.

                                               The controller ensures that the UTMI PHY transitions to shallow low-power
                                               mode in L1 by powering down necessary blocks when one of the
                                               following is true:

                                               * The HIRD/BESL value used is less than the value in
                                               [L1_SUSP_THRLD_FOR_HOST].
                                               * [L1_SUSP_THRLD_EN_FOR_HOST] is clear. */
        uint32_t l1_susp_thrld_for_host : 4; /**< [  7:  4](R/W) This field is effective only when the [L1_SUSP_THRLD_EN_FOR_HOST] is set to 1. */
        uint32_t helden : 1; /**< [  3:  3](R/W) When this bit is set to 1, it enables the exit latency delta (ELD)
                                               support defined in the xHCI 1.0 Errata. */
        uint32_t hparchkdisable : 1; /**< [  2:  2](R/W) When this bit is set to 0 (by default), the xHC checks that the input
                                               slot/EP context fields comply to the xHCI Specification. Upon
                                               detection of a parameter error during command execution, the
                                               xHC generates an event TRB with completion code indicating
                                               'PARAMETER ERROR'.
                                               When the bit is set to 1, the xHC does not perform parameter
                                               checks and does not generate 'PARAMETER ERROR' completion
                                               code. */
        uint32_t ovrld_l1_susp_com : 1; /**< [  1:  1](R/W) Always set to 0. */
        uint32_t loa_filter_en : 1; /**< [  0:  0](R/W) If this bit is set, the USB 2.0 port babble is checked at least three consecutive times
                                               before the port is disabled. This prevents false triggering of the babble condition when
                                               using low quality cables. */
#else /* Word 0 - Little Endian */
        uint32_t loa_filter_en : 1; /**< [  0:  0](R/W) If this bit is set, the USB 2.0 port babble is checked at least three consecutive times
                                               before the port is disabled. This prevents false triggering of the babble condition when
                                               using low quality cables. */
        uint32_t ovrld_l1_susp_com : 1; /**< [  1:  1](R/W) Always set to 0. */
        uint32_t hparchkdisable : 1; /**< [  2:  2](R/W) When this bit is set to 0 (by default), the xHC checks that the input
                                               slot/EP context fields comply to the xHCI Specification. Upon
                                               detection of a parameter error during command execution, the
                                               xHC generates an event TRB with completion code indicating
                                               'PARAMETER ERROR'.
                                               When the bit is set to 1, the xHC does not perform parameter
                                               checks and does not generate 'PARAMETER ERROR' completion
                                               code. */
        uint32_t helden : 1; /**< [  3:  3](R/W) When this bit is set to 1, it enables the exit latency delta (ELD)
                                               support defined in the xHCI 1.0 Errata. */
        uint32_t l1_susp_thrld_for_host : 4; /**< [  7:  4](R/W) This field is effective only when the [L1_SUSP_THRLD_EN_FOR_HOST] is set to 1. */
        uint32_t l1_susp_thrld_en_for_host : 1;/**< [  8:  8](R/W) The controller puts the PHY into deep low-power mode in L1 when both of the
                                               following are true:

                                               * The HIRD/BESL value used is greater than or equal to the
                                               value in [L1_SUSP_THRLD_FOR_HOST].
                                               * The [L1_SUSP_THRLD_EN_FOR_HOST] bit is set.

                                               The controller ensures that the UTMI PHY transitions to shallow low-power
                                               mode in L1 by powering down necessary blocks when one of the
                                               following is true:

                                               * The HIRD/BESL value used is less than the value in
                                               [L1_SUSP_THRLD_FOR_HOST].
                                               * [L1_SUSP_THRLD_EN_FOR_HOST] is clear. */
        uint32_t reserved_9_14 : 6;
        uint32_t parkmode_disable_fsls : 1; /**< [ 15: 15](R/W) When this bit is set to 1 all FS/LS bus instances park mode are
                                               disabled. */
        uint32_t parkmode_disable_hs : 1; /**< [ 16: 16](R/W) When this bit is set to 1 all HS bus instances park mode are
                                               disabled. */
        uint32_t parkmode_disable_ss : 1; /**< [ 17: 17](R/W) This bit is for debug purpose only.
                                               When this bit is set to 1 all SS bus instances in park mode are
                                               disabled. */
        uint32_t reserved_18_31 : 14;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbhx_uahc_guctl1_s cn; */
};
typedef union bdk_usbhx_uahc_guctl1 bdk_usbhx_uahc_guctl1_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GUCTL1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GUCTL1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c11cll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GUCTL1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GUCTL1(a) bdk_usbhx_uahc_guctl1_t
+#define bustype_BDK_USBHX_UAHC_GUCTL1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GUCTL1(a) "USBHX_UAHC_GUCTL1"
+#define device_bar_BDK_USBHX_UAHC_GUCTL1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GUCTL1(a) (a)
+#define arguments_BDK_USBHX_UAHC_GUCTL1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_guid
+ *
+ * UAHC Core User ID Register
+ * This is a read/write register containing the User ID. The power-on value for this register is
+ * specified as the user identification register. This register can be used in the following
+ * ways:
+ * * To store the version or revision of your system.
+ * * To store hardware configurations that are outside of the core.
+ * * As a scratch register.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.50a, section 6.2.1.10.
+ */
/* UAHC Core User ID register.  The field spans the full 32 bits, so the
   big- and little-endian declarations are identical. */
union bdk_usbhx_uahc_guid
{
    uint32_t u; /* Whole-register access. */
    struct bdk_usbhx_uahc_guid_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t userid : 32; /**< [ 31:  0](R/W) User ID. Application-programmable ID field. */
#else /* Word 0 - Little Endian */
        uint32_t userid : 32; /**< [ 31:  0](R/W) User ID. Application-programmable ID field. */
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbhx_uahc_guid_s cn; */
};
typedef union bdk_usbhx_uahc_guid bdk_usbhx_uahc_guid_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GUID(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GUID(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000c128ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_GUID", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GUID(a) bdk_usbhx_uahc_guid_t
+#define bustype_BDK_USBHX_UAHC_GUID(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GUID(a) "USBHX_UAHC_GUID"
+#define device_bar_BDK_USBHX_UAHC_GUID(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GUID(a) (a)
+#define arguments_BDK_USBHX_UAHC_GUID(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gusb2i2cctl#
+ *
+ * UAHC USB2 I2C Control Register
+ * This register is reserved for future use.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.5.2.
+ */
/* UAHC USB2 I2C Control register.  All 32 bits are reserved; the type
   exists only so the generated accessor/macro family stays uniform. */
union bdk_usbhx_uahc_gusb2i2cctlx
{
    uint32_t u; /* Whole-register access. */
    struct bdk_usbhx_uahc_gusb2i2cctlx_s
    {
#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
        uint32_t reserved_0_31 : 32;
#else /* Word 0 - Little Endian */
        uint32_t reserved_0_31 : 32;
#endif /* Word 0 - End */
    } s;
    /* struct bdk_usbhx_uahc_gusb2i2cctlx_s cn; */
};
typedef union bdk_usbhx_uahc_gusb2i2cctlx bdk_usbhx_uahc_gusb2i2cctlx_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GUSB2I2CCTLX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GUSB2I2CCTLX(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x86800000c240ll + 0x1000000000ll * ((a) & 0x1) + 4ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_UAHC_GUSB2I2CCTLX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GUSB2I2CCTLX(a,b) bdk_usbhx_uahc_gusb2i2cctlx_t
+#define bustype_BDK_USBHX_UAHC_GUSB2I2CCTLX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GUSB2I2CCTLX(a,b) "USBHX_UAHC_GUSB2I2CCTLX"
+#define device_bar_BDK_USBHX_UAHC_GUSB2I2CCTLX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GUSB2I2CCTLX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_GUSB2I2CCTLX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gusb2phycfg#
+ *
+ * UAHC USB2 PHY-Configuration Register
+ * This register is used to configure the core after power-on. It contains USB 2.0 and USB 2.0
+ * PHY-related configuration parameters. The application must program this register before
+ * starting any transactions on either the SoC bus or the USB. Per-port registers are
+ * implemented.
+ *
+ * Do not make changes to this register after the initial programming.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.5.1.
+ */
+union bdk_usbhx_uahc_gusb2phycfgx
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_gusb2phycfgx_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t physoftrst : 1; /**< [ 31: 31](R/W) PHY soft reset. Causes the usb2phy_reset signal to be asserted to reset a UTMI PHY. */
+ uint32_t u2_freeclk_exists : 1; /**< [ 30: 30](R/W) Specifies whether your USB 2.0 PHY provides a free-running PHY clock, which is active when
+ the clock control input is active. If your USB 2.0 PHY provides a free-running PHY clock,
+ it must be connected to the utmi_clk[0] input. The remaining utmi_clk[n] must be connected
+ to the respective port clocks. The core uses the Port-0 clock for generating the internal
+ mac2 clock.
+ 0 = USB 2.0 free clock does not exist.
+ 1 = USB 2.0 free clock exists.
+
+ This field must be set to zero if you enable ITP generation based on the REF_CLK
+ counter, USBH()_UAHC_GCTL[SOFITPSYNC] = 1, or USBH()_UAHC_GFLADJ [GFLADJ_REFCLK_LPM_SEL] =
+ 1. */
+ uint32_t ulpi_lpm_with_opmode_chk : 1;/**< [ 29: 29](R/W) Support the LPM over ULPI without NOPID token to the ULPI PHY. Always 0x0. */
+ uint32_t hsic_con_width_adj : 2; /**< [ 28: 27](RO) This bit is used in the HSIC device mode of operation. Always 0x0 */
+ uint32_t inv_sel_hsic : 1; /**< [ 26: 26](RO) The application driver uses this bit to control the HSIC enable/disable function. */
+ uint32_t reserved_19_25 : 7;
+ uint32_t ulpiextvbusindicator : 1; /**< [ 18: 18](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiextvbusdrv : 1; /**< [ 17: 17](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiclksusm : 1; /**< [ 16: 16](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiautores : 1; /**< [ 15: 15](R/W) Reserved (unused in this configuration). */
+ uint32_t reserved_14 : 1;
+ uint32_t usbtrdtim : 4; /**< [ 13: 10](R/W) USB 2.0 turnaround time. Sets the turnaround time in PHY clock cycles. Specifies the
+ response time for a MAC request to the packet FIFO controller (PFC) to fetch data from the
+ DFIFO (SPRAM).
+ USB turnaround time is a critical certification criteria when using long cables and five
+ hub levels.
+ When the MAC interface is 8-bit UTMI+/ULPI, the required values for this field is 0x9. */
+ uint32_t reserved_9 : 1;
+ uint32_t enblslpm : 1; /**< [ 8: 8](R/W) Enable utmi_sleep_n and utmi_l1_suspend_n. The application uses this field to control
+ utmi_sleep_n and utmi_l1_suspend_n assertion to the PHY in the L1 state.
+ 0 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is not transferred to the
+ external PHY.
+ 1 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is transferred to the
+ external PHY.
+
+ When hardware LPM is enabled, this bit should be set high for Port0. */
+ uint32_t physel : 1; /**< [ 7: 7](WO) USB 2.0 high-speed PHY or USB 1.1 full-speed serial transceiver select. */
+ uint32_t susphy : 1; /**< [ 6: 6](R/W) Suspend USB2.0 high-speed/full-speed/low-speed PHY. When set, USB2.0 PHY enters suspend
+ mode if suspend conditions are valid. */
+ uint32_t fsintf : 1; /**< [ 5: 5](RO) Full-speed serial-interface select. Always reads as 0x0. */
+ uint32_t ulpi_utmi_sel : 1; /**< [ 4: 4](RO) ULPI or UTMI+ select. Always reads as 0x0, indicating UTMI+. */
+ uint32_t phyif : 1; /**< [ 3: 3](R/W) PHY interface width: 1 = 16-bit, 0 = 8-bit.
+ All the enabled 2.0 ports should have the same clock frequency as Port0 clock frequency
+ (utmi_clk[0]).
+ The UTMI 8-bit and 16-bit modes cannot be used together for different ports at the same
+ time (i.e., all the ports should be in 8-bit mode, or all of them should be in 16-bit
+ mode). */
+ uint32_t toutcal : 3; /**< [ 2: 0](R/W) High-speed/full-speed timeout calibration.
+ The number of PHY clock cycles, as indicated by the application in this field, is
+ multiplied by a bit-time factor; this factor is added to the high-speed/full-speed
+ interpacket timeout duration in the core to account for additional delays introduced by
+ the PHY. This might be required, since the delay introduced by the PHY in generating the
+ linestate condition can vary among PHYs.
+
+ The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit
+ times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit
+ times. The application must program this field based on the speed of connection.
+
+ The number of bit times added per PHY clock are:
+ * High-speed operation:
+ _ one 30-MHz PHY clock = 16 bit times.
+ _ one 60-MHz PHY clock = 8 bit times.
+
+ * Full-speed operation:
+ _ one 30-MHz PHY clock = 0.4 bit times.
+ _ one 60-MHz PHY clock = 0.2 bit times.
+ _ one 48-MHz PHY clock = 0.25 bit times. */
+#else /* Word 0 - Little Endian */
+ uint32_t toutcal : 3; /**< [ 2: 0](R/W) High-speed/full-speed timeout calibration.
+ The number of PHY clock cycles, as indicated by the application in this field, is
+ multiplied by a bit-time factor; this factor is added to the high-speed/full-speed
+ interpacket timeout duration in the core to account for additional delays introduced by
+ the PHY. This might be required, since the delay introduced by the PHY in generating the
+ linestate condition can vary among PHYs.
+
+ The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit
+ times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit
+ times. The application must program this field based on the speed of connection.
+
+ The number of bit times added per PHY clock are:
+ * High-speed operation:
+ _ one 30-MHz PHY clock = 16 bit times.
+ _ one 60-MHz PHY clock = 8 bit times.
+
+ * Full-speed operation:
+ _ one 30-MHz PHY clock = 0.4 bit times.
+ _ one 60-MHz PHY clock = 0.2 bit times.
+ _ one 48-MHz PHY clock = 0.25 bit times. */
+ uint32_t phyif : 1; /**< [ 3: 3](R/W) PHY interface width: 1 = 16-bit, 0 = 8-bit.
+ All the enabled 2.0 ports should have the same clock frequency as Port0 clock frequency
+ (utmi_clk[0]).
+ The UTMI 8-bit and 16-bit modes cannot be used together for different ports at the same
+ time (i.e., all the ports should be in 8-bit mode, or all of them should be in 16-bit
+ mode). */
+ uint32_t ulpi_utmi_sel : 1; /**< [ 4: 4](RO) ULPI or UTMI+ select. Always reads as 0x0, indicating UTMI+. */
+ uint32_t fsintf : 1; /**< [ 5: 5](RO) Full-speed serial-interface select. Always reads as 0x0. */
+ uint32_t susphy : 1; /**< [ 6: 6](R/W) Suspend USB2.0 high-speed/full-speed/low-speed PHY. When set, USB2.0 PHY enters suspend
+ mode if suspend conditions are valid. */
+ uint32_t physel : 1; /**< [ 7: 7](WO) USB 2.0 high-speed PHY or USB 1.1 full-speed serial transceiver select. */
+ uint32_t enblslpm : 1; /**< [ 8: 8](R/W) Enable utmi_sleep_n and utmi_l1_suspend_n. The application uses this field to control
+ utmi_sleep_n and utmi_l1_suspend_n assertion to the PHY in the L1 state.
+ 0 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is not transferred to the
+ external PHY.
+ 1 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is transferred to the
+ external PHY.
+
+ When hardware LPM is enabled, this bit should be set high for Port0. */
+ uint32_t reserved_9 : 1;
+ uint32_t usbtrdtim : 4; /**< [ 13: 10](R/W) USB 2.0 turnaround time. Sets the turnaround time in PHY clock cycles. Specifies the
+ response time for a MAC request to the packet FIFO controller (PFC) to fetch data from the
+ DFIFO (SPRAM).
+ USB turnaround time is a critical certification criteria when using long cables and five
+ hub levels.
+ When the MAC interface is 8-bit UTMI+/ULPI, the required values for this field is 0x9. */
+ uint32_t reserved_14 : 1;
+ uint32_t ulpiautores : 1; /**< [ 15: 15](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiclksusm : 1; /**< [ 16: 16](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiextvbusdrv : 1; /**< [ 17: 17](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiextvbusindicator : 1; /**< [ 18: 18](R/W) Reserved (unused in this configuration). */
+ uint32_t reserved_19_25 : 7;
+ uint32_t inv_sel_hsic : 1; /**< [ 26: 26](RO) The application driver uses this bit to control the HSIC enable/disable function. */
+ uint32_t hsic_con_width_adj : 2; /**< [ 28: 27](RO) This bit is used in the HSIC device mode of operation. Always 0x0 */
+ uint32_t ulpi_lpm_with_opmode_chk : 1;/**< [ 29: 29](R/W) Support the LPM over ULPI without NOPID token to the ULPI PHY. Always 0x0. */
+ uint32_t u2_freeclk_exists : 1; /**< [ 30: 30](R/W) Specifies whether your USB 2.0 PHY provides a free-running PHY clock, which is active when
+ the clock control input is active. If your USB 2.0 PHY provides a free-running PHY clock,
+ it must be connected to the utmi_clk[0] input. The remaining utmi_clk[n] must be connected
+ to the respective port clocks. The core uses the Port-0 clock for generating the internal
+ mac2 clock.
+ 0 = USB 2.0 free clock does not exist.
+ 1 = USB 2.0 free clock exists.
+
+ This field must be set to zero if you enable ITP generation based on the REF_CLK
+ counter, USBH()_UAHC_GCTL[SOFITPSYNC] = 1, or USBH()_UAHC_GFLADJ [GFLADJ_REFCLK_LPM_SEL] =
+ 1. */
+ uint32_t physoftrst : 1; /**< [ 31: 31](R/W) PHY soft reset. Causes the usb2phy_reset signal to be asserted to reset a UTMI PHY. */
+#endif /* Word 0 - End */
+ } s;
+ struct bdk_usbhx_uahc_gusb2phycfgx_cn
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t physoftrst : 1; /**< [ 31: 31](R/W) PHY soft reset. Causes the usb2phy_reset signal to be asserted to reset a UTMI PHY. */
+ uint32_t u2_freeclk_exists : 1; /**< [ 30: 30](R/W) Specifies whether your USB 2.0 PHY provides a free-running PHY clock, which is active when
+ the clock control input is active. If your USB 2.0 PHY provides a free-running PHY clock,
+ it must be connected to the utmi_clk[0] input. The remaining utmi_clk[n] must be connected
+ to the respective port clocks. The core uses the Port-0 clock for generating the internal
+ mac2 clock.
+ 0 = USB 2.0 free clock does not exist.
+ 1 = USB 2.0 free clock exists.
+
+ This field must be set to zero if you enable ITP generation based on the REF_CLK
+ counter, USBH()_UAHC_GCTL[SOFITPSYNC] = 1, or USBH()_UAHC_GFLADJ [GFLADJ_REFCLK_LPM_SEL] =
+ 1. */
+ uint32_t ulpi_lpm_with_opmode_chk : 1;/**< [ 29: 29](R/W) Support the LPM over ULPI without NOPID token to the ULPI PHY. Always 0x0. */
+ uint32_t hsic_con_width_adj : 2; /**< [ 28: 27](RO) This bit is used in the HSIC device mode of operation. Always 0x0 */
+ uint32_t inv_sel_hsic : 1; /**< [ 26: 26](RO) The application driver uses this bit to control the HSIC enable/disable function. */
+ uint32_t reserved_25 : 1;
+ uint32_t reserved_19_24 : 6;
+ uint32_t ulpiextvbusindicator : 1; /**< [ 18: 18](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiextvbusdrv : 1; /**< [ 17: 17](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiclksusm : 1; /**< [ 16: 16](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiautores : 1; /**< [ 15: 15](R/W) Reserved (unused in this configuration). */
+ uint32_t reserved_14 : 1;
+ uint32_t usbtrdtim : 4; /**< [ 13: 10](R/W) USB 2.0 turnaround time. Sets the turnaround time in PHY clock cycles. Specifies the
+ response time for a MAC request to the packet FIFO controller (PFC) to fetch data from the
+ DFIFO (SPRAM).
+ USB turnaround time is a critical certification criteria when using long cables and five
+ hub levels.
+ When the MAC interface is 8-bit UTMI+/ULPI, the required values for this field is 0x9. */
+ uint32_t reserved_9 : 1;
+ uint32_t enblslpm : 1; /**< [ 8: 8](R/W) Enable utmi_sleep_n and utmi_l1_suspend_n. The application uses this field to control
+ utmi_sleep_n and utmi_l1_suspend_n assertion to the PHY in the L1 state.
+ 0 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is not transferred to the
+ external PHY.
+ 1 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is transferred to the
+ external PHY.
+
+ When hardware LPM is enabled, this bit should be set high for Port0. */
+ uint32_t physel : 1; /**< [ 7: 7](WO) USB 2.0 high-speed PHY or USB 1.1 full-speed serial transceiver select. */
+ uint32_t susphy : 1; /**< [ 6: 6](R/W) Suspend USB2.0 high-speed/full-speed/low-speed PHY. When set, USB2.0 PHY enters suspend
+ mode if suspend conditions are valid. */
+ uint32_t fsintf : 1; /**< [ 5: 5](RO) Full-speed serial-interface select. Always reads as 0x0. */
+ uint32_t ulpi_utmi_sel : 1; /**< [ 4: 4](RO) ULPI or UTMI+ select. Always reads as 0x0, indicating UTMI+. */
+ uint32_t phyif : 1; /**< [ 3: 3](R/W) PHY interface width: 1 = 16-bit, 0 = 8-bit.
+ All the enabled 2.0 ports should have the same clock frequency as Port0 clock frequency
+ (utmi_clk[0]).
+ The UTMI 8-bit and 16-bit modes cannot be used together for different ports at the same
+ time (i.e., all the ports should be in 8-bit mode, or all of them should be in 16-bit
+ mode). */
+ uint32_t toutcal : 3; /**< [ 2: 0](R/W) High-speed/full-speed timeout calibration.
+ The number of PHY clock cycles, as indicated by the application in this field, is
+ multiplied by a bit-time factor; this factor is added to the high-speed/full-speed
+ interpacket timeout duration in the core to account for additional delays introduced by
+ the PHY. This might be required, since the delay introduced by the PHY in generating the
+ linestate condition can vary among PHYs.
+
+ The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit
+ times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit
+ times. The application must program this field based on the speed of connection.
+
+ The number of bit times added per PHY clock are:
+ * High-speed operation:
+ _ one 30-MHz PHY clock = 16 bit times.
+ _ one 60-MHz PHY clock = 8 bit times.
+
+ * Full-speed operation:
+ _ one 30-MHz PHY clock = 0.4 bit times.
+ _ one 60-MHz PHY clock = 0.2 bit times.
+ _ one 48-MHz PHY clock = 0.25 bit times. */
+#else /* Word 0 - Little Endian */
+ uint32_t toutcal : 3; /**< [ 2: 0](R/W) High-speed/full-speed timeout calibration.
+ The number of PHY clock cycles, as indicated by the application in this field, is
+ multiplied by a bit-time factor; this factor is added to the high-speed/full-speed
+ interpacket timeout duration in the core to account for additional delays introduced by
+ the PHY. This might be required, since the delay introduced by the PHY in generating the
+ linestate condition can vary among PHYs.
+
+ The USB standard timeout value for high-speed operation is 736 to 816 (inclusive) bit
+ times. The USB standard timeout value for full-speed operation is 16 to 18 (inclusive) bit
+ times. The application must program this field based on the speed of connection.
+
+ The number of bit times added per PHY clock are:
+ * High-speed operation:
+ _ one 30-MHz PHY clock = 16 bit times.
+ _ one 60-MHz PHY clock = 8 bit times.
+
+ * Full-speed operation:
+ _ one 30-MHz PHY clock = 0.4 bit times.
+ _ one 60-MHz PHY clock = 0.2 bit times.
+ _ one 48-MHz PHY clock = 0.25 bit times. */
+ uint32_t phyif : 1; /**< [ 3: 3](R/W) PHY interface width: 1 = 16-bit, 0 = 8-bit.
+ All the enabled 2.0 ports should have the same clock frequency as Port0 clock frequency
+ (utmi_clk[0]).
+ The UTMI 8-bit and 16-bit modes cannot be used together for different ports at the same
+ time (i.e., all the ports should be in 8-bit mode, or all of them should be in 16-bit
+ mode). */
+ uint32_t ulpi_utmi_sel : 1; /**< [ 4: 4](RO) ULPI or UTMI+ select. Always reads as 0x0, indicating UTMI+. */
+ uint32_t fsintf : 1; /**< [ 5: 5](RO) Full-speed serial-interface select. Always reads as 0x0. */
+ uint32_t susphy : 1; /**< [ 6: 6](R/W) Suspend USB2.0 high-speed/full-speed/low-speed PHY. When set, USB2.0 PHY enters suspend
+ mode if suspend conditions are valid. */
+ uint32_t physel : 1; /**< [ 7: 7](WO) USB 2.0 high-speed PHY or USB 1.1 full-speed serial transceiver select. */
+ uint32_t enblslpm : 1; /**< [ 8: 8](R/W) Enable utmi_sleep_n and utmi_l1_suspend_n. The application uses this field to control
+ utmi_sleep_n and utmi_l1_suspend_n assertion to the PHY in the L1 state.
+ 0 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is not transferred to the
+ external PHY.
+ 1 = utmi_sleep_n and utmi_l1_suspend_n assertion from the core is transferred to the
+ external PHY.
+
+ When hardware LPM is enabled, this bit should be set high for Port0. */
+ uint32_t reserved_9 : 1;
+ uint32_t usbtrdtim : 4; /**< [ 13: 10](R/W) USB 2.0 turnaround time. Sets the turnaround time in PHY clock cycles. Specifies the
+ response time for a MAC request to the packet FIFO controller (PFC) to fetch data from the
+ DFIFO (SPRAM).
+ USB turnaround time is a critical certification criteria when using long cables and five
+ hub levels.
+ When the MAC interface is 8-bit UTMI+/ULPI, the required values for this field is 0x9. */
+ uint32_t reserved_14 : 1;
+ uint32_t ulpiautores : 1; /**< [ 15: 15](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiclksusm : 1; /**< [ 16: 16](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiextvbusdrv : 1; /**< [ 17: 17](R/W) Reserved (unused in this configuration). */
+ uint32_t ulpiextvbusindicator : 1; /**< [ 18: 18](R/W) Reserved (unused in this configuration). */
+ uint32_t reserved_19_24 : 6;
+ uint32_t reserved_25 : 1;
+ uint32_t inv_sel_hsic : 1; /**< [ 26: 26](RO) The application driver uses this bit to control the HSIC enable/disable function. */
+ uint32_t hsic_con_width_adj : 2; /**< [ 28: 27](RO) This bit is used in the HSIC device mode of operation. Always 0x0 */
+ uint32_t ulpi_lpm_with_opmode_chk : 1;/**< [ 29: 29](R/W) Support the LPM over ULPI without NOPID token to the ULPI PHY. Always 0x0. */
+ uint32_t u2_freeclk_exists : 1; /**< [ 30: 30](R/W) Specifies whether your USB 2.0 PHY provides a free-running PHY clock, which is active when
+ the clock control input is active. If your USB 2.0 PHY provides a free-running PHY clock,
+ it must be connected to the utmi_clk[0] input. The remaining utmi_clk[n] must be connected
+ to the respective port clocks. The core uses the Port-0 clock for generating the internal
+ mac2 clock.
+ 0 = USB 2.0 free clock does not exist.
+ 1 = USB 2.0 free clock exists.
+
+ This field must be set to zero if you enable ITP generation based on the REF_CLK
+ counter, USBH()_UAHC_GCTL[SOFITPSYNC] = 1, or USBH()_UAHC_GFLADJ [GFLADJ_REFCLK_LPM_SEL] =
+ 1. */
+ uint32_t physoftrst : 1; /**< [ 31: 31](R/W) PHY soft reset. Causes the usb2phy_reset signal to be asserted to reset a UTMI PHY. */
+#endif /* Word 0 - End */
+ } cn;
+};
+typedef union bdk_usbhx_uahc_gusb2phycfgx bdk_usbhx_uahc_gusb2phycfgx_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GUSB2PHYCFGX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GUSB2PHYCFGX(unsigned long a, unsigned long b)
+{
+    /* Register exists only on CN88XX: two USB host units [a], one instance [b].
+       Out-of-range arguments are a fatal coding error (__bdk_csr_fatal does not return). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX) || (a > 1) || (b != 0))
+        __bdk_csr_fatal("USBHX_UAHC_GUSB2PHYCFGX", 2, a, b, 0, 0);
+    /* Base address plus 64 GB per-unit stride; the [b] term is always zero here. */
+    return 0x86800000c200ll + 0x1000000000ll * (a & 0x1) + 4ll * (b & 0x0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GUSB2PHYCFGX(a,b) bdk_usbhx_uahc_gusb2phycfgx_t
+#define bustype_BDK_USBHX_UAHC_GUSB2PHYCFGX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GUSB2PHYCFGX(a,b) "USBHX_UAHC_GUSB2PHYCFGX"
+#define device_bar_BDK_USBHX_UAHC_GUSB2PHYCFGX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GUSB2PHYCFGX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_GUSB2PHYCFGX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_gusb3pipectl#
+ *
+ * UAHC USB3 Pipe-Control Register
+ * This register is used to configure the core after power-on. It contains USB 3.0 and USB 3.0
+ * PHY-related configuration parameters. The application must program this register before
+ * starting any transactions on either the SoC bus or the USB. Per-port registers are
+ * implemented.
+ *
+ * Do not make changes to this register after the initial programming.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UAHC_RST].
+ *
+ * Internal:
+ * See Synopsys DWC_usb3 Databook v2.20a, section 6.2.5.4.
+ */
+union bdk_usbhx_uahc_gusb3pipectlx
+{
+    uint32_t u;
+    /* NOTE(review): generated register layout — the bitfield order below is the
+       hardware ABI, mirrored per endianness. Do not reorder or resize fields. */
+    struct bdk_usbhx_uahc_gusb3pipectlx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t physoftrst : 1; /**< [ 31: 31](R/W) USB3 PHY soft reset (PHYSoftRst). When set to 1, initiates a PHY soft reset. After setting
+ this bit to 1, the software needs to clear this bit. */
+ uint32_t hstprtcmpl : 1; /**< [ 30: 30](R/W) Host port compliance. Setting this bit to 1 enables placing the SuperSpeed port link into
+ a compliance state, which allows testing of the PIPE PHY compliance patterns without
+ having to have a test fixture on the USB 3.0 cable. By default, this bit should be set to
+ 0.
+
+ In compliance-lab testing, the SuperSpeed port link enters compliance after failing the
+ first polling sequence after power on. Set this bit to 0 when you run compliance tests.
+
+ The sequence for using this functionality is as follows:
+ * Disconnect any plugged-in devices.
+ * Set USBH()_UAHC_USBCMD[HCRST] = 1 or power-on-chip reset.
+ * Set USBH()_UAHC_PORTSC()[PP] = 0.
+ * Set HSTPRTCMPL = 1. This places the link into compliance state.
+
+ To advance the compliance pattern, follow this sequence (toggle HSTPRTCMPL):
+ * Set HSTPRTCMPL = 0.
+ * Set HSTPRTCMPL = 1. This advances the link to the next compliance pattern.
+
+ To exit from the compliance state, set USBH()_UAHC_USBCMD[HCRST] = 1 or power-on-chip
+ reset. */
+ uint32_t u2ssinactp3ok : 1; /**< [ 29: 29](R/W) P3 OK for U2/SS.Inactive:
+ 0 = During link state U2/SS.Inactive, put PHY in P2 (default).
+ 1 = During link state U2/SS.Inactive, put PHY in P3. */
+ uint32_t disrxdetp3 : 1; /**< [ 28: 28](R/W) Disables receiver detection in P3. If PHY is in P3 and the core needs to perform receiver
+ detection:
+ 0 = Core performs receiver detection in P3 (default).
+ 1 = Core changes the PHY power state to P2 and then performs receiver detection. After
+ receiver detection, core changes PHY power state to P3. */
+ uint32_t ux_exit_in_px : 1; /**< [ 27: 27](R/W) UX exit in Px:
+ 0 = Core does U1/U2/U3 exit in PHY power state P0 (default behavior).
+ 1 = Core does U1/U2/U3 exit in PHY power state P1/P2/P3 respectively.
+
+ This bit is added for SuperSpeed PHY workaround where SuperSpeed PHY injects a glitch on
+ pipe3_RxElecIdle while receiving Ux exit LFPS, and pipe3_PowerDown change is in progress.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t ping_enchance_en : 1; /**< [ 26: 26](R/W) Ping enhancement enable. When set to 1, the downstream-port U1-ping-receive timeout
+ becomes 500 ms instead of 300 ms. Minimum Ping.LFPS receive duration is 8 ns (one mac3_clk
+ cycle). This field is valid for the downstream port only.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t u1u2exitfail_to_recov : 1; /**< [ 25: 25](R/W) U1U2exit fail to recovery. When set to 1, and U1/U2 LFPS handshake fails, the LTSSM
+ transitions from U1/U2 to recovery instead of SS.inactive.
+ If recovery fails, then the LTSSM can enter SS.Inactive. This is an enhancement only. It
+ prevents interoperability issue if the remote link does not do the proper handshake. */
+ uint32_t request_p1p2p3 : 1; /**< [ 24: 24](R/W) Always request P1/P2/P3 for U1/U2/U3.
+ 0 = if immediate Ux exit (remotely initiated, or locally initiated) happens, the core does
+ not request P1/P2/P3 power state change.
+ 1 = the core always requests PHY power change from P0 to P1/P2/P3 during U0 to U1/U2/U3
+ transition.
+
+ Internal:
+ Note: This bit should be set to 1 for Synopsys PHY. For third-party SuperSpeed
+ PHY, check with your PHY vendor. */
+ uint32_t startrxdetu3rxdet : 1; /**< [ 23: 23](WO) Start receiver detection in U3/Rx.Detect.
+ If DISRXDETU3RXDET is set to 1 during reset, and the link is in U3 or Rx.Detect state, the
+ core starts receiver detection on rising edge of this bit.
+ This bit is valid for downstream ports only, and this feature must not be enabled for
+ normal operation.
+
+ Internal:
+ If have to use this feature, contact Synopsys. */
+ uint32_t disrxdetu3rxdet : 1; /**< [ 22: 22](R/W) Disable receiver detection in U3/Rx.Detect. When set to 1, the core does not do receiver
+ detection in U3 or Rx.Detect state. If STARTRXDETU3RXDET is set to 1 during reset,
+ receiver detection starts manually.
+ This bit is valid for downstream ports only, and this feature must not be enabled for
+ normal operation.
+
+ Internal:
+ If have to use this feature, contact Synopsys. */
+ uint32_t delaypx : 3; /**< [ 21: 19](R/W) Delay P1P2P3. Delay P0 to P1/P2/P3 request when entering U1/U2/U3 until (DELAYPX * 8)
+ 8B10B error occurs, or Pipe3_RxValid drops to 0.
+ DELAYPXTRANSENTERUX must reset to 1 to enable this functionality.
+
+ Internal:
+ Should always be 0x1 for a Synopsys PHY. */
+ uint32_t delaypxtransenterux : 1; /**< [ 18: 18](R/W) Delay PHY power change from P0 to P1/P2/P3 when link state changing from U0 to U1/U2/U3
+ respectively.
+ 0 = when entering U1/U2/U3, transition to P1/P2/P3 without checking for Pipe3_RxElecIlde
+ and pipe3_RxValid.
+ 1 = when entering U1/U2/U3, delay the transition to P1/P2/P3 until the pipe3 signals,
+ Pipe3_RxElecIlde is 1 and pipe3_RxValid is 0.
+
+ Internal:
+ Note: This bit should be set to '1' for Synopsys PHY. It is also used by third-
+ party SuperSpeed PHY. */
+ uint32_t suspend_en : 1; /**< [ 17: 17](R/W) Suspend USB3.0 SuperSpeed PHY (Suspend_en). When set to 1, and if suspend conditions are
+ valid, the USB 3.0 PHY enters suspend mode. */
+ uint32_t datwidth : 2; /**< [ 16: 15](RO) PIPE data width.
+ 0x0 = 32 bits.
+ 0x1 = 16 bits.
+ 0x2 = 8 bits.
+ 0x3 = reserved.
+
+ One clock cycle after reset, these bits receive the value seen on the pipe3_DataBusWidth.
+ This will always be 0x0.
+
+ Internal:
+ The simulation testbench uses the coreConsultant parameter to configure the VIP.
+ INTERNAL: These bits in the coreConsultant parameter should match your PHY data width and
+ the pipe3_DataBusWidth port. */
+ uint32_t abortrxdetinu2 : 1; /**< [ 14: 14](R/W) Abort RX Detect in U2. When set to 1, and the link state is U2, the core aborts receiver
+ detection if it receives U2 exit LFPS from the remote link partner.
+
+ This bit is for downstream port only.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t skiprxdet : 1; /**< [ 13: 13](R/W) Skip RX detect. When set to 1, the core skips RX detection if pipe3_RxElecIdle is low.
+ Skip is defined as waiting for the appropriate timeout, then repeating the operation. */
+ uint32_t lfpsp0algn : 1; /**< [ 12: 12](R/W) LFPS P0 align. When set to 1:
+ * the core deasserts LFPS transmission on the clock edge that it requests Phy power state
+ 0 when exiting U1, U2, or U3 low power states. Otherwise, LFPS transmission is asserted
+ one clock earlier.
+ * the core requests symbol transmission two pipe3_rx_pclks periods after the PHY asserts
+ PhyStatus as a result of the PHY switching from P1 or P2 state to P0 state.
+ For USB 3.0 host, this is not required. */
+ uint32_t p3p2tranok : 1; /**< [ 11: 11](R/W) P3 P2 transitions OK.
+ 0 = P0 is always entered as an intermediate state during transitions between P2 and P3, as
+ defined in the PIPE3 specification.
+ 1 = the core transitions directly from Phy power state P2 to P3 or from state P3 to P2.
+
+ According to PIPE3 specification, any direct transition between P3 and P2 is illegal.
+
+ Internal:
+ This bit is used only for some non-Synopsys PHYs that cannot do LFPS in P3.
+ INTERNAL: Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t p3exsigp2 : 1; /**< [ 10: 10](R/W) P3 exit signal in P2. When set to 1, the core always changes the PHY power state to P2,
+ before attempting a U3 exit handshake.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t lfpsfilt : 1; /**< [ 9: 9](R/W) LFPS filter. When set to 1, filter LFPS reception with pipe3_RxValid in PHY power state
+ P0, ignore LFPS reception from the PHY unless both pipe3_Rxelecidle and pipe3_RxValid are
+ deasserted. */
+ uint32_t rxdet2polllfpsctrl : 1; /**< [ 8: 8](R/W) RX_DETECT to Polling.
+ 0 = Enables a 400 us delay to start polling LFPS after RX_DETECT. This allows VCM offset
+ to settle to a proper level.
+ 1 = Disables the 400 us delay to start polling LFPS after RX_DETECT. */
+ uint32_t ssicen : 1; /**< [ 7: 7](R/W) SSIC is not supported. This bit must be set to 0. */
+ uint32_t txswing : 1; /**< [ 6: 6](R/W) TX swing. Refer to the PIPE3 specification. */
+ uint32_t txmargin : 3; /**< [ 5: 3](R/W) TX margin. Refer to the PIPE3 specification, table 5-3. */
+ uint32_t txdeemphasis : 2; /**< [ 2: 1](R/W) TX de-emphasis. The value driven to the PHY is controlled by the LTSSM during USB3
+ compliance mode. Refer to the PIPE3 specification, table 5-3.
+
+ Use the following values for the appropriate level of de-emphasis (From pipe3 spec):
+ 0x0 = -6 dB de-emphasis, use USBH()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_6DB].
+ 0x1 = -3.5 dB de-emphasis, use USBH()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_3P5DB].
+ 0x2 = No de-emphasis.
+ 0x3 = Reserved. */
+ uint32_t elasticbuffermode : 1; /**< [ 0: 0](R/W) Elastic buffer mode. Refer to the PIPE3 specification, table 5-3. */
+#else /* Word 0 - Little Endian */
+ uint32_t elasticbuffermode : 1; /**< [ 0: 0](R/W) Elastic buffer mode. Refer to the PIPE3 specification, table 5-3. */
+ uint32_t txdeemphasis : 2; /**< [ 2: 1](R/W) TX de-emphasis. The value driven to the PHY is controlled by the LTSSM during USB3
+ compliance mode. Refer to the PIPE3 specification, table 5-3.
+
+ Use the following values for the appropriate level of de-emphasis (From pipe3 spec):
+ 0x0 = -6 dB de-emphasis, use USBH()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_6DB].
+ 0x1 = -3.5 dB de-emphasis, use USBH()_UCTL_PORT()_CFG_SS[PCS_TX_DEEMPH_3P5DB].
+ 0x2 = No de-emphasis.
+ 0x3 = Reserved. */
+ uint32_t txmargin : 3; /**< [ 5: 3](R/W) TX margin. Refer to the PIPE3 specification, table 5-3. */
+ uint32_t txswing : 1; /**< [ 6: 6](R/W) TX swing. Refer to the PIPE3 specification. */
+ uint32_t ssicen : 1; /**< [ 7: 7](R/W) SSIC is not supported. This bit must be set to 0. */
+ uint32_t rxdet2polllfpsctrl : 1; /**< [ 8: 8](R/W) RX_DETECT to Polling.
+ 0 = Enables a 400 us delay to start polling LFPS after RX_DETECT. This allows VCM offset
+ to settle to a proper level.
+ 1 = Disables the 400 us delay to start polling LFPS after RX_DETECT. */
+ uint32_t lfpsfilt : 1; /**< [ 9: 9](R/W) LFPS filter. When set to 1, filter LFPS reception with pipe3_RxValid in PHY power state
+ P0, ignore LFPS reception from the PHY unless both pipe3_Rxelecidle and pipe3_RxValid are
+ deasserted. */
+ uint32_t p3exsigp2 : 1; /**< [ 10: 10](R/W) P3 exit signal in P2. When set to 1, the core always changes the PHY power state to P2,
+ before attempting a U3 exit handshake.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t p3p2tranok : 1; /**< [ 11: 11](R/W) P3 P2 transitions OK.
+ 0 = P0 is always entered as an intermediate state during transitions between P2 and P3, as
+ defined in the PIPE3 specification.
+ 1 = the core transitions directly from Phy power state P2 to P3 or from state P3 to P2.
+
+ According to PIPE3 specification, any direct transition between P3 and P2 is illegal.
+
+ Internal:
+ This bit is used only for some non-Synopsys PHYs that cannot do LFPS in P3.
+ INTERNAL: Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t lfpsp0algn : 1; /**< [ 12: 12](R/W) LFPS P0 align. When set to 1:
+ * the core deasserts LFPS transmission on the clock edge that it requests Phy power state
+ 0 when exiting U1, U2, or U3 low power states. Otherwise, LFPS transmission is asserted
+ one clock earlier.
+ * the core requests symbol transmission two pipe3_rx_pclks periods after the PHY asserts
+ PhyStatus as a result of the PHY switching from P1 or P2 state to P0 state.
+ For USB 3.0 host, this is not required. */
+ uint32_t skiprxdet : 1; /**< [ 13: 13](R/W) Skip RX detect. When set to 1, the core skips RX detection if pipe3_RxElecIdle is low.
+ Skip is defined as waiting for the appropriate timeout, then repeating the operation. */
+ uint32_t abortrxdetinu2 : 1; /**< [ 14: 14](R/W) Abort RX Detect in U2. When set to 1, and the link state is U2, the core aborts receiver
+ detection if it receives U2 exit LFPS from the remote link partner.
+
+ This bit is for downstream port only.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t datwidth : 2; /**< [ 16: 15](RO) PIPE data width.
+ 0x0 = 32 bits.
+ 0x1 = 16 bits.
+ 0x2 = 8 bits.
+ 0x3 = reserved.
+
+ One clock cycle after reset, these bits receive the value seen on the pipe3_DataBusWidth.
+ This will always be 0x0.
+
+ Internal:
+ The simulation testbench uses the coreConsultant parameter to configure the VIP.
+ INTERNAL: These bits in the coreConsultant parameter should match your PHY data width and
+ the pipe3_DataBusWidth port. */
+ uint32_t suspend_en : 1; /**< [ 17: 17](R/W) Suspend USB3.0 SuperSpeed PHY (Suspend_en). When set to 1, and if suspend conditions are
+ valid, the USB 3.0 PHY enters suspend mode. */
+ uint32_t delaypxtransenterux : 1; /**< [ 18: 18](R/W) Delay PHY power change from P0 to P1/P2/P3 when link state changing from U0 to U1/U2/U3
+ respectively.
+ 0 = when entering U1/U2/U3, transition to P1/P2/P3 without checking for Pipe3_RxElecIlde
+ and pipe3_RxValid.
+ 1 = when entering U1/U2/U3, delay the transition to P1/P2/P3 until the pipe3 signals,
+ Pipe3_RxElecIlde is 1 and pipe3_RxValid is 0.
+
+ Internal:
+ Note: This bit should be set to '1' for Synopsys PHY. It is also used by third-
+ party SuperSpeed PHY. */
+ uint32_t delaypx : 3; /**< [ 21: 19](R/W) Delay P1P2P3. Delay P0 to P1/P2/P3 request when entering U1/U2/U3 until (DELAYPX * 8)
+ 8B10B error occurs, or Pipe3_RxValid drops to 0.
+ DELAYPXTRANSENTERUX must reset to 1 to enable this functionality.
+
+ Internal:
+ Should always be 0x1 for a Synopsys PHY. */
+ uint32_t disrxdetu3rxdet : 1; /**< [ 22: 22](R/W) Disable receiver detection in U3/Rx.Detect. When set to 1, the core does not do receiver
+ detection in U3 or Rx.Detect state. If STARTRXDETU3RXDET is set to 1 during reset,
+ receiver detection starts manually.
+ This bit is valid for downstream ports only, and this feature must not be enabled for
+ normal operation.
+
+ Internal:
+ If have to use this feature, contact Synopsys. */
+ uint32_t startrxdetu3rxdet : 1; /**< [ 23: 23](WO) Start receiver detection in U3/Rx.Detect.
+ If DISRXDETU3RXDET is set to 1 during reset, and the link is in U3 or Rx.Detect state, the
+ core starts receiver detection on rising edge of this bit.
+ This bit is valid for downstream ports only, and this feature must not be enabled for
+ normal operation.
+
+ Internal:
+ If have to use this feature, contact Synopsys. */
+ uint32_t request_p1p2p3 : 1; /**< [ 24: 24](R/W) Always request P1/P2/P3 for U1/U2/U3.
+ 0 = if immediate Ux exit (remotely initiated, or locally initiated) happens, the core does
+ not request P1/P2/P3 power state change.
+ 1 = the core always requests PHY power change from P0 to P1/P2/P3 during U0 to U1/U2/U3
+ transition.
+
+ Internal:
+ Note: This bit should be set to 1 for Synopsys PHY. For third-party SuperSpeed
+ PHY, check with your PHY vendor. */
+ uint32_t u1u2exitfail_to_recov : 1; /**< [ 25: 25](R/W) U1U2exit fail to recovery. When set to 1, and U1/U2 LFPS handshake fails, the LTSSM
+ transitions from U1/U2 to recovery instead of SS.inactive.
+ If recovery fails, then the LTSSM can enter SS.Inactive. This is an enhancement only. It
+ prevents interoperability issue if the remote link does not do the proper handshake. */
+ uint32_t ping_enchance_en : 1; /**< [ 26: 26](R/W) Ping enhancement enable. When set to 1, the downstream-port U1-ping-receive timeout
+ becomes 500 ms instead of 300 ms. Minimum Ping.LFPS receive duration is 8 ns (one mac3_clk
+ cycle). This field is valid for the downstream port only.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t ux_exit_in_px : 1; /**< [ 27: 27](R/W) UX exit in Px:
+ 0 = Core does U1/U2/U3 exit in PHY power state P0 (default behavior).
+ 1 = Core does U1/U2/U3 exit in PHY power state P1/P2/P3 respectively.
+
+ This bit is added for SuperSpeed PHY workaround where SuperSpeed PHY injects a glitch on
+ pipe3_RxElecIdle while receiving Ux exit LFPS, and pipe3_PowerDown change is in progress.
+
+ Internal:
+ Note: This bit is used by third-party SuperSpeed PHY. It should be set to 0 for
+ Synopsys PHY. */
+ uint32_t disrxdetp3 : 1; /**< [ 28: 28](R/W) Disables receiver detection in P3. If PHY is in P3 and the core needs to perform receiver
+ detection:
+ 0 = Core performs receiver detection in P3 (default).
+ 1 = Core changes the PHY power state to P2 and then performs receiver detection. After
+ receiver detection, core changes PHY power state to P3. */
+ uint32_t u2ssinactp3ok : 1; /**< [ 29: 29](R/W) P3 OK for U2/SS.Inactive:
+ 0 = During link state U2/SS.Inactive, put PHY in P2 (default).
+ 1 = During link state U2/SS.Inactive, put PHY in P3. */
+ uint32_t hstprtcmpl : 1; /**< [ 30: 30](R/W) Host port compliance. Setting this bit to 1 enables placing the SuperSpeed port link into
+ a compliance state, which allows testing of the PIPE PHY compliance patterns without
+ having to have a test fixture on the USB 3.0 cable. By default, this bit should be set to
+ 0.
+
+ In compliance-lab testing, the SuperSpeed port link enters compliance after failing the
+ first polling sequence after power on. Set this bit to 0 when you run compliance tests.
+
+ The sequence for using this functionality is as follows:
+ * Disconnect any plugged-in devices.
+ * Set USBH()_UAHC_USBCMD[HCRST] = 1 or power-on-chip reset.
+ * Set USBH()_UAHC_PORTSC()[PP] = 0.
+ * Set HSTPRTCMPL = 1. This places the link into compliance state.
+
+ To advance the compliance pattern, follow this sequence (toggle HSTPRTCMPL):
+ * Set HSTPRTCMPL = 0.
+ * Set HSTPRTCMPL = 1. This advances the link to the next compliance pattern.
+
+ To exit from the compliance state, set USBH()_UAHC_USBCMD[HCRST] = 1 or power-on-chip
+ reset. */
+ uint32_t physoftrst : 1; /**< [ 31: 31](R/W) USB3 PHY soft reset (PHYSoftRst). When set to 1, initiates a PHY soft reset. After setting
+ this bit to 1, the software needs to clear this bit. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_gusb3pipectlx_s cn; */
+};
+typedef union bdk_usbhx_uahc_gusb3pipectlx bdk_usbhx_uahc_gusb3pipectlx_t;
+
+static inline uint64_t BDK_USBHX_UAHC_GUSB3PIPECTLX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_GUSB3PIPECTLX(unsigned long a, unsigned long b)
+{
+    /* Register exists only on CN88XX: two USB host units [a], one instance [b].
+       Out-of-range arguments are a fatal coding error (__bdk_csr_fatal does not return). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX) || (a > 1) || (b != 0))
+        __bdk_csr_fatal("USBHX_UAHC_GUSB3PIPECTLX", 2, a, b, 0, 0);
+    /* Base address plus 64 GB per-unit stride; the [b] term is always zero here. */
+    return 0x86800000c2c0ll + 0x1000000000ll * (a & 0x1) + 4ll * (b & 0x0);
+}
+
+#define typedef_BDK_USBHX_UAHC_GUSB3PIPECTLX(a,b) bdk_usbhx_uahc_gusb3pipectlx_t
+#define bustype_BDK_USBHX_UAHC_GUSB3PIPECTLX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_GUSB3PIPECTLX(a,b) "USBHX_UAHC_GUSB3PIPECTLX"
+#define device_bar_BDK_USBHX_UAHC_GUSB3PIPECTLX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_GUSB3PIPECTLX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_GUSB3PIPECTLX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_hccparams
+ *
+ * XHCI Controller Capability Parameters Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.3.6.
+ */
+union bdk_usbhx_uahc_hccparams
+{
+    uint32_t u;
+    /* NOTE(review): generated layout mirroring xHCI HCCPARAMS — field order is the
+       hardware ABI per endianness; do not reorder or resize. */
+    struct bdk_usbhx_uahc_hccparams_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t xecp : 16; /**< [ 31: 16](RO) xHCI extended capabilities pointer. */
+ uint32_t maxpsasize : 4; /**< [ 15: 12](RO) Maximum primary-stream-array size. */
+ uint32_t reserved_11 : 1;
+ uint32_t sec : 1; /**< [ 10: 10](RO/H) Stopped EDLTA Capability */
+ uint32_t spc : 1; /**< [ 9: 9](RO/H) Stopped - short packet Capability */
+ uint32_t pae : 1; /**< [ 8: 8](RO) Parse all event data. */
+ uint32_t nss : 1; /**< [ 7: 7](RO) No secondary SID support. */
+ uint32_t ltc : 1; /**< [ 6: 6](RO) Latency tolerance messaging capability. */
+ uint32_t lhrc : 1; /**< [ 5: 5](RO) Light HC reset capability. */
+ uint32_t pind : 1; /**< [ 4: 4](RO) Port indicators. */
+ uint32_t ppc : 1; /**< [ 3: 3](RO) Port power control. Value is based on USBH()_UCTL_HOST_CFG[PPC_EN]. */
+ uint32_t csz : 1; /**< [ 2: 2](RO) Context size. */
+ uint32_t bnc : 1; /**< [ 1: 1](RO) BW negotiation capability. */
+ uint32_t ac64 : 1; /**< [ 0: 0](RO) 64-bit addressing capability. */
+#else /* Word 0 - Little Endian */
+ uint32_t ac64 : 1; /**< [ 0: 0](RO) 64-bit addressing capability. */
+ uint32_t bnc : 1; /**< [ 1: 1](RO) BW negotiation capability. */
+ uint32_t csz : 1; /**< [ 2: 2](RO) Context size. */
+ uint32_t ppc : 1; /**< [ 3: 3](RO) Port power control. Value is based on USBH()_UCTL_HOST_CFG[PPC_EN]. */
+ uint32_t pind : 1; /**< [ 4: 4](RO) Port indicators. */
+ uint32_t lhrc : 1; /**< [ 5: 5](RO) Light HC reset capability. */
+ uint32_t ltc : 1; /**< [ 6: 6](RO) Latency tolerance messaging capability. */
+ uint32_t nss : 1; /**< [ 7: 7](RO) No secondary SID support. */
+ uint32_t pae : 1; /**< [ 8: 8](RO) Parse all event data. */
+ uint32_t spc : 1; /**< [ 9: 9](RO/H) Stopped - short packet Capability */
+ uint32_t sec : 1; /**< [ 10: 10](RO/H) Stopped EDLTA Capability */
+ uint32_t reserved_11 : 1;
+ uint32_t maxpsasize : 4; /**< [ 15: 12](RO) Maximum primary-stream-array size. */
+ uint32_t xecp : 16; /**< [ 31: 16](RO) xHCI extended capabilities pointer. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_hccparams_s cn; */
+};
+typedef union bdk_usbhx_uahc_hccparams bdk_usbhx_uahc_hccparams_t;
+
+static inline uint64_t BDK_USBHX_UAHC_HCCPARAMS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_HCCPARAMS(unsigned long a)
+{
+    /* Register exists only on CN88XX; [a] selects one of the two USB host units.
+       Out-of-range arguments are a fatal coding error (__bdk_csr_fatal does not return). */
+    if (!CAVIUM_IS_MODEL(CAVIUM_CN88XX) || (a > 1))
+        __bdk_csr_fatal("USBHX_UAHC_HCCPARAMS", 1, a, 0, 0, 0);
+    /* Base address plus 64 GB per-unit stride. */
+    return 0x868000000010ll + 0x1000000000ll * (a & 0x1);
+}
+
+#define typedef_BDK_USBHX_UAHC_HCCPARAMS(a) bdk_usbhx_uahc_hccparams_t
+#define bustype_BDK_USBHX_UAHC_HCCPARAMS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_HCCPARAMS(a) "USBHX_UAHC_HCCPARAMS"
+#define device_bar_BDK_USBHX_UAHC_HCCPARAMS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_HCCPARAMS(a) (a)
+#define arguments_BDK_USBHX_UAHC_HCCPARAMS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_hcsparams1
+ *
+ * XHCI Controller Structural Parameters Register 1
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.3.3.
+ */
+union bdk_usbhx_uahc_hcsparams1
+{
+    uint32_t u;
+    /* NOTE(review): generated layout mirroring xHCI HCSPARAMS1 — field order is the
+       hardware ABI per endianness; do not reorder or resize. */
+    struct bdk_usbhx_uahc_hcsparams1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t maxports : 8; /**< [ 31: 24](RO) Maximum number of ports. */
+ uint32_t reserved_19_23 : 5;
+ uint32_t maxintrs : 11; /**< [ 18: 8](RO) Maximum number of interrupters. */
+ uint32_t maxslots : 8; /**< [ 7: 0](RO) Maximum number of device slots. */
+#else /* Word 0 - Little Endian */
+ uint32_t maxslots : 8; /**< [ 7: 0](RO) Maximum number of device slots. */
+ uint32_t maxintrs : 11; /**< [ 18: 8](RO) Maximum number of interrupters. */
+ uint32_t reserved_19_23 : 5;
+ uint32_t maxports : 8; /**< [ 31: 24](RO) Maximum number of ports. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_hcsparams1_s cn; */
+};
+typedef union bdk_usbhx_uahc_hcsparams1 bdk_usbhx_uahc_hcsparams1_t;
+
+static inline uint64_t BDK_USBHX_UAHC_HCSPARAMS1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_HCSPARAMS1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000004ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_HCSPARAMS1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_HCSPARAMS1(a) bdk_usbhx_uahc_hcsparams1_t
+#define bustype_BDK_USBHX_UAHC_HCSPARAMS1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_HCSPARAMS1(a) "USBHX_UAHC_HCSPARAMS1"
+#define device_bar_BDK_USBHX_UAHC_HCSPARAMS1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_HCSPARAMS1(a) (a)
+#define arguments_BDK_USBHX_UAHC_HCSPARAMS1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_hcsparams2
+ *
+ * XHCI Controller Structural Parameters Register 2
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.3.4.
+ */
+union bdk_usbhx_uahc_hcsparams2
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_hcsparams2_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t maxscratchpadbufs_l   : 5;  /**< [ 31: 27](RO) Maximum number of scratchpad buffers[4:0]. */
+        uint32_t spr                   : 1;  /**< [ 26: 26](RO) Scratchpad restore. */
+        uint32_t maxscratchpadbufs_h   : 5;  /**< [ 25: 21](RO) Maximum number of scratchpad buffers[9:5]. */
+        uint32_t reserved_8_20         : 13;
+        uint32_t erst_max              : 4;  /**< [  7:  4](RO) Event ring segment table maximum. */
+        uint32_t ist                   : 4;  /**< [  3:  0](RO) Isochronous scheduling threshold. */
+#else /* Word 0 - Little Endian */
+        uint32_t ist                   : 4;  /**< [  3:  0](RO) Isochronous scheduling threshold. */
+        uint32_t erst_max              : 4;  /**< [  7:  4](RO) Event ring segment table maximum. */
+        uint32_t reserved_8_20         : 13;
+        uint32_t maxscratchpadbufs_h   : 5;  /**< [ 25: 21](RO) Maximum number of scratchpad buffers[9:5]. */
+        uint32_t spr                   : 1;  /**< [ 26: 26](RO) Scratchpad restore. */
+        uint32_t maxscratchpadbufs_l   : 5;  /**< [ 31: 27](RO) Maximum number of scratchpad buffers[4:0]. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_hcsparams2_s cn; */
+};
+typedef union bdk_usbhx_uahc_hcsparams2 bdk_usbhx_uahc_hcsparams2_t;
+
+/* CSR address for USBH(a)_UAHC_HCSPARAMS2. Valid only on CN88XX with
+   controller index a <= 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_HCSPARAMS2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_HCSPARAMS2(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x868000000008ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_HCSPARAMS2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_HCSPARAMS2(a) bdk_usbhx_uahc_hcsparams2_t
+#define bustype_BDK_USBHX_UAHC_HCSPARAMS2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_HCSPARAMS2(a) "USBHX_UAHC_HCSPARAMS2"
+#define device_bar_BDK_USBHX_UAHC_HCSPARAMS2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_HCSPARAMS2(a) (a)
+#define arguments_BDK_USBHX_UAHC_HCSPARAMS2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_hcsparams3
+ *
+ * XHCI Controller Structural Parameters Register 3
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.3.5.
+ */
+union bdk_usbhx_uahc_hcsparams3
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_hcsparams3_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t u2_device_exit_latency : 16;/**< [ 31: 16](RO) U2 device exit latency. */
+        uint32_t reserved_8_15         : 8;
+        uint32_t u1_device_exit_latency : 8; /**< [  7:  0](RO) U1 device exit latency. */
+#else /* Word 0 - Little Endian */
+        uint32_t u1_device_exit_latency : 8; /**< [  7:  0](RO) U1 device exit latency. */
+        uint32_t reserved_8_15         : 8;
+        uint32_t u2_device_exit_latency : 16;/**< [ 31: 16](RO) U2 device exit latency. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_hcsparams3_s cn; */
+};
+typedef union bdk_usbhx_uahc_hcsparams3 bdk_usbhx_uahc_hcsparams3_t;
+
+/* CSR address for USBH(a)_UAHC_HCSPARAMS3. Valid only on CN88XX with
+   controller index a <= 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_HCSPARAMS3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_HCSPARAMS3(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x86800000000cll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_HCSPARAMS3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_HCSPARAMS3(a) bdk_usbhx_uahc_hcsparams3_t
+#define bustype_BDK_USBHX_UAHC_HCSPARAMS3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_HCSPARAMS3(a) "USBHX_UAHC_HCSPARAMS3"
+#define device_bar_BDK_USBHX_UAHC_HCSPARAMS3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_HCSPARAMS3(a) (a)
+#define arguments_BDK_USBHX_UAHC_HCSPARAMS3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_iman#
+ *
+ * XHCI Interrupt Management Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.5.2.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_imanx
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_imanx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_2_31         : 30;
+        uint32_t ie                    : 1;  /**< [  1:  1](R/W) Interrupt enable. */
+        uint32_t ip                    : 1;  /**< [  0:  0](R/W1C/H) Interrupt pending. */
+#else /* Word 0 - Little Endian */
+        uint32_t ip                    : 1;  /**< [  0:  0](R/W1C/H) Interrupt pending. */
+        uint32_t ie                    : 1;  /**< [  1:  1](R/W) Interrupt enable. */
+        uint32_t reserved_2_31         : 30;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_imanx_s cn; */
+};
+typedef union bdk_usbhx_uahc_imanx bdk_usbhx_uahc_imanx_t;
+
+/* CSR address for USBH(a)_UAHC_IMAN(b). Valid only on CN88XX with a <= 1
+   and interrupter index b == 0 (single interrupter, hence the 0ll stride);
+   anything else is reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_IMANX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_IMANX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+        return 0x868000000460ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBHX_UAHC_IMANX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_IMANX(a,b) bdk_usbhx_uahc_imanx_t
+#define bustype_BDK_USBHX_UAHC_IMANX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_IMANX(a,b) "USBHX_UAHC_IMANX"
+#define device_bar_BDK_USBHX_UAHC_IMANX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_IMANX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_IMANX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_imod#
+ *
+ * XHCI Interrupt Moderation Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.5.2.2.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_imodx
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_imodx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t imodc                 : 16; /**< [ 31: 16](R/W) Interrupt moderation counter. */
+        uint32_t imodi                 : 16; /**< [ 15:  0](R/W) Interrupt moderation interval. */
+#else /* Word 0 - Little Endian */
+        uint32_t imodi                 : 16; /**< [ 15:  0](R/W) Interrupt moderation interval. */
+        uint32_t imodc                 : 16; /**< [ 31: 16](R/W) Interrupt moderation counter. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_imodx_s cn; */
+};
+typedef union bdk_usbhx_uahc_imodx bdk_usbhx_uahc_imodx_t;
+
+/* CSR address for USBH(a)_UAHC_IMOD(b). Valid only on CN88XX with a <= 1
+   and interrupter index b == 0; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_IMODX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_IMODX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+        return 0x868000000464ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBHX_UAHC_IMODX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_IMODX(a,b) bdk_usbhx_uahc_imodx_t
+#define bustype_BDK_USBHX_UAHC_IMODX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_IMODX(a,b) "USBHX_UAHC_IMODX"
+#define device_bar_BDK_USBHX_UAHC_IMODX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_IMODX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_IMODX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_mfindex
+ *
+ * XHCI Microframe Index Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.5.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_mfindex
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_mfindex_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_14_31        : 18;
+        uint32_t mfindex               : 14; /**< [ 13:  0](RO/H) Microframe index. */
+#else /* Word 0 - Little Endian */
+        uint32_t mfindex               : 14; /**< [ 13:  0](RO/H) Microframe index. */
+        uint32_t reserved_14_31        : 18;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_mfindex_s cn; */
+};
+typedef union bdk_usbhx_uahc_mfindex bdk_usbhx_uahc_mfindex_t;
+
+/* CSR address for USBH(a)_UAHC_MFINDEX. Valid only on CN88XX with
+   controller index a <= 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_MFINDEX(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_MFINDEX(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x868000000440ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_MFINDEX", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_MFINDEX(a) bdk_usbhx_uahc_mfindex_t
+#define bustype_BDK_USBHX_UAHC_MFINDEX(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_MFINDEX(a) "USBHX_UAHC_MFINDEX"
+#define device_bar_BDK_USBHX_UAHC_MFINDEX(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_MFINDEX(a) (a)
+#define arguments_BDK_USBHX_UAHC_MFINDEX(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_pagesize
+ *
+ * XHCI Page-Size Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.3.
+ */
+union bdk_usbhx_uahc_pagesize
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_pagesize_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t pagesize              : 16; /**< [ 15:  0](RO) Page size. */
+#else /* Word 0 - Little Endian */
+        uint32_t pagesize              : 16; /**< [ 15:  0](RO) Page size. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_pagesize_s cn; */
+};
+typedef union bdk_usbhx_uahc_pagesize bdk_usbhx_uahc_pagesize_t;
+
+/* CSR address for USBH(a)_UAHC_PAGESIZE. Valid only on CN88XX with
+   controller index a <= 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_PAGESIZE(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_PAGESIZE(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x868000000028ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_PAGESIZE", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_PAGESIZE(a) bdk_usbhx_uahc_pagesize_t
+#define bustype_BDK_USBHX_UAHC_PAGESIZE(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_PAGESIZE(a) "USBHX_UAHC_PAGESIZE"
+#define device_bar_BDK_USBHX_UAHC_PAGESIZE(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_PAGESIZE(a) (a)
+#define arguments_BDK_USBHX_UAHC_PAGESIZE(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_porthlpmc_20#
+ *
+ * XHCI Port Hardware LPM Control (High-Speed) Register
+ * For information on this register, refer to the xHCI Specification, v1.1, section 5.4.11.2.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_porthlpmc_20x
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_porthlpmc_20x_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_14_31        : 18;
+        uint32_t hirdd                 : 4;  /**< [ 13: 10](R/W) See section 5.4.11.2 of the XHCI Spec 1.1.
+                                                                 If USBH()_UAHC_SUPTPRT2_DW2[BLC] = 0, then HIRD timing is applied to this field.
+                                                                 If USBH()_UAHC_SUPTPRT2_DW2[BLC] = 1, then BESL timing is applied to this field. */
+        uint32_t l1_timeout            : 8;  /**< [  9:  2](R/W) Timeout value for the L1 inactivity timer (LPM timer). This field is set to 0x0 by the
+                                                                 assertion of PR to 1. Refer to section 4.23.5.1.1.1 (in XHCI spec 1.1) for more
+                                                                 information on L1 Timeout operation.
+                                                                 The following are permissible values:
+                                                                 0x0 = 128 us. (default).
+                                                                 0x1 = 256 us.
+                                                                 0x2 = 512 us.
+                                                                 0x3 = 768 us.
+                                                                 _ ...
+                                                                 0xFF = 65280 us. */
+        uint32_t hirdm                 : 2;  /**< [  1:  0](R/W) Host-initiated resume-duration mode. */
+#else /* Word 0 - Little Endian */
+        uint32_t hirdm                 : 2;  /**< [  1:  0](R/W) Host-initiated resume-duration mode. */
+        uint32_t l1_timeout            : 8;  /**< [  9:  2](R/W) Timeout value for the L1 inactivity timer (LPM timer). This field is set to 0x0 by the
+                                                                 assertion of PR to 1. Refer to section 4.23.5.1.1.1 (in XHCI spec 1.1) for more
+                                                                 information on L1 Timeout operation.
+                                                                 The following are permissible values:
+                                                                 0x0 = 128 us. (default).
+                                                                 0x1 = 256 us.
+                                                                 0x2 = 512 us.
+                                                                 0x3 = 768 us.
+                                                                 _ ...
+                                                                 0xFF = 65280 us. */
+        uint32_t hirdd                 : 4;  /**< [ 13: 10](R/W) See section 5.4.11.2 of the XHCI Spec 1.1.
+                                                                 If USBH()_UAHC_SUPTPRT2_DW2[BLC] = 0, then HIRD timing is applied to this field.
+                                                                 If USBH()_UAHC_SUPTPRT2_DW2[BLC] = 1, then BESL timing is applied to this field. */
+        uint32_t reserved_14_31        : 18;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_porthlpmc_20x_s cn; */
+};
+typedef union bdk_usbhx_uahc_porthlpmc_20x bdk_usbhx_uahc_porthlpmc_20x_t;
+
+/* CSR address for USBH(a)_UAHC_PORTHLPMC_20(b). Valid only on CN88XX with
+   a <= 1 and b == 0 (port 0 is the USB2.0 port); otherwise reported
+   through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_PORTHLPMC_20X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_PORTHLPMC_20X(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+        return 0x86800000042cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBHX_UAHC_PORTHLPMC_20X", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_PORTHLPMC_20X(a,b) bdk_usbhx_uahc_porthlpmc_20x_t
+#define bustype_BDK_USBHX_UAHC_PORTHLPMC_20X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_PORTHLPMC_20X(a,b) "USBHX_UAHC_PORTHLPMC_20X"
+#define device_bar_BDK_USBHX_UAHC_PORTHLPMC_20X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_PORTHLPMC_20X(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_PORTHLPMC_20X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_porthlpmc_ss#
+ *
+ * XHCI Port Hardware LPM Control (SuperSpeed) Register
+ * The USB3 port hardware LPM control register is reserved and shall be treated as RsvdP by
+ * software. See xHCI specification v1.1 section 5.4.11.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST].
+ */
+union bdk_usbhx_uahc_porthlpmc_ssx
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_porthlpmc_ssx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31         : 32;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31         : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_porthlpmc_ssx_s cn; */
+};
+typedef union bdk_usbhx_uahc_porthlpmc_ssx bdk_usbhx_uahc_porthlpmc_ssx_t;
+
+/* CSR address for USBH(a)_UAHC_PORTHLPMC_SS(b). Valid only on CN88XX with
+   a <= 1 and b == 1 (port 1 is the USB3.0 port); otherwise reported
+   through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_PORTHLPMC_SSX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_PORTHLPMC_SSX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==1)))
+        return 0x86800000042cll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_PORTHLPMC_SSX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_PORTHLPMC_SSX(a,b) bdk_usbhx_uahc_porthlpmc_ssx_t
+#define bustype_BDK_USBHX_UAHC_PORTHLPMC_SSX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_PORTHLPMC_SSX(a,b) "USBHX_UAHC_PORTHLPMC_SSX"
+#define device_bar_BDK_USBHX_UAHC_PORTHLPMC_SSX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_PORTHLPMC_SSX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_PORTHLPMC_SSX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_portli_20#
+ *
+ * XHCI Port Link (High-Speed) Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.10.
+ */
+union bdk_usbhx_uahc_portli_20x
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_portli_20x_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_0_31         : 32;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_31         : 32;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_portli_20x_s cn; */
+};
+typedef union bdk_usbhx_uahc_portli_20x bdk_usbhx_uahc_portli_20x_t;
+
+/* CSR address for USBH(a)_UAHC_PORTLI_20(b). Valid only on CN88XX with
+   a <= 1 and b == 0; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_PORTLI_20X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_PORTLI_20X(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+        return 0x868000000428ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBHX_UAHC_PORTLI_20X", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_PORTLI_20X(a,b) bdk_usbhx_uahc_portli_20x_t
+#define bustype_BDK_USBHX_UAHC_PORTLI_20X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_PORTLI_20X(a,b) "USBHX_UAHC_PORTLI_20X"
+#define device_bar_BDK_USBHX_UAHC_PORTLI_20X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_PORTLI_20X(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_PORTLI_20X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_portli_ss#
+ *
+ * XHCI Port Link (SuperSpeed) Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.10.
+ */
+union bdk_usbhx_uahc_portli_ssx
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_portli_ssx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_16_31        : 16;
+        uint32_t linkerrorcount        : 16; /**< [ 15:  0](RO/H) Link error count. */
+#else /* Word 0 - Little Endian */
+        uint32_t linkerrorcount        : 16; /**< [ 15:  0](RO/H) Link error count. */
+        uint32_t reserved_16_31        : 16;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_portli_ssx_s cn; */
+};
+typedef union bdk_usbhx_uahc_portli_ssx bdk_usbhx_uahc_portli_ssx_t;
+
+/* CSR address for USBH(a)_UAHC_PORTLI_SS(b). Valid only on CN88XX with
+   a <= 1 and b == 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_PORTLI_SSX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_PORTLI_SSX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==1)))
+        return 0x868000000428ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_PORTLI_SSX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_PORTLI_SSX(a,b) bdk_usbhx_uahc_portli_ssx_t
+#define bustype_BDK_USBHX_UAHC_PORTLI_SSX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_PORTLI_SSX(a,b) "USBHX_UAHC_PORTLI_SSX"
+#define device_bar_BDK_USBHX_UAHC_PORTLI_SSX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_PORTLI_SSX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_PORTLI_SSX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_portpmsc_20#
+ *
+ * XHCI Port Power Management Status/Control (High-Speed) Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.9.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST].
+ */
+union bdk_usbhx_uahc_portpmsc_20x
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_portpmsc_20x_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t port_test_control     : 4;  /**< [ 31: 28](R/W) Port test control. */
+        uint32_t reserved_17_27        : 11;
+        uint32_t hle                   : 1;  /**< [ 16: 16](R/W) Hardware LPM enable. */
+        uint32_t l1_device_slot        : 8;  /**< [ 15:  8](R/W) L1 device slot. */
+        uint32_t hird                  : 4;  /**< [  7:  4](R/W) Host-initiated resume duration. */
+        uint32_t rwe                   : 1;  /**< [  3:  3](R/W) Remote wake enable. */
+        uint32_t l1s                   : 3;  /**< [  2:  0](RO/H) L1 status. */
+#else /* Word 0 - Little Endian */
+        uint32_t l1s                   : 3;  /**< [  2:  0](RO/H) L1 status. */
+        uint32_t rwe                   : 1;  /**< [  3:  3](R/W) Remote wake enable. */
+        uint32_t hird                  : 4;  /**< [  7:  4](R/W) Host-initiated resume duration. */
+        uint32_t l1_device_slot        : 8;  /**< [ 15:  8](R/W) L1 device slot. */
+        uint32_t hle                   : 1;  /**< [ 16: 16](R/W) Hardware LPM enable. */
+        uint32_t reserved_17_27        : 11;
+        uint32_t port_test_control     : 4;  /**< [ 31: 28](R/W) Port test control. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_portpmsc_20x_s cn; */
+};
+typedef union bdk_usbhx_uahc_portpmsc_20x bdk_usbhx_uahc_portpmsc_20x_t;
+
+/* CSR address for USBH(a)_UAHC_PORTPMSC_20(b). Valid only on CN88XX with
+   a <= 1 and b == 0; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_PORTPMSC_20X(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_PORTPMSC_20X(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+        return 0x868000000424ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x0);
+    __bdk_csr_fatal("USBHX_UAHC_PORTPMSC_20X", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_PORTPMSC_20X(a,b) bdk_usbhx_uahc_portpmsc_20x_t
+#define bustype_BDK_USBHX_UAHC_PORTPMSC_20X(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_PORTPMSC_20X(a,b) "USBHX_UAHC_PORTPMSC_20X"
+#define device_bar_BDK_USBHX_UAHC_PORTPMSC_20X(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_PORTPMSC_20X(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_PORTPMSC_20X(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_portpmsc_ss#
+ *
+ * XHCI Port Power Management Status/Control (SuperSpeed) Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.9.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST].
+ */
+union bdk_usbhx_uahc_portpmsc_ssx
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_portpmsc_ssx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t reserved_17_31        : 15;
+        uint32_t fla                   : 1;  /**< [ 16: 16](R/W/H) Force link PM accept. */
+        uint32_t u2_timeout            : 8;  /**< [ 15:  8](R/W/H) U2 timeout. */
+        uint32_t u1_timeout            : 8;  /**< [  7:  0](R/W/H) U1 timeout. */
+#else /* Word 0 - Little Endian */
+        uint32_t u1_timeout            : 8;  /**< [  7:  0](R/W/H) U1 timeout. */
+        uint32_t u2_timeout            : 8;  /**< [ 15:  8](R/W/H) U2 timeout. */
+        uint32_t fla                   : 1;  /**< [ 16: 16](R/W/H) Force link PM accept. */
+        uint32_t reserved_17_31        : 15;
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_portpmsc_ssx_s cn; */
+};
+typedef union bdk_usbhx_uahc_portpmsc_ssx bdk_usbhx_uahc_portpmsc_ssx_t;
+
+/* CSR address for USBH(a)_UAHC_PORTPMSC_SS(b). Valid only on CN88XX with
+   a <= 1 and b == 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_PORTPMSC_SSX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_PORTPMSC_SSX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==1)))
+        return 0x868000000424ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_PORTPMSC_SSX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_PORTPMSC_SSX(a,b) bdk_usbhx_uahc_portpmsc_ssx_t
+#define bustype_BDK_USBHX_UAHC_PORTPMSC_SSX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_PORTPMSC_SSX(a,b) "USBHX_UAHC_PORTPMSC_SSX"
+#define device_bar_BDK_USBHX_UAHC_PORTPMSC_SSX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_PORTPMSC_SSX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_PORTPMSC_SSX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_portsc#
+ *
+ * XHCI Port Status and Control Registers
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.8. Port 1
+ * is USB3.0 SuperSpeed link, Port 0 is USB2.0 high-speed/full-speed/low-speed link.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST].
+ */
+union bdk_usbhx_uahc_portscx
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_portscx_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t wpr                   : 1;  /**< [ 31: 31](WO) Warm port reset. */
+        uint32_t dr                    : 1;  /**< [ 30: 30](RO/H) Device removable. */
+        uint32_t reserved_28_29        : 2;
+        uint32_t woe                   : 1;  /**< [ 27: 27](R/W) Wake on overcurrent enable. */
+        uint32_t wde                   : 1;  /**< [ 26: 26](R/W) Wake on disconnect enable. */
+        uint32_t wce                   : 1;  /**< [ 25: 25](R/W) Wake on connect enable. */
+        uint32_t cas                   : 1;  /**< [ 24: 24](RO/H) Cold attach status. */
+        uint32_t cec                   : 1;  /**< [ 23: 23](R/W1C/H) Port configuration error change. */
+        uint32_t plc                   : 1;  /**< [ 22: 22](R/W1C/H) Port link state change. */
+        uint32_t prc                   : 1;  /**< [ 21: 21](R/W1C/H) Port reset change. */
+        uint32_t occ                   : 1;  /**< [ 20: 20](R/W1C/H) Overcurrent change. */
+        uint32_t wrc                   : 1;  /**< [ 19: 19](R/W1C/H) Warm port reset change. */
+        uint32_t pec                   : 1;  /**< [ 18: 18](R/W1C/H) Port enabled/disabled change. */
+        uint32_t csc                   : 1;  /**< [ 17: 17](R/W1C/H) Connect status change. */
+        uint32_t lws                   : 1;  /**< [ 16: 16](WO) Port link state write strobe. */
+        uint32_t pic                   : 2;  /**< [ 15: 14](R/W/H) Port indicator control. */
+        uint32_t portspeed             : 4;  /**< [ 13: 10](RO/H) Port speed. */
+        uint32_t pp                    : 1;  /**< [  9:  9](R/W/H) Port power. */
+        uint32_t pls                   : 4;  /**< [  8:  5](R/W/H) Port link state. */
+        uint32_t pr                    : 1;  /**< [  4:  4](R/W1S/H) Port reset. */
+        uint32_t oca                   : 1;  /**< [  3:  3](RO/H) Overcurrent active. */
+        uint32_t reserved_2            : 1;
+        uint32_t ped                   : 1;  /**< [  1:  1](R/W1C/H) Port enabled/disabled. */
+        uint32_t ccs                   : 1;  /**< [  0:  0](RO/H) Current connect status. */
+#else /* Word 0 - Little Endian */
+        uint32_t ccs                   : 1;  /**< [  0:  0](RO/H) Current connect status. */
+        uint32_t ped                   : 1;  /**< [  1:  1](R/W1C/H) Port enabled/disabled. */
+        uint32_t reserved_2            : 1;
+        uint32_t oca                   : 1;  /**< [  3:  3](RO/H) Overcurrent active. */
+        uint32_t pr                    : 1;  /**< [  4:  4](R/W1S/H) Port reset. */
+        uint32_t pls                   : 4;  /**< [  8:  5](R/W/H) Port link state. */
+        uint32_t pp                    : 1;  /**< [  9:  9](R/W/H) Port power. */
+        uint32_t portspeed             : 4;  /**< [ 13: 10](RO/H) Port speed. */
+        uint32_t pic                   : 2;  /**< [ 15: 14](R/W/H) Port indicator control. */
+        uint32_t lws                   : 1;  /**< [ 16: 16](WO) Port link state write strobe. */
+        uint32_t csc                   : 1;  /**< [ 17: 17](R/W1C/H) Connect status change. */
+        uint32_t pec                   : 1;  /**< [ 18: 18](R/W1C/H) Port enabled/disabled change. */
+        uint32_t wrc                   : 1;  /**< [ 19: 19](R/W1C/H) Warm port reset change. */
+        uint32_t occ                   : 1;  /**< [ 20: 20](R/W1C/H) Overcurrent change. */
+        uint32_t prc                   : 1;  /**< [ 21: 21](R/W1C/H) Port reset change. */
+        uint32_t plc                   : 1;  /**< [ 22: 22](R/W1C/H) Port link state change. */
+        uint32_t cec                   : 1;  /**< [ 23: 23](R/W1C/H) Port configuration error change. */
+        uint32_t cas                   : 1;  /**< [ 24: 24](RO/H) Cold attach status. */
+        uint32_t wce                   : 1;  /**< [ 25: 25](R/W) Wake on connect enable. */
+        uint32_t wde                   : 1;  /**< [ 26: 26](R/W) Wake on disconnect enable. */
+        uint32_t woe                   : 1;  /**< [ 27: 27](R/W) Wake on overcurrent enable. */
+        uint32_t reserved_28_29        : 2;
+        uint32_t dr                    : 1;  /**< [ 30: 30](RO/H) Device removable. */
+        uint32_t wpr                   : 1;  /**< [ 31: 31](WO) Warm port reset. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_portscx_s cn; */
+};
+typedef union bdk_usbhx_uahc_portscx bdk_usbhx_uahc_portscx_t;
+
+/* CSR address for USBH(a)_UAHC_PORTSC(b). Valid only on CN88XX with
+   a <= 1 and port index b <= 1 (ports are 0x10 bytes apart); otherwise
+   reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_PORTSCX(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_PORTSCX(unsigned long a, unsigned long b)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b<=1)))
+        return 0x868000000420ll + 0x1000000000ll * ((a) & 0x1) + 0x10ll * ((b) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_PORTSCX", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_PORTSCX(a,b) bdk_usbhx_uahc_portscx_t
+#define bustype_BDK_USBHX_UAHC_PORTSCX(a,b) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_PORTSCX(a,b) "USBHX_UAHC_PORTSCX"
+#define device_bar_BDK_USBHX_UAHC_PORTSCX(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_PORTSCX(a,b) (a)
+#define arguments_BDK_USBHX_UAHC_PORTSCX(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_rtsoff
+ *
+ * XHCI Runtime Register-Space Offset Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.3.8.
+ */
+union bdk_usbhx_uahc_rtsoff
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_rtsoff_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t rtsoff                : 27; /**< [ 31:  5](RO) Runtime register-space offset. */
+        uint32_t reserved_0_4          : 5;
+#else /* Word 0 - Little Endian */
+        uint32_t reserved_0_4          : 5;
+        uint32_t rtsoff                : 27; /**< [ 31:  5](RO) Runtime register-space offset. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_rtsoff_s cn; */
+};
+typedef union bdk_usbhx_uahc_rtsoff bdk_usbhx_uahc_rtsoff_t;
+
+/* CSR address for USBH(a)_UAHC_RTSOFF. Valid only on CN88XX with
+   controller index a <= 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_RTSOFF(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_RTSOFF(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x868000000018ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_RTSOFF", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_RTSOFF(a) bdk_usbhx_uahc_rtsoff_t
+#define bustype_BDK_USBHX_UAHC_RTSOFF(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_RTSOFF(a) "USBHX_UAHC_RTSOFF"
+#define device_bar_BDK_USBHX_UAHC_RTSOFF(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_RTSOFF(a) (a)
+#define arguments_BDK_USBHX_UAHC_RTSOFF(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_suptprt2_dw0
+ *
+ * XHCI Supported-Protocol-Capability (USB 2.0) Register 0
+ * For information on this register, refer to the xHCI Specification, v1.0, section 7.2.
+ */
+union bdk_usbhx_uahc_suptprt2_dw0
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_suptprt2_dw0_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t majorrev              : 8;  /**< [ 31: 24](RO) Major revision. */
+        uint32_t minorrev              : 8;  /**< [ 23: 16](RO) Minor revision. */
+        uint32_t nextcapptr            : 8;  /**< [ 15:  8](RO) Next capability pointer. */
+        uint32_t capid                 : 8;  /**< [  7:  0](RO) Capability ID = supported protocol. */
+#else /* Word 0 - Little Endian */
+        uint32_t capid                 : 8;  /**< [  7:  0](RO) Capability ID = supported protocol. */
+        uint32_t nextcapptr            : 8;  /**< [ 15:  8](RO) Next capability pointer. */
+        uint32_t minorrev              : 8;  /**< [ 23: 16](RO) Minor revision. */
+        uint32_t majorrev              : 8;  /**< [ 31: 24](RO) Major revision. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_suptprt2_dw0_s cn; */
+};
+typedef union bdk_usbhx_uahc_suptprt2_dw0 bdk_usbhx_uahc_suptprt2_dw0_t;
+
+/* CSR address for USBH(a)_UAHC_SUPTPRT2_DW0. Valid only on CN88XX with
+   controller index a <= 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT2_DW0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT2_DW0(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x868000000890ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_SUPTPRT2_DW0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_SUPTPRT2_DW0(a) bdk_usbhx_uahc_suptprt2_dw0_t
+#define bustype_BDK_USBHX_UAHC_SUPTPRT2_DW0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_SUPTPRT2_DW0(a) "USBHX_UAHC_SUPTPRT2_DW0"
+#define device_bar_BDK_USBHX_UAHC_SUPTPRT2_DW0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_SUPTPRT2_DW0(a) (a)
+#define arguments_BDK_USBHX_UAHC_SUPTPRT2_DW0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_suptprt2_dw1
+ *
+ * XHCI Supported-Protocol-Capability (USB 2.0) Register 1
+ * For information on this register, refer to the xHCI Specification, v1.0, section 7.2.
+ */
+union bdk_usbhx_uahc_suptprt2_dw1
+{
+    uint32_t u;
+    struct bdk_usbhx_uahc_suptprt2_dw1_s
+    {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+        uint32_t name                  : 32; /**< [ 31:  0](RO) Name string: 'USB'. */
+#else /* Word 0 - Little Endian */
+        uint32_t name                  : 32; /**< [ 31:  0](RO) Name string: 'USB'. */
+#endif /* Word 0 - End */
+    } s;
+    /* struct bdk_usbhx_uahc_suptprt2_dw1_s cn; */
+};
+typedef union bdk_usbhx_uahc_suptprt2_dw1 bdk_usbhx_uahc_suptprt2_dw1_t;
+
+/* CSR address for USBH(a)_UAHC_SUPTPRT2_DW1. Valid only on CN88XX with
+   controller index a <= 1; otherwise reported through __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT2_DW1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT2_DW1(unsigned long a)
+{
+    if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+        return 0x868000000894ll + 0x1000000000ll * ((a) & 0x1);
+    __bdk_csr_fatal("USBHX_UAHC_SUPTPRT2_DW1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_SUPTPRT2_DW1(a) bdk_usbhx_uahc_suptprt2_dw1_t
+#define bustype_BDK_USBHX_UAHC_SUPTPRT2_DW1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_SUPTPRT2_DW1(a) "USBHX_UAHC_SUPTPRT2_DW1"
+#define device_bar_BDK_USBHX_UAHC_SUPTPRT2_DW1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_SUPTPRT2_DW1(a) (a)
+#define arguments_BDK_USBHX_UAHC_SUPTPRT2_DW1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_suptprt2_dw2
+ *
+ * XHCI Supported-Protocol-Capability (USB 2.0) Register 2
+ * For information on this register, refer to the xHCI Specification, v1.0, section 7.2.
+ */
+union bdk_usbhx_uahc_suptprt2_dw2
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_suptprt2_dw2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t psic : 4; /**< [ 31: 28](RO) Protocol speed ID count. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t blc : 1; /**< [ 20: 20](RO) BESL LPM capability. */
+ uint32_t hlc : 1; /**< [ 19: 19](RO) Hardware LPM capability. */
+ uint32_t ihi : 1; /**< [ 18: 18](RO) Integrated hub implemented. */
+ uint32_t hso : 1; /**< [ 17: 17](RO) High-speed only. */
+ uint32_t reserved_16 : 1;
+ uint32_t compatprtcnt : 8; /**< [ 15: 8](RO) Compatible port count. */
+ uint32_t compatprtoff : 8; /**< [ 7: 0](RO) Compatible port offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t compatprtoff : 8; /**< [ 7: 0](RO) Compatible port offset. */
+ uint32_t compatprtcnt : 8; /**< [ 15: 8](RO) Compatible port count. */
+ uint32_t reserved_16 : 1;
+ uint32_t hso : 1; /**< [ 17: 17](RO) High-speed only. */
+ uint32_t ihi : 1; /**< [ 18: 18](RO) Integrated hub implemented. */
+ uint32_t hlc : 1; /**< [ 19: 19](RO) Hardware LPM capability. */
+ uint32_t blc : 1; /**< [ 20: 20](RO) BESL LPM capability. */
+ uint32_t reserved_21_27 : 7;
+ uint32_t psic : 4; /**< [ 31: 28](RO) Protocol speed ID count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_suptprt2_dw2_s cn; */
+};
+typedef union bdk_usbhx_uahc_suptprt2_dw2 bdk_usbhx_uahc_suptprt2_dw2_t;
+
+/* Physical CSR address of USBH(a)_UAHC_SUPTPRT2_DW2; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT2_DW2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT2_DW2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000898ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_SUPTPRT2_DW2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_SUPTPRT2_DW2(a) bdk_usbhx_uahc_suptprt2_dw2_t
+#define bustype_BDK_USBHX_UAHC_SUPTPRT2_DW2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_SUPTPRT2_DW2(a) "USBHX_UAHC_SUPTPRT2_DW2"
+#define device_bar_BDK_USBHX_UAHC_SUPTPRT2_DW2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_SUPTPRT2_DW2(a) (a)
+#define arguments_BDK_USBHX_UAHC_SUPTPRT2_DW2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_suptprt2_dw3
+ *
+ * XHCI Supported-Protocol-Capability (USB 2.0) Register 3
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbhx_uahc_suptprt2_dw3
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_suptprt2_dw3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_5_31 : 27;
+ uint32_t protslottype : 5; /**< [ 4: 0](RO) Protocol slot type. */
+#else /* Word 0 - Little Endian */
+ uint32_t protslottype : 5; /**< [ 4: 0](RO) Protocol slot type. */
+ uint32_t reserved_5_31 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_suptprt2_dw3_s cn; */
+};
+typedef union bdk_usbhx_uahc_suptprt2_dw3 bdk_usbhx_uahc_suptprt2_dw3_t;
+
+/* Physical CSR address of USBH(a)_UAHC_SUPTPRT2_DW3; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT2_DW3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT2_DW3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x86800000089cll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_SUPTPRT2_DW3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_SUPTPRT2_DW3(a) bdk_usbhx_uahc_suptprt2_dw3_t
+#define bustype_BDK_USBHX_UAHC_SUPTPRT2_DW3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_SUPTPRT2_DW3(a) "USBHX_UAHC_SUPTPRT2_DW3"
+#define device_bar_BDK_USBHX_UAHC_SUPTPRT2_DW3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_SUPTPRT2_DW3(a) (a)
+#define arguments_BDK_USBHX_UAHC_SUPTPRT2_DW3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_suptprt3_dw0
+ *
+ * XHCI Supported-Protocol-Capability (USB 3.0) Register 0
+ * For information on this register, refer to the xHCI Specification, v1.0, section 7.2.
+ */
+union bdk_usbhx_uahc_suptprt3_dw0
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_suptprt3_dw0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t majorrev : 8; /**< [ 31: 24](RO) Major revision. */
+ uint32_t minorrev : 8; /**< [ 23: 16](RO) Minor revision. */
+ uint32_t nextcapptr : 8; /**< [ 15: 8](RO/H) Next capability pointer. Value depends on USBH()_UAHC_GUCTL[EXTCAPSUPTEN]. If EXTCAPSUPTEN
+ =
+ 0, value is 0x0. If EXTCAPSUPTEN = 1, value is 0x4. */
+ uint32_t capid : 8; /**< [ 7: 0](RO) Capability ID = supported protocol. */
+#else /* Word 0 - Little Endian */
+ uint32_t capid : 8; /**< [ 7: 0](RO) Capability ID = supported protocol. */
+ uint32_t nextcapptr : 8; /**< [ 15: 8](RO/H) Next capability pointer. Value depends on USBH()_UAHC_GUCTL[EXTCAPSUPTEN]. If EXTCAPSUPTEN
+ =
+ 0, value is 0x0. If EXTCAPSUPTEN = 1, value is 0x4. */
+ uint32_t minorrev : 8; /**< [ 23: 16](RO) Minor revision. */
+ uint32_t majorrev : 8; /**< [ 31: 24](RO) Major revision. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_suptprt3_dw0_s cn; */
+};
+typedef union bdk_usbhx_uahc_suptprt3_dw0 bdk_usbhx_uahc_suptprt3_dw0_t;
+
+/* Physical CSR address of USBH(a)_UAHC_SUPTPRT3_DW0; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT3_DW0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT3_DW0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x8680000008a0ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_SUPTPRT3_DW0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_SUPTPRT3_DW0(a) bdk_usbhx_uahc_suptprt3_dw0_t
+#define bustype_BDK_USBHX_UAHC_SUPTPRT3_DW0(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_SUPTPRT3_DW0(a) "USBHX_UAHC_SUPTPRT3_DW0"
+#define device_bar_BDK_USBHX_UAHC_SUPTPRT3_DW0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_SUPTPRT3_DW0(a) (a)
+#define arguments_BDK_USBHX_UAHC_SUPTPRT3_DW0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_suptprt3_dw1
+ *
+ * XHCI Supported-Protocol-Capability (USB 3.0) Register 1
+ * For information on this register, refer to the xHCI Specification, v1.0, section 7.2.
+ */
+union bdk_usbhx_uahc_suptprt3_dw1
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_suptprt3_dw1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t name : 32; /**< [ 31: 0](RO) Name string: 'USB'. */
+#else /* Word 0 - Little Endian */
+ uint32_t name : 32; /**< [ 31: 0](RO) Name string: 'USB'. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_suptprt3_dw1_s cn; */
+};
+typedef union bdk_usbhx_uahc_suptprt3_dw1 bdk_usbhx_uahc_suptprt3_dw1_t;
+
+/* Physical CSR address of USBH(a)_UAHC_SUPTPRT3_DW1; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT3_DW1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT3_DW1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x8680000008a4ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_SUPTPRT3_DW1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_SUPTPRT3_DW1(a) bdk_usbhx_uahc_suptprt3_dw1_t
+#define bustype_BDK_USBHX_UAHC_SUPTPRT3_DW1(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_SUPTPRT3_DW1(a) "USBHX_UAHC_SUPTPRT3_DW1"
+#define device_bar_BDK_USBHX_UAHC_SUPTPRT3_DW1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_SUPTPRT3_DW1(a) (a)
+#define arguments_BDK_USBHX_UAHC_SUPTPRT3_DW1(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_suptprt3_dw2
+ *
+ * XHCI Supported-Protocol-Capability (USB 3.0) Register 2
+ * For information on this register, refer to the xHCI Specification, v1.0, section 7.2.
+ */
+union bdk_usbhx_uahc_suptprt3_dw2
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_suptprt3_dw2_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t psic : 4; /**< [ 31: 28](RO) Protocol speed ID count. */
+ uint32_t reserved_16_27 : 12;
+ uint32_t compatprtcnt : 8; /**< [ 15: 8](RO) Compatible port count. */
+ uint32_t compatprtoff : 8; /**< [ 7: 0](RO) Compatible port offset. */
+#else /* Word 0 - Little Endian */
+ uint32_t compatprtoff : 8; /**< [ 7: 0](RO) Compatible port offset. */
+ uint32_t compatprtcnt : 8; /**< [ 15: 8](RO) Compatible port count. */
+ uint32_t reserved_16_27 : 12;
+ uint32_t psic : 4; /**< [ 31: 28](RO) Protocol speed ID count. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_suptprt3_dw2_s cn; */
+};
+typedef union bdk_usbhx_uahc_suptprt3_dw2 bdk_usbhx_uahc_suptprt3_dw2_t;
+
+/* Physical CSR address of USBH(a)_UAHC_SUPTPRT3_DW2; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT3_DW2(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT3_DW2(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x8680000008a8ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_SUPTPRT3_DW2", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_SUPTPRT3_DW2(a) bdk_usbhx_uahc_suptprt3_dw2_t
+#define bustype_BDK_USBHX_UAHC_SUPTPRT3_DW2(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_SUPTPRT3_DW2(a) "USBHX_UAHC_SUPTPRT3_DW2"
+#define device_bar_BDK_USBHX_UAHC_SUPTPRT3_DW2(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_SUPTPRT3_DW2(a) (a)
+#define arguments_BDK_USBHX_UAHC_SUPTPRT3_DW2(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_suptprt3_dw3
+ *
+ * XHCI Supported-Protocol-Capability (USB 3.0) Register 3
+ * For information on this register, refer to the xHCI Specification, v1.1, section 7.2.
+ */
+union bdk_usbhx_uahc_suptprt3_dw3
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_suptprt3_dw3_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_5_31 : 27;
+ uint32_t protslottype : 5; /**< [ 4: 0](RO) Protocol slot type. */
+#else /* Word 0 - Little Endian */
+ uint32_t protslottype : 5; /**< [ 4: 0](RO) Protocol slot type. */
+ uint32_t reserved_5_31 : 27;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_suptprt3_dw3_s cn; */
+};
+typedef union bdk_usbhx_uahc_suptprt3_dw3 bdk_usbhx_uahc_suptprt3_dw3_t;
+
+/* Physical CSR address of USBH(a)_UAHC_SUPTPRT3_DW3; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT3_DW3(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_SUPTPRT3_DW3(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x8680000008acll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_SUPTPRT3_DW3", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_SUPTPRT3_DW3(a) bdk_usbhx_uahc_suptprt3_dw3_t
+#define bustype_BDK_USBHX_UAHC_SUPTPRT3_DW3(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_SUPTPRT3_DW3(a) "USBHX_UAHC_SUPTPRT3_DW3"
+#define device_bar_BDK_USBHX_UAHC_SUPTPRT3_DW3(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_SUPTPRT3_DW3(a) (a)
+#define arguments_BDK_USBHX_UAHC_SUPTPRT3_DW3(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_usbcmd
+ *
+ * XHCI Command Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_usbcmd
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_usbcmd_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_12_31 : 20;
+ uint32_t eu3s : 1; /**< [ 11: 11](R/W) Enable U3 MFINDEX stop. */
+ uint32_t ewe : 1; /**< [ 10: 10](R/W) Enable wrap event. */
+ uint32_t crs : 1; /**< [ 9: 9](WO) Controller restore state. */
+ uint32_t css : 1; /**< [ 8: 8](WO) Controller save state. */
+ uint32_t lhcrst : 1; /**< [ 7: 7](R/W1S/H) Light host controller reset. */
+ uint32_t reserved_4_6 : 3;
+ uint32_t hsee : 1; /**< [ 3: 3](R/W) Host system error enable. */
+ uint32_t inte : 1; /**< [ 2: 2](R/W) Interrupter enable. */
+ uint32_t hcrst : 1; /**< [ 1: 1](R/W1S/H) Host controller reset. */
+ uint32_t r_s : 1; /**< [ 0: 0](R/W) Run/stop. */
+#else /* Word 0 - Little Endian */
+ uint32_t r_s : 1; /**< [ 0: 0](R/W) Run/stop. */
+ uint32_t hcrst : 1; /**< [ 1: 1](R/W1S/H) Host controller reset. */
+ uint32_t inte : 1; /**< [ 2: 2](R/W) Interrupter enable. */
+ uint32_t hsee : 1; /**< [ 3: 3](R/W) Host system error enable. */
+ uint32_t reserved_4_6 : 3;
+ uint32_t lhcrst : 1; /**< [ 7: 7](R/W1S/H) Light host controller reset. */
+ uint32_t css : 1; /**< [ 8: 8](WO) Controller save state. */
+ uint32_t crs : 1; /**< [ 9: 9](WO) Controller restore state. */
+ uint32_t ewe : 1; /**< [ 10: 10](R/W) Enable wrap event. */
+ uint32_t eu3s : 1; /**< [ 11: 11](R/W) Enable U3 MFINDEX stop. */
+ uint32_t reserved_12_31 : 20;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_usbcmd_s cn; */
+};
+typedef union bdk_usbhx_uahc_usbcmd bdk_usbhx_uahc_usbcmd_t;
+
+/* Physical CSR address of USBH(a)_UAHC_USBCMD; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_USBCMD(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_USBCMD(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000020ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_USBCMD", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_USBCMD(a) bdk_usbhx_uahc_usbcmd_t
+#define bustype_BDK_USBHX_UAHC_USBCMD(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_USBCMD(a) "USBHX_UAHC_USBCMD"
+#define device_bar_BDK_USBHX_UAHC_USBCMD(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_USBCMD(a) (a)
+#define arguments_BDK_USBHX_UAHC_USBCMD(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_usblegctlsts
+ *
+ * XHCI Legacy Support Control/Status Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 7.1.2. Note
+ * that the SMI interrupts are not connected to anything in a CNXXXX configuration.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_usblegctlsts
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_usblegctlsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t smi_on_bar : 1; /**< [ 31: 31](R/W1C/H) System management interrupt on BAR. Never generated. */
+ uint32_t smi_on_pci_command : 1; /**< [ 30: 30](R/W1C/H) System management interrupt on PCI command. Never generated. */
+ uint32_t smi_on_os_ownership : 1; /**< [ 29: 29](R/W1C/H) System management interrupt on OS ownership change. This bit is set to 1 whenever
+ USBH()_UAHC_USBLEGSUP[HC_OS_OWNED_SEMAPHORES] transitions. */
+ uint32_t reserved_21_28 : 8;
+ uint32_t smi_on_hostsystemerr : 1; /**< [ 20: 20](RO/H) System-management interrupt on host-system error. Shadow bit of USBH()_UAHC_USBSTS[HSE].
+ Refer to
+ xHCI Section 5.4.2 for definition and effects of the events associated with this bit being
+ set to 1.
+
+ To clear this bit to a 0, system software must write a 1 to USBH()_UAHC_USBSTS[HSE]. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t smi_on_event_interrupt : 1; /**< [ 16: 16](RO/H) System-management interrupt on event interrupt. Shadow bit of USBH()_UAHC_USBSTS[EINT].
+ Refer to
+ xHCI Section 5.4.2 for definition. This bit automatically clears when [EINT] clears and
+ sets when [EINT] sets. */
+ uint32_t smi_on_bar_en : 1; /**< [ 15: 15](R/W) System-management interrupt on BAR enable. */
+ uint32_t smi_on_pci_command_en : 1; /**< [ 14: 14](R/W) System-management interrupt on PCI command enable. */
+ uint32_t smi_on_os_ownership_en : 1; /**< [ 13: 13](R/W) System-management interrupt on OS ownership enable. */
+ uint32_t reserved_5_12 : 8;
+ uint32_t smi_on_hostsystemerr_en : 1;/**< [ 4: 4](R/W) System-management interrupt on host-system error enable */
+ uint32_t reserved_1_3 : 3;
+ uint32_t usb_smi_en : 1; /**< [ 0: 0](R/W) USB system-management interrupt enable. */
+#else /* Word 0 - Little Endian */
+ uint32_t usb_smi_en : 1; /**< [ 0: 0](R/W) USB system-management interrupt enable. */
+ uint32_t reserved_1_3 : 3;
+ uint32_t smi_on_hostsystemerr_en : 1;/**< [ 4: 4](R/W) System-management interrupt on host-system error enable */
+ uint32_t reserved_5_12 : 8;
+ uint32_t smi_on_os_ownership_en : 1; /**< [ 13: 13](R/W) System-management interrupt on OS ownership enable. */
+ uint32_t smi_on_pci_command_en : 1; /**< [ 14: 14](R/W) System-management interrupt on PCI command enable. */
+ uint32_t smi_on_bar_en : 1; /**< [ 15: 15](R/W) System-management interrupt on BAR enable. */
+ uint32_t smi_on_event_interrupt : 1; /**< [ 16: 16](RO/H) System-management interrupt on event interrupt. Shadow bit of USBH()_UAHC_USBSTS[EINT].
+ Refer to
+ xHCI Section 5.4.2 for definition. This bit automatically clears when [EINT] clears and
+ sets when [EINT] sets. */
+ uint32_t reserved_17_19 : 3;
+ uint32_t smi_on_hostsystemerr : 1; /**< [ 20: 20](RO/H) System-management interrupt on host-system error. Shadow bit of USBH()_UAHC_USBSTS[HSE].
+ Refer to
+ xHCI Section 5.4.2 for definition and effects of the events associated with this bit being
+ set to 1.
+
+ To clear this bit to a 0, system software must write a 1 to USBH()_UAHC_USBSTS[HSE]. */
+ uint32_t reserved_21_28 : 8;
+ uint32_t smi_on_os_ownership : 1; /**< [ 29: 29](R/W1C/H) System management interrupt on OS ownership change. This bit is set to 1 whenever
+ USBH()_UAHC_USBLEGSUP[HC_OS_OWNED_SEMAPHORES] transitions. */
+ uint32_t smi_on_pci_command : 1; /**< [ 30: 30](R/W1C/H) System management interrupt on PCI command. Never generated. */
+ uint32_t smi_on_bar : 1; /**< [ 31: 31](R/W1C/H) System management interrupt on BAR. Never generated. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_usblegctlsts_s cn; */
+};
+typedef union bdk_usbhx_uahc_usblegctlsts bdk_usbhx_uahc_usblegctlsts_t;
+
+/* Physical CSR address of USBH(a)_UAHC_USBLEGCTLSTS; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_USBLEGCTLSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_USBLEGCTLSTS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000884ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_USBLEGCTLSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_USBLEGCTLSTS(a) bdk_usbhx_uahc_usblegctlsts_t
+#define bustype_BDK_USBHX_UAHC_USBLEGCTLSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_USBLEGCTLSTS(a) "USBHX_UAHC_USBLEGCTLSTS"
+#define device_bar_BDK_USBHX_UAHC_USBLEGCTLSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_USBLEGCTLSTS(a) (a)
+#define arguments_BDK_USBHX_UAHC_USBLEGCTLSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_usblegsup
+ *
+ * XHCI Legacy Support Capability Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 7.1.1.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_usblegsup
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_usblegsup_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_25_31 : 7;
+ uint32_t hc_os_owned_semaphores : 1; /**< [ 24: 24](R/W) HC OS-owned semaphore. */
+ uint32_t reserved_17_23 : 7;
+ uint32_t hc_bios_owned_semaphores : 1;/**< [ 16: 16](R/W) HC BIOS-owned semaphore. */
+ uint32_t nextcapptr : 8; /**< [ 15: 8](RO) Next xHCI extended-capability pointer. */
+ uint32_t capid : 8; /**< [ 7: 0](RO) Capability ID = USB legacy support. */
+#else /* Word 0 - Little Endian */
+ uint32_t capid : 8; /**< [ 7: 0](RO) Capability ID = USB legacy support. */
+ uint32_t nextcapptr : 8; /**< [ 15: 8](RO) Next xHCI extended-capability pointer. */
+ uint32_t hc_bios_owned_semaphores : 1;/**< [ 16: 16](R/W) HC BIOS-owned semaphore. */
+ uint32_t reserved_17_23 : 7;
+ uint32_t hc_os_owned_semaphores : 1; /**< [ 24: 24](R/W) HC OS-owned semaphore. */
+ uint32_t reserved_25_31 : 7;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_usblegsup_s cn; */
+};
+typedef union bdk_usbhx_uahc_usblegsup bdk_usbhx_uahc_usblegsup_t;
+
+/* Physical CSR address of USBH(a)_UAHC_USBLEGSUP; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_USBLEGSUP(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_USBLEGSUP(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000880ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_USBLEGSUP", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_USBLEGSUP(a) bdk_usbhx_uahc_usblegsup_t
+#define bustype_BDK_USBHX_UAHC_USBLEGSUP(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_USBLEGSUP(a) "USBHX_UAHC_USBLEGSUP"
+#define device_bar_BDK_USBHX_UAHC_USBLEGSUP(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_USBLEGSUP(a) (a)
+#define arguments_BDK_USBHX_UAHC_USBLEGSUP(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB32b) usbh#_uahc_usbsts
+ *
+ * XHCI Status Register
+ * For information on this register, refer to the xHCI Specification, v1.0, section 5.4.2.
+ *
+ * This register can be reset by NCB reset,
+ * or USBH()_UCTL_CTL[UAHC_RST],
+ * or USBH()_UAHC_GCTL[CORESOFTRESET],
+ * or USBH()_UAHC_USBCMD[HCRST], or USBH()_UAHC_USBCMD[LHCRST].
+ */
+union bdk_usbhx_uahc_usbsts
+{
+ uint32_t u;
+ struct bdk_usbhx_uahc_usbsts_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint32_t reserved_13_31 : 19;
+ uint32_t hce : 1; /**< [ 12: 12](RO/H) Host controller error. */
+ uint32_t cnr : 1; /**< [ 11: 11](RO/H) Controller not ready. */
+ uint32_t sre : 1; /**< [ 10: 10](R/W1C/H) Save/restore error. */
+ uint32_t rss : 1; /**< [ 9: 9](RO/H) Restore state status. */
+ uint32_t sss : 1; /**< [ 8: 8](RO/H) Save state status. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t pcd : 1; /**< [ 4: 4](R/W1C/H) Port change detect. */
+ uint32_t eint : 1; /**< [ 3: 3](R/W1C/H) Event interrupt. */
+ uint32_t hse : 1; /**< [ 2: 2](R/W1C/H) Host system error. The typical software response to an HSE is to reset the core. */
+ uint32_t reserved_1 : 1;
+ uint32_t hch : 1; /**< [ 0: 0](RO/H) HC halted. */
+#else /* Word 0 - Little Endian */
+ uint32_t hch : 1; /**< [ 0: 0](RO/H) HC halted. */
+ uint32_t reserved_1 : 1;
+ uint32_t hse : 1; /**< [ 2: 2](R/W1C/H) Host system error. The typical software response to an HSE is to reset the core. */
+ uint32_t eint : 1; /**< [ 3: 3](R/W1C/H) Event interrupt. */
+ uint32_t pcd : 1; /**< [ 4: 4](R/W1C/H) Port change detect. */
+ uint32_t reserved_5_7 : 3;
+ uint32_t sss : 1; /**< [ 8: 8](RO/H) Save state status. */
+ uint32_t rss : 1; /**< [ 9: 9](RO/H) Restore state status. */
+ uint32_t sre : 1; /**< [ 10: 10](R/W1C/H) Save/restore error. */
+ uint32_t cnr : 1; /**< [ 11: 11](RO/H) Controller not ready. */
+ uint32_t hce : 1; /**< [ 12: 12](RO/H) Host controller error. */
+ uint32_t reserved_13_31 : 19;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uahc_usbsts_s cn; */
+};
+typedef union bdk_usbhx_uahc_usbsts bdk_usbhx_uahc_usbsts_t;
+
+/* Physical CSR address of USBH(a)_UAHC_USBSTS; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UAHC_USBSTS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UAHC_USBSTS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000000024ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UAHC_USBSTS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UAHC_USBSTS(a) bdk_usbhx_uahc_usbsts_t
+#define bustype_BDK_USBHX_UAHC_USBSTS(a) BDK_CSR_TYPE_NCB32b
+#define basename_BDK_USBHX_UAHC_USBSTS(a) "USBHX_UAHC_USBSTS"
+#define device_bar_BDK_USBHX_UAHC_USBSTS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UAHC_USBSTS(a) (a)
+#define arguments_BDK_USBHX_UAHC_USBSTS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_bist_status
+ *
+ * UCTL BIST Status Register
+ * This register indicates the results from the built-in self-test (BIST) runs of USBH memories.
+ * A 0 indicates pass or never run, a 1 indicates fail. This register can be reset by NCB reset.
+ */
+union bdk_usbhx_uctl_bist_status
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_bist_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_42_63 : 22;
+ uint64_t uctl_xm_r_bist_ndone : 1; /**< [ 41: 41](RO/H) BIST is not complete for the UCTL AxiMaster read-data FIFO. */
+ uint64_t uctl_xm_w_bist_ndone : 1; /**< [ 40: 40](RO/H) BIST is not complete for the UCTL AxiMaster write-data FIFO. */
+ uint64_t reserved_35_39 : 5;
+ uint64_t uahc_ram2_bist_ndone : 1; /**< [ 34: 34](RO/H) BIST is not complete for the UAHC RxFIFO RAM (RAM2). */
+ uint64_t uahc_ram1_bist_ndone : 1; /**< [ 33: 33](RO/H) BIST is not complete for the UAHC TxFIFO RAM (RAM1). */
+ uint64_t uahc_ram0_bist_ndone : 1; /**< [ 32: 32](RO/H) BIST is not complete for the UAHC descriptor/register cache (RAM0). */
+ uint64_t reserved_10_31 : 22;
+ uint64_t uctl_xm_r_bist_status : 1; /**< [ 9: 9](RO/H) BIST status of the UCTL AxiMaster read-data FIFO. */
+ uint64_t uctl_xm_w_bist_status : 1; /**< [ 8: 8](RO/H) BIST status of the UCTL AxiMaster write-data FIFO. */
+ uint64_t reserved_3_7 : 5;
+ uint64_t uahc_ram2_bist_status : 1; /**< [ 2: 2](RO/H) BIST status of the UAHC RxFIFO RAM (RAM2). */
+ uint64_t uahc_ram1_bist_status : 1; /**< [ 1: 1](RO/H) BIST status of the UAHC TxFIFO RAM (RAM1). */
+ uint64_t uahc_ram0_bist_status : 1; /**< [ 0: 0](RO/H) BIST status of the UAHC descriptor/register cache (RAM0). */
+#else /* Word 0 - Little Endian */
+ uint64_t uahc_ram0_bist_status : 1; /**< [ 0: 0](RO/H) BIST status of the UAHC descriptor/register cache (RAM0). */
+ uint64_t uahc_ram1_bist_status : 1; /**< [ 1: 1](RO/H) BIST status of the UAHC TxFIFO RAM (RAM1). */
+ uint64_t uahc_ram2_bist_status : 1; /**< [ 2: 2](RO/H) BIST status of the UAHC RxFIFO RAM (RAM2). */
+ uint64_t reserved_3_7 : 5;
+ uint64_t uctl_xm_w_bist_status : 1; /**< [ 8: 8](RO/H) BIST status of the UCTL AxiMaster write-data FIFO. */
+ uint64_t uctl_xm_r_bist_status : 1; /**< [ 9: 9](RO/H) BIST status of the UCTL AxiMaster read-data FIFO. */
+ uint64_t reserved_10_31 : 22;
+ uint64_t uahc_ram0_bist_ndone : 1; /**< [ 32: 32](RO/H) BIST is not complete for the UAHC descriptor/register cache (RAM0). */
+ uint64_t uahc_ram1_bist_ndone : 1; /**< [ 33: 33](RO/H) BIST is not complete for the UAHC TxFIFO RAM (RAM1). */
+ uint64_t uahc_ram2_bist_ndone : 1; /**< [ 34: 34](RO/H) BIST is not complete for the UAHC RxFIFO RAM (RAM2). */
+ uint64_t reserved_35_39 : 5;
+ uint64_t uctl_xm_w_bist_ndone : 1; /**< [ 40: 40](RO/H) BIST is not complete for the UCTL AxiMaster write-data FIFO. */
+ uint64_t uctl_xm_r_bist_ndone : 1; /**< [ 41: 41](RO/H) BIST is not complete for the UCTL AxiMaster read-data FIFO. */
+ uint64_t reserved_42_63 : 22;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_bist_status_s cn; */
+};
+typedef union bdk_usbhx_uctl_bist_status bdk_usbhx_uctl_bist_status_t;
+
+/* Physical CSR address of USBH(a)_UCTL_BIST_STATUS; CN88XX only, a = 0..1
+   (controllers are 0x1000000000 bytes apart); other models/indices fatal. */
+static inline uint64_t BDK_USBHX_UCTL_BIST_STATUS(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_BIST_STATUS(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000100008ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_BIST_STATUS", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_BIST_STATUS(a) bdk_usbhx_uctl_bist_status_t
+#define bustype_BDK_USBHX_UCTL_BIST_STATUS(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_BIST_STATUS(a) "USBHX_UCTL_BIST_STATUS"
+#define device_bar_BDK_USBHX_UCTL_BIST_STATUS(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_BIST_STATUS(a) (a)
+#define arguments_BDK_USBHX_UCTL_BIST_STATUS(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_ctl
+ *
+ * UCTL Control Register
+ * This register controls clocks, resets, power, and BIST.
+ *
+ * This register can be reset by NCB reset.
+ */
+union bdk_usbhx_uctl_ctl
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_ctl_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. A BIST run with this bit set clears all entries in USBH RAMs
+ to 0x0.
+
+ There are two major modes of BIST: full and clear. Full BIST is run by the BIST state
+ machine when [CLEAR_BIST] is deasserted during BIST. Clear BIST is run if [CLEAR_BIST] is
+ asserted during BIST.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts the
+ [CLEAR_BIST] setting into the correct state and then perform another CSR write operation
+ to
+ set the BIST trigger (keeping the [CLEAR_BIST] state constant).
+ CLEAR BIST completion is indicated by USBH()_UCTL_BIST_STATUS. A BIST clear operation
+ takes almost 2,000 controller-clock cycles for the largest RAM. */
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Rising edge starts BIST on the memories in USBH.
+ To run BIST, the controller clock must be both configured and enabled, and should be
+ configured to the maximum available frequency given the available coprocessor clock and
+ dividers.
+ Also, the UCTL, UAHC, and UPHY should be held in software- initiated reset (using
+ [UPHY_RST], [UAHC_RST], [UCTL_RST]) until BIST is complete.
+ BIST defect status can be checked after FULL BIST completion, both of which are indicated
+ in USBH()_UCTL_BIST_STATUS. The full BIST run takes almost 80,000 controller-clock cycles
+ for
+ the largest RAM. */
+ uint64_t ref_clk_sel : 2; /**< [ 61: 60](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks.
+ 0x0 = Reference clock source for both PLLs come from the USB pads.
+ 0x1 = Reserved.
+ 0x2 = Reserved.
+ 0x3 = Reserved.
+
+ This value can be changed only during UPHY_RST.
+
+ If REF_CLK_SEL = 0x0, then the reference clock input cannot be spread-spectrum.
+
+ Internal:
+ For the 0x1 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for HighSpeed PLL is PLL_REF_CLK. But in 78xx, PLL_REF_CLK
+ cannot be routed to USB without violating jitter requirements */
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+ The high-speed PLL cannot support a spread-spectrum input, so REF_CLK_SEL = 0x0 must
+ enable this feature.
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount
+
+ Must leave at reset value of 0x0.
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed. Must
+ leave at reset value of 0x0. This value may only be changed during [UPHY_RST].
+ This value is superseded by the [REF_CLK_FSEL]\<5:3\> selection. */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by 2 before feeding it into the REF_CLK_FSEL divider. Must
+ leave at reset value of 0x0.
+
+ This value can be changed only during UPHY_RST. */
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks. The
+ legal values are as follows:
+ 0x27 = External reference clock 100 MHz.
+
+ All other values are reserved.
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ 0x2A = External reference clock 24 MHz.
+ 0x31 = External reference clock 20 MHz.
+ 0x38 = External reference clock 19.2 MHz. */
+ uint64_t reserved_31 : 1;
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ For USB3:
+ * The controller-clock frequency must be at or above 125 MHz for any USB3 operation.
+ * The controller-clock frequency must be at or above
+ 150 MHz for full-rate USB3 operation.
+
+ For USB2:
+ * The controller-clock frequency must be at or above 62.5 MHz for any USB2 operation.
+ * The controller-clock frequency must be at or above
+ 90 MHz for full-rate USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of Synopsys DWC_usb3 Databook v2.50a, table A-16, row
+ 1, col 12. Synopsys DWC_usb3 Databook v2.50a, table A-17, row 7, col 9. Synopsys DWC_usb3
+ Databook v2.50a, table A-16, row 7, col 9. HOST2\>62.5MHz in HOST mode is from Synopsys
+ DWC_usb3 Databook v2.50a, section A.12.5, 3rd bullet in Note on page 894. HOST2\>90MHz was
+ arrived at from some math: 62.5MHz + (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_22_23 : 2;
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBH()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBH()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_15 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_5_11 : 7;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the USB UCTL interface clock (coprocessor clock). This enables access to UAHC
+ and UCTL registers starting from 0x30. */
+ uint64_t reserved_3 : 1;
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+#else /* Word 0 - Little Endian */
+ uint64_t uctl_rst : 1; /**< [ 0: 0](R/W) Software reset; resets UCTL; active-high.
+ Resets UAHC DMA and register shims. Resets UCTL registers 0x30-0xF8.
+ Does not reset UCTL registers 0x0-0x28.
+ UCTL registers starting from 0x30 can be accessed only after the controller clock is
+ active and [UCTL_RST] is deasserted.
+
+ Internal:
+ Note that soft-resetting the UCTL while it is active may cause violations of
+ RSL, NCB, and CIB protocols. */
+ uint64_t uahc_rst : 1; /**< [ 1: 1](R/W) Software reset; resets UAHC; active-high.
+ Internal:
+ Note that soft-resetting the UAHC while it is active may cause violations of RSL
+ or NCB protocols. */
+ uint64_t uphy_rst : 1; /**< [ 2: 2](R/W) PHY reset; resets UPHY; active-high. */
+ uint64_t reserved_3 : 1;
+ uint64_t csclk_en : 1; /**< [ 4: 4](R/W) Turns on the USB UCTL interface clock (coprocessor clock). This enables access to UAHC
+ and UCTL registers starting from 0x30. */
+ uint64_t reserved_5_11 : 7;
+ uint64_t hs_power_en : 1; /**< [ 12: 12](R/W) PHY high-speed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_13 : 1;
+ uint64_t ss_power_en : 1; /**< [ 14: 14](R/W) PHY SuperSpeed block power enable.
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted. */
+ uint64_t reserved_15 : 1;
+ uint64_t usb2_port_disable : 1; /**< [ 16: 16](R/W) Disables USB2 (high-speed/full-speed/low-speed) portion of this PHY. When set to 1, this
+ signal stops reporting connect/disconnect events on the port and keeps the port in
+ disabled state. This could be used for security reasons where hardware can disable a port
+ regardless of whether xHCI driver enables a port or not.
+ USBH()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should only be modified when [UPHY_RST] is asserted.
+ If Port0 is required to be disabled, ensure that the utmi_clk[0] is running at the normal
+ speed. Also, all the enabled USB2.0 ports should have the same clock frequency as Port0. */
+ uint64_t reserved_17 : 1;
+ uint64_t usb3_port_disable : 1; /**< [ 18: 18](R/W) Disables the USB3 (SuperSpeed) portion of this PHY. When set to 1, this signal stops
+ reporting connect/disconnect events on the port and keeps the port in disabled state. This
+ could be used for security reasons where hardware can disable a port regardless of whether
+ xHCI driver enables a port or not.
+ USBH()_UAHC_HCSPARAMS1[MAXPORTS] is not affected by this signal.
+
+ This is a strap signal; it should be modified only when [UPHY_RST] is asserted. */
+ uint64_t reserved_19 : 1;
+ uint64_t usb2_port_perm_attach : 1; /**< [ 20: 20](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t usb3_port_perm_attach : 1; /**< [ 21: 21](R/W) Indicates this port is permanently attached. This is a strap signal; it should be modified
+ only when [UPHY_RST] is asserted. */
+ uint64_t reserved_22_23 : 2;
+ uint64_t h_clkdiv_sel : 3; /**< [ 26: 24](R/W) Controller clock-frequency-divider select. The controller-clock frequency is the
+ coprocessor-clock frequency divided by [H_CLKDIV_SEL] and must be at or below 300 MHz.
+ The divider values are the following:
+ 0x0 = divide by 1.
+ 0x1 = divide by 2.
+ 0x2 = divide by 4.
+ 0x3 = divide by 6.
+ 0x4 = divide by 8.
+ 0x5 = divide by 16.
+ 0x6 = divide by 24.
+ 0x7 = divide by 32.
+
+ For USB3:
+ * The controller-clock frequency must be at or above 125 MHz for any USB3 operation.
+ * The controller-clock frequency must be at or above
+ 150 MHz for full-rate USB3 operation.
+
+ For USB2:
+ * The controller-clock frequency must be at or above 62.5 MHz for any USB2 operation.
+ * The controller-clock frequency must be at or above
+ 90 MHz for full-rate USB2 operation.
+
+ This field can be changed only when [H_CLKDIV_RST] = 1.
+
+ Internal:
+ 150MHz is from the maximum of Synopsys DWC_usb3 Databook v2.50a, table A-16, row
+ 1, col 12. Synopsys DWC_usb3 Databook v2.50a, table A-17, row 7, col 9. Synopsys DWC_usb3
+ Databook v2.50a, table A-16, row 7, col 9. HOST2\>62.5MHz in HOST mode is from Synopsys
+ DWC_usb3 Databook v2.50a, section A.12.5, 3rd bullet in Note on page 894. HOST2\>90MHz was
+ arrived at from some math: 62.5MHz + (diff between row 1 and 2, col 12 of table A-16). */
+ uint64_t reserved_27 : 1;
+ uint64_t h_clkdiv_rst : 1; /**< [ 28: 28](R/W) Controller clock divider reset. Divided clocks are not generated while the divider is
+ being reset.
+ This also resets the suspend-clock divider. */
+ uint64_t h_clk_byp_sel : 1; /**< [ 29: 29](R/W) Select the bypass input to the controller-clock divider.
+ 0 = Use the divided coprocessor clock from the H_CLKDIV divider.
+ 1 = Use the bypass clock from the GPIO pins.
+
+ This signal is just a multiplexer-select signal; it does not enable the controller clock.
+ You must still set H_CLKDIV_EN separately. [H_CLK_BYP_SEL] select should not be changed
+ unless H_CLKDIV_EN is disabled.
+
+ The bypass clock can be selected and running even if the controller-clock dividers are not
+ running.
+
+ Internal:
+ Generally bypass is only used for scan purposes. */
+ uint64_t h_clk_en : 1; /**< [ 30: 30](R/W) Controller-clock enable. When set to 1, the controller clock is generated. This also
+ enables access to UCTL registers 0x30-0xF8. */
+ uint64_t reserved_31 : 1;
+ uint64_t ref_clk_fsel : 6; /**< [ 37: 32](R/W) Selects the reference clock frequency for the SuperSpeed and high-speed PLL blocks. The
+ legal values are as follows:
+ 0x27 = External reference clock 100 MHz.
+
+ All other values are reserved.
+ This value may only be changed during [UPHY_RST].
+
+ Internal:
+ 0x2A = External reference clock 24 MHz.
+ 0x31 = External reference clock 20 MHz.
+ 0x38 = External reference clock 19.2 MHz. */
+ uint64_t ref_clk_div2 : 1; /**< [ 38: 38](R/W) Divides the reference clock by 2 before feeding it into the REF_CLK_FSEL divider. Must
+ leave at reset value of 0x0.
+
+ This value can be changed only during UPHY_RST. */
+ uint64_t ref_ssp_en : 1; /**< [ 39: 39](R/W) Enables reference clock to the prescaler for SuperSpeed function. This should always be
+ enabled since this output clock is used to drive the UAHC suspend-mode clock during
+ low-power states.
+
+ This value can be changed only during UPHY_RST or during low-power states.
+ The reference clock must be running and stable before [UPHY_RST] is deasserted and before
+ [REF_SSP_EN] is asserted. */
+ uint64_t mpll_multiplier : 7; /**< [ 46: 40](R/W) Multiplies the reference clock to a frequency suitable for intended operating speed. Must
+ leave at reset value of 0x0. This value may only be changed during [UPHY_RST].
+ This value is superseded by the [REF_CLK_FSEL]\<5:3\> selection. */
+ uint64_t ssc_ref_clk_sel : 9; /**< [ 55: 47](R/W) Enables non-standard oscillator frequencies to generate targeted MPLL output rates. Input
+ corresponds to the frequency-synthesis coefficient.
+
+ [55:53]: modulus - 1,
+ [52:47]: 2's complement push amount
+
+ Must leave at reset value of 0x0.
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t ssc_range : 3; /**< [ 58: 56](R/W) Spread-spectrum clock range. Selects the range of spread-spectrum modulation when SSC_EN
+ is asserted and the PHY is spreading the SuperSpeed transmit clocks.
+ Applies a fixed offset to the phase accumulator.
+ 0x0 = -4980 ppm downspread of clock.
+ 0x1 = -4492 ppm.
+ 0x2 = -4003 ppm.
+ 0x3-0x7 = reserved.
+
+ All of these settings are within the USB 3.0 specification. The amount of EMI emission
+ reduction might decrease as the [SSC_RANGE] increases; therefore, the [SSC_RANGE] settings
+ can
+ be registered to enable the amount of spreading to be adjusted on a per-application basis.
+ This value can be changed only during UPHY_RST. */
+ uint64_t ssc_en : 1; /**< [ 59: 59](R/W) Spread-spectrum clock enable. Enables spread-spectrum clock production in the SuperSpeed
+ function. If the input reference clock for the SuperSpeed PLL is already spread-spectrum,
+ then do not enable this feature. The clocks sourced to the SuperSpeed function must have
+ spread-spectrum to be compliant with the USB specification.
+ The high-speed PLL cannot support a spread-spectrum input, so REF_CLK_SEL = 0x0 must
+ enable this feature.
+ This value may only be changed during [UPHY_RST]. */
+ uint64_t ref_clk_sel : 2; /**< [ 61: 60](R/W) Reference clock select. Choose reference-clock source for the SuperSpeed and high-speed
+ PLL blocks.
+ 0x0 = Reference clock source for both PLLs come from the USB pads.
+ 0x1 = Reserved.
+ 0x2 = Reserved.
+ 0x3 = Reserved.
+
+ This value can be changed only during UPHY_RST.
+
+ If REF_CLK_SEL = 0x0, then the reference clock input cannot be spread-spectrum.
+
+ Internal:
+ For the 0x1 selection, reference clock source for SuperSpeed PLL is from the USB
+ pads, reference clock source for HighSpeed PLL is PLL_REF_CLK. But in 78xx, PLL_REF_CLK
+ cannot be routed to USB without violating jitter requirements */
+ uint64_t start_bist : 1; /**< [ 62: 62](R/W) Rising edge starts BIST on the memories in USBH.
+ To run BIST, the controller clock must be both configured and enabled, and should be
+ configured to the maximum available frequency given the available coprocessor clock and
+ dividers.
+ Also, the UCTL, UAHC, and UPHY should be held in software- initiated reset (using
+ [UPHY_RST], [UAHC_RST], [UCTL_RST]) until BIST is complete.
+ BIST defect status can be checked after FULL BIST completion, both of which are indicated
+ in USBH()_UCTL_BIST_STATUS. The full BIST run takes almost 80,000 controller-clock cycles
+ for
+ the largest RAM. */
+ uint64_t clear_bist : 1; /**< [ 63: 63](R/W) BIST fast-clear mode select. A BIST run with this bit set clears all entries in USBH RAMs
+ to 0x0.
+
+ There are two major modes of BIST: full and clear. Full BIST is run by the BIST state
+ machine when [CLEAR_BIST] is deasserted during BIST. Clear BIST is run if [CLEAR_BIST] is
+ asserted during BIST.
+
+ To avoid race conditions, software must first perform a CSR write operation that puts the
+ [CLEAR_BIST] setting into the correct state and then perform another CSR write operation
+ to
+ set the BIST trigger (keeping the [CLEAR_BIST] state constant).
+ CLEAR BIST completion is indicated by USBH()_UCTL_BIST_STATUS. A BIST clear operation
+ takes almost 2,000 controller-clock cycles for the largest RAM. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_ctl_s cn; */
+};
+typedef union bdk_usbhx_uctl_ctl bdk_usbhx_uctl_ctl_t;
+
+/* Return the physical CSR address of USBH(a)_UCTL_CTL.
+   Present only on CN88XX, controllers a = 0..1; any other model or index
+   reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UCTL_CTL(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_CTL(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000100000ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_CTL", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (type, bus type, name, BAR, bus number,
+   argument list) — presumably consumed by the generic BDK_CSR_* accessor
+   macros; verify against bdk-csr.h. */
+#define typedef_BDK_USBHX_UCTL_CTL(a) bdk_usbhx_uctl_ctl_t
+#define bustype_BDK_USBHX_UCTL_CTL(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_CTL(a) "USBHX_UCTL_CTL"
+#define device_bar_BDK_USBHX_UCTL_CTL(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_CTL(a) (a)
+#define arguments_BDK_USBHX_UCTL_CTL(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_ecc
+ *
+ * UCTL ECC Control Register
+ * This register can be used to disable ECC correction, insert ECC errors, and debug ECC
+ * failures.
+ * * The ECC_ERR* fields are captured when there are no outstanding ECC errors indicated in
+ * INTSTAT and a new ECC error arrives. Prioritization for multiple events occurring on the same
+ * cycle is indicated by the ECC_ERR_SOURCE enumeration: highest encoded value has highest
+ * priority.
+ * * The *ECC_*_DIS fields disable ECC correction; SBE and DBE errors are still reported. If
+ * *ECC_*_DIS = 0x1, then no data-correction occurs.
+ * * The *ECC_FLIP_SYND fields flip the syndrome\<1:0\> bits to generate single-bit/double-bit
+ * error for testing.
+ *
+ * 0x0 = Normal operation.
+ * 0x1 = SBE on bit[0].
+ * 0x2 = SBE on bit[1].
+ * 0x3 = DBE on bit[1:0].
+ *
+ * This register is accessible only when USBH()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbhx_uctl_ecc
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_ecc_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t ecc_err_source : 4; /**< [ 59: 56](RO/H) Source of ECC error, see UCTL_ECC_ERR_SOURCE_E. */
+ uint64_t ecc_err_syndrome : 8; /**< [ 55: 48](RO/H) Syndrome bits of the ECC error. */
+ uint64_t ecc_err_address : 16; /**< [ 47: 32](RO/H) RAM address of the ECC error. */
+ uint64_t reserved_21_31 : 11;
+ uint64_t uctl_xm_r_ecc_flip_synd : 2;/**< [ 20: 19](R/W) Insert ECC error for testing purposes. */
+ uint64_t uctl_xm_r_ecc_cor_dis : 1; /**< [ 18: 18](R/W) Disables ECC correction on UCTL AxiMaster read-data FIFO (1 = no data-correction; errors still reported). */
+ uint64_t uctl_xm_w_ecc_flip_synd : 2;/**< [ 17: 16](R/W) Insert ECC error for testing purposes. */
+ uint64_t uctl_xm_w_ecc_cor_dis : 1; /**< [ 15: 15](R/W) Disables ECC correction on UCTL AxiMaster write-data FIFO (1 = no data-correction; errors still reported). */
+ uint64_t reserved_9_14 : 6;
+ uint64_t uahc_ram2_ecc_flip_synd : 2;/**< [ 8: 7](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram2_ecc_cor_dis : 1; /**< [ 6: 6](R/W) Disables ECC correction on UAHC RxFIFO RAMs (RAM2) (1 = no data-correction; errors still reported). */
+ uint64_t uahc_ram1_ecc_flip_synd : 2;/**< [ 5: 4](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram1_ecc_cor_dis : 1; /**< [ 3: 3](R/W) Disables ECC correction on UAHC TxFIFO RAMs (RAM1) (1 = no data-correction; errors still reported). */
+ uint64_t uahc_ram0_ecc_flip_synd : 2;/**< [ 2: 1](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram0_ecc_cor_dis : 1; /**< [ 0: 0](R/W) Disables ECC correction on UAHC Desc/Reg cache (RAM0) (1 = no data-correction; errors still reported). */
+#else /* Word 0 - Little Endian */
+ uint64_t uahc_ram0_ecc_cor_dis : 1; /**< [ 0: 0](R/W) Disables ECC correction on UAHC Desc/Reg cache (RAM0) (1 = no data-correction; errors still reported). */
+ uint64_t uahc_ram0_ecc_flip_synd : 2;/**< [ 2: 1](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram1_ecc_cor_dis : 1; /**< [ 3: 3](R/W) Disables ECC correction on UAHC TxFIFO RAMs (RAM1) (1 = no data-correction; errors still reported). */
+ uint64_t uahc_ram1_ecc_flip_synd : 2;/**< [ 5: 4](R/W) Insert ECC error for testing purposes. */
+ uint64_t uahc_ram2_ecc_cor_dis : 1; /**< [ 6: 6](R/W) Disables ECC correction on UAHC RxFIFO RAMs (RAM2) (1 = no data-correction; errors still reported). */
+ uint64_t uahc_ram2_ecc_flip_synd : 2;/**< [ 8: 7](R/W) Insert ECC error for testing purposes. */
+ uint64_t reserved_9_14 : 6;
+ uint64_t uctl_xm_w_ecc_cor_dis : 1; /**< [ 15: 15](R/W) Disables ECC correction on UCTL AxiMaster write-data FIFO (1 = no data-correction; errors still reported). */
+ uint64_t uctl_xm_w_ecc_flip_synd : 2;/**< [ 17: 16](R/W) Insert ECC error for testing purposes. */
+ uint64_t uctl_xm_r_ecc_cor_dis : 1; /**< [ 18: 18](R/W) Disables ECC correction on UCTL AxiMaster read-data FIFO (1 = no data-correction; errors still reported). */
+ uint64_t uctl_xm_r_ecc_flip_synd : 2;/**< [ 20: 19](R/W) Insert ECC error for testing purposes. */
+ uint64_t reserved_21_31 : 11;
+ uint64_t ecc_err_address : 16; /**< [ 47: 32](RO/H) RAM address of the ECC error. */
+ uint64_t ecc_err_syndrome : 8; /**< [ 55: 48](RO/H) Syndrome bits of the ECC error. */
+ uint64_t ecc_err_source : 4; /**< [ 59: 56](RO/H) Source of ECC error, see UCTL_ECC_ERR_SOURCE_E. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_ecc_s cn; */
+};
+typedef union bdk_usbhx_uctl_ecc bdk_usbhx_uctl_ecc_t;
+
+/* Return the physical CSR address of USBH(a)_UCTL_ECC.
+   Present only on CN88XX, controllers a = 0..1; any other model or index
+   reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UCTL_ECC(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_ECC(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x8680001000f0ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_ECC", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (type, bus type, name, BAR, bus number,
+   argument list) — presumably consumed by the generic BDK_CSR_* accessor
+   macros; verify against bdk-csr.h. */
+#define typedef_BDK_USBHX_UCTL_ECC(a) bdk_usbhx_uctl_ecc_t
+#define bustype_BDK_USBHX_UCTL_ECC(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_ECC(a) "USBHX_UCTL_ECC"
+#define device_bar_BDK_USBHX_UCTL_ECC(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_ECC(a) (a)
+#define arguments_BDK_USBHX_UCTL_ECC(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_host_cfg
+ *
+ * UCTL Host Controller Configuration Register
+ * This register allows configuration of various host controller (UAHC) features. Most of these
+ * are strap signals and should be modified only while the controller is not running.
+ *
+ * This register is accessible only when USBH()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbhx_uctl_host_cfg
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_host_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_60_63 : 4;
+ uint64_t host_current_belt : 12; /**< [ 59: 48](RO) This signal indicates the minimum value of all received BELT values and the BELT that is
+ set by the Set LTV command. */
+ uint64_t reserved_38_47 : 10;
+ uint64_t fla : 6; /**< [ 37: 32](R/W) High-speed jitter adjustment. Indicates the correction required to accommodate mac3 clock
+ and utmi clock jitter to measure 125 us duration. With FLA tied to 0x0, the high-speed
+ 125 us micro-frame is counted for 123933 ns. The value needs to be programmed in terms of
+ high-speed bit times in a 30 MHz cycle. Default value that needs to be driven is 0x20
+ (assuming 30 MHz perfect clock).
+
+ FLA connects to the FLADJ register defined in the xHCI spec in the PCI configuration
+ space. Each count is equal to 16 high-speed bit times. By default when this register is
+ set to 0x20, it gives 125 us interval. Now, based on the clock accuracy, you can decrement
+ the count or increment the count to get the 125 us uSOF window.
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t reserved_29_31 : 3;
+ uint64_t bme : 1; /**< [ 28: 28](R/W) Bus-master enable. This signal is used to disable the bus-mastering capability of the
+ host. Disabling this capability stalls DMA accesses. */
+ uint64_t oci_en : 1; /**< [ 27: 27](R/W) Overcurrent-indication enable. When enabled, OCI input to UAHC is taken from the GPIO
+ signals and sense-converted based on OCI_ACTIVE_HIGH_EN. The MIO GPIO multiplexer must be
+ programmed accordingly.
+
+ When disabled, OCI input to UAHC is forced to the correct inactive state based on
+ OCI_ACTIVE_HIGH_EN.
+
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t oci_active_high_en : 1; /**< [ 26: 26](R/W) Overcurrent sense selection. The off-chip sense (high/low) is converted to match the
+ controller's active-high sense.
+ 0 = Overcurrent indication from off-chip source is active-low.
+ 1 = Overcurrent indication from off-chip source is active-high.
+
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t ppc_en : 1; /**< [ 25: 25](R/W) Port-power-control enable.
+ 0 = USBH()_UAHC_HCCPARAMS[PPC] reports port-power-control feature is unavailable.
+ 1 = USBH()_UAHC_HCCPARAMS[PPC] reports port-power-control feature is available. PPC output
+ from UAHC is taken to the GPIO signals and sense-converted based on PPC_ACTIVE_HIGH_EN.
+
+ The MIO GPIO multiplexer must be programmed accordingly.
+
+ This is a strap signal; it should only be modified when either the UCTL_CTL[UAHC_RST] or
+ UAHC_GCTL[CoreSoftReset] is asserted. */
+ uint64_t ppc_active_high_en : 1; /**< [ 24: 24](R/W) Port power control sense selection. The active-high port-power-control output to off-chip
+ source is converted to match the off-chip sense.
+ 0 = Port-power control to off-chip source is active-low.
+ 1 = Port-power control to off-chip source is active-high.
+
+ This is a strap signal; it should only be modified when either the UCTL_CTL[UAHC_RST] or
+ UAHC_GCTL[CoreSoftReset] is asserted. */
+ uint64_t reserved_0_23 : 24;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_23 : 24;
+ uint64_t ppc_active_high_en : 1; /**< [ 24: 24](R/W) Port power control sense selection. The active-high port-power-control output to off-chip
+ source is converted to match the off-chip sense.
+ 0 = Port-power control to off-chip source is active-low.
+ 1 = Port-power control to off-chip source is active-high.
+
+ This is a strap signal; it should only be modified when either the UCTL_CTL[UAHC_RST] or
+ UAHC_GCTL[CoreSoftReset] is asserted. */
+ uint64_t ppc_en : 1; /**< [ 25: 25](R/W) Port-power-control enable.
+ 0 = USBH()_UAHC_HCCPARAMS[PPC] reports port-power-control feature is unavailable.
+ 1 = USBH()_UAHC_HCCPARAMS[PPC] reports port-power-control feature is available. PPC output
+ from UAHC is taken to the GPIO signals and sense-converted based on PPC_ACTIVE_HIGH_EN.
+
+ The MIO GPIO multiplexer must be programmed accordingly.
+
+ This is a strap signal; it should only be modified when either the UCTL_CTL[UAHC_RST] or
+ UAHC_GCTL[CoreSoftReset] is asserted. */
+ uint64_t oci_active_high_en : 1; /**< [ 26: 26](R/W) Overcurrent sense selection. The off-chip sense (high/low) is converted to match the
+ controller's active-high sense.
+ 0 = Overcurrent indication from off-chip source is active-low.
+ 1 = Overcurrent indication from off-chip source is active-high.
+
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t oci_en : 1; /**< [ 27: 27](R/W) Overcurrent-indication enable. When enabled, OCI input to UAHC is taken from the GPIO
+ signals and sense-converted based on OCI_ACTIVE_HIGH_EN. The MIO GPIO multiplexer must be
+ programmed accordingly.
+
+ When disabled, OCI input to UAHC is forced to the correct inactive state based on
+ OCI_ACTIVE_HIGH_EN.
+
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t bme : 1; /**< [ 28: 28](R/W) Bus-master enable. This signal is used to disable the bus-mastering capability of the
+ host. Disabling this capability stalls DMA accesses. */
+ uint64_t reserved_29_31 : 3;
+ uint64_t fla : 6; /**< [ 37: 32](R/W) High-speed jitter adjustment. Indicates the correction required to accommodate mac3 clock
+ and utmi clock jitter to measure 125 us duration. With FLA tied to 0x0, the high-speed
+ 125 us micro-frame is counted for 123933 ns. The value needs to be programmed in terms of
+ high-speed bit times in a 30 MHz cycle. Default value that needs to be driven is 0x20
+ (assuming 30 MHz perfect clock).
+
+ FLA connects to the FLADJ register defined in the xHCI spec in the PCI configuration
+ space. Each count is equal to 16 high-speed bit times. By default when this register is
+ set to 0x20, it gives 125 us interval. Now, based on the clock accuracy, you can decrement
+ the count or increment the count to get the 125 us uSOF window.
+ This is a strap signal; it should only be modified when UAHC is in reset (soft-reset
+ okay). */
+ uint64_t reserved_38_47 : 10;
+ uint64_t host_current_belt : 12; /**< [ 59: 48](RO) This signal indicates the minimum value of all received BELT values and the BELT that is
+ set by the Set LTV command. */
+ uint64_t reserved_60_63 : 4;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_host_cfg_s cn; */
+};
+typedef union bdk_usbhx_uctl_host_cfg bdk_usbhx_uctl_host_cfg_t;
+
+/* Return the physical CSR address of USBH(a)_UCTL_HOST_CFG.
+   Present only on CN88XX, controllers a = 0..1; any other model or index
+   reports a fatal CSR error via __bdk_csr_fatal(). */
+static inline uint64_t BDK_USBHX_UCTL_HOST_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_HOST_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x8680001000e0ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_HOST_CFG", 1, a, 0, 0, 0);
+}
+
+/* Per-register attribute macros (type, bus type, name, BAR, bus number,
+   argument list) — presumably consumed by the generic BDK_CSR_* accessor
+   macros; verify against bdk-csr.h. */
+#define typedef_BDK_USBHX_UCTL_HOST_CFG(a) bdk_usbhx_uctl_host_cfg_t
+#define bustype_BDK_USBHX_UCTL_HOST_CFG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_HOST_CFG(a) "USBHX_UCTL_HOST_CFG"
+#define device_bar_BDK_USBHX_UCTL_HOST_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_HOST_CFG(a) (a)
+#define arguments_BDK_USBHX_UCTL_HOST_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_intena_w1c
+ *
+ * UCTL Interrupt Status Register
+ * This register clears interrupt enable bits.
+ */
+union bdk_usbhx_uctl_intena_w1c
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_intena_w1c_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Reads or clears enable for USBH(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_intena_w1c_s cn; */
+};
+typedef union bdk_usbhx_uctl_intena_w1c bdk_usbhx_uctl_intena_w1c_t;
+
+static inline uint64_t BDK_USBHX_UCTL_INTENA_W1C(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_INTENA_W1C(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000100040ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_INTENA_W1C", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_INTENA_W1C(a) bdk_usbhx_uctl_intena_w1c_t
+#define bustype_BDK_USBHX_UCTL_INTENA_W1C(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_INTENA_W1C(a) "USBHX_UCTL_INTENA_W1C"
+#define device_bar_BDK_USBHX_UCTL_INTENA_W1C(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_INTENA_W1C(a) (a)
+#define arguments_BDK_USBHX_UCTL_INTENA_W1C(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_intena_w1s
+ *
+ * UCTL Interrupt Status Register
+ * This register sets interrupt enable bits.
+ */
+union bdk_usbhx_uctl_intena_w1s
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_intena_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets enable for USBH(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_intena_w1s_s cn; */
+};
+typedef union bdk_usbhx_uctl_intena_w1s bdk_usbhx_uctl_intena_w1s_t;
+
+static inline uint64_t BDK_USBHX_UCTL_INTENA_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_INTENA_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000100048ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_INTENA_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_INTENA_W1S(a) bdk_usbhx_uctl_intena_w1s_t
+#define bustype_BDK_USBHX_UCTL_INTENA_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_INTENA_W1S(a) "USBHX_UCTL_INTENA_W1S"
+#define device_bar_BDK_USBHX_UCTL_INTENA_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_INTENA_W1S(a) (a)
+#define arguments_BDK_USBHX_UCTL_INTENA_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_intstat
+ *
+ * UCTL Interrupt Status Register
+ * This register provides a summary of interrupts. DBEs are detected and
+ * SBE are corrected. For debugging output for ECC DBEs/SBEs, see USBH()_UCTL_ECC. This register
+ * can
+ * be reset by NCB reset.
+ */
+union bdk_usbhx_uctl_intstat
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_intstat_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Detected double-bit error on the UCTL AxiMaster read-data FIFO. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Detected double-bit error on the UAHC RxFIFO RAMs (RAM2). */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Detected single-bit error on the UAHC RxFIFO RAMs (RAM2). */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Detected double-bit error on the UAHC TxFIFO RAMs (RAM1). */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Detected single-bit error on the UAHC TxFIFO RAMs (RAM1). */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Detected double-bit error on the UAHC Desc/Reg Cache (RAM0). */
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Detected single-bit error on the UAHC Desc/Reg Cache (RAM0). */
+ uint64_t reserved_8_15 : 8;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Received DMA read response error from NCBO */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Received DMA write response error from NCBO */
+ uint64_t reserved_3_5 : 3;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Detected bad DMA access from UAHC to NCB. Error information is logged in
+ USBH()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates the
+ assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
+ combinations and address out-of-bounds.
+
+ For more information on exact failures, see the description in
+ USBH()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE]. The hardware does not translate the request
+ correctly
+ and results may violate NCB protocols. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1MB of register
+ space, starting at offset 0x0. Any accesses outside of this register space cause this bit
+ to be set to 1. Error information is logged in USBH()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1C/H) Detected out-of-bound register access to UAHC over NCB. The UAHC defines 1MB of register
+ space, starting at offset 0x0. Any accesses outside of this register space cause this bit
+ to be set to 1. Error information is logged in USBH()_UCTL_SHIM_CFG[XS_NCB_OOB_*]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1C/H) Detected bad DMA access from UAHC to NCB. Error information is logged in
+ USBH()_UCTL_SHIM_CFG[XM_BAD_DMA_*]. Received a DMA request from UAHC that violates the
+ assumptions made by the AXI-to-NCB shim. Such scenarios include: illegal length/size
+ combinations and address out-of-bounds.
+
+ For more information on exact failures, see the description in
+ USBH()_UCTL_SHIM_CFG[XM_BAD_DMA_TYPE]. The hardware does not translate the request
+ correctly
+ and results may violate NCB protocols. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1C/H) Received DMA write response error from NCBO */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1C/H) Received DMA read response error from NCBO */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1C/H) Detected single-bit error on the UAHC Desc/Reg Cache (RAM0). */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1C/H) Detected double-bit error on the UAHC Desc/Reg Cache (RAM0). */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1C/H) Detected single-bit error on the UAHC TxFIFO RAMs (RAM1). */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1C/H) Detected double-bit error on the UAHC TxFIFO RAMs (RAM1). */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1C/H) Detected single-bit error on the UAHC RxFIFO RAMs (RAM2). */
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1C/H) Detected double-bit error on the UAHC RxFIFO RAMs (RAM2). */
+ uint64_t reserved_22_25 : 4;
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1C/H) Detected single-bit error on the UCTL AxiMaster write-data FIFO. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1C/H) Detected double-bit error on the UCTL AxiMaster write-data FIFO. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1C/H) Detected single-bit error on the UCTL AxiMaster read-data FIFO. */
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1C/H) Detected double-bit error on the UCTL AxiMaster read-data FIFO. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_intstat_s cn; */
+};
+typedef union bdk_usbhx_uctl_intstat bdk_usbhx_uctl_intstat_t;
+
+static inline uint64_t BDK_USBHX_UCTL_INTSTAT(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_INTSTAT(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000100030ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_INTSTAT", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_INTSTAT(a) bdk_usbhx_uctl_intstat_t
+#define bustype_BDK_USBHX_UCTL_INTSTAT(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_INTSTAT(a) "USBHX_UCTL_INTSTAT"
+#define device_bar_BDK_USBHX_UCTL_INTSTAT(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_INTSTAT(a) (a)
+#define arguments_BDK_USBHX_UCTL_INTSTAT(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_intstat_w1s
+ *
+ * UCTL Interrupt Status Register
+ * This register sets interrupt bits.
+ */
+union bdk_usbhx_uctl_intstat_w1s
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_intstat_w1s_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_30_63 : 34;
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t reserved_0 : 1;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0 : 1;
+ uint64_t xs_ncb_oob : 1; /**< [ 1: 1](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XS_NCB_OOB]. */
+ uint64_t xm_bad_dma : 1; /**< [ 2: 2](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_BAD_DMA]. */
+ uint64_t reserved_3_5 : 3;
+ uint64_t dma_wr_err : 1; /**< [ 6: 6](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[DMA_WR_ERR]. */
+ uint64_t dma_rd_err : 1; /**< [ 7: 7](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[DMA_RD_ERR]. */
+ uint64_t reserved_8_15 : 8;
+ uint64_t ram0_sbe : 1; /**< [ 16: 16](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM0_SBE]. */
+ uint64_t ram0_dbe : 1; /**< [ 17: 17](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM0_DBE]. */
+ uint64_t ram1_sbe : 1; /**< [ 18: 18](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM1_SBE]. */
+ uint64_t ram1_dbe : 1; /**< [ 19: 19](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM1_DBE]. */
+ uint64_t ram2_sbe : 1; /**< [ 20: 20](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM2_SBE]. */
+ uint64_t ram2_dbe : 1; /**< [ 21: 21](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[RAM2_DBE]. */
+ uint64_t reserved_22_25 : 4;
+ uint64_t xm_w_sbe : 1; /**< [ 26: 26](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_W_SBE]. */
+ uint64_t xm_w_dbe : 1; /**< [ 27: 27](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_W_DBE]. */
+ uint64_t xm_r_sbe : 1; /**< [ 28: 28](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_R_SBE]. */
+ uint64_t xm_r_dbe : 1; /**< [ 29: 29](R/W1S/H) Reads or sets USBH(0..1)_UCTL_INTSTAT[XM_R_DBE]. */
+ uint64_t reserved_30_63 : 34;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_intstat_w1s_s cn; */
+};
+typedef union bdk_usbhx_uctl_intstat_w1s bdk_usbhx_uctl_intstat_w1s_t;
+
+static inline uint64_t BDK_USBHX_UCTL_INTSTAT_W1S(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_INTSTAT_W1S(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000100038ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_INTSTAT_W1S", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_INTSTAT_W1S(a) bdk_usbhx_uctl_intstat_w1s_t
+#define bustype_BDK_USBHX_UCTL_INTSTAT_W1S(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_INTSTAT_W1S(a) "USBHX_UCTL_INTSTAT_W1S"
+#define device_bar_BDK_USBHX_UCTL_INTSTAT_W1S(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_INTSTAT_W1S(a) (a)
+#define arguments_BDK_USBHX_UCTL_INTSTAT_W1S(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_port#_cfg_hs
+ *
+ * UCTL Port Configuration HighSpeed Register
+ * This register controls configuration and test controls for the HS port 0 PHY.
+ *
+ * This register is accessible only when USB()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset.
+ *
+ * Internal:
+ * INTERNAL: All these settings are for HS functionality, connect on DVDD power domain.
+ */
+union bdk_usbhx_uctl_portx_cfg_hs
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_portx_cfg_hs_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_58_63 : 6;
+ uint64_t comp_dis_tune : 3; /**< [ 57: 55](R/W) Disconnect threshold voltage. Adjusts the voltage level for the threshold used to detect a
+ disconnect event at the host.
+ A positive binary bit setting change results in a +1.5% incremental change in the
+ threshold voltage level, while a negative binary bit setting change results in a -1.5%
+ incremental change in the threshold voltage level. */
+ uint64_t sq_rx_tune : 3; /**< [ 54: 52](R/W) Squelch threshold adjustment. Adjusts the voltage level for the threshold used to detect
+ valid high-speed data.
+ A positive binary bit setting change results in a -5% incremental change in threshold
+ voltage level, while a negative binary bit setting change results in a +5% incremental
+ change in threshold voltage level. */
+ uint64_t tx_fsls_tune : 4; /**< [ 51: 48](R/W) Low-speed/full-speed source impedance adjustment. Adjusts the low- and full-speed single-
+ ended source impedance while driving high. This parameter control is encoded in
+ thermometer code.
+ A positive thermometer code change results in a -2.5% incremental change in source
+ impedance. A negative thermometer code change results in +2.5% incremental change in
+ source impedance. Any non-thermometer code setting (that is, 0x9) is not supported and
+ reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t tx_hs_xv_tune : 2; /**< [ 45: 44](R/W) Transmitter high-speed crossover adjustment. This bus adjusts the voltage at which the DP0
+ and DM0 signals cross while transmitting in high-speed mode.
+ 0x3 = default setting.
+ 0x2 = +15 mV.
+ 0x1 = -15 mV.
+ 0x0 = reserved. */
+ uint64_t tx_preemp_amp_tune : 2; /**< [ 43: 42](R/W) High-speed transmitter preemphasis current control. Controls the amount of current
+ sourced to DP0 and DM0 after a J-to-K or K-to-J transition. The high-speed transmitter
+ preemphasis current is defined in terms of unit amounts. One unit amount is approximately
+ 600 A and is defined as 1* preemphasis current.
+ 0x0 = High-speed TX preemphasis is disabled.
+ 0x1 = High-speed TX preemphasis circuit sources 1* preemphasis current.
+ 0x2 = High-speed TX preemphasis circuit sources 2* preemphasis current.
+ 0x3 = High-speed TX preemphasis circuit sources 3* preemphasis current.
+
+ If these signals are not used, set them to 0x0. */
+ uint64_t reserved_41 : 1;
+ uint64_t tx_preemp_pulse_tune : 1; /**< [ 40: 40](R/W) High-speed transmitter preemphasis duration control. Controls the duration for which the
+ high-speed preemphasis current is sourced onto DP0 or DM0. The high-speed transmitter
+ preemphasis duration is defined in terms of unit amounts. One unit of preemphasis duration
+ is approximately 580 ps and is defined as 1* preemphasis duration. This signal is valid
+ only if either TX_PREEMP_AMP_TUNE0[1] or TX_PREEMP_AMP_TUNE0[0] is set to 1.
+ 0 = 2*, long preemphasis current duration (design default).
+ 1 = 1*, short preemphasis current duration.
+
+ If this signal is not used, set it to 0. */
+ uint64_t tx_res_tune : 2; /**< [ 39: 38](R/W) USB source-impedance adjustment. Some applications require additional devices to be added
+ on the USB, such as a series switch, which can add significant series resistance. This bus
+ adjusts the driver source impedance to compensate for added series resistance on the USB.
+ 0x0 = source impedance is decreased by approximately 1.5 ohm.
+ 0x1 = design default.
+ 0x2 = source impedance is decreased by approximately 2 ohm.
+ 0x3 = source impedance is decreased by approximately 4 ohm.
+
+ Any setting other than the default can result in source-impedance variation across
+ process, voltage, and temperature conditions that does not meet USB 2.0 specification
+ limits. If this bus is not used, leave it at the default setting. */
+ uint64_t tx_rise_tune : 2; /**< [ 37: 36](R/W) High-speed transmitter rise-/fall-time adjustment. Adjusts the rise/fall times of the
+ high-speed waveform. A positive binary bit setting change results in a -4% incremental
+ change in the high-speed rise/fall time. A negative binary bit setting change results in a
+ +4% incremental change in the high-speed rise/fall time. */
+ uint64_t tx_vref_tune : 4; /**< [ 35: 32](R/W) High-speed DC voltage-level adjustment. Adjusts the high-speed DC level voltage.
+ A positive binary-bit-setting change results in a +1.25% incremental change in high-speed
+ DC voltage level, while a negative binary-bit-setting change results in a -1.25%
+ incremental change in high-speed DC voltage level.
+
+ The default bit setting is intended to create a HighSpeed transmit
+ DC level of approximately 400mV. */
+ uint64_t reserved_4_31 : 28;
+ uint64_t vatest_enable : 2; /**< [ 3: 2](R/W) Analog test-pin select. Enables analog test voltages to be placed on the ID0 pin.
+ 0x0 = Test functionality disabled.
+ 0x1 = Test functionality enabled.
+ 0x2, 0x3 = Reserved, invalid settings.
+
+ See also the PHY databook for details on how to select which analog test voltage. */
+ uint64_t loopback_enable : 1; /**< [ 1: 1](R/W) Places the high-speed PHY in loopback mode, which concurrently enables high-speed receive
+ and transmit logic. */
+ uint64_t atereset : 1; /**< [ 0: 0](R/W) Per-PHY ATE reset. When the USB core is powered up (not in suspend mode), an automatic
+ tester can use this to disable PHYCLOCK and FREECLK, then re-enable them with an aligned
+ phase.
+ 0 = PHYCLOCK and FREECLK are available within a specific period after ATERESET is
+ deasserted.
+ 1 = PHYCLOCK and FREECLK outputs are disabled. */
+#else /* Word 0 - Little Endian */
+ uint64_t atereset : 1; /**< [ 0: 0](R/W) Per-PHY ATE reset. When the USB core is powered up (not in suspend mode), an automatic
+ tester can use this to disable PHYCLOCK and FREECLK, then re-enable them with an aligned
+ phase.
+ 0 = PHYCLOCK and FREECLK are available within a specific period after ATERESET is
+ deasserted.
+ 1 = PHYCLOCK and FREECLK outputs are disabled. */
+ uint64_t loopback_enable : 1; /**< [ 1: 1](R/W) Places the high-speed PHY in loopback mode, which concurrently enables high-speed receive
+ and transmit logic. */
+ uint64_t vatest_enable : 2; /**< [ 3: 2](R/W) Analog test-pin select. Enables analog test voltages to be placed on the ID0 pin.
+ 0x0 = Test functionality disabled.
+ 0x1 = Test functionality enabled.
+ 0x2, 0x3 = Reserved, invalid settings.
+
+ See also the PHY databook for details on how to select which analog test voltage. */
+ uint64_t reserved_4_31 : 28;
+ uint64_t tx_vref_tune : 4; /**< [ 35: 32](R/W) High-speed DC voltage-level adjustment. Adjusts the high-speed DC level voltage.
+ A positive binary-bit-setting change results in a +1.25% incremental change in high-speed
+ DC voltage level, while a negative binary-bit-setting change results in a -1.25%
+ incremental change in high-speed DC voltage level.
+
+ The default bit setting is intended to create a HighSpeed transmit
+ DC level of approximately 400mV. */
+ uint64_t tx_rise_tune : 2; /**< [ 37: 36](R/W) High-speed transmitter rise-/fall-time adjustment. Adjusts the rise/fall times of the
+ high-speed waveform. A positive binary bit setting change results in a -4% incremental
+ change in the high-speed rise/fall time. A negative binary bit setting change results in a
+ +4% incremental change in the high-speed rise/fall time. */
+ uint64_t tx_res_tune : 2; /**< [ 39: 38](R/W) USB source-impedance adjustment. Some applications require additional devices to be added
+ on the USB, such as a series switch, which can add significant series resistance. This bus
+ adjusts the driver source impedance to compensate for added series resistance on the USB.
+ 0x0 = source impedance is decreased by approximately 1.5 ohm.
+ 0x1 = design default.
+ 0x2 = source impedance is decreased by approximately 2 ohm.
+ 0x3 = source impedance is decreased by approximately 4 ohm.
+
+ Any setting other than the default can result in source-impedance variation across
+ process, voltage, and temperature conditions that does not meet USB 2.0 specification
+ limits. If this bus is not used, leave it at the default setting. */
+ uint64_t tx_preemp_pulse_tune : 1; /**< [ 40: 40](R/W) High-speed transmitter preemphasis duration control. Controls the duration for which the
+ high-speed preemphasis current is sourced onto DP0 or DM0. The high-speed transmitter
+ preemphasis duration is defined in terms of unit amounts. One unit of preemphasis duration
+ is approximately 580 ps and is defined as 1* preemphasis duration. This signal is valid
+ only if either TX_PREEMP_AMP_TUNE0[1] or TX_PREEMP_AMP_TUNE0[0] is set to 1.
+ 0 = 2*, long preemphasis current duration (design default).
+ 1 = 1*, short preemphasis current duration.
+
+ If this signal is not used, set it to 0. */
+ uint64_t reserved_41 : 1;
+ uint64_t tx_preemp_amp_tune : 2; /**< [ 43: 42](R/W) High-speed transmitter preemphasis current control. Controls the amount of current
+ sourced to DP0 and DM0 after a J-to-K or K-to-J transition. The high-speed transmitter
+ preemphasis current is defined in terms of unit amounts. One unit amount is approximately
+ 600 A and is defined as 1* preemphasis current.
+ 0x0 = High-speed TX preemphasis is disabled.
+ 0x1 = High-speed TX preemphasis circuit sources 1* preemphasis current.
+ 0x2 = High-speed TX preemphasis circuit sources 2* preemphasis current.
+ 0x3 = High-speed TX preemphasis circuit sources 3* preemphasis current.
+
+ If these signals are not used, set them to 0x0. */
+ uint64_t tx_hs_xv_tune : 2; /**< [ 45: 44](R/W) Transmitter high-speed crossover adjustment. This bus adjusts the voltage at which the DP0
+ and DM0 signals cross while transmitting in high-speed mode.
+ 0x3 = default setting.
+ 0x2 = +15 mV.
+ 0x1 = -15 mV.
+ 0x0 = reserved. */
+ uint64_t reserved_46_47 : 2;
+ uint64_t tx_fsls_tune : 4; /**< [ 51: 48](R/W) Low-speed/full-speed source impedance adjustment. Adjusts the low- and full-speed single-
+ ended source impedance while driving high. This parameter control is encoded in
+ thermometer code.
+ A positive thermometer code change results in a -2.5% incremental change in source
+ impedance. A negative thermometer code change results in +2.5% incremental change in
+ source impedance. Any non-thermometer code setting (that is, 0x9) is not supported and
+ reserved. */
+ uint64_t sq_rx_tune : 3; /**< [ 54: 52](R/W) Squelch threshold adjustment. Adjusts the voltage level for the threshold used to detect
+ valid high-speed data.
+ A positive binary bit setting change results in a -5% incremental change in threshold
+ voltage level, while a negative binary bit setting change results in a +5% incremental
+ change in threshold voltage level. */
+ uint64_t comp_dis_tune : 3; /**< [ 57: 55](R/W) Disconnect threshold voltage. Adjusts the voltage level for the threshold used to detect a
+ disconnect event at the host.
+ A positive binary bit setting change results in a +1.5% incremental change in the
+ threshold voltage level, while a negative binary bit setting change results in a -1.5%
+ incremental change in the threshold voltage level. */
+ uint64_t reserved_58_63 : 6;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_portx_cfg_hs_s cn; */
+};
+typedef union bdk_usbhx_uctl_portx_cfg_hs bdk_usbhx_uctl_portx_cfg_hs_t;
+
+static inline uint64_t BDK_USBHX_UCTL_PORTX_CFG_HS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_PORTX_CFG_HS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x868000100050ll + 0x1000000000ll * ((a) & 0x1) + 0x20ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_UCTL_PORTX_CFG_HS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_PORTX_CFG_HS(a,b) bdk_usbhx_uctl_portx_cfg_hs_t
+#define bustype_BDK_USBHX_UCTL_PORTX_CFG_HS(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_PORTX_CFG_HS(a,b) "USBHX_UCTL_PORTX_CFG_HS"
+#define device_bar_BDK_USBHX_UCTL_PORTX_CFG_HS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_PORTX_CFG_HS(a,b) (a)
+#define arguments_BDK_USBHX_UCTL_PORTX_CFG_HS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_port#_cfg_ss
+ *
+ * UCTL Port Configuration SuperSpeed Register
+ * This register controls configuration and test controls for the SS port 0 PHY.
+ *
+ * This register is accessible only when USB()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UCTL_RST].
+ *
+ * Internal:
+ * All these settings are for high-speed functionality, connect on DVDD power domain.
+ */
+union bdk_usbhx_uctl_portx_cfg_ss
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_portx_cfg_ss_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t tx_vboost_lvl : 3; /**< [ 63: 61](R/W) TX voltage-boost level. Sets the boosted transmit launch amplitude (mVppd). The default
+ bit setting is intended to set the launch amplitude to approximately 1,008 mVppd. A
+ single, positive binary bit setting change results in a +156 mVppd change in the TX launch
+ amplitude.
+ A single, negative binary bit setting change results in a -156 mVppd change in the TX
+ launch amplitude. All settings more than one binary bit change should not be used.
+ 0x3 = 0.844 V launch amplitude.
+ 0x4 = 1.008 V launch amplitude.
+ 0x5 = 1.156 V launch amplitude.
+ All others values are invalid. */
+ uint64_t los_bias : 3; /**< [ 60: 58](R/W) Loss-of-signal detector threshold-level control. A positive, binary bit setting change
+ results in a +15 mVp incremental change in the LOS threshold.
+ A negative binary bit setting change results in a -15 mVp incremental change in the LOS
+ threshold. The 0x0 setting is reserved and must not be used. The default 0x5 setting
+ corresponds to approximately 105 mVp.
+ 0x0 = invalid.
+ 0x1 = 45 mV.
+ 0x2 = 60 mV.
+ 0x3 = 75 mV.
+ 0x4 = 90 mV.
+ 0x5 = 105 mV (default).
+ 0x6 = 120 mV.
+ 0x7 = 135 mV. */
+ uint64_t lane0_ext_pclk_req : 1; /**< [ 57: 57](R/W) When asserted, this signal enables the pipe0_pclk output regardless of power state (along
+ with the associated increase in power consumption). You can use this input to enable
+ pipe0_pclk in the P3 state without going through a complete boot sequence. */
+ uint64_t lane0_tx2rx_loopbk : 1; /**< [ 56: 56](R/W) When asserted, data from TX predriver is looped back to RX slicers. LOS is bypassed and
+ based on the tx0_en input so that rx0_los = !tx_data_en. */
+ uint64_t reserved_42_55 : 14;
+ uint64_t pcs_rx_los_mask_val : 10; /**< [ 41: 32](R/W) Configurable loss-of-signal mask width. Sets the number of reference clock cycles to mask
+ the incoming LFPS in U3 and U2 states. Masks the incoming LFPS for the number of reference
+ clock cycles equal to the value of pcs_rx_los_mask_val\<9:0\>. This control filters out
+ short, non-compliant LFPS glitches sent by a noncompliant host.
+
+ For normal operation, set to a targeted mask interval of 10us (value = 10us / Tref_clk).
+ If the USBH()_UCTL_CTL[REF_CLK_DIV2] is used, then
+ (value = 10us / (2 * Tref_clk)). These equations are based on the SuperSpeed reference
+ clock frequency. The value of PCS_RX_LOS_MASK_VAL should be as follows:
+
+ \<pre\>
+ Frequency DIV2 LOS_MASK
+ --------- --- --------
+ 200 MHz 1 0x3E8
+ 125 MHz 0 0x4E2
+ 104 MHz 0 0x410
+ 100 MHz 0 0x3E8
+ 96 MHz 0 0x3C0
+ 76.8 MHz 1 0x180
+ 52 MHz 0 0x208
+ 50 MHz 0 0x1F4
+ 48 MHz 0 0x1E0
+ 40 MHz 1 0x0C8
+ 38.4 MHz 0 0x180
+ 26 MHz 0 0x104
+ 25 MHz 0 0x0FA
+ 24 MHz 0 0x0F0
+ 20 MHz 0 0x0C8
+ 19.2 MHz 0 0x0C0
+ \</pre\>
+
+ Setting this bus to 0x0 disables masking. The value should be defined when the PHY is in
+ reset. Changing this value during operation might disrupt normal operation of the link. */
+ uint64_t pcs_tx_deemph_3p5db : 6; /**< [ 31: 26](R/W) Fine-tune transmitter driver deemphasis when set to 3.5db.
+ This static value sets the TX driver deemphasis value when
+ USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] is set to
+ 0x1 (according to the PIPE3 specification). The values for transmit deemphasis are derived
+ from the following equation:
+
+ _ TX de-emphasis (db) = 20 * log_base_10((128 - 2 * pcs_tx_deemph)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependant. */
+ uint64_t pcs_tx_deemph_6db : 6; /**< [ 25: 20](R/W) Fine-tune transmitter driver deemphasis when set to 6 db.
+ This static value sets the TX driver deemphasis value when
+ USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] is set to
+ 0x2 (according to the PIPE3 specification). This bus is provided for completeness and as a
+ second potential launch amplitude. The values for transmit deemphasis are derived from the
+ following equation:
+
+ _ TX deemphasis (db) = 20 * log_base_10((128 - 2 * pcs_tx_deemph)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependant. */
+ uint64_t pcs_tx_swing_full : 7; /**< [ 19: 13](R/W) Launch amplitude of the transmitter. Sets the launch amplitude of the transmitter. The
+ values for transmit amplitude are derived from the following equation:
+ TX amplitude (V) = vptx * ((pcs_tx_swing_full + 1)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependant. */
+ uint64_t lane0_tx_term_offset : 5; /**< [ 12: 8](R/W) Transmitter termination offset. Reserved, set to 0x0. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t res_tune_ack : 1; /**< [ 5: 5](RO/H) Resistor tune acknowledge. While asserted, indicates a resistor tune is in progress. */
+ uint64_t res_tune_req : 1; /**< [ 4: 4](R/W) Resistor tune request. The rising edge triggers a resistor tune request (if one is not
+ already in progress). When asserted, [RES_TUNE_ACK] is asserted high until calibration of
+ the termination impedance is complete.
+ Tuning disrupts the normal flow of data; therefore, assert [RES_TUNE_REQ] only when the
+ PHY
+ is inactive. The PHY automatically performs a tune when coming out of PRST. */
+ uint64_t reserved_0_3 : 4;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_3 : 4;
+ uint64_t res_tune_req : 1; /**< [ 4: 4](R/W) Resistor tune request. The rising edge triggers a resistor tune request (if one is not
+ already in progress). When asserted, [RES_TUNE_ACK] is asserted high until calibration of
+ the termination impedance is complete.
+ Tuning disrupts the normal flow of data; therefore, assert [RES_TUNE_REQ] only when the
+ PHY
+ is inactive. The PHY automatically performs a tune when coming out of PRST. */
+ uint64_t res_tune_ack : 1; /**< [ 5: 5](RO/H) Resistor tune acknowledge. While asserted, indicates a resistor tune is in progress. */
+ uint64_t reserved_6_7 : 2;
+ uint64_t lane0_tx_term_offset : 5; /**< [ 12: 8](R/W) Transmitter termination offset. Reserved, set to 0x0. */
+ uint64_t pcs_tx_swing_full : 7; /**< [ 19: 13](R/W) Launch amplitude of the transmitter. Sets the launch amplitude of the transmitter. The
+ values for transmit amplitude are derived from the following equation:
+ TX amplitude (V) = vptx * ((pcs_tx_swing_full + 1)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependant. */
+ uint64_t pcs_tx_deemph_6db : 6; /**< [ 25: 20](R/W) Fine-tune transmitter driver deemphasis when set to 6 db.
+ This static value sets the TX driver deemphasis value when
+ USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] is set to
+ 0x2 (according to the PIPE3 specification). This bus is provided for completeness and as a
+ second potential launch amplitude. The values for transmit deemphasis are derived from the
+ following equation:
+
+ _ TX deemphasis (db) = 20 * log_base_10((128 - 2 * pcs_tx_deemph)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependant. */
+ uint64_t pcs_tx_deemph_3p5db : 6; /**< [ 31: 26](R/W) Fine-tune transmitter driver deemphasis when set to 3.5db.
+ This static value sets the TX driver deemphasis value when
+ USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] is set to
+ 0x1 (according to the PIPE3 specification). The values for transmit deemphasis are derived
+ from the following equation:
+
+ _ TX de-emphasis (db) = 20 * log_base_10((128 - 2 * pcs_tx_deemph)/128)
+
+ In general, the parameter controls are static signals to be set prior to taking the PHY
+ out of reset. However, you can dynamically change these values on-the-fly for test
+ purposes. In this case, changes to the transmitter to reflect the current value occur only
+ after USBH()_UAHC_GUSB3PIPECTL()[TXDEEMPHASIS] changes.
+
+ Internal:
+ Default value is package dependant. */
+ uint64_t pcs_rx_los_mask_val : 10; /**< [ 41: 32](R/W) Configurable loss-of-signal mask width. Sets the number of reference clock cycles to mask
+ the incoming LFPS in U3 and U2 states. Masks the incoming LFPS for the number of reference
+ clock cycles equal to the value of pcs_rx_los_mask_val\<9:0\>. This control filters out
+ short, non-compliant LFPS glitches sent by a noncompliant host.
+
+ For normal operation, set to a targeted mask interval of 10us (value = 10us / Tref_clk).
+ If the USBH()_UCTL_CTL[REF_CLK_DIV2] is used, then
+ (value = 10us / (2 * Tref_clk)). These equations are based on the SuperSpeed reference
+ clock frequency. The value of PCS_RX_LOS_MASK_VAL should be as follows:
+
+ \<pre\>
+ Frequency DIV2 LOS_MASK
+ --------- --- --------
+ 200 MHz 1 0x3E8
+ 125 MHz 0 0x4E2
+ 104 MHz 0 0x410
+ 100 MHz 0 0x3E8
+ 96 MHz 0 0x3C0
+ 76.8 MHz 1 0x180
+ 52 MHz 0 0x208
+ 50 MHz 0 0x1F4
+ 48 MHz 0 0x1E0
+ 40 MHz 1 0x0C8
+ 38.4 MHz 0 0x180
+ 26 MHz 0 0x104
+ 25 MHz 0 0x0FA
+ 24 MHz 0 0x0F0
+ 20 MHz 0 0x0C8
+ 19.2 MHz 0 0x0C0
+ \</pre\>
+
+ Setting this bus to 0x0 disables masking. The value should be defined when the PHY is in
+ reset. Changing this value during operation might disrupt normal operation of the link. */
+ uint64_t reserved_42_55 : 14;
+ uint64_t lane0_tx2rx_loopbk : 1; /**< [ 56: 56](R/W) When asserted, data from TX predriver is looped back to RX slicers. LOS is bypassed and
+ based on the tx0_en input so that rx0_los = !tx_data_en. */
+ uint64_t lane0_ext_pclk_req : 1; /**< [ 57: 57](R/W) When asserted, this signal enables the pipe0_pclk output regardless of power state (along
+ with the associated increase in power consumption). You can use this input to enable
+ pipe0_pclk in the P3 state without going through a complete boot sequence. */
+ uint64_t los_bias : 3; /**< [ 60: 58](R/W) Loss-of-signal detector threshold-level control. A positive, binary bit setting change
+ results in a +15 mVp incremental change in the LOS threshold.
+ A negative binary bit setting change results in a -15 mVp incremental change in the LOS
+ threshold. The 0x0 setting is reserved and must not be used. The default 0x5 setting
+ corresponds to approximately 105 mVp.
+ 0x0 = invalid.
+ 0x1 = 45 mV.
+ 0x2 = 60 mV.
+ 0x3 = 75 mV.
+ 0x4 = 90 mV.
+ 0x5 = 105 mV (default).
+ 0x6 = 120 mV.
+ 0x7 = 135 mV. */
+ uint64_t tx_vboost_lvl : 3; /**< [ 63: 61](R/W) TX voltage-boost level. Sets the boosted transmit launch amplitude (mVppd). The default
+ bit setting is intended to set the launch amplitude to approximately 1,008 mVppd. A
+ single, positive binary bit setting change results in a +156 mVppd change in the TX launch
+ amplitude.
+ A single, negative binary bit setting change results in a -156 mVppd change in the TX
+ launch amplitude. All settings more than one binary bit change should not be used.
+ 0x3 = 0.844 V launch amplitude.
+ 0x4 = 1.008 V launch amplitude.
+ 0x5 = 1.156 V launch amplitude.
+ All others values are invalid. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_portx_cfg_ss_s cn; */
+};
+typedef union bdk_usbhx_uctl_portx_cfg_ss bdk_usbhx_uctl_portx_cfg_ss_t;
+
+static inline uint64_t BDK_USBHX_UCTL_PORTX_CFG_SS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_PORTX_CFG_SS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x868000100058ll + 0x1000000000ll * ((a) & 0x1) + 0x20ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_UCTL_PORTX_CFG_SS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_PORTX_CFG_SS(a,b) bdk_usbhx_uctl_portx_cfg_ss_t
+#define bustype_BDK_USBHX_UCTL_PORTX_CFG_SS(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_PORTX_CFG_SS(a,b) "USBHX_UCTL_PORTX_CFG_SS"
+#define device_bar_BDK_USBHX_UCTL_PORTX_CFG_SS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_PORTX_CFG_SS(a,b) (a)
+#define arguments_BDK_USBHX_UCTL_PORTX_CFG_SS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_port#_cr_dbg_cfg
+ *
+ * UCTL Port Debug Configuration Register
+ * This register allows indirect access to the configuration and test controls for the port 0
+ * PHY.
+ *
+ * This register is accessible only when USB()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UCTL_RST].
+ *
+ * Internal:
+ * (In body of HRM)
+ * To access the PHY registers indirectly through the CR interface, the HCLK must be running,
+ * UCTL_RST must be deasserted, and UPHY_RST must be deasserted. Software is responsible for
+ * ensuring that only one indirect access is ongoing at a time.
+ *
+ * To read a PHY register via indirect CR interface:
+ * 1. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<address\>\> of the register,
+ * * [CAP_ADDR], [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 2. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<address\>\> of the register,
+ * * [CAP_ADDR] field 0x1,
+ * * [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 3. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 4. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 5. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ * 6. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [READ] field 0x1,
+ * * [DATA_IN], [CAP_ADDR], [CAP_DATA], and [WRITE] fields 0x0.
+ * 7. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 8. Read UCTL_PORTn_CR_DBG_STATUS[DATA_OUT]. This is the \<\<read data\>\>.
+ * 9. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 10. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ *
+ * To write a PHY register via indirect CR interface:
+ * 1. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<address\>\> of the register,
+ * * [CAP_ADDR], [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 2. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<address\>\> of the register,
+ * * [CAP_ADDR] field 0x1,
+ * * [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 3. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 4. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 5. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ * 6. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the \<\<write data\>\>,
+ * * [CAP_ADDR], [CAP_DATA], [READ], and [WRITE] fields 0x0.
+ * 7. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [DATA_IN] with the write data,
+ * * [CAP_DATA] field 0x1,
+ * * [CAP_ADDR], [READ], and [WRITE] fields 0x0.
+ * 8. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 9. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 10. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ * 11. Write UCTL_PORTn_CR_DBG_CFG with:
+ * * [WRITE] field 0x1,
+ * * [DATA_IN], [CAP_ADDR], and [READ] fields 0x0.
+ * 12. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x1.
+ * 13. Write UCTL_PORTn_CR_DBG_CFG with all 0x0's.
+ * 14. Poll for UCTL_PORTn_CR_DBG_STATUS[ACK] 0x0.
+ *
+ * For partial writes, a read-modify write is required. Note that the CAP_ADDR steps (1-5)
+ * do not have to be repeated until the address needs changed.
+ */
+union bdk_usbhx_uctl_portx_cr_dbg_cfg
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_portx_cr_dbg_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t data_in : 16; /**< [ 47: 32](R/W) Address or data to be written to the CR interface. */
+ uint64_t reserved_4_31 : 28;
+ uint64_t cap_addr : 1; /**< [ 3: 3](R/W) Rising edge triggers the [DATA_IN] field to be captured as the address. */
+ uint64_t cap_data : 1; /**< [ 2: 2](R/W) Rising edge triggers the [DATA_IN] field to be captured as the write data. */
+ uint64_t read : 1; /**< [ 1: 1](R/W) Rising edge triggers a register read operation of the captured address. */
+ uint64_t write : 1; /**< [ 0: 0](R/W) Rising edge triggers a register write operation of the captured address with the captured data. */
+#else /* Word 0 - Little Endian */
+ uint64_t write : 1; /**< [ 0: 0](R/W) Rising edge triggers a register write operation of the captured address with the captured data. */
+ uint64_t read : 1; /**< [ 1: 1](R/W) Rising edge triggers a register read operation of the captured address. */
+ uint64_t cap_data : 1; /**< [ 2: 2](R/W) Rising edge triggers the [DATA_IN] field to be captured as the write data. */
+ uint64_t cap_addr : 1; /**< [ 3: 3](R/W) Rising edge triggers the [DATA_IN] field to be captured as the address. */
+ uint64_t reserved_4_31 : 28;
+ uint64_t data_in : 16; /**< [ 47: 32](R/W) Address or data to be written to the CR interface. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_portx_cr_dbg_cfg_s cn; */
+};
+typedef union bdk_usbhx_uctl_portx_cr_dbg_cfg bdk_usbhx_uctl_portx_cr_dbg_cfg_t;
+
+static inline uint64_t BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x868000100060ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_UCTL_PORTX_CR_DBG_CFG", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(a,b) bdk_usbhx_uctl_portx_cr_dbg_cfg_t
+#define bustype_BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(a,b) "USBHX_UCTL_PORTX_CR_DBG_CFG"
+#define device_bar_BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(a,b) (a)
+#define arguments_BDK_USBHX_UCTL_PORTX_CR_DBG_CFG(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_port#_cr_dbg_status
+ *
+ * UCTL Port Debug Status Register
+ * This register allows indirect access to the configuration and test controls for the port 0
+ * PHY.
+ *
+ * This register is accessible only when USB()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbhx_uctl_portx_cr_dbg_status
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_portx_cr_dbg_status_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t reserved_48_63 : 16;
+ uint64_t data_out : 16; /**< [ 47: 32](RO/H) Last data read from the CR interface. */
+ uint64_t reserved_1_31 : 31;
+ uint64_t ack : 1; /**< [ 0: 0](RO/H) Acknowledge that the CAP_ADDR, CAP_DATA, READ, WRITE commands have completed. */
+#else /* Word 0 - Little Endian */
+ uint64_t ack : 1; /**< [ 0: 0](RO/H) Acknowledge that the CAP_ADDR, CAP_DATA, READ, WRITE commands have completed. */
+ uint64_t reserved_1_31 : 31;
+ uint64_t data_out : 16; /**< [ 47: 32](RO/H) Last data read from the CR interface. */
+ uint64_t reserved_48_63 : 16;
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_portx_cr_dbg_status_s cn; */
+};
+typedef union bdk_usbhx_uctl_portx_cr_dbg_status bdk_usbhx_uctl_portx_cr_dbg_status_t;
+
+static inline uint64_t BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(unsigned long a, unsigned long b) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(unsigned long a, unsigned long b)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && ((a<=1) && (b==0)))
+ return 0x868000100068ll + 0x1000000000ll * ((a) & 0x1) + 0ll * ((b) & 0x0);
+ __bdk_csr_fatal("USBHX_UCTL_PORTX_CR_DBG_STATUS", 2, a, b, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(a,b) bdk_usbhx_uctl_portx_cr_dbg_status_t
+#define bustype_BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(a,b) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(a,b) "USBHX_UCTL_PORTX_CR_DBG_STATUS"
+#define device_bar_BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(a,b) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(a,b) (a)
+#define arguments_BDK_USBHX_UCTL_PORTX_CR_DBG_STATUS(a,b) (a),(b),-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_shim_cfg
+ *
+ * UCTL Shim Configuration Register
+ * This register allows configuration of various shim (UCTL) features. The fields XS_NCB_OOB_*
+ * are captured when there are no outstanding OOB errors indicated in INTSTAT and a new OOB error
+ * arrives. The fields XS_BAD_DMA_* are captured when there are no outstanding DMA errors
+ * indicated in INTSTAT and a new DMA error arrives.
+ *
+ * This register is accessible only when USB()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbhx_uctl_shim_cfg
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_shim_cfg_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t xs_ncb_oob_wrn : 1; /**< [ 63: 63](RO/H) Read/write error log for out-of-bound UAHC register access.
+ 0 = read, 1 = write. */
+ uint64_t reserved_60_62 : 3;
+ uint64_t xs_ncb_oob_osrc : 12; /**< [ 59: 48](RO/H) SRCID error log for out-of-bound UAHC register access. The NCB outbound SRCID for the OOB
+ error.
+ \<59:58\> = chipID.
+ \<57\> = Request source: 0 = core, 1 = NCB-device.
+ \<56:51\> = Core/NCB-device number. Note that for NCB devices, \<56\> is always 0.
+ \<50:48\> = SubID. */
+ uint64_t xm_bad_dma_wrn : 1; /**< [ 47: 47](RO/H) Read/write error log for bad DMA access from UAHC.
+ 0 = read error log, 1 = write error log. */
+ uint64_t reserved_44_46 : 3;
+ uint64_t xm_bad_dma_type : 4; /**< [ 43: 40](RO/H) ErrType error log for bad DMA access from UAHC. Encodes the type of error encountered
+ (error largest encoded value has priority). See UCTL_XM_BAD_DMA_TYPE_E. */
+ uint64_t reserved_14_39 : 26;
+ uint64_t dma_read_cmd : 2; /**< [ 13: 12](R/W) Selects the NCB read command used by DMA accesses. See UCTL_DMA_READ_CMD_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t dma_write_cmd : 1; /**< [ 10: 10](R/W) Selects the NCB write command used by DMA accesses. See UCTL_DMA_WRITE_CMD_E. */
+ uint64_t reserved_0_9 : 10;
+#else /* Word 0 - Little Endian */
+ uint64_t reserved_0_9 : 10;
+ uint64_t dma_write_cmd : 1; /**< [ 10: 10](R/W) Selects the NCB write command used by DMA accesses. See UCTL_DMA_WRITE_CMD_E. */
+ uint64_t reserved_11 : 1;
+ uint64_t dma_read_cmd : 2; /**< [ 13: 12](R/W) Selects the NCB read command used by DMA accesses. See UCTL_DMA_READ_CMD_E. */
+ uint64_t reserved_14_39 : 26;
+ uint64_t xm_bad_dma_type : 4; /**< [ 43: 40](RO/H) ErrType error log for bad DMA access from UAHC. Encodes the type of error encountered
+ (error largest encoded value has priority). See UCTL_XM_BAD_DMA_TYPE_E. */
+ uint64_t reserved_44_46 : 3;
+ uint64_t xm_bad_dma_wrn : 1; /**< [ 47: 47](RO/H) Read/write error log for bad DMA access from UAHC.
+ 0 = read error log, 1 = write error log. */
+ uint64_t xs_ncb_oob_osrc : 12; /**< [ 59: 48](RO/H) SRCID error log for out-of-bound UAHC register access. The NCB outbound SRCID for the OOB
+ error.
+ \<59:58\> = chipID.
+ \<57\> = Request source: 0 = core, 1 = NCB-device.
+ \<56:51\> = Core/NCB-device number. Note that for NCB devices, \<56\> is always 0.
+ \<50:48\> = SubID. */
+ uint64_t reserved_60_62 : 3;
+ uint64_t xs_ncb_oob_wrn : 1; /**< [ 63: 63](RO/H) Read/write error log for out-of-bound UAHC register access.
+ 0 = read, 1 = write. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_shim_cfg_s cn; */
+};
+typedef union bdk_usbhx_uctl_shim_cfg bdk_usbhx_uctl_shim_cfg_t;
+
+static inline uint64_t BDK_USBHX_UCTL_SHIM_CFG(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_SHIM_CFG(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x8680001000e8ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_SHIM_CFG", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_SHIM_CFG(a) bdk_usbhx_uctl_shim_cfg_t
+#define bustype_BDK_USBHX_UCTL_SHIM_CFG(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_SHIM_CFG(a) "USBHX_UCTL_SHIM_CFG"
+#define device_bar_BDK_USBHX_UCTL_SHIM_CFG(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_SHIM_CFG(a) (a)
+#define arguments_BDK_USBHX_UCTL_SHIM_CFG(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_spare0
+ *
+ * UCTL Spare Register 0
+ * This register is a spare register. This register can be reset by NCB reset.
+ */
+union bdk_usbhx_uctl_spare0
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_spare0_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#else /* Word 0 - Little Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_spare0_s cn; */
+};
+typedef union bdk_usbhx_uctl_spare0 bdk_usbhx_uctl_spare0_t;
+
+static inline uint64_t BDK_USBHX_UCTL_SPARE0(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_SPARE0(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x868000100010ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_SPARE0", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_SPARE0(a) bdk_usbhx_uctl_spare0_t
+#define bustype_BDK_USBHX_UCTL_SPARE0(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_SPARE0(a) "USBHX_UCTL_SPARE0"
+#define device_bar_BDK_USBHX_UCTL_SPARE0(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_SPARE0(a) (a)
+#define arguments_BDK_USBHX_UCTL_SPARE0(a) (a),-1,-1,-1
+
+/**
+ * Register (NCB) usbh#_uctl_spare1
+ *
+ * UCTL Spare Register 1
+ * This register is accessible only when USB()_UCTL_CTL[H_CLK_EN] = 1.
+ *
+ * This register can be reset by NCB reset or with USBH()_UCTL_CTL[UCTL_RST].
+ */
+union bdk_usbhx_uctl_spare1
+{
+ uint64_t u;
+ struct bdk_usbhx_uctl_spare1_s
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN /* Word 0 - Big Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#else /* Word 0 - Little Endian */
+ uint64_t spare : 64; /**< [ 63: 0](R/W) Spare. */
+#endif /* Word 0 - End */
+ } s;
+ /* struct bdk_usbhx_uctl_spare1_s cn; */
+};
+typedef union bdk_usbhx_uctl_spare1 bdk_usbhx_uctl_spare1_t;
+
+static inline uint64_t BDK_USBHX_UCTL_SPARE1(unsigned long a) __attribute__ ((pure, always_inline));
+static inline uint64_t BDK_USBHX_UCTL_SPARE1(unsigned long a)
+{
+ if (CAVIUM_IS_MODEL(CAVIUM_CN88XX) && (a<=1))
+ return 0x8680001000f8ll + 0x1000000000ll * ((a) & 0x1);
+ __bdk_csr_fatal("USBHX_UCTL_SPARE1", 1, a, 0, 0, 0);
+}
+
+#define typedef_BDK_USBHX_UCTL_SPARE1(a) bdk_usbhx_uctl_spare1_t
+#define bustype_BDK_USBHX_UCTL_SPARE1(a) BDK_CSR_TYPE_NCB
+#define basename_BDK_USBHX_UCTL_SPARE1(a) "USBHX_UCTL_SPARE1"
+#define device_bar_BDK_USBHX_UCTL_SPARE1(a) 0x0 /* PF_BAR0 */
+#define busnum_BDK_USBHX_UCTL_SPARE1(a) (a)
+#define arguments_BDK_USBHX_UCTL_SPARE1(a) (a),-1,-1,-1
+
+#endif /* __BDK_CSRS_USBH_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h
index d9c0ce955e..bd2dbfa6b8 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-fuse.h
@@ -1,3 +1,6 @@
+#ifndef __CB_BDK_FUSE_H__
+#define __CB_BDK_FUSE_H__
+
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -115,3 +118,4 @@ extern int bdk_fuse_field_soft_blow(bdk_node_t node, int fuse);
*/
extern int bdk_fuse_field_hard_blow(bdk_node_t node, int start_fuse, uint64_t fuses0, uint64_t fuses1);
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h
index 49a69dfb76..20c3959faf 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-lmt.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_LMT_H__
+#define __CB_BDK_LMT_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -98,3 +100,4 @@ static inline int bdk_lmt_submit(uint64_t io_address)
}
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h
index fc50514038..0c236306d4 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-model.h
@@ -1,3 +1,6 @@
+#ifndef __CB_BDK_MODEL_H__
+#define __CB_BDK_MODEL_H__
+
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -88,8 +91,8 @@
#define CAVIUM_CN8XXX (CAVIUM_CN88XX_PASS1_0 | __OM_IGNORE_MODEL)
#define CAVIUM_CN9XXX (CAVIUM_CN93XX_PASS1_0 | __OM_IGNORE_MODEL)
-static inline uint64_t cavium_get_model() __attribute__ ((pure, always_inline));
-static inline uint64_t cavium_get_model()
+static inline uint64_t cavium_get_model(void) __attribute__ ((pure, always_inline));
+static inline uint64_t cavium_get_model(void)
{
#ifdef BDK_BUILD_HOST
extern uint32_t thunder_remote_get_model(void) __attribute__ ((pure));
@@ -158,13 +161,5 @@ static inline int CAVIUM_IS_MODEL(uint32_t arg_model)
*/
extern int cavium_is_altpkg(uint32_t arg_model);
-/**
- * Return the SKU string for a chip
- *
- * @param node Node to get SKU for
- *
- * @return Chip's SKU
- */
-extern const char* bdk_model_get_sku(int node);
-
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h
index cd5b420876..9583034938 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-numa.h
@@ -1,3 +1,6 @@
+#ifndef __CB_BDK_NUMA_H__
+#define __CB_BDK_NUMA_H__
+
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -37,6 +40,10 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+/* FIXME(dhendricks): added */
+#include <libbdk-arch/bdk-asm.h>
+#include <libbdk-arch/bdk-model.h>
+
/**
* @file
*
@@ -113,7 +120,7 @@ extern int bdk_numa_exists(bdk_node_t node);
*
* @return
*/
-extern int bdk_numa_is_only_one();
+extern int bdk_numa_is_only_one(void);
/**
* Given a physical address without a node, return the proper physical address
@@ -136,4 +143,4 @@ static inline uint64_t bdk_numa_get_address(bdk_node_t node, uint64_t pa)
return pa;
}
-
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h
index ac65134077..eea2530cee 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-require.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_REQUIRE_H__
+#define __CB_BDK_REQUIRE_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -102,6 +104,8 @@
* reference to bdk_requires_depends() which then contains strong
* references to all needed components.
*/
-extern void __bdk_require_depends(void);
+// FIXME(dhendrix): leave it out if possible
+//extern void __bdk_require_depends(void);
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-swap.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-swap.h
deleted file mode 100644
index 2e5ccc60c1..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-swap.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Utility functions for endian swapping
- *
- * <hr>$Revision: 32636 $<hr>
- *
- * @addtogroup hal
- * @{
- */
-
-/**
- * Byte swap a 16 bit number
- *
- * @param x 16 bit number
- * @return Byte swapped result
- */
-static inline uint16_t bdk_swap16(uint16_t x)
-{
- return ((uint16_t)((((uint16_t)(x) & (uint16_t)0x00ffU) << 8) |
- (((uint16_t)(x) & (uint16_t)0xff00U) >> 8) ));
-}
-
-
-/**
- * Byte swap a 32 bit number
- *
- * @param x 32 bit number
- * @return Byte swapped result
- */
-static inline uint32_t bdk_swap32(uint32_t x)
-{
-#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))
- return __builtin_bswap32(x);
-#else
- x = ((x<<8)&0xFF00FF00) | ((x>>8)&0x00FF00FF);
- x = (x>>16) | (x<<16);
- return x;
-#endif
-}
-
-
-/**
- * Byte swap a 64 bit number
- *
- * @param x 64 bit number
- * @return Byte swapped result
- */
-static inline uint64_t bdk_swap64(uint64_t x)
-{
-#if (__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ >= 3))
- return __builtin_bswap64(x);
-#else
- x = ((x<< 8)&0xFF00FF00FF00FF00ULL) | ((x>> 8)&0x00FF00FF00FF00FFULL);
- x = ((x<<16)&0xFFFF0000FFFF0000ULL) | ((x>>16)&0x0000FFFF0000FFFFULL);
- return (x>>32) | (x<<32);
-#endif
-}
-
-
-#if __BYTE_ORDER == __BIG_ENDIAN
-
-#define bdk_cpu_to_le16(x) bdk_swap16(x)
-#define bdk_cpu_to_le32(x) bdk_swap32(x)
-#define bdk_cpu_to_le64(x) bdk_swap64(x)
-
-#define bdk_cpu_to_be16(x) (x)
-#define bdk_cpu_to_be32(x) (x)
-#define bdk_cpu_to_be64(x) (x)
-
-#else
-
-#define bdk_cpu_to_le16(x) (x)
-#define bdk_cpu_to_le32(x) (x)
-#define bdk_cpu_to_le64(x) (x)
-
-#define bdk_cpu_to_be16(x) bdk_swap16(x)
-#define bdk_cpu_to_be32(x) bdk_swap32(x)
-#define bdk_cpu_to_be64(x) bdk_swap64(x)
-
-#endif
-
-#define bdk_le16_to_cpu(x) bdk_cpu_to_le16(x)
-#define bdk_le32_to_cpu(x) bdk_cpu_to_le32(x)
-#define bdk_le64_to_cpu(x) bdk_cpu_to_le64(x)
-
-#define bdk_be16_to_cpu(x) bdk_cpu_to_be16(x)
-#define bdk_be32_to_cpu(x) bdk_cpu_to_be32(x)
-#define bdk_be64_to_cpu(x) bdk_cpu_to_be64(x)
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h
index 685c812e20..84f9c3b55a 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-arch/bdk-warn.h
@@ -1,3 +1,6 @@
+#ifndef __CB_BDK_WARN_H__
+#define __CB_BDK_WARN_H__
+
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -37,6 +40,8 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <console/console.h>
+
/**
* @file
*
@@ -48,11 +53,14 @@
* @{
*/
-extern void __bdk_die(void) __attribute__ ((noreturn));
-extern void bdk_fatal(const char *format, ...) __attribute__ ((noreturn, format(printf, 1, 2)));
-extern void bdk_error(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
-extern void bdk_warn(const char *format, ...) __attribute__ ((format(printf, 1, 2)));
-#define bdk_warn_if(expression, format, ...) if (bdk_unlikely(expression)) bdk_warn(format, ##__VA_ARGS__)
+#define bdk_warn(format, ...) printk(BIOS_WARNING, format, ##__VA_ARGS__)
+#define bdk_error(format, ...) printk(BIOS_ERR, format, ##__VA_ARGS__)
+#define bdk_fatal(format, ...) \
+ do { \
+ printk(BIOS_CRIT, format, ##__VA_ARGS__); \
+ while (1) \
+ ; \
+ } while (0)
/* The following defines control detailed tracing of various parts of the
BDK. Each one can be enabled(1) or disabled(0) independently. These
@@ -87,18 +95,29 @@ typedef enum
__BDK_TRACE_ENABLE_LAST, /* Must always be last value */
} bdk_trace_enable_t;
-/* See bdk-config.c to change the trace level for before config files are loaded */
-extern uint64_t bdk_trace_enables;
-
/**
* Macro for low level tracing of BDK functions. When enabled,
* these translate to printf() calls. The "area" is a string
* that is appended to "BDK_TRACE_ENABLE_" to figure out which
* enable macro to use. The macro expects a ';' after it.
*/
-#define BDK_TRACE(area, format, ...) do { \
- if (bdk_trace_enables & (1ull << BDK_TRACE_ENABLE_##area)) \
- printf(#area ": " format, ##__VA_ARGS__); \
-} while (0)
+#define BDK_TRACE(area, format, ...) do { \
+ if ((BDK_TRACE_ENABLE_INIT == BDK_TRACE_ENABLE_##area && \
+ IS_ENABLED(CONFIG_CAVIUM_BDK_VERBOSE_INIT)) || \
+ (BDK_TRACE_ENABLE_DRAM == BDK_TRACE_ENABLE_##area && \
+ IS_ENABLED(CONFIG_CAVIUM_BDK_VERBOSE_DRAM)) || \
+ (BDK_TRACE_ENABLE_DRAM_TEST == BDK_TRACE_ENABLE_##area && \
+ IS_ENABLED(CONFIG_CAVIUM_BDK_VERBOSE_DRAM_TEST)) || \
+ (BDK_TRACE_ENABLE_QLM == BDK_TRACE_ENABLE_##area && \
+ IS_ENABLED(CONFIG_CAVIUM_BDK_VERBOSE_QLM)) || \
+ (BDK_TRACE_ENABLE_PCIE_CONFIG == BDK_TRACE_ENABLE_##area && \
+ IS_ENABLED(CONFIG_CAVIUM_BDK_VERBOSE_PCIE_CONFIG)) || \
+ (BDK_TRACE_ENABLE_PCIE == BDK_TRACE_ENABLE_##area && \
+ IS_ENABLED(CONFIG_CAVIUM_BDK_VERBOSE_PCIE)) || \
+ (BDK_TRACE_ENABLE_PHY == BDK_TRACE_ENABLE_##area && \
+ IS_ENABLED(CONFIG_CAVIUM_BDK_VERBOSE_PHY))) \
+ printk(BIOS_DEBUG, #area ": " format, ##__VA_ARGS__); \
+ } while (0)
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-bist/bist.h b/src/vendorcode/cavium/include/bdk/libbdk-bist/bist.h
deleted file mode 100644
index b11e0c4595..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-bist/bist.h
+++ /dev/null
@@ -1,43 +0,0 @@
-#ifndef __LIBBDK_BIST_H
-#define __LIBBDK_BIST_H
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-void bdk_bist_check();
-#endif
-
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-bist/efuse-read.h b/src/vendorcode/cavium/include/bdk/libbdk-bist/efuse-read.h
deleted file mode 100644
index 0b05bd081e..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-bist/efuse-read.h
+++ /dev/null
@@ -1,41 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-void efuse_read_all_o3(unsigned char *efuse_ptr, int cached_read);
-void dump_fuses(void);
-int num_fuses(void);
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-ccpi.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-ccpi.h
deleted file mode 100644
index a457f8c0d0..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-ccpi.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Boot services for CCPI
- *
- * @addtogroup boot
- * @{
- */
-
-/**
- * Initialize the CCPI links and bringup the other nodes
- */
-extern void bdk_boot_ccpi(void);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-dram.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-dram.h
deleted file mode 100644
index 716efc3c3a..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-dram.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Boot services for DRAM
- *
- * @addtogroup boot
- * @{
- */
-
-/**
- * Configure DRAM on a specific node
- *
- * @param node Node to configure
- * @param override_for_speed
- * If non-zero, the DRAM init code will use this for the
- * DRAM clock speed. This is used for SLT and should not
- * be used otherwise.
- */
-extern void bdk_boot_dram(bdk_node_t node, int override_for_speed);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-mdio.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-mdio.h
deleted file mode 100644
index e99be3ffd6..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-mdio.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Boot services for MDIO
- *
- * @addtogroup boot
- * @{
- */
-
-/**
- * Configure MDIO on all nodes as part of booting
- */
-extern void bdk_boot_mdio(void);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h
index 2a0896fe10..f88ff2baf7 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-status.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_BOOT_STATUS_H__
+#define __CB_BDK_BOOT_STATUS_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -92,3 +94,4 @@ typedef enum
extern void bdk_boot_status(bdk_boot_status_t status);
/** @} */
+#endif /* !__CB_BDK_BOOT_STATUS_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-twsi.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-twsi.h
deleted file mode 100644
index 155509f3c1..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-twsi.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Boot services for TWSI
- *
- * @addtogroup boot
- * @{
- */
-
-/**
- * Configure TWSI on all nodes as part of booting
- */
-extern void bdk_boot_twsi(void);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h
index 169047499b..20a049653d 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot-usb.h
@@ -1,3 +1,5 @@
+#ifndef __BDK_BOOT_USB_H__
+#define __BDK_BOOT_USB_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -49,6 +51,7 @@
/**
* Configure USB on all nodes as part of booting
*/
-extern void bdk_boot_usb(void);
+void bdk_boot_usb(void);
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h
index e40b8cebc2..46ec4b8679 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-boot.h
@@ -51,19 +51,9 @@
* @defgroup boot Boot related support functions
*/
-#include "bdk-boot-bgx.h"
-#include "bdk-boot-ccpi.h"
-#include "bdk-boot-dram.h"
-#include "bdk-boot-gpio.h"
-#include "bdk-boot-info.h"
-#include "bdk-boot-mdio.h"
-#include "bdk-boot-pcie.h"
-#include "bdk-boot-qlm.h"
#include "bdk-boot-status.h"
-#include "bdk-boot-twsi.h"
-#include "bdk-boot-usb.h"
-#include "bdk-image.h"
#include "bdk-watchdog.h"
-#include "bdk-xmodem.h"
+
+void bdk_boot(void);
#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-image.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-image.h
deleted file mode 100644
index 725453c8a0..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-image.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Utility functions for handling binary images
- *
- * <hr>$Revision: 49448 $<hr>
- *
- * @defgroup image Binary image utility functions
- * @{
- */
-
-/**
- * Structure present at the beginning of BDK images
- */
-typedef struct
-{
- uint32_t instruction; /* Raw instruction for skipping header */
- uint32_t length; /* Length of the image, includes header */
- uint64_t magic; /* Magic string "THUNDERX" */
- uint32_t crc32; /* CRC32 of image + header. These bytes are zero when calculating the CRC */
- uint32_t reserved1; /* Zero, reserved for future use */
- char name[64]; /* ASCII Image name. Must always end in zero */
- char version[32]; /* ASCII Version. Must always end in zero */
- uint64_t reserved[17]; /* Zero, reserved for future use */
-} BDK_LITTLE_ENDIAN_STRUCT bdk_image_header_t;
-
-/**
- * Validate image header
- *
- * @param header Header to validate
- *
- * @return 1 if header is valid, zero if invalid
- */
-extern int bdk_image_header_is_valid(const bdk_image_header_t *header);
-
-/**
- * Verify image at the given address is good
- *
- * @param image Pointer to the image
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_image_verify(const void *image);
-
-/**
- * Read a image header from a file
- *
- * @param handle File handel to read from
- * @param header Pointer to header structure to fill
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_image_read_header(FILE *handle, bdk_image_header_t *header);
-
-/**
- * Read a image from a file and boot it, replacing the current running BDK image
- *
- * @param filename File to read the image from
- * @param loc Offset into file for image. This is normally zero for normal files. Device
- * files, such as /dev/mem, will use this to locate the image.
- *
- * @return Negative on failure. On success this function never returns.
- */
-extern int bdk_image_boot(const char *filename, uint64_t loc);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h
index 45f6efb537..0ab773e9c3 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-watchdog.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_WATCHDOG_H__
+#define __CB_BDK_WATCHDOG_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -79,3 +81,4 @@ extern void bdk_watchdog_disable(void);
extern int bdk_watchdog_is_running(void);
/** @} */
+#endif /* !__CB_BDK_WATCHDOG_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-xmodem.h b/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-xmodem.h
deleted file mode 100644
index 3caff397c5..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-boot/bdk-xmodem.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Boot services for XMODEM transfers
- *
- * @addtogroup boot
- * @{
- */
-
-/**
- * Receive a file through Xmodem and write it to an internal file.
- *
- * @param dest_file File to write to
- * @param offset Offset into the file to write
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_xmodem_upload(const char *dest_file, uint64_t offset);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h
index 84488597b4..d50a76dd32 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-config.h
@@ -1,3 +1,6 @@
+#ifndef __CB_BDK_DRAM_CONFIG_H__
+#define __CB_BDK_DRAM_CONFIG_H__
+
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -47,7 +50,11 @@
* @defgroup dram DRAM related functions
* @{
*/
+#if 0
+/* FIXME(dhendrix): this was cavium's #include */
#include "../libdram/libdram.h"
+#endif
+#include <libdram/libdram.h>
/**
* Lookup a DRAM configuration by name and intialize dram with it
@@ -116,3 +123,5 @@ extern uint32_t __bdk_dram_get_col_mask(bdk_node_t node, int lmc);
extern int __bdk_dram_get_num_bank_bits(bdk_node_t node, int lmc);
/** @} */
+
+#endif /* !__CB_BDK_DRAM_CONFIG_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h
index f6be005995..60f07fa0c5 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram-test.h
@@ -1,3 +1,6 @@
+#ifndef __CB_BDK_DRAM_TEST_H__
+#define __CB_BDK_DRAM_TEST_H__
+
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -145,7 +148,8 @@ extern void bdk_dram_test_inject_error(uint64_t address, int bit);
/* Keep the counts per memory channel (LMC) for more detail. */
#define BDK_MAX_MEM_CHANS 4
extern int64_t __bdk_dram_ecc_single_bit_errors[BDK_MAX_MEM_CHANS];
-extern int64_t __bdk_dram_ecc_double_bit_errors[BDK_MAX_MEM_CHANS];
+/* FIXME(dhendrix): redundant declaration in original BDK */
+//extern int64_t __bdk_dram_ecc_double_bit_errors[BDK_MAX_MEM_CHANS];
/* These are internal support functions */
extern void __bdk_dram_flush_to_mem(uint64_t address);
@@ -196,3 +200,5 @@ extern int __bdk_dram_test_fast_scan(uint64_t area, uint64_t max_address, int bu
/** @} */
+
+#endif /* !__CB_BDK_DRAM_TEST_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h
index a4eb32805c..99393154e6 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-dram/bdk-dram.h
@@ -1,3 +1,6 @@
+#ifndef __CB_BDK_DRAM_H__
+#define __CB_BDK_DRAM_H__
+
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -52,3 +55,5 @@
#include "bdk-dram-test.h"
/** @} */
+
+#endif /* !__CB_BDK_DRAM_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access-native.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access-native.h
deleted file mode 100644
index aa9d87bf37..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access-native.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- * Functions for accessing memory and CSRs when we are compiling
- * natively.
- *
- * <hr>$Revision: 38306 $<hr>
-*/
-
-/**
- * Convert a memory pointer (void*) into a hardware compatible
- * memory address (uint64_t). Cavium hardware widgets don't
- * understand logical addresses.
- *
- * @param ptr C style memory pointer
- * @return Hardware physical address
- */
-static inline uint64_t bdk_ptr_to_phys(void *ptr) __attribute__ ((pure, always_inline));
-static inline uint64_t bdk_ptr_to_phys(void *ptr)
-{
- bdk_warn_if(!ptr, "bdk_ptr_to_phys() passed a NULL\n");
- return (long)ptr;
-}
-
-
-/**
- * Convert a hardware physical address (uint64_t) into a
- * memory pointer (void *).
- *
- * @param physical_address
- * Hardware physical address to memory
- * @return Pointer to memory
- */
-static inline void *bdk_phys_to_ptr(uint64_t physical_address) __attribute__ ((pure, always_inline));
-static inline void *bdk_phys_to_ptr(uint64_t physical_address)
-{
- bdk_warn_if(physical_address==0, "bdk_phys_to_ptr() passed a zero address\n");
- return (void*)(long)physical_address;
-}
-
-
-/* We have a full 64bit ABI. Writing to a 64bit address can be done with
- a simple volatile pointer */
-#define BDK_BUILD_WRITE64(TYPE) \
-static inline void bdk_write64_##TYPE(uint64_t addr, TYPE##_t val) __attribute__ ((always_inline)); \
-static inline void bdk_write64_##TYPE(uint64_t addr, TYPE##_t val) \
-{ \
- *(volatile TYPE##_t *)bdk_phys_to_ptr(addr) = val; \
-}
-
-/* We have a full 64bit ABI. Writing to a 64bit address can be done with
- a simple volatile pointer */
-#define BDK_BUILD_READ64(TYPE) \
-static inline TYPE##_t bdk_read64_##TYPE(uint64_t addr) __attribute__ ((always_inline)); \
-static inline TYPE##_t bdk_read64_##TYPE(uint64_t addr) \
-{ \
- return *(volatile TYPE##_t *)bdk_phys_to_ptr(addr); \
-}
-
-/* The following defines 8 functions for writing to a 64bit address. Each
- takes two arguments, the address and the value to write.
- bdk_write64_int64 bdk_write64_uint64
- bdk_write64_int32 bdk_write64_uint32
- bdk_write64_int16 bdk_write64_uint16
- bdk_write64_int8 bdk_write64_uint8 */
-BDK_BUILD_WRITE64(int64)
-BDK_BUILD_WRITE64(int32)
-BDK_BUILD_WRITE64(int16)
-BDK_BUILD_WRITE64(int8)
-BDK_BUILD_WRITE64(uint64)
-BDK_BUILD_WRITE64(uint32)
-BDK_BUILD_WRITE64(uint16)
-BDK_BUILD_WRITE64(uint8)
-
-/* The following defines 8 functions for reading from a 64bit address. Each
- takes the address as the only argument
- bdk_read64_int64 bdk_read64_uint64
- bdk_read64_int32 bdk_read64_uint32
- bdk_read64_int16 bdk_read64_uint16
- bdk_read64_int8 bdk_read64_uint8 */
-BDK_BUILD_READ64(int64)
-BDK_BUILD_READ64(int32)
-BDK_BUILD_READ64(int16)
-BDK_BUILD_READ64(int8)
-BDK_BUILD_READ64(uint64)
-BDK_BUILD_READ64(uint32)
-BDK_BUILD_READ64(uint16)
-BDK_BUILD_READ64(uint8)
-
-
-/**
- * Returns the number of bits set in the provided value.
- * Simple wrapper for POP instruction.
- *
- * @param val 32 bit value to count set bits in
- *
- * @return Number of bits set
- */
-static inline uint32_t bdk_pop(uint32_t val)
-{
- return __builtin_popcount(val);
-}
-
-
-/**
- * Returns the number of bits set in the provided value.
- * Simple wrapper for DPOP instruction.
- *
- * @param val 64 bit value to count set bits in
- *
- * @return Number of bits set
- */
-static inline int bdk_dpop(uint64_t val)
-{
- return __builtin_popcountl(val);
-}
-
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h
index d50ecd7e5c..30078d3f8e 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-access.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_ACCESS_H__
+#define __CB_BDK_ACCESS_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -47,87 +49,14 @@
* @{
*/
-#define BDK_FUNCTION static inline
-
-/**
- * Convert a memory pointer (void*) into a hardware compatible
- * memory address (uint64_t). Cavium hardware widgets don't
- * understand logical addresses.
- *
- * @param ptr C style memory pointer
- * @return Hardware physical address
- */
-BDK_FUNCTION uint64_t bdk_ptr_to_phys(void *ptr);
-
-/**
- * Convert a hardware physical address (uint64_t) into a
- * memory pointer (void *).
- *
- * @param physical_address
- * Hardware physical address to memory
- * @return Pointer to memory
- */
-BDK_FUNCTION void *bdk_phys_to_ptr(uint64_t physical_address);
-
-BDK_FUNCTION void bdk_write64_int64(uint64_t address, int64_t value);
-BDK_FUNCTION void bdk_write64_uint64(uint64_t address, uint64_t value);
-BDK_FUNCTION void bdk_write64_int32(uint64_t address, int32_t value);
-BDK_FUNCTION void bdk_write64_uint32(uint64_t address, uint32_t value);
-BDK_FUNCTION void bdk_write64_int16(uint64_t address, int16_t value);
-BDK_FUNCTION void bdk_write64_uint16(uint64_t address, uint16_t value);
-BDK_FUNCTION void bdk_write64_int8(uint64_t address, int8_t value);
-BDK_FUNCTION void bdk_write64_uint8(uint64_t address, uint8_t value);
-
-BDK_FUNCTION int64_t bdk_read64_int64(uint64_t address);
-BDK_FUNCTION uint64_t bdk_read64_uint64(uint64_t address);
-BDK_FUNCTION int32_t bdk_read64_int32(uint64_t address);
-BDK_FUNCTION uint32_t bdk_read64_uint32(uint64_t address);
-BDK_FUNCTION int16_t bdk_read64_int16(uint64_t address);
-BDK_FUNCTION uint16_t bdk_read64_uint16(uint64_t address);
-BDK_FUNCTION int8_t bdk_read64_int8(uint64_t address);
-BDK_FUNCTION uint8_t bdk_read64_uint8(uint64_t address);
-
-/**
- * Returns the number of bits set in the provided value.
- * Simple wrapper for POP instruction.
- *
- * @param val 32 bit value to count set bits in
- *
- * @return Number of bits set
- */
-BDK_FUNCTION uint32_t bdk_pop(uint32_t val);
-
-/**
- * Returns the number of bits set in the provided value.
- * Simple wrapper for DPOP instruction.
- *
- * @param val 64 bit value to count set bits in
- *
- * @return Number of bits set
- */
-BDK_FUNCTION int bdk_dpop(uint64_t val);
-
-/**
- * Wait for the specified number of core clock cycles
- *
- * @param cycles
- */
-extern void bdk_wait(uint64_t cycles);
-
-/**
- * Wait for the specified number of micro seconds
- *
- * @param usec micro seconds to wait
- */
-extern void bdk_wait_usec(uint64_t usec);
+#include <bdk-coreboot.h>
/**
* Perform a soft reset of the chip
*
* @return
*/
-extern void bdk_reset_chip(bdk_node_t node);
-
-#undef BDK_FUNCTION
+void bdk_reset_chip(bdk_node_t node);
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h
index 7f521a67e2..1b8f7847fd 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-atomic.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_ATOMIC_H__
+#define __CB_BDK_ATOMIC_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -197,26 +199,12 @@ static inline int bdk_atomic_compare_and_store32_nosync(uint32_t *ptr, uint32_t
/* CN88XX pass 1.x has errata AP-22500: GlobalSync request during a multi-cycle ATOMIC stalls forever
Don't use compare and swap on these chips */
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
- {
- asm volatile ("1: ldxr %w[v], [%[b]] \n"
- " cmp %w[v], %w[o] \n"
- " b.ne 2f \n"
- " stxr %w[v], %w[n], [%[b]]\n" /* Returns zero on success */
- " cbnz %w[v], 1b \n"
- " mov %w[v], %w[o] \n"
- "2: \n"
- : [mem] "+m" (*ptr), [v] "=&r" (val)
- : [b] "r" (ptr), [n] "r" (new_val), [o] "r" (old_val)
- : );
- }
- else
- {
+
asm volatile ("cas %w[o], %w[n], [%[b]]"
: [mem] "+m" (*ptr), [o] "+r" (val)
: [b] "r" (ptr), [n] "r" (new_val)
: );
- }
+
return old_val == val;
}
@@ -239,26 +227,12 @@ static inline int bdk_atomic_compare_and_store32(uint32_t *ptr, uint32_t old_val
/* CN88XX pass 1.x has errata AP-22500: GlobalSync request during a multi-cycle ATOMIC stalls forever
Don't use compare and swap on these chips */
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
- {
- asm volatile ("1: ldaxr %w[v], [%[b]] \n"
- " cmp %w[v], %w[o] \n"
- " b.ne 2f \n"
- " stlxr %w[v], %w[n], [%[b]]\n" /* Returns zero on success */
- " cbnz %w[v], 1b \n"
- " mov %w[v], %w[o] \n"
- "2: \n"
- : [mem] "+m" (*ptr), [v] "=&r" (val)
- : [b] "r" (ptr), [n] "r" (new_val), [o] "r" (old_val)
- : );
- }
- else
- {
+
asm volatile ("casal %w[o], %w[n], [%[b]]"
: [mem] "+m" (*ptr), [o] "+r" (val)
: [b] "r" (ptr), [n] "r" (new_val)
: );
- }
+
return old_val == val;
}
@@ -281,26 +255,12 @@ static inline int bdk_atomic_compare_and_store64_nosync(uint64_t *ptr, uint64_t
/* CN88XX pass 1.x has errata AP-22500: GlobalSync request during a multi-cycle ATOMIC stalls forever
Don't use compare and swap on these chips */
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
- {
- asm volatile ("1: ldxr %x[v], [%[b]] \n"
- " cmp %x[v], %x[o] \n"
- " b.ne 2f \n"
- " stxr %x[v], %x[n], [%[b]]\n" /* Returns zero on success */
- " cbnz %x[v], 1b \n"
- " mov %x[v], %x[o] \n"
- "2: \n"
- : [mem] "+m" (*ptr), [v] "=&r" (val)
- : [b] "r" (ptr), [n] "r" (new_val), [o] "r" (old_val)
- : );
- }
- else
- {
+
asm volatile ("cas %x[o], %x[n], [%[b]]"
: [mem] "+m" (*ptr), [o] "+r" (val)
: [b] "r" (ptr), [n] "r" (new_val)
: );
- }
+
return old_val == val;
}
@@ -323,26 +283,11 @@ static inline int bdk_atomic_compare_and_store64(uint64_t *ptr, uint64_t old_val
/* CN88XX pass 1.x has errata AP-22500: GlobalSync request during a multi-cycle ATOMIC stalls forever
Don't use compare and swap on these chips */
- if (CAVIUM_IS_MODEL(CAVIUM_CN88XX_PASS1_X))
- {
- asm volatile ("1: ldaxr %x[v], [%[b]] \n"
- " cmp %x[v], %x[o] \n"
- " b.ne 2f \n"
- " stlxr %x[v], %x[n], [%[b]]\n" /* Returns zero on success */
- " cbnz %x[v], 1b \n"
- " mov %x[v], %x[o] \n"
- "2: \n"
- : [mem] "+m" (*ptr), [v] "=&r" (val)
- : [b] "r" (ptr), [n] "r" (new_val), [o] "r" (old_val)
- : );
- }
- else
- {
+
asm volatile ("casal %x[o], %x[n], [%[b]]"
: [mem] "+m" (*ptr), [o] "+r" (val)
: [b] "r" (ptr), [n] "r" (new_val)
: );
- }
return old_val == val;
}
@@ -539,3 +484,4 @@ static inline uint32_t bdk_atomic_fetch_and_bclr32_nosync(uint32_t *ptr, uint32_
}
/** @} */
+#endif /* !__CB_BDK_ATOMIC_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h
index d0d117c590..47345bd5a1 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-clock.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_CLOCK_H__
+#define __CB_BDK_CLOCK_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -37,6 +39,13 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+/* FIXME(dhendrix): added */
+#include <libbdk-arch/bdk-asm.h>
+#include <libbdk-arch/bdk-numa.h>
+/* FIXME(prudolph): added */
+
+#include <soc/clock.h>
+
/**
* @file
*
@@ -48,7 +57,7 @@
* @{
*/
-#define BDK_GTI_RATE 100000000ull
+#define BDK_GTI_RATE 1000000ull
/**
* Enumeration of different Clocks.
@@ -60,10 +69,30 @@ typedef enum{
BDK_CLOCK_SCLK, /**< Clock used by IO blocks. */
} bdk_clock_t;
+static inline uint64_t clock_get_rate_slow(bdk_clock_t clock)
+{
+ const uint64_t REF_CLOCK = 50000000;
+
+ switch (clock) {
+ case BDK_CLOCK_TIME:
+ return BDK_GTI_RATE; /* Programed as part of setup */
+ case BDK_CLOCK_MAIN_REF:
+ return REF_CLOCK;
+ case BDK_CLOCK_RCLK:
+ return thunderx_get_core_clock();
+ case BDK_CLOCK_SCLK:
+ return thunderx_get_io_clock();
+ }
+ return 0;
+}
+
/**
* Called in __bdk_init to setup the global timer
*/
-extern void bdk_clock_setup();
+extern void bdk_clock_setup(bdk_node_t node); /* FIXME(dhendrix): added arg */
+
+extern uint64_t __bdk_clock_get_count_slow(bdk_clock_t clock);
+extern uint64_t __bdk_clock_get_rate_slow(bdk_node_t node, bdk_clock_t clock);
/**
* Get cycle count based on the clock type.
@@ -74,7 +103,6 @@ extern void bdk_clock_setup();
static inline uint64_t bdk_clock_get_count(bdk_clock_t clock) __attribute__ ((always_inline));
static inline uint64_t bdk_clock_get_count(bdk_clock_t clock)
{
- extern uint64_t __bdk_clock_get_count_slow(bdk_clock_t clock);
if (clock == BDK_CLOCK_TIME)
{
uint64_t clk;
@@ -95,11 +123,11 @@ static inline uint64_t bdk_clock_get_count(bdk_clock_t clock)
static inline uint64_t bdk_clock_get_rate(bdk_node_t node, bdk_clock_t clock) __attribute__ ((always_inline, pure));
static inline uint64_t bdk_clock_get_rate(bdk_node_t node, bdk_clock_t clock)
{
- extern uint64_t __bdk_clock_get_rate_slow(bdk_node_t node, bdk_clock_t clock) __attribute__ ((pure));
if (clock == BDK_CLOCK_TIME)
return BDK_GTI_RATE; /* Programed as part of setup */
else
- return __bdk_clock_get_rate_slow(node, clock);
+ return clock_get_rate_slow(clock);
}
/** @} */
+#endif /* !__CB_BDK_CLOCK_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h
index 6848fd687c..9aa1a47251 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-config.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_CONFIG_H__
+#define __CB_BDK_CONFIG_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -37,6 +39,9 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <inttypes.h>
+#include <bdk-devicetree.h>
+
/**
* @file
*
@@ -90,6 +95,7 @@ typedef enum
BDK_CONFIG_PCIE_PRESET_REQUEST_VECTOR, /* Parameters: Node, Port */
BDK_CONFIG_PCIE_WIDTH, /* Parameters: Node, Port */
BDK_CONFIG_PCIE_PHYSICAL_SLOT, /* Parameters: Node, Port */
+ BDK_CONFIG_PCIE_SKIP_LINK_TRAIN, /* Parameters: Node, Port */
BDK_CONFIG_PCIE_FLASH, /* Parameters: Node, Port */
BDK_CONFIG_CCPI_LANE_REVERSE, /* No parameters */
BDK_CONFIG_CHIP_SKU, /* Parameter: Node */
@@ -201,21 +207,6 @@ typedef enum
} bdk_config_t;
/**
- * Internal BDK function to initialize the config system. Must be called before
- * any configuration functions are called
- */
-extern void __bdk_config_init(void);
-
-/**
- * Return a help string for the given configuration parameter
- *
- * @param cfg_item Configuration parameter to get help for
- *
- * @return Help string for the user
- */
-extern const char *bdk_config_get_help(bdk_config_t cfg_item);
-
-/**
* Get an integer configuration item
*
* @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
@@ -223,49 +214,7 @@ extern const char *bdk_config_get_help(bdk_config_t cfg_item);
*
* @return The value of the configuration item, or def_value if the item is not set
*/
-extern int64_t bdk_config_get_int(bdk_config_t cfg_item, ...);
-
-/**
- * Get a string configuration item
- *
- * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- *
- * @return The value of the configuration item, or def_value if the item is not set
- */
-extern const char *bdk_config_get_str(bdk_config_t cfg_item, ...);
-
-/**
- * Get a binary blob
- *
- * @param blob_size Integer to receive the size of the blob
- * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- *
- * @return The value of the configuration item, or def_value if the item is not set
- */
-extern const void *bdk_config_get_blob(int *blob_size, bdk_config_t cfg_item, ...);
-
-/**
- * Set an integer configuration item. Note this only sets the item in memory,
- * persistent storage is not updated. The optional parameters for the setting are
- * not supplied, meaning this function only changes the global default.
- *
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-extern void bdk_config_set_int_no_param(int64_t value, bdk_config_t cfg_item);
-
-/**
- * Set an integer configuration item. Note this only sets the item in memory,
- * persistent storage is not updated.
- *
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-extern void bdk_config_set_int(int64_t value, bdk_config_t cfg_item, ...);
+int64_t bdk_config_get_int(bdk_config_t cfg_item, ...);
/**
* Set an integer configuration item. Note this only sets the item in memory,
@@ -275,63 +224,17 @@ extern void bdk_config_set_int(int64_t value, bdk_config_t cfg_item, ...);
* @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
* parameters are listed following cfg_item.
*/
-extern void bdk_config_set_str(const char *value, bdk_config_t cfg_item, ...);
+void bdk_config_set_int(int64_t value, bdk_config_t cfg_item, ...);
/**
- * Set a blob configuration item. Note this only sets the
- * item in memory, persistent storage is not updated. The optional
- * parameters for the setting are not supplied, meaning this function
- * only changes the global default.
- *
- * @param size Size of the item in bytes. A size of zero removes the device tree field
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-extern void bdk_config_set_blob_no_param(int size, const void *value, bdk_config_t cfg_item);
-
-/**
- * Set a blob configuration item. Note this only sets the
- * item in memory, persistent storage is not updated.
- *
- * @param size Size of the item in bytes. A size of zero removes the device tree field
- * @param value Configuration item value
- * @param cfg_item Config item to set. If the item takes parameters (see bdk_config_t), then the
- * parameters are listed following cfg_item.
- */
-extern void bdk_config_set_blob(int size, const void *value, bdk_config_t cfg_item, ...);
-
-/**
- * Display the active configuration
- */
-extern void bdk_config_show(void);
-
-/**
- * Display a list of all posssible config items with help text
- */
-extern void bdk_config_help(void);
-
-/**
- * Save the current configuration to flash
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_config_save(void);
-
-/**
- * Takes the current live device tree and exports it to a memory address suitable
- * for passing to the next binary in register X1.
+ * Get a string configuration item
*
- * @return Physical address of the device tree, or 0 on failure
- */
-extern uint64_t __bdk_config_export_to_mem(void);
-
-/**
- * Return a pointer to the device tree used for configuration
+ * @param cfg_item Config item to get. If the item takes parameters (see bdk_config_t), then the
+ * parameters are listed following cfg_item.
*
- * @return FDT or NULL on failure
+ * @return The value of the configuration item, or def_value if the item is not set
*/
-extern void* bdk_config_get_fdt(void);
+const char *bdk_config_get_str(bdk_config_t cfg_item, ...);
/**
* Set the device tree used for configuration
@@ -341,17 +244,26 @@ extern void* bdk_config_get_fdt(void);
*
* @return Zero on success, negative on failure
*/
-extern int bdk_config_set_fdt(void *fdt);
+int bdk_config_set_fdt(const struct bdk_devicetree_key_value *fdt);
-/**
- * Write all default values to a FDT. Missing config items get defaults in the
- * BDK config, this function adds those defaults to the FDT. This way other code
- * gets the default value without needing special code.
- *
- * @param fdt FDT structure to fill defaults into
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_config_expand_defaults(void *fdt);
+typedef enum
+{
+ BDK_CONFIG_TYPE_INT,
+ BDK_CONFIG_TYPE_STR,
+ BDK_CONFIG_TYPE_STR_LIST,
+ BDK_CONFIG_TYPE_BINARY,
+} bdk_config_type_t;
+
+typedef struct
+{
+ const char *format; /* Printf style format string to create the item name */
+ const bdk_config_type_t ctype;/* Type of this item */
+ int64_t default_value; /* Default value when no present. String defaults are cast to pointers from this */
+ const int64_t min_value;/* Minimum valid value for INT parameters. Unused for Strings */
+ const int64_t max_value;/* Maximum valid value for INT parameters. Unused for Strings */
+} bdk_config_info_t;
+
+extern bdk_config_info_t config_info[];
/** @} */
+#endif /* !__CB_BDK_CONFIG_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-crc.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-crc.h
deleted file mode 100644
index 05fc59a378..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-crc.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Module to support CRC.
- *
- * <hr>$Revision: 49448 $<hr>
- *
- * @addtogroup hal
- * @{
- */
-
-uint32_t bdk_crc32(const void *ptr, int len, uint32_t iv);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h
index 3f90d11dc2..f6aa09601d 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-ecam.h
@@ -37,6 +37,8 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <libbdk-hal/device/bdk-device.h>
+
/**
* @file
*
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-error-report.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-error-report.h
deleted file mode 100644
index 520780fa0a..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-error-report.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to hardware error reporting.
- *
- * <hr>$Revision: 49448 $<hr>
- *
- * @addtogroup hal
- * @{
- */
-
-/**
- * Call this function to check if any error interrupts are
- * set in the chip.
- */
-extern void (*bdk_error_check)(bdk_node_t node) BDK_WEAK;
-
-/**
- * Call this function to setup error enables.
- */
-extern void bdk_error_enable(bdk_node_t node) BDK_WEAK;
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-fpa.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-fpa.h
deleted file mode 100644
index 8094cd0fa0..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-fpa.h
+++ /dev/null
@@ -1,162 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the hardware Free Pool Allocator.
- *
- * @addtogroup hal
- * @{
- */
-#define BDK_FPA_NUM_AURAS 128 /* Must not be bigger than FPA_CONST[auras] or NPA_AF_LFX_AURAS_CFG[loc_aura_size] */
-
-/**
- * This enum represents the FPA hardware pools in use by the BDK
- */
-typedef enum
-{
- BDK_FPA_PACKET_POOL, /* Stores PKI packet buffers */
- BDK_FPA_SSO_POOL, /* Used for internal storage in the SSO, CN83XX and CN9XXX */
- BDK_FPA_PKO_POOL, /* Used for queue storage in the CN83XX PKO and CN9XXX SQB */
- BDK_FPA_NUM_POOLS = 16 /* Can be 16, or 32 for CN83XX. Must not be bigger than FPA_CONST[pools] */
-} bdk_fpa_pool_t;
-
-/**
- * Structure representing the global state of the FPA
- */
-typedef struct
-{
- uint16_t buffer_size_pool[BDK_FPA_NUM_POOLS];
- uint16_t buffer_size_aura[BDK_FPA_NUM_AURAS];
- int next_free_aura;
- int next_free_lf; /* Used on CN9XXX for RVU PF allocation */
- void *npa_auras_ptr; /* Pointer to Aura Context Table: BDK_FPA_NUM_AURAS * (Aura HW context) */
- void *npa_pools_ptr; /* Pointer to Pool Context Table: BDK_FPA_NUM_POOLS * (Pool HW context) */
-} __bdk_fpa_state_t;
-
-extern __bdk_fpa_state_t __bdk_fpa_node_state[BDK_NUMA_MAX_NODES];
-
-/**
- * Get a new block from an aura
- *
- * @param node Node to use in a Numa setup. See bdk-numa.h
- * @param aura Aura to get the block from (0-BDK_FPA_NUM_AURAS)
- *
- * @return Pointer to the block or NULL on failure
- */
-extern void* (*bdk_fpa_alloc)(bdk_node_t node, int aura);
-
-/**
- * Free a block allocated with bdk_fpa_alloc(). Does NOT provide memory ordering
- * for core stores. Software must insure all pending writes are flushed before
- * calling this function.
- *
- * @param node Node to use in a Numa setup. See bdk-numa.h
- * @param address Physical address to free to the FPA aura
- * @param aura Aura number to free to (0-BDK_FPA_NUM_AURAS)
- * @param num_cache_lines
- * Cache lines to invalidate. Use this if the data in the buffer is no longer
- * requires cache coherency. Normally best to set this to zero.
- */
-extern void (*__bdk_fpa_raw_free)(bdk_node_t node, uint64_t address, int aura, int num_cache_lines);
-
-/**
- * Fill a pool with buffers
- *
- * @param node Node to use in a Numa setup. See bdk-numa.h
- * @param pool Pool to initialize (0 <= pool < BDK_FPA_NUM_POOLS)
- * @param num_blocks Number of blocks
- *
- * @return Zero on Success, negative on failure
- */
-extern int (*bdk_fpa_fill_pool)(bdk_node_t node, bdk_fpa_pool_t pool, int num_blocks);
-
-/**
- * Initialize an Aura for a specific pool
- *
- * @param node Node to use in a Numa setup. See bdk-numa.h
- * @param aura Aura to initialize, or -1 to dynamically allocate a free aura
- * @param pool Pool this aura is for (0 <= pool < BDK_FPA_NUM_POOLS)
- * @param num_blocks Number of buffers to allow this aura to contain. This may be different
- * from the pool
- *
- * @return Aura number or negative on failure
- */
-extern int (*bdk_fpa_init_aura)(bdk_node_t node, int aura, bdk_fpa_pool_t pool, int num_blocks);
-
-/**
- * Free a block allocated with bdk_fpa_alloc(). Provides memory ordering
- * for core stores.
- *
- * @param node Node to use in a Numa setup. See bdk-numa.h
- * @param ptr Pointer to the block to free
- * @param aura Aura number to free to (0-BDK_FPA_NUM_AURAS)
- * @param num_cache_lines
- * Cache lines to invalidate. Use this if the data in the buffer is no longer
- * requires cache coherency. Normally best to set this to zero.
- */
-static inline void bdk_fpa_free(bdk_node_t node, void *ptr, int aura, int num_cache_lines)
-{
- BDK_WMB;
- __bdk_fpa_raw_free(node, bdk_ptr_to_phys(ptr), aura, num_cache_lines);
-}
-
-/**
- * Get the size of blocks controlled by the aura
- *
- * @param node Node to use in a Numa setup. See bdk-numa.h
- * @param aura Aura number to access (0-BDK_FPA_NUM_AURAS)
- *
- * @return Size of the block in bytes
- */
-static inline int bdk_fpa_get_block_size(bdk_node_t node, int aura)
-{
- __bdk_fpa_state_t *fpa_state = &__bdk_fpa_node_state[node];
- return fpa_state->buffer_size_aura[aura];
-}
-
-/**
- * Global FPA initialization
- *
- * @return Zero on success, negative on failure
- */
-int bdk_fpa_init(bdk_node_t node);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h
index 8be3a4c1ad..21a57cde3d 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-gpio.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_GPIO_H__
+#define __CB_BDK_GPIO_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -109,3 +111,4 @@ extern void bdk_gpio_select_pin(bdk_node_t node, int gpio, int pin);
extern int bdk_gpio_get_num(void);
/** @} */
+#endif /* !__CB_BDK_GPIO_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-hal.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-hal.h
deleted file mode 100644
index 458ffede7c..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-hal.h
+++ /dev/null
@@ -1,98 +0,0 @@
-#ifndef __BDK_HAL_H__
-#define __BDK_HAL_H__
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Master include file for hardware support. Use bdk.h instead
- * of including this file directly.
- *
- * <hr>$Revision: 49448 $<hr>
- *
- * @defgroup hal Hardware abstraction layer
- */
-
-/* Global define to control if the BDK configures units to send
- don't write back requests for freed buffers. Set to 1 to enable
- DWB, 0 to disable them. As the BDK normally fits inside L2, sending
- DWB just causes more L2 operations without benefit */
-#define BDK_USE_DWB 0
-
-#include "bdk-access.h"
-#include "bdk-utils.h"
-#include "bdk-config.h"
-#include "bdk-atomic.h"
-#include "bdk-spinlock.h"
-#include "bdk-rvu.h"
-#include "bdk-clock.h"
-#include "bdk-crc.h"
-#include "bdk-error-report.h"
-#include "bdk-gpio.h"
-#include "device/bdk-device.h"
-#include "if/bdk-if.h"
-#include "usb/bdk-usb-xhci-intf.h"
-#include "bdk-ecam.h"
-#include "bdk-fpa.h"
-#include "bdk-pbus-flash.h"
-#include "bdk-pki.h"
-#include "bdk-pko.h"
-#include "bdk-power-burn.h"
-#include "bdk-sso.h"
-#include "bdk-nic.h"
-#include "bdk-nix.h"
-#include "bdk-key.h"
-#include "bdk-l2c.h"
-#include "bdk-mdio.h"
-#include "bdk-mpi.h"
-#include "bdk-mmc.h"
-#include "bdk-pcie.h"
-#include "bdk-pcie-flash.h"
-#include "bdk-qlm.h"
-#include "qlm/bdk-qlm-errata-cn8xxx.h"
-#include "bdk-rng.h"
-#include "bdk-sata.h"
-#include "bdk-twsi.h"
-#include "bdk-usb.h"
-#include "bdk-access-native.h"
-#include "bdk-tns.h"
-#include "bdk-vrm.h"
-#include "aq_api/bdk-aqr-support.h"
-#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-key.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-key.h
deleted file mode 100644
index c16bfd7559..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-key.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the on chip key memory. Key memory is
- * 8k on chip that is inaccessible from off chip. It can
- * also be cleared using an external hardware pin.
- *
- * <hr>$Revision: 49448 $<hr>
- *
- * @addtogroup hal
- * @{
- */
-
-#define BDK_KEY_MEM_SIZE 8192 /* Size in bytes */
-
-/**
- * Read from KEY memory
- *
- * @param node Which node to use
- * @param address Address (byte) in key memory to read
- * 0 <= address < BDK_KEY_MEM_SIZE
- * @return Value from key memory
- */
-extern uint64_t bdk_key_read(bdk_node_t node, uint64_t address);
-
-/**
- * Write to KEY memory
- *
- * @param node Which node to use
- * @param address Address (byte) in key memory to write
- * 0 <= address < BDK_KEY_MEM_SIZE
- * @param value Value to write to key memory
- */
-extern void bdk_key_write(bdk_node_t node, uint64_t address, uint64_t value);
-
-/**
- * Allocate an area in key memory for storing data. Return a pointer to the
- * memory on success.
- *
- * @param node Node to allocate on
- * @param size_bytes Number of bytes to allocate
- *
- * @return Pointer to key memory, or NULL on failure
- */
-extern void* bdk_key_alloc(bdk_node_t node, int size_bytes);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h
index cf14357f83..bcb82683be 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-l2c.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_L2C_H__
+#define __CB_BDK_L2C_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -37,6 +39,9 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+/* FIXME(dhendricks): added */
+#include <libbdk-arch/bdk-numa.h>
+
/**
* @file
*
@@ -177,3 +182,4 @@ int bdk_l2c_get_num_assoc(bdk_node_t node);
int bdk_l2c_is_locked(bdk_node_t node);
/** @} */
+#endif /* !__CB_BDK_L2C_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h
index 889f8d5d56..0540fa499a 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mdio.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_HAL_MDIO_H__
+#define __CB_BDK_HAL_MDIO_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -473,4 +475,7 @@ extern int bdk_mdio_45_read(bdk_node_t node, int bus_id, int phy_id, int device,
extern int bdk_mdio_45_write(bdk_node_t node, int bus_id, int phy_id, int device, int location,
int val);
+int bdk_mdio_init(bdk_node_t node);
+
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mmc.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mmc.h
deleted file mode 100644
index 93f3286379..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mmc.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the MMC, eMMC, or SD
- *
- * <hr>$Revision: 49448 $<hr>
- *
- * @addtogroup hal
- * @{
- */
-
-/**
- * Initialize a MMC for read/write
- *
- * @author creese (10/14/2013)
- * @param chip_sel Chip select to use
- *
- * @return Size of the SD card, zero on failure
- */
-extern int64_t bdk_mmc_initialize(bdk_node_t node, int chip_sel);
-
-/**
- * Read blocks from a MMC card
- *
- * @author creese (10/14/2013)
- * @param node Node to access
- * @param chip_sel Chip select to use
- * @param address Offset into the card in bytes. Must be a multiple of 512
- * @param buffer Buffer to read into
- * @param length Length to read in bytes. Must be a multiple of 512
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_mmc_read(bdk_node_t node, int chip_sel, uint64_t address, void *buffer, int length);
-
-/**
- * Write blocks to a MMC card
- *
- * @author creese (10/14/2013)
- * @param node Node to access
- * @param chip_sel Chip select to use
- * @param address Offset into the card in bytes. Must be a multiple of 512
- * @param buffer Buffer to write
- * @param length Length to write in bytes. Must be a multiple of 512
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_mmc_write(bdk_node_t node, int chip_sel, uint64_t address, const void *buffer, int length);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mpi.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mpi.h
deleted file mode 100644
index 46da75b019..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-mpi.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the SPI / MPI bus
- *
- * <hr>$Revision: 49448 $<hr>
- *
- * @addtogroup hal
- * @{
- */
-
-typedef enum
-{
- BDK_MPI_FLAGS_ENABLE_CS0 = 1<<0, /**< Chip select 0 will be needed */
- BDK_MPI_FLAGS_ENABLE_CS1 = 1<<1, /**< Chip select 1 will be needed */
- BDK_MPI_FLAGS_ENABLE_CS2 = 1<<2, /**< Chip select 2 will be needed */
- BDK_MPI_FLAGS_ENABLE_CS3 = 1<<3, /**< Chip select 3 will be needed */
- BDK_MPI_FLAGS_CS_ACTIVE_HI = 1<<4, /**< Chip select is active high, else active low */
- BDK_MPI_FLAGS_ONE_WIRE = 1<<5, /**< Input and output are multiplexed over SPI_DO */
- BDK_MPI_FLAGS_IDLE_CLOCKS = 1<<7, /**< Continue to clock between commands */
- BDK_MPI_FLAGS_IDLE_LOW = 1<<8, /**< Clear the clock is idle high, Set the clock is idle low */
- BDK_MPI_FLAGS_LSB_FIRST = 1<<9, /**< Set to shift the LSB first, otherwise MSB will shift first */
-} bdk_mpi_flags_t;
-
-/**
- * Initialize MPI/SPI for use. The different configuration
- * options are encoded as bitmask inside the flags parameter.
- *
- * @param node Numa node to use
- * @param clock_rate_hz
- * Clock rate in Hz (1-16M)
- * @param flags Setup flags ORed together
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_mpi_initialize(bdk_node_t node, int clock_rate_hz, bdk_mpi_flags_t flags);
-
-/**
- * Perform a SPI/MPI transfer. The transfer can contain tx_count
- * bytes that are transferred out, followed by rx_count bytes
- * that are read in. Both tx_count and rx_count may be zero if
- * no transfer is needed. Transmit data is sent most significant
- * byte first, unless BDK_MPI_FLAGS_LSB_FIRST is set. Receive data
- * is in the return value with the last byte in the least
- * signnificant byte.
- *
- * @param node Numa node to use
- * @param chip_select
- * Which chip select to enable during the transfer
- * @param leave_cs_enabled
- * Leave the chip select assert after the transaction. Normally can
- * be zero. Set to non zero if you want to perform repeated
- * transactions.
- * @param tx_count Number of bytes to transfer before startng the rx/shift data.
- * Can be zero.
- * @param tx_data Data to transmit. The low order bytes are used for the data. Order
- * of shift out is controlled by BDK_MPI_FLAGS_LSB_FIRST
- * @param rx_count Number of bytes to read. These bytes will be in the return value
- * least significant bytes
- *
- * @return Read data
- */
-extern uint64_t bdk_mpi_transfer(bdk_node_t node, int chip_select,
- int leave_cs_enabled, int tx_count, uint64_t tx_data, int rx_count);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nix.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nix.h
deleted file mode 100644
index 118ceaebbb..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-nix.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2016 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the NIX.
- *
- * @addtogroup hal
- * @{
- */
-
-typedef enum
-{
- BDK_NIX_TYPE_CGX,
- BDK_NIX_TYPE_LBK,
-} bdk_nix_type_t;
-
-/**
- * Configure NIC for a specific port. This is called for each
- * port on every interface that connects to NIC.
- *
- * @param handle Handle for port to config
- * @param ntype Type of LMAC this NIC connects to
- * @param lmac_credits
- * Size of the LMAC buffer in bytes. Used to configure the number of credits to
- * setup between the NIC and LMAC
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_nix_port_init(bdk_if_handle_t handle, bdk_nix_type_t ntype, int lmac_credits);
-
-/**
- * Send a packet
- *
- * @param handle Handle of port to send on
- * @param packet Packet to send
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_nix_transmit(bdk_if_handle_t handle, const bdk_if_packet_t *packet);
-
-/**
- * Get the current TX queue depth. Note that this operation may be slow
- * and adversly affect packet IO performance.
- *
- * @param handle Port to check
- *
- * @return Depth of the queue in packets
- */
-extern int bdk_nix_get_queue_depth(bdk_if_handle_t handle);
-
-/**
- * Query NIC and fill in the transmit stats for the supplied
- * interface handle.
- *
- * @param handle Port handle
- */
-extern void bdk_nix_fill_tx_stats(bdk_if_handle_t handle);
-
-/**
- * Query NIC and fill in the receive stats for the supplied
- * interface handle.
- *
- * @param handle Port handle
- */
-extern void bdk_nix_fill_rx_stats(bdk_if_handle_t handle);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pbus-flash.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pbus-flash.h
deleted file mode 100644
index a5cd2d592d..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pbus-flash.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2016-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * This file provides bootbus flash operations
- *
- * <hr>$Revision: 49448 $<hr>
- *
- * @addtogroup hal
- * @{
- */
-
-typedef struct
-{
- int start_offset;
- int block_size;
- int num_blocks;
-} bdk_pbus_flash_region_t;
-
-/**
- * Initialize the flash access library
- */
-void bdk_pbus_flash_initialize(bdk_node_t node);
-
-/**
- * Return a pointer to the flash chip
- *
- * @param chip_id Chip ID to return
- * @return Zero if the chip doesn't exist
- */
-uint64_t bdk_pbus_flash_get_base(int chip_id);
-
-/**
- * Return the number of erasable regions on the chip
- *
- * @param chip_id Chip to return info for
- * @return Number of regions
- */
-int bdk_pbus_flash_get_num_regions(int chip_id);
-
-/**
- * Return information about a flash chips region
- *
- * @param chip_id Chip to get info for
- * @param region Region to get info for
- * @return Region information
- */
-const bdk_pbus_flash_region_t *bdk_pbus_flash_get_region_info(int chip_id, int region);
-
-/**
- * Erase a block on the flash chip
- *
- * @param chip_id Chip to erase a block on
- * @param region Region to erase a block in
- * @param block Block number to erase
- * @return Zero on success. Negative on failure
- */
-int bdk_pbus_flash_erase_block(int chip_id, int region, int block);
-
-/**
- * Write data to flash. The block must have already been erased. You can call
- * this multiple times on the same block to piecemeal write it.
- *
- * @param chip_id Which flash to write
- * @param offset Offset into device to start write
- * @param data Data to write
- * @param len Length of the data
- *
- * @return Zero on success. Negative on failure
- */
-int bdk_pbus_flash_write(int chip_id, int offset, const void *data, int len);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie-flash.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie-flash.h
deleted file mode 100644
index 6605e419bb..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie-flash.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to programming the PCIe SPI flash used for config overrides
- *
- * @addtogroup hal
- * @{
- */
-
-#define BDK_PCIE_FLASH_PREAMBLE 0x9da1
-#define BDK_PCIE_FLASH_END 0x6a5d
-#define BDK_PCIE_FLASH_MAX_OFFSET 256
-
-/**
- * Determine if access to the PCIe SPI flash is available
- *
- * @param node Numa node request is for
- * @param pcie_port PCIe port to access
- *
- * @return One if available, zero if not
- */
-extern int bdk_pcie_flash_is_available(bdk_node_t node, int pcie_port);
-
-/**
- * Read the specified offset in the PCIe SPI flash and returns its
- * value. In the case the EEPROM isn't there or can't be read -1
- * is returned.
- *
- * @param node Numa node request is for
- * @param pcie_port PCIe port to access
- * @param offset Offset in bytes, Must be a multiple of 8
- *
- * @return Value read or -1 if the read failed
- */
-extern uint64_t bdk_pcie_flash_read(bdk_node_t node, int pcie_port, int offset);
-
-/**
- * Write a value to the PCIe SPI flash. The value should be of the
- * format bdk_pemx_spi_data_t.
- *
- * @param node Numa node request is for
- * @param pcie_port PCIe port to access
- * @param offset Offset to write. Must be a multiple of 8 bytes.
- * @param value Value to write
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pcie_flash_write(bdk_node_t node, int pcie_port, int offset, uint64_t value);
-
-/**
- * Erase the PCIe SPI Flash
- *
- * @param node Numa node request is for
- * @param pcie_port PCIe port to access
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pcie_flash_erase(bdk_node_t node, int pcie_port);
-
-/**
- * Dump the PCIe SPI Flash
- *
- * @param node Numa node request is for
- * @param pcie_port PCIe port to access
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pcie_flash_dump(bdk_node_t node, int pcie_port);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h
index d68a6d297f..3661427140 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pcie.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_PCIE_H__
+#define __CB_BDK_PCIE_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -37,6 +39,9 @@
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
+#include <libbdk-arch/bdk-numa.h>
+#include <libbdk-arch/bdk-require.h>
+
/**
* @file
*
@@ -78,6 +83,15 @@ int bdk_pcie_get_num_ports(bdk_node_t node);
int bdk_pcie_rc_initialize(bdk_node_t node, int pcie_port);
/**
+ * Return PCIe state
+ *
+ * @param pcie_port PCIe port to query
+ *
+ * @return True if port is up and running
+ */
+int bdk_pcie_is_running(bdk_node_t node, int pcie_port);
+
+/**
* Shutdown a PCIe port and put it in reset
*
* @param node Node to use in a Numa setup. Can be an exact ID or a special
@@ -221,16 +235,18 @@ uint64_t bdk_pcie_mem_read64(bdk_node_t node, int pcie_port, uint64_t address);
void bdk_pcie_mem_write64(bdk_node_t node, int pcie_port, uint64_t address, uint64_t data);
/**
- * These are the operations defined that can vary per chip generation
+ * @INTERNAL
+ * Build a PCIe config space request address for a device
+ *
+ * @param pcie_port PCIe port to access
+ * @param bus Sub bus
+ * @param dev Device ID
+ * @param fn Device sub function
+ * @param reg Register to access
+ *
+ * @return 64bit IO address
*/
-typedef struct
-{
- int (*get_num_ports)(bdk_node_t node);
- int (*rc_initialize)(bdk_node_t node, int pcie_port);
- int (*rc_shutdown)(bdk_node_t node, int pcie_port);
- uint64_t (*get_base_address)(bdk_node_t node, int pcie_port, bdk_pcie_mem_t mem_type);
- uint64_t (*get_base_size)(bdk_node_t node, int pcie_port, bdk_pcie_mem_t mem_type);
- uint64_t (*build_config_addr)(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg);
-} __bdk_pcie_ops_t;
+uint64_t pcie_build_config_addr(bdk_node_t node, int pcie_port, int bus, int dev, int fn, int reg);
+#endif /* __CB_BDK_PCIE_H__ */
/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pki.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pki.h
deleted file mode 100644
index 3379d26644..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pki.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the SSO.
- *
- * @addtogroup hal
- * @{
- */
-
-/**
- * One time init of global Packet Input
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pki_global_init(bdk_node_t node);
-
-/**
- * Configure packet input for a specific port. This is called for each
- * port on every interface that is connected to packet input.
- *
- * @param handle Handle for port to config
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pki_port_init(bdk_if_handle_t handle);
-
-/**
- * Enable PKI after all setup is complete
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pki_enable(bdk_node_t node);
-
-/**
- * Query PKI and fill in the receive stats for the supplied interface handle. The
- * interface must use PKI for RX.
- *
- * @param handle Port handle
- *
- * @return
- */
-extern void bdk_pki_fill_rx_stats(bdk_if_handle_t handle);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pko.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pko.h
deleted file mode 100644
index 90cd719b06..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-pko.h
+++ /dev/null
@@ -1,126 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the PKO.
- *
- * @addtogroup hal
- * @{
- */
-
-/* Maximum number of segments which fit flat lmtstore operation.
- 1) LMTST for PKO can be a maximum of 15 64bit words
- 2) PKO descriptors are 2 64bit words each
- 3) Every send requires PKO_SEND_HDR_S for hardware
- So 15 words / 2 = 7 possible descriptors
- 7 - HDR = 6 descriptors left for GATHER */
-#define BDK_PKO_SEG_LIMIT 6
-
-/**
- * Perform global init of PKO
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pko_global_init(bdk_node_t node);
-
-/**
- * Configure PKO for a specific port. This is called for each
- * port on every interface that connects to PKO.
- *
- * @param handle Handle for port to config
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pko_port_init(bdk_if_handle_t handle);
-
-/**
- * Enable PKO after all setup is complete
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pko_enable(bdk_node_t node);
-
-/**
- * Get the current TX queue depth. Note that this operation may be slow
- * and adversly affect packet IO performance.
- *
- * @param handle Port to check
- *
- * @return Depth of the queue in packets
- */
-extern int bdk_pko_get_queue_depth(bdk_if_handle_t handle);
-
-/**
- * Set PKO shapping as a specific queue level
- *
- * @param node Node to shape
- * @param queue Queue to shape
- * @param level Level in PKO
- * @param is_red Non-zero of the rate is for the yellow/red transition. Zero for the
- * green/yellow transition.
- * @param is_packets Non-zero if the rate is packets/sec, otherwise rate is bits/sec
- * @param rate Desired rate. A rate of zero disables shaping
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pko_shape(bdk_node_t node, int queue, int level, int is_red, int is_packets, uint64_t rate);
-
-/**
- * Send a packet
- *
- * @param handle Handle of port to send on
- * @param packet Packet to send
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_pko_transmit(bdk_if_handle_t handle, const bdk_if_packet_t *packet);
-
-/**
- * Query PKO and fill in the receive stats for the supplied
- * interface handle. The interface must use PKO for TX.
- *
- * @param handle Port handle
- *
- * @return
- */
-extern void bdk_pko_fill_tx_stats(bdk_if_handle_t handle);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-power-burn.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-power-burn.h
deleted file mode 100644
index 570ef1073c..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-power-burn.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-typedef enum
-{
- BDK_POWER_BURN_NONE, /* Disable power burn */
- BDK_POWER_BURN_FULL, /* Continuously burn power */
- BDK_POWER_BURN_CYCLE_10MS, /* Cycle: Burn for 10ms, idle for 10ms */
- BDK_POWER_BURN_CYCLE_1S, /* Cycle: Burn for 1s, idle for 1s */
- BDK_POWER_BURN_CYCLE_5S, /* Cycle: Burn for 5s, idle for 5s */
- BDK_POWER_BURN_CYCLE_1M, /* Cycle: Burn for 1m, idle for 1m */
- BDK_POWER_BURN_CYCLE_5M, /* Cycle: Burn for 5m, idle for 5m */
-} bdk_power_burn_type_t;
-
-/**
- * Set the current power burn mode for a node
- *
- * @param node Node to control power burn for
- * @param burn_type Mode of power burn
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_power_burn(bdk_node_t node, bdk_power_burn_type_t burn_type);
-
-/**
- * Set the throttle level percent for an entire chip
- *
- * @param node Node to set
- * @param throttle Percent of Throttle level (0-100)
- */
-extern void bdk_power_throttle(bdk_node_t node, int throttle_percent);
-
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h
index 6cb1364196..1966f5ce85 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-qlm.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_QLM_H__
+#define __CB_BDK_QLM_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -47,6 +49,8 @@
* @addtogroup hal
* @{
*/
+#include <bdk.h>
+#include <libbdk-hal/if/bdk-if.h>
typedef enum
{
@@ -138,37 +142,6 @@ typedef struct
uint32_t data[64][128]; /* Error count at location, saturates as max */
} bdk_qlm_eye_t;
-
-/**
- * How to do the various QLM operations changes greatly
- * between chips. Each chip has its specific operations
- * stored in the structure below. The correct structure
- * is chosen based on the chip we're running on.
- */
-typedef struct
-{
- uint32_t chip_model;
- void (*init)(bdk_node_t node);
- int (*get_num)(bdk_node_t node);
- int (*get_lanes)(bdk_node_t node, int qlm);
- bdk_qlm_modes_t (*get_mode)(bdk_node_t node, int qlm);
- int (*set_mode)(bdk_node_t node, int qlm, bdk_qlm_modes_t mode, int baud_mhz, bdk_qlm_mode_flags_t flags);
- int (*get_gbaud_mhz)(bdk_node_t node, int qlm);
- int (*measure_refclock)(bdk_node_t node, int qlm);
- int (*get_qlm_num)(bdk_node_t node, bdk_if_t iftype, int interface, int index);
- int (*reset)(bdk_node_t node, int qlm);
- int (*enable_prbs)(bdk_node_t node, int qlm, int prbs, bdk_qlm_direction_t dir);
- int (*disable_prbs)(bdk_node_t node, int qlm);
- uint64_t (*get_prbs_errors)(bdk_node_t node, int qlm, int lane, int clear);
- void (*inject_prbs_error)(bdk_node_t node, int qlm, int lane);
- int (*enable_loop)(bdk_node_t node, int qlm, bdk_qlm_loop_t loop);
- int (*auto_config)(bdk_node_t node);
- int (*dip_auto_config)(bdk_node_t node);
- int (*tune_lane_tx)(bdk_node_t node, int qlm, int lane, int tx_swing, int tx_pre, int tx_post, int tx_gain, int tx_vboost);
- int (*rx_equalization)(bdk_node_t node, int qlm, int lane);
- int (*eye_capture)(bdk_node_t node, int qlm, int qlm_lane, bdk_qlm_eye_t *eye);
-} bdk_qlm_ops_t;
-
/**
* Initialize the QLM layer
*/
@@ -194,6 +167,19 @@ extern int bdk_qlm_get_num(bdk_node_t node);
extern int bdk_qlm_get_lanes(bdk_node_t node, int qlm);
/**
+ * Lookup the hardware QLM number for a given interface type and index. This
+ * function will fail with a fatal error if called on invalid interfaces for
+ * a chip. It returns the QLM number for an interface without checking to
+ * see if the QLM is in the correct mode.
+ *
+ * @param iftype Interface type
+ * @param interface Interface index number
+ *
+ * @return QLM number. Dies on a fatal error on failure.
+ */
+int bdk_qlm_get_qlm_num(bdk_node_t node, bdk_if_t iftype, int interface, int index);
+
+/**
* Convert a mode into a configuration variable string value
*
* @param mode Mode to convert
@@ -279,23 +265,6 @@ extern int bdk_qlm_get_gbaud_mhz(bdk_node_t node, int qlm);
extern int bdk_qlm_measure_clock(bdk_node_t node, int qlm);
/**
- * Lookup the hardware QLM number for a given interface type and
- * index. If the associated interface doesn't map to a QLM,
- * returns -1.
- *
- * @param node Node to use in a Numa setup
- * @param iftype Interface type
- * @param interface Interface index number
- * @param index Port on the interface. Most chips use the
- * same mode for all ports, but there are
- * exceptions. For example, BGX2 on CN83XX
- * spans two DLMs.
- *
- * @return QLM number or -1 on failure
- */
-extern int bdk_qlm_get(bdk_node_t node, bdk_if_t iftype, int interface, int index);
-
-/**
* Reset a QLM to its initial state
*
* @param node Node to use in a numa setup
@@ -505,4 +474,5 @@ extern int bdk_qlm_margin_rx_restore(bdk_node_t node, int qlm, int qlm_lane, bdk
extern int bdk_qlm_dip_auto_config(bdk_node_t node);
+#endif /* __CB_BDK_QLM_H__ */
/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h
index d5a7d0a43b..7f4b74ae70 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rng.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_RNG_H__
+#define __CB_BDK_RNG_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -76,4 +78,7 @@ extern uint32_t bdk_rng_get_random32(void);
*/
extern uint64_t bdk_rng_get_random64(void);
+
+int bdk_rng_init(bdk_node_t node);
/** @} */
+#endif /* !__CB_BDK_RNG_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h
index 53b73ca4aa..99927dd5df 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-rvu.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_RVU_H__
+#define __CB_BDK_RVU_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -64,3 +66,4 @@ static inline int bdk_rvu_alloc_msix(bdk_node_t node, int msix_count)
/** @} */
+#endif /* !__CB_BDK_RVU_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h
index 5e34a4f7b9..c9b5487700 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-spinlock.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_SPINLOCK_H__
+#define __CB_BDK_SPINLOCK_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -144,3 +146,4 @@ static inline int bdk_spinlock_trylock(bdk_spinlock_t *lock)
}
/** @} */
+#endif /* !__CB_BDK_SPINLOCK_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sso.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sso.h
deleted file mode 100644
index a04d5ca3cf..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-sso.h
+++ /dev/null
@@ -1,69 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Interface to the SSO.
- *
- * @addtogroup hal
- * @{
- */
-
-/**
- * Initialize the SSO
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_sso_init(bdk_node_t node);
-
-/**
- * Register a bdk-if handle with the SSO code so the SSO can be used to receive
- * traffic from it.
- *
- * @param handle Handle to register
- */
-extern void bdk_sso_register_handle(bdk_if_handle_t handle);
-
-/**
- * Function called during bdk_thread_yield() to process work while we're idle
- */
-extern void bdk_sso_process_work(void);
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-tns.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-tns.h
deleted file mode 100644
index 89264c8604..0000000000
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-tns.h
+++ /dev/null
@@ -1,109 +0,0 @@
-/***********************license start***********************************
-* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
-* reserved.
-*
-*
-* Redistribution and use in source and binary forms, with or without
-* modification, are permitted provided that the following conditions are
-* met:
-*
-* * Redistributions of source code must retain the above copyright
-* notice, this list of conditions and the following disclaimer.
-*
-* * Redistributions in binary form must reproduce the above
-* copyright notice, this list of conditions and the following
-* disclaimer in the documentation and/or other materials provided
-* with the distribution.
-*
-* * Neither the name of Cavium Inc. nor the names of
-* its contributors may be used to endorse or promote products
-* derived from this software without specific prior written
-* permission.
-*
-* This Software, including technical data, may be subject to U.S. export
-* control laws, including the U.S. Export Administration Act and its
-* associated regulations, and may be subject to export or import
-* regulations in other countries.
-*
-* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
-* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
-* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
-* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
-* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
-* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
-* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
-* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
-* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
-* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
-***********************license end**************************************/
-
-/**
- * @file
- *
- * Thunder Network Switch interface.
- *
- * $Revision$
- *
- * @addtogroup hal
- * @{
- */
-
-/**
- * Initialize the TNS block to enable clocks, allow register accesses, and
- * perform some basic initialization in anticipation of future packet
- * processing.
- *
- * TNS at power-up will be in BYPASS mode where packets from the vNIC pipes
- * to the BGX ports will be direct, and this will not change that.
- *
- * This is normally called automatically in bdk-init-main.c.
- *
- * @param node Node to initialize
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_tns_initialize(bdk_node_t node) BDK_WEAK;
-
-/**
- * Disable TNS from processing packets. After this, TNS must be fully
- * initialized. The NIC and BGX blocks must already be stopped before
- * calling this function.
- *
- * Nota Bene: In CN88XX_PASS_1 there is a bug that prevents TNS DataPath
- * from draining packets. So ensure that NIC and BGX have
- * also drained their packet queues.
- *
- * @param node
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_tns_shutdown(bdk_node_t node);
-
-/**
- * Set the TNS 'profile' to passthru. I.e. do the necessary writes
- * to the TNS datapath and TNS sst (Search, SDE, and TxQ) registers
- * to configure the TNS to allow vNIC0..vNIC7 <-> LMAC0..LMAC7 traffic
- * to flow straight through TNS (although the actual enabling of using
- * the TNS is done elsewhere (in traffic-gen.))
- *
- * @param node Node to configure
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_tns_profile_passthru(bdk_node_t node) BDK_WEAK;
-
-/**
- * Set the TNS 'profile' to bgxloopback. I.e. do the necessary writes
- * to the TNS datapath and TNS sst (Search, SDE, and TxQ) registers
- * to configure the TNS to allow any packets received on LMAC0..LMAC7
- * (BGX ports) to be reflected back to the same port after hitting the
- * TNS (although the actual enabling of using the TNS is done elsewhere
- * (in traffic-gen.))
- *
- * @param node Node to configure
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_tns_profile_bgxloopback(bdk_node_t node) BDK_WEAK;
-
-/** @} */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h
index 2840ca5c96..0a9911d9b1 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-twsi.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_TWSI_H__
+#define __CB_BDK_TWSI_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -49,16 +51,7 @@
* @addtogroup hal
* @{
*/
-
-/**
- * Initialize the TWSI blocks. This just sets the clock rate.
- * Many times stuff will work without calling this, but some
- * TWSI devices will fail. This is normally called automatically
- * in bdk-init-main.c.
- *
- * @return Zero on success, negative on failure
- */
-extern int bdk_twsix_initialize(bdk_node_t node) BDK_WEAK;
+#include <libbdk-arch/bdk-numa.h>
/**
* Do a twsi read from a 7 bit device address using an (optional)
@@ -100,3 +93,4 @@ extern int64_t bdk_twsix_read_ia(bdk_node_t node, int twsi_id, uint8_t dev_addr,
extern int bdk_twsix_write_ia(bdk_node_t node, int twsi_id, uint8_t dev_addr, uint16_t internal_addr, int num_bytes, int ia_width_bytes, uint64_t data);
/** @} */
+#endif /* !__CB_BDK_TWSI_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h
index 6e78e5db14..facc01f95d 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-usb.h
@@ -1,3 +1,5 @@
+#ifndef __BDK_HAL_USB_H__
+#define __BDK_HAL_USB_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -82,7 +84,7 @@ typedef enum
*
* @return Zero on success, negative on failure
*/
-extern int bdk_usb_initialize(bdk_node_t node, int usb_port, bdk_usb_clock_t clock_type);
+int bdk_usb_initialize(bdk_node_t node, int usb_port, bdk_usb_clock_t clock_type);
/**
* Put the USB port into a specific testing mode
@@ -93,7 +95,7 @@ extern int bdk_usb_initialize(bdk_node_t node, int usb_port, bdk_usb_clock_t clo
*
* @return Zero on success, negative on failure
*/
-extern int bdk_usb_test_mode(bdk_node_t node, int usb_port, bdk_usb_test_t test_mode);
+int bdk_usb_test_mode(bdk_node_t node, int usb_port, bdk_usb_test_t test_mode);
/**
* Convert a USB test enumeration into a string for display to the user
@@ -104,6 +106,7 @@ extern int bdk_usb_test_mode(bdk_node_t node, int usb_port, bdk_usb_test_t test_
*
* @return String name of test
*/
-extern const char *bdk_usb_get_test_mode_string(bdk_node_t node, int usb_port, bdk_usb_test_t test_mode);
+const char *bdk_usb_get_test_mode_string(bdk_node_t node, int usb_port, bdk_usb_test_t test_mode);
/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h
index c0ed43582e..c83658644c 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-utils.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_UTILS_H__
+#define __CB_BDK_UTILS_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -36,8 +38,8 @@
* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
***********************license end**************************************/
-#include "libbdk-arch/bdk-csrs-lmc.h"
#include "libbdk-arch/bdk-csrs-rst.h"
+#include <string.h>
/**
* @file
@@ -204,3 +206,4 @@ static inline void bdk_zero_memory(void *start, uint64_t length)
}
/** @} */
+#endif /* !__CB_BDK_UTILS_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h
index 8e6ec38209..0aeae94e30 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/bdk-vrm.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_VRM_H__
+#define __CB_BDK_VRM_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -64,3 +66,4 @@ extern int bdk_vrm_initialize(bdk_node_t node);
extern int bdk_vrm_poll(bdk_node_t node);
/** @} */
+#endif /* !__CB_BDK_VRM_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h
index 3ca3f00710..3308859733 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/device/bdk-device.h
@@ -1,3 +1,6 @@
+#ifndef __CB_BDK_DEVICE_H__
+#define __CB_BDK_DEVICE_H__
+
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -250,10 +253,11 @@ extern void bdk_bar_write(const bdk_device_t *device, int bar, int size, uint64_
} else if (bdk_clock_get_count(BDK_CLOCK_TIME) > done) { \
result = -1; \
break; \
- } else \
- bdk_thread_yield(); \
+ } \
} \
} while (0); \
result;})
/** @} */
+
+#endif /* !__CB_BDK_DEVICE_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/if/bdk-if.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/if/bdk-if.h
new file mode 100644
index 0000000000..c3ede0acc8
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/if/bdk-if.h
@@ -0,0 +1,326 @@
+#ifndef __CB_BDK_HAL_IF_BDK_IF_H__
+#define __CB_BDK_HAL_IF_BDK_IF_H__
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+
+/**
+ * @file
+ *
+ * Interface to external interfaces (SGMII, XAUI, etc)
+ *
+ * <hr>$Revision: 45089 $<hr>
+ *
+ * @defgroup packetio Packet Input / Output
+ * @addtogroup packetio
+ * @{
+ */
+
+
+#define BDK_BGX_PKI 0 /* When defined and non zero BGX will use PKI interface for receive */
+#define BDK_BGX_PKO 0 /* When defined and non zero BGX will use PKO interface for transmit */
+
+#define BDK_IF_MAX_GATHER 12 /* CN88XX RX supports 12 at most */
+
+/* PHY address encoding:
+ Bits[31:24]: Node ID, 0xff for node the ethernet device is on
+ Bits[23:16]: Only used for TWSI
+ Bits[15:12]: PHY connection type (0=MDIO, 1=Fixed, 2=TWSI)
+ For MDIO:
+ Bits[31:24]: Node ID, 0xff for node the ethernet device is on
+ Bits[23:16]: 0
+ Bits[15:12]: 0=MDIO
+ Bits[11:8]: MDIO bus number
+ Bits[7:0]: MDIO address
+ For Fixed:
+ Bits[31:24]: 0
+ Bits[23:16]: Zero
+ Bits[15:12]: 1=Fixed
+ Bits[11:0]: 0 = 1Gb, 1 = 100Mb
+ For TWSI:
+ Bits[31:24]: Node ID, 0xff for node the ethernet device is on
+ Bits[23:16]: TWSI internal address width in bytes (0-2)
+ Bits[15:12]: 2=TWSI
+ Bits[11:8]: TWSI bus number
+ Bits[7:0]: TWSI address
+ */
+#define BDK_IF_PHY_TYPE_MASK 0xf000
+#define BDK_IF_PHY_MDIO 0x0000
+#define BDK_IF_PHY_TWSI 0x2000
+#define BDK_IF_PHY_FIXED_1GB 0x1000
+#define BDK_IF_PHY_FIXED_100MB 0x1001
+
+/**
+ * Enumeration of different interfaces.
+ */
+typedef enum
+{
+ BDK_IF_BGX,
+ BDK_IF_PCIE,
+ BDK_IF_FAKE,
+ BDK_IF_LBK,
+ BDK_IF_RGX,
+ BDK_IF_CGX,
+ __BDK_IF_LAST
+} bdk_if_t;
+
+typedef enum
+{
+ BDK_IF_FLAGS_HAS_FCS = 1, /* The device adds and removes an FCS. On thunder we assume the hardware strips FCS */
+ BDK_IF_FLAGS_ENABLED = 2,
+} bdk_if_flags_t;
+
+typedef union
+{
+ uint64_t u64;
+ struct
+ {
+ uint64_t reserved2 : 32;
+ unsigned speed : 24;
+ unsigned lanes : 6;
+ int full_duplex : 1;
+ int up : 1;
+ } s;
+} bdk_if_link_t;
+
+typedef struct
+{
+ uint64_t dropped_octets;
+ uint64_t dropped_packets;
+ uint64_t octets;
+ uint64_t packets;
+ uint64_t errors;
+} __bdk_if_stats_t;
+
+typedef struct
+{
+ __bdk_if_stats_t rx;
+ __bdk_if_stats_t tx;
+} bdk_if_stats_t;
+
+typedef struct __bdk_if_port
+{
+ bdk_if_t iftype : 8;
+ bdk_node_t node : 8;
+ int interface : 8;
+ int index : 8;
+ bdk_if_flags_t flags : 8;
+ int pknd : 8; /* NIC/PKI packet kind */
+ int16_t pki_channel; /* PKI/SSO channel number, or -1 if not used */
+ int16_t pki_dstat; /* PKI DSTAT ID used for receive stats */
+ int16_t pko_queue; /* PKO DQ number, or -1 if not used */
+ int16_t aura; /* FPA aura number */
+ int16_t nic_id; /* NIC ID in use, or -1 if not used. Encoded as NIC_VF * 8 + index */
+ void * receiver; /* This is a bdk_if_packet_receiver_t */
+ void * receiver_arg;
+ bdk_if_stats_t stats;
+ bdk_if_link_t link_info;
+ char name[16];
+ struct __bdk_if_port *next;
+ struct __bdk_if_port *poll_next;
+ char priv[0];
+} __bdk_if_port_t;
+
+typedef __bdk_if_port_t *bdk_if_handle_t;
+
+/**
+ * Format of each gather/segment entry in a packet. This is unrelated to
+ * the underlying hardware format, but is designed to be simple to munge
+ * into a hardware format. Note that only 48 bits are stored for the
+ * address. This address is a physical address not mean for SMMU translation.
+ */
+typedef union
+{
+ uint64_t u;
+ struct
+ {
+#if __BYTE_ORDER == __BIG_ENDIAN
+ uint64_t size : 16;
+ uint64_t address : 48;
+#else
+ uint64_t address : 48;
+ uint64_t size : 16;
+#endif
+ } s;
+} bdk_packet_ptr_t;
+
+typedef enum
+{
+ BDK_IF_TYPE_UNKNOWN, /* Not set */
+ BDK_IF_TYPE_UDP4, /* IPv4 + UDP */
+ BDK_IF_TYPE_TCP4, /* IPv4 + TCP */
+} bdk_if_type_t;
+
+/**
+ * The packet format for the BDK. This structure is designed to be exactly
+ * one cache line to promote alignment and avoid false aliasing. Note that the
+ * packet structure is independent from the packet data and can have a shorter
+ * lifespan. The packet structure is normally on the stack and disappears on
+ * stack unwind. Code requiring its data to stick around needs to copy it, but
+ * not the data in the gather list.
+ */
+typedef struct
+{
+ bdk_if_handle_t if_handle; /* Handle to interface this packet was received on */
+ int length; /* Length of the packet in bytes */
+ int segments; /* Number of segments the packet is spread over */
+ int rx_error; /* Error number when packet was receive or zero for no error */
+ bdk_if_type_t packet_type : 16; /* Type of the packet, so sender doesn't need to walk packet */
+ uint16_t mtu; /* MTU for hardware fragment, such as TSO */
+ uint64_t reserved2; /* Reserved for future use */
+ bdk_packet_ptr_t packet[BDK_IF_MAX_GATHER]; /* List of segements. Each has a physical address and length */
+} bdk_if_packet_t;
+
+typedef enum
+{
+ BDK_IF_LOOPBACK_NONE = 0,
+ BDK_IF_LOOPBACK_INTERNAL = 1,
+ BDK_IF_LOOPBACK_EXTERNAL = 2,
+ BDK_IF_LOOPBACK_INTERNAL_EXTERNAL = 3,
+} bdk_if_loopback_t;
+
+typedef void (*bdk_if_packet_receiver_t)(const bdk_if_packet_t *packet, void *arg);
+
+typedef struct
+{
+ int priv_size;
+ int (*if_num_interfaces)(bdk_node_t node); /* Returns the number of interfaces possible on this chip */
+ int (*if_num_ports)(bdk_node_t node, int interface); /* For given interface, returns the number of ports on it */
+ int (*if_probe)(bdk_if_handle_t handle); /* Probe to see if a port exists */
+ int (*if_init)(bdk_if_handle_t handle); /* One time hardware init */
+ int (*if_enable)(bdk_if_handle_t handle); /* Enable packet IO. must be called after init */
+ int (*if_disable)(bdk_if_handle_t handle); /* Disable packet IO */
+ bdk_if_link_t (*if_link_get)(bdk_if_handle_t handle); /* Get link speed and state */
+ void (*if_link_set)(bdk_if_handle_t handle, bdk_if_link_t link_info); /* Set link speed and state */
+ const bdk_if_stats_t *(*if_get_stats)(bdk_if_handle_t handle); /* Get stats */
+ int (*if_transmit)(bdk_if_handle_t handle, const bdk_if_packet_t *packet); /* TX a packet */
+ int (*if_loopback)(bdk_if_handle_t handle, bdk_if_loopback_t loopback); /* Configure loopback for the port */
+ int (*if_get_queue_depth)(bdk_if_handle_t handle); /* Get the current TX queue depth */
+ uint64_t (*if_get_lane_mask)(bdk_if_handle_t handle); /* Get a mask of the QLM lanes used by this handle */
+} __bdk_if_ops_t;
+
+typedef struct
+{
+ int (*init)(void);
+ uint64_t (*alloc)(int length);
+ void (*free)(uint64_t address, int length);
+} __bdk_if_global_ops_t;
+
+extern int bdk_if_is_configured(void);
+extern int bdk_if_num_interfaces(bdk_node_t node, bdk_if_t iftype);
+extern int bdk_if_num_ports(bdk_node_t node, bdk_if_t iftype, int interface);
+extern bdk_if_handle_t bdk_if_next_port(bdk_if_handle_t handle);
+
+extern int bdk_if_enable(bdk_if_handle_t handle);
+extern int bdk_if_disable(bdk_if_handle_t handle);
+extern int bdk_if_loopback(bdk_if_handle_t handle, bdk_if_loopback_t loopback);
+
+extern const char *bdk_if_name(bdk_if_handle_t handle);
+extern bdk_if_link_t bdk_if_link_get(bdk_if_handle_t handle);
+extern bdk_if_link_t bdk_if_link_autoconf(bdk_if_handle_t handle);
+extern const bdk_if_stats_t *bdk_if_get_stats(bdk_if_handle_t handle);
+extern bdk_if_link_t __bdk_if_phy_get(bdk_node_t dev_node, int phy_addr);
+extern int bdk_if_get_queue_depth(bdk_if_handle_t handle);
+extern int bdk_if_link_wait_all(uint64_t timeout_us);
+extern int bdk_if_phy_setup(bdk_node_t node);
+extern int bdk_if_phy_vetesse_setup(bdk_node_t node, int qlm, int mdio_bus, int phy_addr);
+extern int bdk_if_phy_marvell_setup(bdk_node_t node, int qlm, int mdio_bus, int phy_addr);
+extern int bdk_if_phy_vsc8514_setup(bdk_node_t node, int qlm, int mdio_bus, int phy_addr);
+extern int bdk_if_transmit(bdk_if_handle_t handle, bdk_if_packet_t *packet);
+extern void bdk_if_register_for_packets(bdk_if_handle_t handle, bdk_if_packet_receiver_t receiver, void *arg);
+extern int bdk_if_alloc(bdk_if_packet_t *packet, int length);
+extern void bdk_if_free(bdk_if_packet_t *packet);
+extern void bdk_if_packet_read(const bdk_if_packet_t *packet, int location, int length, void *data);
+extern void bdk_if_packet_write(bdk_if_packet_t *packet, int location, int length, const void *data);
+
+extern uint64_t bdk_update_stat_with_overflow(uint64_t new_value, uint64_t old_value, int bit_size);
+extern void __bdk_if_phy_xs_init(bdk_node_t dev_node, int phy_addr);
+
+/**
+ * Get interface type. Use this function instead of accessing the handle
+ * directly. The handle is considered private and may change.
+ *
+ * @param handle Handle of port to get info for
+ *
+ * @return Interface type
+ */
+static inline bdk_if_t bdk_if_get_type(bdk_if_handle_t handle)
+{
+ return handle->iftype;
+}
+
+/**
+ * Called by each interface driver to process a received packet. After calling
+ * this function, it is the responsibility of each driver to free any resources
+ * used by the packet, probably by calling bdk_if_free().
+ *
+ * @param packet Packet that was received
+ */
+static inline void bdk_if_dispatch_packet(const bdk_if_packet_t *packet)
+{
+ void *receiver_arg = packet->if_handle->receiver_arg;
+ bdk_if_packet_receiver_t receiver = packet->if_handle->receiver;
+ if (receiver)
+ receiver(packet, receiver_arg);
+}
+
+/**
+ * Get the base QLM used by this handle. For network interfaces that uses QLMs,
+ * return the QLM number of lane 0. Note that some network interfaces span multiple
+ * QLM/DLM. This will return the lowest QLM/DLM number.
+ *
+ * @param handle Handle to query
+ *
+ * @return QLM/DLM number, or -1 if handle doesn't use SERDES
+ */
+extern int bdk_if_get_qlm(bdk_if_handle_t handle);
+
+/**
+ * Get a mask of the QLM/DLM lanes used by this handle. A bit is set for each lane
+ * used by the interface. Some ports span multiple QLM/DLM. In this case the bits
+ * set will be wider than the QLM/DLM, signalling that the next QLM/DLM is needed
+ * too.
+ *
+ * @param handle Handle to query
+ *
+ * @return Lane mask, or zero if no SERDES lanes are used
+ */
+extern uint64_t bdk_if_get_lane_mask(bdk_if_handle_t handle);
+
+/** @} */
+#endif
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/qlm/bdk-qlm-common.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/qlm/bdk-qlm-common.h
new file mode 100644
index 0000000000..9bdf1dd40a
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/qlm/bdk-qlm-common.h
@@ -0,0 +1,326 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#ifndef __BDK_QLM_COMMON_H__
+#define __BDK_QLM_COMMON_H__
+
+/* Common QLM code for Thunder based processors. The following chips
+ use the same style SERDES, using the code defined in this header.
+ CN88XX
+ CN83XX
+*/
+
+/* These constants represent the possible QLM reference clock speeds in Hz */
+#define REF_100MHZ 100000000
+#define REF_125MHZ 125000000
+#define REF_156MHZ 156250000
+
+/**
+ * Figure out which lane mode to use for a given reference clock and GBaud
+ *
+ * @param mode_name String name for error messages
+ * @param qlm QlM being configured
+ * @param ref_clk Reference clock in hertz
+ * @param baud_mhz Baud rate in Mhz
+ *
+ * @return Lane mode or -1 on failure
+ */
+extern int __bdk_qlm_get_lane_mode_for_speed_and_ref_clk(const char *mode_name, int qlm, int ref_clk, int baud_mhz);
+
+/**
+ * Setup the PEM to either driver or receive reset from PRST based on RC or EP
+ *
+ * @param node Node to use in a Numa setup
+ * @param pem Which PEM to setuo
+ * @param is_endpoint
+ * Non zero if PEM is a EP
+ */
+extern void __bdk_qlm_setup_pem_reset(bdk_node_t node, int pem, int is_endpoint);
+
+/**
+ * Measure the reference clock of a QLM
+ *
+ * @param qlm QLM to measure
+ *
+ * @return Clock rate in Hz
+ */
+extern int __bdk_qlm_measure_refclock(bdk_node_t node, int qlm);
+
+/**
+ * Put a QLM into hardware reset
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_qlm_reset(bdk_node_t node, int qlm);
+
+/**
+ * Enable PRBS on a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ * @param prbs PRBS mode (31, etc)
+ * @param dir Directions to enable. This is so you can enable TX and later
+ * enable RX after TX has run for a time
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_qlm_enable_prbs(bdk_node_t node, int qlm, int prbs, bdk_qlm_direction_t dir);
+
+/**
+ * Disable PRBS on a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_qlm_disable_prbs(bdk_node_t node, int qlm);
+
+/**
+ * Return the number of PRBS errors since PRBS started running
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param lane Which lane
+ * @param clear Clear counter after return the current value
+ *
+ * @return Number of errors
+ */
+extern uint64_t __bdk_qlm_get_prbs_errors(bdk_node_t node, int qlm, int lane, int clear);
+
+/**
+ * Inject an error into PRBS
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param lane Which lane
+ */
+extern void __bdk_qlm_inject_prbs_error(bdk_node_t node, int qlm, int lane);
+
+/**
+ * Enable shallow loopback on a QLM
+ *
+ * @param node Node to use in a numa setup
+ * @param qlm QLM to use
+ * @param loop Type of loopback. Not all QLMs support all modes
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_qlm_enable_loop(bdk_node_t node, int qlm, bdk_qlm_loop_t loop);
+
+/**
+ * Initialize the QLM mode table
+ *
+ * @param node Node to initialize
+ * @param qlm Which QLM
+ * @param ref_clk Reference clock of the QLM in Hz
+ */
+extern void __bdk_qlm_init_mode_table(bdk_node_t node, int qlm, int ref_clk);
+
+/**
+ * Given a valid PEM number, return its speed in Gbaud
+ *
+ * @param node Node to use in numa setup
+ * @param pem PEM to get speed of
+ *
+ * @return Speed in Gbaud. Zero if disabled
+ */
+extern int __bdk_qlm_get_gbaud_mhz_pem(bdk_node_t node, int pem);
+
+/**
+ * Get the speed of a QLM using its LMODE. This can't be used on PCIe QLMs.
+ *
+ * @param node Node to use in numa setup
+ * @param qlm Which QLM
+ *
+ * @return QLM speed on Gbaud
+ */
+extern int __bdk_qlm_get_gbaud_mhz_lmode(bdk_node_t node, int qlm);
+
+/**
+ * Converts a measured reference clock to a likely ideal value. Rounds
+ * clock speed to the nearest REF_*Mhz define.
+ *
+ * @param node Node to use in numa setup
+ * @param qlm Which QLM
+ * @param measured_hz
+ * Measured value
+ *
+ * @return Value exactly matching a define
+ */
+extern int __bdk_qlm_round_refclock(bdk_node_t node, int qlm, int measured_hz);
+
+/**
+ * For Cavium EVB and EBB board, query the MCU to determine the QLM setup. Applying
+ * any configuration found.
+ *
+ * @param node Node to configure
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int bdk_qlm_mcu_auto_config(bdk_node_t node);
+
+/**
+ * Display the current settings of a QLM lane
+ *
+ * @param node Node the QLM is on
+ * @param qlm QLM to display
+ * @param qlm_lane Lane to use
+ * @param show_tx Display TX parameters
+ * @param show_rx Display RX parameters
+ */
+extern void bdk_qlm_display_settings(bdk_node_t node, int qlm, int qlm_lane, bool show_tx, bool show_rx);
+
+/**
+ * Perform RX equalization on a QLM
+ *
+ * @param node Node the QLM is on
+ * @param qlm QLM to perform RX equalization on
+ * @param qlm_lane Lane to use, or -1 for all lanes
+ *
+ * @return Zero on success, negative if any lane failed RX equalization
+ */
+extern int __bdk_qlm_rx_equalization(bdk_node_t node, int qlm, int qlm_lane);
+
+/**
+ * Configure the TX tuning parameters for a QLM lane. The tuning parameters can
+ * be specified as -1 to maintain their current value
+ *
+ * @param node Node to configure
+ * @param qlm QLM to configure
+ * @param lane Lane to configure
+ * @param tx_swing Transmit swing (coef 0) Range 0-31
+ * @param tx_pre Pre cursor emphasis (Coef -1). Range 0-15
+ * @param tx_post Post cursor emphasis (Coef +1). Range 0-31
+ * @param tx_gain Transmit gain. Range 0-7
+ * @param tx_vboost Transmit voltage boost. Range 0-1
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_qlm_tune_lane_tx(bdk_node_t node, int qlm, int lane, int tx_swing, int tx_pre, int tx_post, int tx_gain, int tx_vboost);
+
+/**
+ * Some QLM speeds need to override the default tuning parameters
+ *
+ * @param node Node to use in a Numa setup
+ * @param qlm QLM to configure
+ * @param mode Desired mode
+ * @param baud_mhz Desired speed
+ */
+extern void __bdk_qlm_tune(bdk_node_t node, int qlm, bdk_qlm_modes_t mode, int baud_mhz);
+
+/**
+ * Capture an eye diagram for the given QLM lane. The output data is written
+ * to "eye".
+ *
+ * @param node Node to use in numa setup
+ * @param qlm QLM to use
+ * @param qlm_lane Which lane
+ * @param eye Output eye data
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_qlm_eye_capture_cn8xxx(bdk_node_t node, int qlm, int lane, bdk_qlm_eye_t *eye_data) BDK_WEAK;
+
+/**
+ * Disables DFE for the specified QLM lane(s).
+ * This function should only be called for low-loss channels.
+ *
+ * @param node Node to configure
+ * @param qlm QLM to configure
+ * @param lane Lane to configure, or -1 for all lanes
+ */
+extern void __bdk_qlm_dfe_disable(int node, int qlm, int lane);
+
+/**
+ * Check if a specific lane is using KR training. This is used by low level GSER
+ * code to remember which QLMs and lanes need to support KR training for BGX. The
+ * hardware doesn't have a bit set aside to record this, so we repurpose the
+ * register GSERX_SCRATCH.
+ *
+ * @param node Node to check
+ * @param qlm QLM to check
+ * @param lane Lane to check
+ *
+ * @return True if this lane uses KR with BGX, false otherwise
+ */
+extern bool __bdk_qlm_is_lane_kr(bdk_node_t node, int qlm, int lane);
+
+/**
+ * Set if a specific lane is using KR training. This is used by low level GSER
+ * code to remember which QLMs and lanes need to support KR training for BGX. The
+ * hardware doesn't have a bit set aside to record this, so we repurpose the
+ * register GSERX_SCRATCH.
+ *
+ * @param node Node to set
+ * @param qlm QLM to set
+ * @param lane Lane to set
+ * @param is_kr KR (true) or XFI/XLAUI (false)
+ */
+extern void __bdk_qlm_set_lane_kr(bdk_node_t node, int qlm, int lane, bool is_kr);
+
+/**
+ * Initialize a DLM/QLM for use with SATA controllers
+ *
+ * @param node Node to intialize
+ * @param qlm Which DLM/QLM to init
+ * @param baud_mhz QLM speed in Gbaud
+ * @param sata_first First SATA controller connected to this DLM/QLM
+ * @param sata_last Last SATA controller connected to this DLM/QLM (inclusive)
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_qlm_set_sata_cn8xxx(bdk_node_t node, int qlm, int baud_mhz, int sata_first, int sata_last);
+
+/**
+ * Initialize a DLM/QLM for use with SATA controllers
+ *
+ * @param node Node to intialize
+ * @param qlm Which DLM/QLM to init
+ * @param baud_mhz QLM speed in Gbaud
+ * @param sata_first First SATA controller connected to this DLM/QLM
+ * @param sata_last Last SATA controller connected to this DLM/QLM (inclusive)
+ *
+ * @return Zero on success, negative on failure
+ */
+extern int __bdk_qlm_set_sata_cn9xxx(bdk_node_t node, int qlm, int baud_mhz, int sata_first, int sata_last);
+
+#endif /* __BDK_QLM_COMMON_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.h b/src/vendorcode/cavium/include/bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.h
new file mode 100644
index 0000000000..8c1b47ab64
--- /dev/null
+++ b/src/vendorcode/cavium/include/bdk/libbdk-hal/qlm/bdk-qlm-errata-cn8xxx.h
@@ -0,0 +1,152 @@
+/***********************license start***********************************
+* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
+* reserved.
+*
+*
+* Redistribution and use in source and binary forms, with or without
+* modification, are permitted provided that the following conditions are
+* met:
+*
+* * Redistributions of source code must retain the above copyright
+* notice, this list of conditions and the following disclaimer.
+*
+* * Redistributions in binary form must reproduce the above
+* copyright notice, this list of conditions and the following
+* disclaimer in the documentation and/or other materials provided
+* with the distribution.
+*
+* * Neither the name of Cavium Inc. nor the names of
+* its contributors may be used to endorse or promote products
+* derived from this software without specific prior written
+* permission.
+*
+* This Software, including technical data, may be subject to U.S. export
+* control laws, including the U.S. Export Administration Act and its
+* associated regulations, and may be subject to export or import
+* regulations in other countries.
+*
+* TO THE MAXIMUM EXTENT PERMITTED BY LAW, THE SOFTWARE IS PROVIDED "AS IS"
+* AND WITH ALL FAULTS AND CAVIUM INC. MAKES NO PROMISES, REPRESENTATIONS OR
+* WARRANTIES, EITHER EXPRESS, IMPLIED, STATUTORY, OR OTHERWISE, WITH RESPECT
+* TO THE SOFTWARE, INCLUDING ITS CONDITION, ITS CONFORMITY TO ANY
+* REPRESENTATION OR DESCRIPTION, OR THE EXISTENCE OF ANY LATENT OR PATENT
+* DEFECTS, AND CAVIUM SPECIFICALLY DISCLAIMS ALL IMPLIED (IF ANY) WARRANTIES
+* OF TITLE, MERCHANTABILITY, NONINFRINGEMENT, FITNESS FOR A PARTICULAR
+* PURPOSE, LACK OF VIRUSES, ACCURACY OR COMPLETENESS, QUIET ENJOYMENT,
+* QUIET POSSESSION OR CORRESPONDENCE TO DESCRIPTION. THE ENTIRE RISK
+* ARISING OUT OF USE OR PERFORMANCE OF THE SOFTWARE LIES WITH YOU.
+***********************license end**************************************/
+#ifndef __BDK_QLM_ERRATA_H__
+#define __BDK_QLM_ERRATA_H__
+
+/**
+ * Errata GSER-25992 - RX EQ Default Settings Update<p>
+ * For all GSER and all lanes when not PCIe EP:
+ * set GSER()_LANE()_RX_CFG_4[CFG_RX_ERRDET_CTRL<13:8>] = 13 (decimal)
+ * set GSER()_LANE()_RX_CTLE_CTRL[PCS_SDS_RX_CTLE_BIAS_CTRL] = 3
+ * Applied when SERDES are configured for 8G and 10G.<p>
+ * Applies to:
+ * CN88XX pass 1.x
+ * Fixed in hardware:
+ * CN88XX pass 2.x
+ * CN81XX
+ * CN83XX
+ *
+ * @param node Node to apply errata fix for
+ * @param qlm QLM to apply errata fix to
+ * @param baud_mhz QLM speed in Mhz
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_25992(bdk_node_t node, int qlm, int baud_mhz);
+
+/**
+ * (GSER-26150) 10G PHY PLL Temperature Failure
+ *
+ * 10 Gb temperature excursions can cause lock failure. Change
+ * the calibration point of the VCO at start up to shift some
+ * available range of the VCO from -deltaT direction to the
+ * +deltaT ramp direction allowing a greater range of VCO
+ * temperatures before experiencing the failure.
+ *
+ * Applies to:
+ * CN88XX pass 1.x
+ * Fix in hardware:
+ * CN88XX pass 2.x
+ * CN81XX
+ * CN83XX
+ *
+ * Only applies to QLMs running 8G and 10G
+ *
+ * @param node Node to apply errata to
+ * @param qlm QLM to apply errata fix to
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_26150(bdk_node_t node, int qlm, int baud_mhz);
+
+/**
+ * Errata (GSER-26636) 10G-KR/40G-KR - Inverted Tx Coefficient Direction Change
+ * Applied to all 10G standards (required for KR) but also applied to other
+ * standards in case software training is used.
+ * Applies to:
+ * CN88XX pass 1.x
+ * Fixed in hardware:
+ * CN88XX pass 2.x
+ * CN81XX
+ * CN83XX
+ *
+ * @param node Node to apply errata fix for
+ * @param qlm QLM to apply errata fix to
+ * @param baud_mhz QLM speed in Mhz
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_26636(bdk_node_t node, int qlm, int baud_mhz);
+
+/**
+ * (GSER-27140) SERDES has temperature drift sensitivity in the RX EQ<p>
+ * SERDES temperature drift sensitivity in receiver. Issues have
+ * been found with the Bit Error Rate (BER) reliability of
+ * 10GBASE-KR links over the commercial temperature range (0 to 100C),
+ * especially when subjected to rapid thermal ramp stress testing.
+ * (See HRM for corresponding case temperature requirements for each speed grade.)<p>
+ * Applies to:
+ * CN88XX pass 1.x
+ * CN88XX pass 2.x
+ * CN83XX pass 1.x
+ * CN81XX pass 1.x
+ * Fixed in hardware:
+ * TBD<p>
+ * Only applies to QLMs running 10G
+ *
+ * @param node Note to apply errata fix to
+ * @param qlm QLM to apply errata fix to
+ * @param baud_mhz QLM baud rate in Mhz
+ * @param channel_loss
+ * Insertion loss at Nyquist rate (e.g. 5.125Ghz for XFI/XLAUI) in dB
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_27140(bdk_node_t node, int qlm, int baud_mhz, int channel_loss);
+
+/**
+ * Errata GSER-27882 -GSER 10GBASE-KR Transmit Equalizer
+ * Training may not update PHY Tx Taps. This function is not static
+ * so we can share it with BGX KR
+ * Applies to:
+ * CN88XX pass 1.x, 2.0, 2.1
+ * Fixed in hardware:
+ * CN88XX pass 2.2 and higher
+ * CN81XX
+ * CN83XX
+ *
+ * @param node Node to apply errata fix for
+ * @param qlm QLM to apply errata fix to
+ * @param lane
+ *
+ * @return Zero on success, negative on failure
+ */
+int __bdk_qlm_errata_gser_27882(bdk_node_t node, int qlm, int lane);
+
+#endif /* __BDK_QLM_ERRATA_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h b/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h
index 834f8970f1..c3563eabd3 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-init.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_INIT_H__
+#define __CB_BDK_INIT_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -71,98 +73,6 @@ extern int bdk_init_cores(bdk_node_t node, uint64_t coremask);
extern int bdk_reset_cores(bdk_node_t node, uint64_t coremask);
/**
- * Call this function to take secondary nodes and cores out of
- * reset and have them start running threads
- *
- * @param skip_cores If non-zero, cores are not started. Only the nodes are setup
- * @param ccpi_sw_gbaud
- * If CCPI is in software mode, this is the speed the CCPI QLMs will be configured
- * for
- *
- * @return Zero on success, negative on failure.
- */
-extern int bdk_init_nodes(int skip_cores, int ccpi_sw_gbaud);
-
-/**
- * Called very early in during init of both the master and slave. It performs one
- * time init of CCPI QLM and link parameters. It must only be called once per
- * boot.
- *
- * @param is_master Non-zero if the caller is the master node
- */
-extern void __bdk_init_ccpi_early(int is_master);
-
-/**
- * Brings the CCPI lanes and links into an operational state without perofrming
- * node discovery and enumeration. After this function succeeds, CCPI lanes and
- * links are ready for traffic, but node routing has not been setup.
- *
- * Note this function runs on the slave node with the BDK code not at its link
- * address. Many normal BDK functions do not work properly. Be careful.
- *
- * @param is_master Non-zero when run on the master node. Zero when run on the slave
- * @param gbaud Baud rate to run links at. This is only used if the QLMs are in software init
- * mode. If they are strapped for hardware init, the strapping speed is used.
- * @param ccpi_trace Non-zero to enable CCPI tracing. Note that tracing doesn't use the standard
- * bdk-trace functions. This code runs on the secondary node before we are
- * multi-node, and the C library doesn't work right.
- *
- * @return Zero on success, negative on failure. Zero means CCPI lanes and links are
- * functional.
- */
-extern int __bdk_init_ccpi_connection(int is_master, uint64_t gbaud, int ccpi_trace);
-
-/**
- * Brings the CCPI lanes and links into an operational state without enabling
- * multi-node operation. Calling this function when the CCPI links are already
- * up does nothing. This function must return zero before you can go multi-node
- * by calling bdk_init_ccpi_multinode().
- *
- * @param gbaud Baud rate to run links at. This is only used if the QLMs are in software init
- * mode. If they are strapped for hardware init, the strapping speed is used.
- *
- * @return Zero on success, negative on failure. Zero means all CCPI links are functional.
- */
-extern int __bdk_init_ccpi_links(uint64_t gbaud) BDK_WEAK;
-
-/**
- * Once CCPI links are operational, this function transitions the system to a
- * multi-node setup. Note that this function only performs the low level CCPI
- * details, not BDK software setup on the other nodes. Call bdk_init_nodes()
- * for high level access to multi-node.
- *
- * @return Zero on success, negative on failure
- */
-extern int __bdk_init_ccpi_multinode(void) BDK_WEAK;
-
-/**
- * This function is the first function run on all cores once the
- * threading system takes over.
- *
- * @param arg
- * @param arg1
- */
-extern void __bdk_init_main(int arg, void *arg1);
-
-/**
- * Perform one time initialization for a node. Called for each
- * node from the master node.
- */
-extern void __bdk_init_node(bdk_node_t node);
-
-/**
- * Set the baud rate on a UART
- *
- * @param node Node to use in a Numa setup. Can be an exact ID or a special
- * value.
- * @param uart uart to set
- * @param baudrate Baud rate (9600, 19200, 115200, etc)
- * @param use_flow_control
- * Non zero if hardware flow control should be enabled
- */
-extern void bdk_set_baudrate(bdk_node_t node, int uart, int baudrate, int use_flow_control);
-
-/**
* Get the coremask of the cores actively running the BDK. Doesn't count cores
* that aren't booted.
*
@@ -170,7 +80,7 @@ extern void bdk_set_baudrate(bdk_node_t node, int uart, int baudrate, int use_fl
*
* @return 64bit bitmask
*/
-extern uint64_t bdk_get_running_coremask(bdk_node_t node);
+uint64_t bdk_get_running_coremask(bdk_node_t node);
/**
* Return the number of cores actively running in the BDK for the given node
@@ -179,16 +89,8 @@ extern uint64_t bdk_get_running_coremask(bdk_node_t node);
*
* @return Number of cores running. Doesn't count cores that aren't booted
*/
-extern int bdk_get_num_running_cores(bdk_node_t node);
-
-#ifndef BDK_SHOW_BOOT_BANNERS
-#define BDK_SHOW_BOOT_BANNERS 1
-#endif
-
-#define BDK_UART_BAUDRATE 115200
-//#define BDK_UART_BAUDRATE 921600
-
-extern uint64_t __bdk_init_reg_x0; /* The contents of X0 when this image started */
-extern uint64_t __bdk_init_reg_x1; /* The contents of X1 when this image started */
+int bdk_get_num_running_cores(bdk_node_t node);
/** @} */
+
+#endif /* !__CB_BDK_INIT_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h b/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h
index ef62dd7fe3..863053ebb0 100644
--- a/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h
+++ b/src/vendorcode/cavium/include/bdk/libbdk-os/bdk-thread.h
@@ -1,3 +1,5 @@
+#ifndef __CB_BDK_THREAD_H__
+#define __CB_BDK_THREAD_H__
/***********************license start***********************************
* Copyright (c) 2003-2017 Cavium Inc. (support@cavium.com). All rights
* reserved.
@@ -120,3 +122,4 @@ static inline void *bdk_thread_get_id(void)
}
/** @} */
+#endif /* !__CB_BDK_THREAD_H__ */
diff --git a/src/vendorcode/cavium/include/bdk/libdram/libdram-config.h b/src/vendorcode/cavium/include/bdk/libdram/libdram-config.h
index 9415d09f4e..aa5c256d58 100644
--- a/src/vendorcode/cavium/include/bdk/libdram/libdram-config.h
+++ b/src/vendorcode/cavium/include/bdk/libdram/libdram-config.h
@@ -39,6 +39,8 @@
#ifndef __LIBDRAM_CONFIG_H__
#define __LIBDRAM_CONFIG_H__
+#include <libbdk-arch/bdk-csrs-lmc.h>
+
#define DDR_CFG_T_MAX_DIMMS 2 /* ThunderX supports a max of two DIMMs per LMC */
/* Structure that provides DIMM information, either in the form of an SPD TWSI